author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /js/src/wasm
parent    Initial commit. (diff)
Adding upstream version 115.7.0esr.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/wasm')
-rw-r--r--  js/src/wasm/AsmJS.cpp  7383
-rw-r--r--  js/src/wasm/AsmJS.h  116
-rw-r--r--  js/src/wasm/GenerateIntrinsics.py  86
-rw-r--r--  js/src/wasm/WasmBCClass-inl.h  58
-rw-r--r--  js/src/wasm/WasmBCClass.h  1745
-rw-r--r--  js/src/wasm/WasmBCCodegen-inl.h  524
-rw-r--r--  js/src/wasm/WasmBCDefs.h  201
-rw-r--r--  js/src/wasm/WasmBCFrame.cpp  544
-rw-r--r--  js/src/wasm/WasmBCFrame.h  1397
-rw-r--r--  js/src/wasm/WasmBCMemory.cpp  2799
-rw-r--r--  js/src/wasm/WasmBCRegDefs-inl.h  180
-rw-r--r--  js/src/wasm/WasmBCRegDefs.h  852
-rw-r--r--  js/src/wasm/WasmBCRegMgmt-inl.h  486
-rw-r--r--  js/src/wasm/WasmBCStk.h  345
-rw-r--r--  js/src/wasm/WasmBCStkMgmt-inl.h  1320
-rw-r--r--  js/src/wasm/WasmBaselineCompile.cpp  11009
-rw-r--r--  js/src/wasm/WasmBaselineCompile.h  103
-rw-r--r--  js/src/wasm/WasmBinary.cpp  335
-rw-r--r--  js/src/wasm/WasmBinary.h  890
-rw-r--r--  js/src/wasm/WasmBuiltins.cpp  1980
-rw-r--r--  js/src/wasm/WasmBuiltins.h  324
-rw-r--r--  js/src/wasm/WasmCode.cpp  1253
-rw-r--r--  js/src/wasm/WasmCode.h  874
-rw-r--r--  js/src/wasm/WasmCodegenConstants.h  76
-rw-r--r--  js/src/wasm/WasmCodegenTypes.cpp  240
-rw-r--r--  js/src/wasm/WasmCodegenTypes.h  765
-rw-r--r--  js/src/wasm/WasmCompile.cpp  919
-rw-r--r--  js/src/wasm/WasmCompile.h  99
-rw-r--r--  js/src/wasm/WasmCompileArgs.h  238
-rw-r--r--  js/src/wasm/WasmConstants.h  1144
-rw-r--r--  js/src/wasm/WasmContext.h  41
-rw-r--r--  js/src/wasm/WasmDebug.cpp  528
-rw-r--r--  js/src/wasm/WasmDebug.h  187
-rw-r--r--  js/src/wasm/WasmDebugFrame.cpp  176
-rw-r--r--  js/src/wasm/WasmDebugFrame.h  217
-rw-r--r--  js/src/wasm/WasmException.h  31
-rw-r--r--  js/src/wasm/WasmExprType.h  330
-rw-r--r--  js/src/wasm/WasmFrame.h  409
-rw-r--r--  js/src/wasm/WasmFrameIter.cpp  1764
-rw-r--r--  js/src/wasm/WasmFrameIter.h  277
-rw-r--r--  js/src/wasm/WasmGC.cpp  314
-rw-r--r--  js/src/wasm/WasmGC.h  495
-rw-r--r--  js/src/wasm/WasmGcObject.cpp  872
-rw-r--r--  js/src/wasm/WasmGcObject.h  396
-rw-r--r--  js/src/wasm/WasmGenerator.cpp  1279
-rw-r--r--  js/src/wasm/WasmGenerator.h  264
-rw-r--r--  js/src/wasm/WasmInitExpr.cpp  664
-rw-r--r--  js/src/wasm/WasmInitExpr.h  101
-rw-r--r--  js/src/wasm/WasmInstance-inl.h  32
-rw-r--r--  js/src/wasm/WasmInstance.cpp  2759
-rw-r--r--  js/src/wasm/WasmInstance.h  512
-rw-r--r--  js/src/wasm/WasmInstanceData.h  137
-rw-r--r--  js/src/wasm/WasmIntrinsic.cpp  239
-rw-r--r--  js/src/wasm/WasmIntrinsic.h  60
-rw-r--r--  js/src/wasm/WasmIntrinsic.yaml  201
-rw-r--r--  js/src/wasm/WasmIonCompile.cpp  8691
-rw-r--r--  js/src/wasm/WasmIonCompile.h  41
-rw-r--r--  js/src/wasm/WasmJS.cpp  5524
-rw-r--r--  js/src/wasm/WasmJS.h  580
-rw-r--r--  js/src/wasm/WasmLog.cpp  81
-rw-r--r--  js/src/wasm/WasmLog.h  51
-rw-r--r--  js/src/wasm/WasmMemory.cpp  385
-rw-r--r--  js/src/wasm/WasmMemory.h  226
-rw-r--r--  js/src/wasm/WasmModule.cpp  1134
-rw-r--r--  js/src/wasm/WasmModule.h  225
-rw-r--r--  js/src/wasm/WasmModuleTypes.cpp  171
-rw-r--r--  js/src/wasm/WasmModuleTypes.h  632
-rw-r--r--  js/src/wasm/WasmOpIter.cpp  863
-rw-r--r--  js/src/wasm/WasmOpIter.h  4239
-rw-r--r--  js/src/wasm/WasmProcess.cpp  438
-rw-r--r--  js/src/wasm/WasmProcess.h  76
-rw-r--r--  js/src/wasm/WasmRealm.cpp  150
-rw-r--r--  js/src/wasm/WasmRealm.h  79
-rw-r--r--  js/src/wasm/WasmSerialize.cpp  1230
-rw-r--r--  js/src/wasm/WasmSerialize.h  296
-rw-r--r--  js/src/wasm/WasmShareable.h  80
-rw-r--r--  js/src/wasm/WasmSignalHandlers.cpp  1084
-rw-r--r--  js/src/wasm/WasmSignalHandlers.h  63
-rw-r--r--  js/src/wasm/WasmStubs.cpp  3117
-rw-r--r--  js/src/wasm/WasmStubs.h  370
-rw-r--r--  js/src/wasm/WasmTable.cpp  473
-rw-r--r--  js/src/wasm/WasmTable.h  139
-rw-r--r--  js/src/wasm/WasmTypeDecls.h  99
-rw-r--r--  js/src/wasm/WasmTypeDef.cpp  550
-rw-r--r--  js/src/wasm/WasmTypeDef.h  1462
-rw-r--r--  js/src/wasm/WasmUtility.h  65
-rw-r--r--  js/src/wasm/WasmValType.cpp  398
-rw-r--r--  js/src/wasm/WasmValType.h  890
-rw-r--r--  js/src/wasm/WasmValidate.cpp  3223
-rw-r--r--  js/src/wasm/WasmValidate.h  308
-rw-r--r--  js/src/wasm/WasmValue.cpp  927
-rw-r--r--  js/src/wasm/WasmValue.h  652
-rw-r--r--  js/src/wasm/moz.build  66
93 files changed, 89648 insertions, 0 deletions
diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
new file mode 100644
index 0000000000..c13ceeaac3
--- /dev/null
+++ b/js/src/wasm/AsmJS.cpp
@@ -0,0 +1,7383 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/AsmJS.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Compression.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Sprintf.h" // SprintfLiteral
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+#include "mozilla/Variant.h"
+
+#include <algorithm>
+#include <new>
+
+#include "jsmath.h"
+
+#include "frontend/BytecodeCompiler.h" // CompileStandaloneFunction
+#include "frontend/FrontendContext.h" // js::FrontendContext
+#include "frontend/FunctionSyntaxKind.h" // FunctionSyntaxKind
+#include "frontend/ParseNode.h"
+#include "frontend/Parser.h"
+#include "frontend/ParserAtom.h" // ParserAtomsTable, TaggedParserAtomIndex
+#include "frontend/SharedContext.h" // TopLevelFunction
+#include "frontend/TaggedParserAtomIndexHasher.h" // TaggedParserAtomIndexHasher
+#include "gc/GC.h"
+#include "gc/Policy.h"
+#include "jit/InlinableNatives.h"
+#include "js/BuildId.h" // JS::BuildIdCharVector
+#include "js/experimental/JitInfo.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/MemoryMetrics.h"
+#include "js/Printf.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/SourceText.h"
+#include "js/StableStringChars.h"
+#include "js/Wrapper.h"
+#include "util/DifferentialTesting.h"
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/ErrorReporting.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GeneratorAndAsyncKind.h" // js::GeneratorKind, js::FunctionAsyncKind
+#include "vm/Interpreter.h"
+#include "vm/SelfHosting.h"
+#include "vm/Time.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/Warnings.h" // js::WarnNumberASCII
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmValidate.h"
+
+#include "frontend/SharedContext-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSObject-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::frontend;
+using namespace js::jit;
+using namespace js::wasm;
+
+using JS::AsmJSOption;
+using JS::AutoStableStringChars;
+using JS::GenericNaN;
+using JS::SourceOwnership;
+using JS::SourceText;
+using mozilla::Abs;
+using mozilla::AsVariant;
+using mozilla::CeilingLog2;
+using mozilla::HashGeneric;
+using mozilla::IsNegativeZero;
+using mozilla::IsPositiveZero;
+using mozilla::IsPowerOfTwo;
+using mozilla::Nothing;
+using mozilla::PodZero;
+using mozilla::PositiveInfinity;
+using mozilla::Some;
+using mozilla::Utf8Unit;
+using mozilla::Compression::LZ4;
+
+using FunctionVector = JS::GCVector<JSFunction*>;
+
+/*****************************************************************************/
+
+// A wasm module can use no memory, an unshared memory (ArrayBuffer), or a
+// shared memory (SharedArrayBuffer).
+
+enum class MemoryUsage { None = false, Unshared = 1, Shared = 2 };
+
+// The valid asm.js heap lengths are precisely the valid wasm heap lengths for
+// ARM that are greater than or equal to MinHeapLength.
+static const size_t MinHeapLength = PageSize;
+// An asm.js heap can in principle be up to INT32_MAX bytes but requirements
+// on the format restrict it further to the largest pseudo-ARM-immediate.
+// See IsValidAsmJSHeapLength().
+static const uint64_t MaxHeapLength = 0x7f000000;
+
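+// For example, RoundUpToNextValidAsmJSHeapLength(1000) yields MinHeapLength
+// (one 64 KiB page); larger requests round up to the next length that is
+// encodable as an ARM immediate.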
+static uint64_t RoundUpToNextValidAsmJSHeapLength(uint64_t length) {
+ if (length <= MinHeapLength) {
+ return MinHeapLength;
+ }
+
+ return wasm::RoundUpToNextValidARMImmediate(length);
+}
+
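+// Integer division rounding up, e.g. DivideRoundingUp(65537, 65536) == 2:
+// a heap of one page plus one byte needs two whole pages.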
+static uint64_t DivideRoundingUp(uint64_t a, uint64_t b) {
+ return (a + (b - 1)) / b;
+}
+
+/*****************************************************************************/
+// asm.js module object
+
+// The asm.js spec recognizes this set of builtin Math functions.
+enum AsmJSMathBuiltinFunction {
+ AsmJSMathBuiltin_sin,
+ AsmJSMathBuiltin_cos,
+ AsmJSMathBuiltin_tan,
+ AsmJSMathBuiltin_asin,
+ AsmJSMathBuiltin_acos,
+ AsmJSMathBuiltin_atan,
+ AsmJSMathBuiltin_ceil,
+ AsmJSMathBuiltin_floor,
+ AsmJSMathBuiltin_exp,
+ AsmJSMathBuiltin_log,
+ AsmJSMathBuiltin_pow,
+ AsmJSMathBuiltin_sqrt,
+ AsmJSMathBuiltin_abs,
+ AsmJSMathBuiltin_atan2,
+ AsmJSMathBuiltin_imul,
+ AsmJSMathBuiltin_fround,
+ AsmJSMathBuiltin_min,
+ AsmJSMathBuiltin_max,
+ AsmJSMathBuiltin_clz32
+};
+
+// LitValPOD is a restricted version of LitVal suitable for asm.js that is
+// always POD.
+
+struct LitValPOD {
+ PackedTypeCode valType_;
+ union U {
+ uint32_t u32_;
+ uint64_t u64_;
+ float f32_;
+ double f64_;
+ } u;
+
+ LitValPOD() = default;
+
+ explicit LitValPOD(uint32_t u32) : valType_(ValType(ValType::I32).packed()) {
+ u.u32_ = u32;
+ }
+ explicit LitValPOD(uint64_t u64) : valType_(ValType(ValType::I64).packed()) {
+ u.u64_ = u64;
+ }
+
+ explicit LitValPOD(float f32) : valType_(ValType(ValType::F32).packed()) {
+ u.f32_ = f32;
+ }
+ explicit LitValPOD(double f64) : valType_(ValType(ValType::F64).packed()) {
+ u.f64_ = f64;
+ }
+
+ LitVal asLitVal() const {
+ switch (valType_.typeCode()) {
+ case TypeCode::I32:
+ return LitVal(u.u32_);
+ case TypeCode::I64:
+ return LitVal(u.u64_);
+ case TypeCode::F32:
+ return LitVal(u.f32_);
+ case TypeCode::F64:
+ return LitVal(u.f64_);
+ default:
+ MOZ_CRASH("Can't happen");
+ }
+ }
+};
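+
+// For example, LitValPOD(uint32_t(0)).asLitVal() yields an i32 LitVal holding
+// 0; the valType_ tag selects which arm of the union is read back.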
+
+static_assert(std::is_pod_v<LitValPOD>,
+ "must be POD to be simply serialized/deserialized");
+
+// An AsmJSGlobal represents a JS global variable in the asm.js module function.
+class AsmJSGlobal {
+ public:
+ enum Which {
+ Variable,
+ FFI,
+ ArrayView,
+ ArrayViewCtor,
+ MathBuiltinFunction,
+ Constant
+ };
+ enum VarInitKind { InitConstant, InitImport };
+ enum ConstantKind { GlobalConstant, MathConstant };
+
+ private:
+ struct CacheablePod {
+ Which which_;
+ union V {
+ struct {
+ VarInitKind initKind_;
+ union U {
+ PackedTypeCode importValType_;
+ LitValPOD val_;
+ } u;
+ } var;
+ uint32_t ffiIndex_;
+ Scalar::Type viewType_;
+ AsmJSMathBuiltinFunction mathBuiltinFunc_;
+ struct {
+ ConstantKind kind_;
+ double value_;
+ } constant;
+ } u;
+ } pod;
+ CacheableChars field_;
+
+ friend class ModuleValidatorShared;
+ template <typename Unit>
+ friend class ModuleValidator;
+
+ public:
+ AsmJSGlobal() = default;
+ AsmJSGlobal(Which which, UniqueChars field) {
+ mozilla::PodZero(&pod); // zero padding for Valgrind
+ pod.which_ = which;
+ field_ = std::move(field);
+ }
+ const char* field() const { return field_.get(); }
+ Which which() const { return pod.which_; }
+ VarInitKind varInitKind() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ return pod.u.var.initKind_;
+ }
+ LitValPOD varInitVal() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ MOZ_ASSERT(pod.u.var.initKind_ == InitConstant);
+ return pod.u.var.u.val_;
+ }
+ ValType varInitImportType() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ MOZ_ASSERT(pod.u.var.initKind_ == InitImport);
+ return ValType(pod.u.var.u.importValType_);
+ }
+ uint32_t ffiIndex() const {
+ MOZ_ASSERT(pod.which_ == FFI);
+ return pod.u.ffiIndex_;
+ }
+ // When a view is created from an imported constructor:
+ // var I32 = stdlib.Int32Array;
+ // var i32 = new I32(buffer);
+ // the second import has nothing to validate and thus has a null field.
+ Scalar::Type viewType() const {
+ MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == ArrayViewCtor);
+ return pod.u.viewType_;
+ }
+ AsmJSMathBuiltinFunction mathBuiltinFunction() const {
+ MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
+ return pod.u.mathBuiltinFunc_;
+ }
+ ConstantKind constantKind() const {
+ MOZ_ASSERT(pod.which_ == Constant);
+ return pod.u.constant.kind_;
+ }
+ double constantValue() const {
+ MOZ_ASSERT(pod.which_ == Constant);
+ return pod.u.constant.value_;
+ }
+};
+
+using AsmJSGlobalVector = Vector<AsmJSGlobal, 0, SystemAllocPolicy>;
+
+// An AsmJSImport is slightly different from an asm.js FFI function: a single
+// asm.js FFI function can be called with many different signatures. When
+// compiled to wasm, each unique pairing of FFI function and signature
+// generates a wasm import.
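+//
+// For example, given "var imp = ffi.imp;", the call sites "imp(i)|0" and
+// "+imp(d)" validate with different signatures, so they produce two distinct
+// wasm imports that are both backed by the same FFI function.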
+class AsmJSImport {
+ uint32_t ffiIndex_;
+
+ public:
+ AsmJSImport() = default;
+ explicit AsmJSImport(uint32_t ffiIndex) : ffiIndex_(ffiIndex) {}
+ uint32_t ffiIndex() const { return ffiIndex_; }
+};
+
+using AsmJSImportVector = Vector<AsmJSImport, 0, SystemAllocPolicy>;
+
+// An AsmJSExport logically extends Export with the extra information needed
+// for an asm.js exported function, viz., the offsets into the module's source
+// chars in case the function is toString()ed.
+class AsmJSExport {
+ uint32_t funcIndex_ = 0;
+
+ // All fields are treated as cacheable POD:
+ uint32_t startOffsetInModule_ = 0; // Store module-start-relative offsets
+ uint32_t endOffsetInModule_ = 0; // so preserved by serialization.
+
+ public:
+ AsmJSExport() = default;
+ AsmJSExport(uint32_t funcIndex, uint32_t startOffsetInModule,
+ uint32_t endOffsetInModule)
+ : funcIndex_(funcIndex),
+ startOffsetInModule_(startOffsetInModule),
+ endOffsetInModule_(endOffsetInModule) {}
+ uint32_t funcIndex() const { return funcIndex_; }
+ uint32_t startOffsetInModule() const { return startOffsetInModule_; }
+ uint32_t endOffsetInModule() const { return endOffsetInModule_; }
+};
+
+using AsmJSExportVector = Vector<AsmJSExport, 0, SystemAllocPolicy>;
+
+// Holds the immutable guts of an AsmJSModule.
+//
+// AsmJSMetadata is built incrementally by ModuleValidator and then shared
+// immutably between AsmJSModules.
+
+struct AsmJSMetadataCacheablePod {
+ uint32_t numFFIs = 0;
+ uint32_t srcLength = 0;
+ uint32_t srcLengthWithRightBrace = 0;
+
+ AsmJSMetadataCacheablePod() = default;
+};
+
+struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod {
+ AsmJSGlobalVector asmJSGlobals;
+ AsmJSImportVector asmJSImports;
+ AsmJSExportVector asmJSExports;
+ CacheableCharsVector asmJSFuncNames;
+ CacheableChars globalArgumentName;
+ CacheableChars importArgumentName;
+ CacheableChars bufferArgumentName;
+
+ // These values are not serialized since they are relative to the
+ // containing script which can be different between serialization and
+ // deserialization contexts. Thus, they must be set explicitly using the
+ // ambient Parser/ScriptSource after deserialization.
+ //
+ // srcStart refers to the offset in the ScriptSource to the beginning of
+ // the asm.js module function. If the function has been created with the
+ // Function constructor, this will be the first character in the function
+ // source. Otherwise, it will be the opening parenthesis of the arguments
+ // list.
+ uint32_t toStringStart;
+ uint32_t srcStart;
+ bool strict;
+ bool shouldResistFingerprinting = false;
+ RefPtr<ScriptSource> source;
+
+ uint32_t srcEndBeforeCurly() const { return srcStart + srcLength; }
+ uint32_t srcEndAfterCurly() const {
+ return srcStart + srcLengthWithRightBrace;
+ }
+
+ AsmJSMetadata()
+ : Metadata(ModuleKind::AsmJS),
+ toStringStart(0),
+ srcStart(0),
+ strict(false) {}
+ ~AsmJSMetadata() override = default;
+
+ const AsmJSExport& lookupAsmJSExport(uint32_t funcIndex) const {
+ // The AsmJSExportVector isn't stored in sorted order so do a linear
+ // search. This is for the super-cold and already-expensive toString()
+ // path and the number of exports is generally small.
+ for (const AsmJSExport& exp : asmJSExports) {
+ if (exp.funcIndex() == funcIndex) {
+ return exp;
+ }
+ }
+ MOZ_CRASH("missing asm.js func export");
+ }
+
+ bool mutedErrors() const override { return source->mutedErrors(); }
+ const char16_t* displayURL() const override {
+ return source->hasDisplayURL() ? source->displayURL() : nullptr;
+ }
+ ScriptSource* maybeScriptSource() const override { return source.get(); }
+ bool getFuncName(NameContext ctx, uint32_t funcIndex,
+ UTF8Bytes* name) const override {
+ const char* p = asmJSFuncNames[funcIndex].get();
+ if (!p) {
+ return true;
+ }
+ return name->append(p, strlen(p));
+ }
+
+ AsmJSMetadataCacheablePod& pod() { return *this; }
+ const AsmJSMetadataCacheablePod& pod() const { return *this; }
+};
+
+using MutableAsmJSMetadata = RefPtr<AsmJSMetadata>;
+
+/*****************************************************************************/
+// ParseNode utilities
+
+static inline ParseNode* NextNode(ParseNode* pn) { return pn->pn_next; }
+
+static inline ParseNode* UnaryKid(ParseNode* pn) {
+ return pn->as<UnaryNode>().kid();
+}
+
+static inline ParseNode* BinaryRight(ParseNode* pn) {
+ return pn->as<BinaryNode>().right();
+}
+
+static inline ParseNode* BinaryLeft(ParseNode* pn) {
+ return pn->as<BinaryNode>().left();
+}
+
+static inline ParseNode* ReturnExpr(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::ReturnStmt));
+ return UnaryKid(pn);
+}
+
+static inline ParseNode* TernaryKid1(ParseNode* pn) {
+ return pn->as<TernaryNode>().kid1();
+}
+
+static inline ParseNode* TernaryKid2(ParseNode* pn) {
+ return pn->as<TernaryNode>().kid2();
+}
+
+static inline ParseNode* TernaryKid3(ParseNode* pn) {
+ return pn->as<TernaryNode>().kid3();
+}
+
+static inline ParseNode* ListHead(ParseNode* pn) {
+ return pn->as<ListNode>().head();
+}
+
+static inline unsigned ListLength(ParseNode* pn) {
+ return pn->as<ListNode>().count();
+}
+
+static inline ParseNode* CallCallee(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::CallExpr));
+ return BinaryLeft(pn);
+}
+
+static inline unsigned CallArgListLength(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::CallExpr));
+ return ListLength(BinaryRight(pn));
+}
+
+static inline ParseNode* CallArgList(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::CallExpr));
+ return ListHead(BinaryRight(pn));
+}
+
+static inline ParseNode* VarListHead(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::VarStmt) ||
+ pn->isKind(ParseNodeKind::ConstDecl));
+ return ListHead(pn);
+}
+
+static inline bool IsDefaultCase(ParseNode* pn) {
+ return pn->as<CaseClause>().isDefault();
+}
+
+static inline ParseNode* CaseExpr(ParseNode* pn) {
+ return pn->as<CaseClause>().caseExpression();
+}
+
+static inline ParseNode* CaseBody(ParseNode* pn) {
+ return pn->as<CaseClause>().statementList();
+}
+
+static inline ParseNode* BinaryOpLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isBinaryOperation());
+ MOZ_ASSERT(pn->as<ListNode>().count() == 2);
+ return ListHead(pn);
+}
+
+static inline ParseNode* BinaryOpRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isBinaryOperation());
+ MOZ_ASSERT(pn->as<ListNode>().count() == 2);
+ return NextNode(ListHead(pn));
+}
+
+static inline ParseNode* BitwiseLeft(ParseNode* pn) { return BinaryOpLeft(pn); }
+
+static inline ParseNode* BitwiseRight(ParseNode* pn) {
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* MultiplyLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::MulExpr));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* MultiplyRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::MulExpr));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* AddSubLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::AddExpr) ||
+ pn->isKind(ParseNodeKind::SubExpr));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* AddSubRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::AddExpr) ||
+ pn->isKind(ParseNodeKind::SubExpr));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* DivOrModLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::DivExpr) ||
+ pn->isKind(ParseNodeKind::ModExpr));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* DivOrModRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::DivExpr) ||
+ pn->isKind(ParseNodeKind::ModExpr));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* ComparisonLeft(ParseNode* pn) {
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* ComparisonRight(ParseNode* pn) {
+ return BinaryOpRight(pn);
+}
+
+static inline bool IsExpressionStatement(ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::ExpressionStmt);
+}
+
+static inline ParseNode* ExpressionStatementExpr(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::ExpressionStmt));
+ return UnaryKid(pn);
+}
+
+static inline TaggedParserAtomIndex LoopControlMaybeLabel(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::BreakStmt) ||
+ pn->isKind(ParseNodeKind::ContinueStmt));
+ return pn->as<LoopControlStatement>().label();
+}
+
+static inline TaggedParserAtomIndex LabeledStatementLabel(ParseNode* pn) {
+ return pn->as<LabeledStatement>().label();
+}
+
+static inline ParseNode* LabeledStatementStatement(ParseNode* pn) {
+ return pn->as<LabeledStatement>().statement();
+}
+
+static double NumberNodeValue(ParseNode* pn) {
+ return pn->as<NumericLiteral>().value();
+}
+
+static bool NumberNodeHasFrac(ParseNode* pn) {
+ return pn->as<NumericLiteral>().decimalPoint() == HasDecimal;
+}
+
+static ParseNode* DotBase(ParseNode* pn) {
+ return &pn->as<PropertyAccess>().expression();
+}
+
+static TaggedParserAtomIndex DotMember(ParseNode* pn) {
+ return pn->as<PropertyAccess>().name();
+}
+
+static ParseNode* ElemBase(ParseNode* pn) {
+ return &pn->as<PropertyByValue>().expression();
+}
+
+static ParseNode* ElemIndex(ParseNode* pn) {
+ return &pn->as<PropertyByValue>().key();
+}
+
+static inline TaggedParserAtomIndex FunctionName(FunctionNode* funNode) {
+ if (auto name = funNode->funbox()->explicitName()) {
+ return name;
+ }
+ return TaggedParserAtomIndex::null();
+}
+
+static inline ParseNode* FunctionFormalParametersList(FunctionNode* fn,
+ unsigned* numFormals) {
+ ParamsBodyNode* argsBody = fn->body();
+
+ // The number of formals is equal to the number of parameters (excluding the
+ // trailing lexical scope). There are no destructuring or rest parameters for
+ // asm.js functions.
+ *numFormals = argsBody->count();
+
+ // If the function has been fully parsed, the trailing function body node is a
+ // lexical scope. If we've only parsed the function parameters, the last node
+ // is the last parameter.
+ if (*numFormals > 0 && argsBody->last()->is<LexicalScopeNode>()) {
+ MOZ_ASSERT(argsBody->last()->as<LexicalScopeNode>().scopeBody()->isKind(
+ ParseNodeKind::StatementList));
+ (*numFormals)--;
+ }
+
+ return argsBody->head();
+}
+
+static inline ParseNode* FunctionStatementList(FunctionNode* funNode) {
+ LexicalScopeNode* last = funNode->body()->body();
+ MOZ_ASSERT(last->isEmptyScope());
+ ParseNode* body = last->scopeBody();
+ MOZ_ASSERT(body->isKind(ParseNodeKind::StatementList));
+ return body;
+}
+
+static inline bool IsNormalObjectField(ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::PropertyDefinition) &&
+ pn->as<PropertyDefinition>().accessorType() == AccessorType::None &&
+ BinaryLeft(pn)->isKind(ParseNodeKind::ObjectPropertyName);
+}
+
+static inline TaggedParserAtomIndex ObjectNormalFieldName(ParseNode* pn) {
+ MOZ_ASSERT(IsNormalObjectField(pn));
+ MOZ_ASSERT(BinaryLeft(pn)->isKind(ParseNodeKind::ObjectPropertyName));
+ return BinaryLeft(pn)->as<NameNode>().atom();
+}
+
+static inline ParseNode* ObjectNormalFieldInitializer(ParseNode* pn) {
+ MOZ_ASSERT(IsNormalObjectField(pn));
+ return BinaryRight(pn);
+}
+
+static inline bool IsUseOfName(ParseNode* pn, TaggedParserAtomIndex name) {
+ return pn->isName(name);
+}
+
+static inline bool IsIgnoredDirectiveName(TaggedParserAtomIndex atom) {
+ return atom != TaggedParserAtomIndex::WellKnown::useStrict();
+}
+
+static inline bool IsIgnoredDirective(ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::ExpressionStmt) &&
+ UnaryKid(pn)->isKind(ParseNodeKind::StringExpr) &&
+ IsIgnoredDirectiveName(UnaryKid(pn)->as<NameNode>().atom());
+}
+
+static inline bool IsEmptyStatement(ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::EmptyStmt);
+}
+
+static inline ParseNode* SkipEmptyStatements(ParseNode* pn) {
+ while (pn && IsEmptyStatement(pn)) {
+ pn = pn->pn_next;
+ }
+ return pn;
+}
+
+static inline ParseNode* NextNonEmptyStatement(ParseNode* pn) {
+ return SkipEmptyStatements(pn->pn_next);
+}
+
+template <typename Unit>
+static bool GetToken(AsmJSParser<Unit>& parser, TokenKind* tkp) {
+ auto& ts = parser.tokenStream;
+ TokenKind tk;
+ while (true) {
+ if (!ts.getToken(&tk, TokenStreamShared::SlashIsRegExp)) {
+ return false;
+ }
+ if (tk != TokenKind::Semi) {
+ break;
+ }
+ }
+ *tkp = tk;
+ return true;
+}
+
+template <typename Unit>
+static bool PeekToken(AsmJSParser<Unit>& parser, TokenKind* tkp) {
+ auto& ts = parser.tokenStream;
+ TokenKind tk;
+ while (true) {
+ if (!ts.peekToken(&tk, TokenStream::SlashIsRegExp)) {
+ return false;
+ }
+ if (tk != TokenKind::Semi) {
+ break;
+ }
+ ts.consumeKnownToken(TokenKind::Semi, TokenStreamShared::SlashIsRegExp);
+ }
+ *tkp = tk;
+ return true;
+}
+
+template <typename Unit>
+static bool ParseVarOrConstStatement(AsmJSParser<Unit>& parser,
+ ParseNode** var) {
+ TokenKind tk;
+ if (!PeekToken(parser, &tk)) {
+ return false;
+ }
+ if (tk != TokenKind::Var && tk != TokenKind::Const) {
+ *var = nullptr;
+ return true;
+ }
+
+ *var = parser.statementListItem(YieldIsName);
+ if (!*var) {
+ return false;
+ }
+
+ MOZ_ASSERT((*var)->isKind(ParseNodeKind::VarStmt) ||
+ (*var)->isKind(ParseNodeKind::ConstDecl));
+ return true;
+}
+
+/*****************************************************************************/
+
+// Represents the type and value of an asm.js numeric literal.
+//
+// A literal is a double iff the literal contains a decimal point (even if the
+// fractional part is 0). Otherwise, integers may be classified:
+// fixnum: [0, 2^31)
+// negative int: [-2^31, 0)
+// big unsigned: [2^31, 2^32)
+// out of range: otherwise
+// Lastly, a literal may be a float literal, which is any double or integer
+// literal coerced with Math.fround.
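+//
+// For example, under this classification 5 is a fixnum, -5 is a negative int,
+// 0xffffffff is a big unsigned, 0x100000000 is out of range, 5.0 is a double
+// (it has a decimal point), and Math.fround(5) is a float.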
+class NumLit {
+ public:
+ enum Which {
+ Fixnum,
+ NegativeInt,
+ BigUnsigned,
+ Double,
+ Float,
+ OutOfRangeInt = -1
+ };
+
+ private:
+ Which which_;
+ JS::Value value_;
+
+ public:
+ NumLit() = default;
+
+ NumLit(Which w, const Value& v) : which_(w), value_(v) {}
+
+ Which which() const { return which_; }
+
+ int32_t toInt32() const {
+ MOZ_ASSERT(which_ == Fixnum || which_ == NegativeInt ||
+ which_ == BigUnsigned);
+ return value_.toInt32();
+ }
+
+ uint32_t toUint32() const { return (uint32_t)toInt32(); }
+
+ double toDouble() const {
+ MOZ_ASSERT(which_ == Double);
+ return value_.toDouble();
+ }
+
+ float toFloat() const {
+ MOZ_ASSERT(which_ == Float);
+ return float(value_.toDouble());
+ }
+
+ Value scalarValue() const {
+ MOZ_ASSERT(which_ != OutOfRangeInt);
+ return value_;
+ }
+
+ bool valid() const { return which_ != OutOfRangeInt; }
+
+ bool isZeroBits() const {
+ MOZ_ASSERT(valid());
+ switch (which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return toInt32() == 0;
+ case NumLit::Double:
+ return IsPositiveZero(toDouble());
+ case NumLit::Float:
+ return IsPositiveZero(toFloat());
+ case NumLit::OutOfRangeInt:
+ MOZ_CRASH("can't be here because of valid() check above");
+ }
+ return false;
+ }
+
+ LitValPOD value() const {
+ switch (which_) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return LitValPOD(toUint32());
+ case NumLit::Float:
+ return LitValPOD(toFloat());
+ case NumLit::Double:
+ return LitValPOD(toDouble());
+ case NumLit::OutOfRangeInt:;
+ }
+ MOZ_CRASH("bad literal");
+ }
+};
+
+// Represents the type of a general asm.js expression.
+//
+// A canonical subset of types representing the coercion targets: Int, Float,
+// Double.
+//
+// Void is also part of the canonical subset.
+
+class Type {
+ public:
+ enum Which {
+ Fixnum = NumLit::Fixnum,
+ Signed = NumLit::NegativeInt,
+ Unsigned = NumLit::BigUnsigned,
+ DoubleLit = NumLit::Double,
+ Float = NumLit::Float,
+ Double,
+ MaybeDouble,
+ MaybeFloat,
+ Floatish,
+ Int,
+ Intish,
+ Void
+ };
+
+ private:
+ Which which_;
+
+ public:
+ Type() = default;
+ MOZ_IMPLICIT Type(Which w) : which_(w) {}
+
+ // Map an already canonicalized Type to the return type of a function call.
+ static Type ret(Type t) {
+ MOZ_ASSERT(t.isCanonical());
+ // The 32-bit external type is Signed, not Int.
+ return t.isInt() ? Signed : t;
+ }
+
+ static Type lit(const NumLit& lit) {
+ MOZ_ASSERT(lit.valid());
+ Which which = Type::Which(lit.which());
+ MOZ_ASSERT(which >= Fixnum && which <= Float);
+ Type t;
+ t.which_ = which;
+ return t;
+ }
+
+ // Map |t| to one of the canonical vartype representations of a
+ // wasm::ValType.
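+ // For example, Fixnum, Signed, Unsigned, and Int all canonicalize to Int,
+ // and DoubleLit canonicalizes to Double; Intish and Floatish have no
+ // canonical form and still require an explicit coercion.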
+ static Type canonicalize(Type t) {
+ switch (t.which()) {
+ case Fixnum:
+ case Signed:
+ case Unsigned:
+ case Int:
+ return Int;
+
+ case Float:
+ return Float;
+
+ case DoubleLit:
+ case Double:
+ return Double;
+
+ case Void:
+ return Void;
+
+ case MaybeDouble:
+ case MaybeFloat:
+ case Floatish:
+ case Intish:
+ // These types need some kind of coercion; they can't be mapped
+ // to a ValType.
+ break;
+ }
+ MOZ_CRASH("Invalid vartype");
+ }
+
+ Which which() const { return which_; }
+
+ bool operator==(Type rhs) const { return which_ == rhs.which_; }
+ bool operator!=(Type rhs) const { return which_ != rhs.which_; }
+
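+ // Subtype test: for example, Fixnum <= Signed and Fixnum <= Unsigned both
+ // hold, while Signed <= Unsigned does not.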
+ bool operator<=(Type rhs) const {
+ switch (rhs.which_) {
+ case Signed:
+ return isSigned();
+ case Unsigned:
+ return isUnsigned();
+ case DoubleLit:
+ return isDoubleLit();
+ case Double:
+ return isDouble();
+ case Float:
+ return isFloat();
+ case MaybeDouble:
+ return isMaybeDouble();
+ case MaybeFloat:
+ return isMaybeFloat();
+ case Floatish:
+ return isFloatish();
+ case Int:
+ return isInt();
+ case Intish:
+ return isIntish();
+ case Fixnum:
+ return isFixnum();
+ case Void:
+ return isVoid();
+ }
+ MOZ_CRASH("unexpected rhs type");
+ }
+
+ bool isFixnum() const { return which_ == Fixnum; }
+
+ bool isSigned() const { return which_ == Signed || which_ == Fixnum; }
+
+ bool isUnsigned() const { return which_ == Unsigned || which_ == Fixnum; }
+
+ bool isInt() const { return isSigned() || isUnsigned() || which_ == Int; }
+
+ bool isIntish() const { return isInt() || which_ == Intish; }
+
+ bool isDoubleLit() const { return which_ == DoubleLit; }
+
+ bool isDouble() const { return isDoubleLit() || which_ == Double; }
+
+ bool isMaybeDouble() const { return isDouble() || which_ == MaybeDouble; }
+
+ bool isFloat() const { return which_ == Float; }
+
+ bool isMaybeFloat() const { return isFloat() || which_ == MaybeFloat; }
+
+ bool isFloatish() const { return isMaybeFloat() || which_ == Floatish; }
+
+ bool isVoid() const { return which_ == Void; }
+
+ bool isExtern() const { return isDouble() || isSigned(); }
+
+ // Check if this is one of the valid types for a function argument.
+ bool isArgType() const { return isInt() || isFloat() || isDouble(); }
+
+ // Check if this is one of the valid types for a function return value.
+ bool isReturnType() const {
+ return isSigned() || isFloat() || isDouble() || isVoid();
+ }
+
+ // Check if this is one of the valid types for a global variable.
+ bool isGlobalVarType() const { return isArgType(); }
+
+ // Check if this is one of the canonical vartype representations of a
+ // wasm::ValType, or is void. See Type::canonicalize().
+ bool isCanonical() const {
+ switch (which()) {
+ case Int:
+ case Float:
+ case Double:
+ case Void:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Check if this is a canonical representation of a wasm::ValType.
+ bool isCanonicalValType() const { return !isVoid() && isCanonical(); }
+
+ // Convert this canonical type to a wasm::ValType.
+ ValType canonicalToValType() const {
+ switch (which()) {
+ case Int:
+ return ValType::I32;
+ case Float:
+ return ValType::F32;
+ case Double:
+ return ValType::F64;
+ default:
+ MOZ_CRASH("Need canonical type");
+ }
+ }
+
+ Maybe<ValType> canonicalToReturnType() const {
+ return isVoid() ? Nothing() : Some(canonicalToValType());
+ }
+
+ // Convert this type to a wasm::TypeCode for use in a wasm
+ // block signature. This works for all types, including non-canonical
+ // ones. Consequently, the type isn't valid for subsequent asm.js
+ // validation; it's only valid for use in producing wasm.
+ TypeCode toWasmBlockSignatureType() const {
+ switch (which()) {
+ case Fixnum:
+ case Signed:
+ case Unsigned:
+ case Int:
+ case Intish:
+ return TypeCode::I32;
+
+ case Float:
+ case MaybeFloat:
+ case Floatish:
+ return TypeCode::F32;
+
+ case DoubleLit:
+ case Double:
+ case MaybeDouble:
+ return TypeCode::F64;
+
+ case Void:
+ return TypeCode::BlockVoid;
+ }
+ MOZ_CRASH("Invalid Type");
+ }
+
+ const char* toChars() const {
+ switch (which_) {
+ case Double:
+ return "double";
+ case DoubleLit:
+ return "doublelit";
+ case MaybeDouble:
+ return "double?";
+ case Float:
+ return "float";
+ case Floatish:
+ return "floatish";
+ case MaybeFloat:
+ return "float?";
+ case Fixnum:
+ return "fixnum";
+ case Int:
+ return "int";
+ case Signed:
+ return "signed";
+ case Unsigned:
+ return "unsigned";
+ case Intish:
+ return "intish";
+ case Void:
+ return "void";
+ }
+ MOZ_CRASH("Invalid Type");
+ }
+};
+
+static const unsigned VALIDATION_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+
+class MOZ_STACK_CLASS ModuleValidatorShared {
+ public:
+ struct Memory {
+ MemoryUsage usage;
+ uint64_t minLength;
+
+ uint64_t minPages() const { return DivideRoundingUp(minLength, PageSize); }
+
+ Memory() = default;
+ };
+
+ class Func {
+ TaggedParserAtomIndex name_;
+ uint32_t sigIndex_;
+ uint32_t firstUse_;
+ uint32_t funcDefIndex_;
+
+ bool defined_;
+
+ // Available when defined:
+ uint32_t srcBegin_;
+ uint32_t srcEnd_;
+ uint32_t line_;
+ Bytes bytes_;
+ Uint32Vector callSiteLineNums_;
+
+ public:
+ Func(TaggedParserAtomIndex name, uint32_t sigIndex, uint32_t firstUse,
+ uint32_t funcDefIndex)
+ : name_(name),
+ sigIndex_(sigIndex),
+ firstUse_(firstUse),
+ funcDefIndex_(funcDefIndex),
+ defined_(false),
+ srcBegin_(0),
+ srcEnd_(0),
+ line_(0) {}
+
+ TaggedParserAtomIndex name() const { return name_; }
+ uint32_t sigIndex() const { return sigIndex_; }
+ uint32_t firstUse() const { return firstUse_; }
+ bool defined() const { return defined_; }
+ uint32_t funcDefIndex() const { return funcDefIndex_; }
+
+ void define(ParseNode* fn, uint32_t line, Bytes&& bytes,
+ Uint32Vector&& callSiteLineNums) {
+ MOZ_ASSERT(!defined_);
+ defined_ = true;
+ srcBegin_ = fn->pn_pos.begin;
+ srcEnd_ = fn->pn_pos.end;
+ line_ = line;
+ bytes_ = std::move(bytes);
+ callSiteLineNums_ = std::move(callSiteLineNums);
+ }
+
+ uint32_t srcBegin() const {
+ MOZ_ASSERT(defined_);
+ return srcBegin_;
+ }
+ uint32_t srcEnd() const {
+ MOZ_ASSERT(defined_);
+ return srcEnd_;
+ }
+ uint32_t line() const {
+ MOZ_ASSERT(defined_);
+ return line_;
+ }
+ const Bytes& bytes() const {
+ MOZ_ASSERT(defined_);
+ return bytes_;
+ }
+ Uint32Vector& callSiteLineNums() {
+ MOZ_ASSERT(defined_);
+ return callSiteLineNums_;
+ }
+ };
+
+ using ConstFuncVector = Vector<const Func*>;
+ using FuncVector = Vector<Func>;
+
+ class Table {
+ uint32_t sigIndex_;
+ TaggedParserAtomIndex name_;
+ uint32_t firstUse_;
+ uint32_t mask_;
+ bool defined_;
+
+ public:
+ Table(uint32_t sigIndex, TaggedParserAtomIndex name, uint32_t firstUse,
+ uint32_t mask)
+ : sigIndex_(sigIndex),
+ name_(name),
+ firstUse_(firstUse),
+ mask_(mask),
+ defined_(false) {}
+
+ Table(Table&& rhs) = delete;
+
+ uint32_t sigIndex() const { return sigIndex_; }
+ TaggedParserAtomIndex name() const { return name_; }
+ uint32_t firstUse() const { return firstUse_; }
+ unsigned mask() const { return mask_; }
+ bool defined() const { return defined_; }
+ void define() {
+ MOZ_ASSERT(!defined_);
+ defined_ = true;
+ }
+ };
+
+ using TableVector = Vector<Table*>;
+
+ class Global {
+ public:
+ enum Which {
+ Variable,
+ ConstantLiteral,
+ ConstantImport,
+ Function,
+ Table,
+ FFI,
+ ArrayView,
+ ArrayViewCtor,
+ MathBuiltinFunction
+ };
+
+ private:
+ Which which_;
+ union U {
+ struct VarOrConst {
+ Type::Which type_;
+ unsigned index_;
+ NumLit literalValue_;
+
+ VarOrConst(unsigned index, const NumLit& lit)
+ : type_(Type::lit(lit).which()),
+ index_(index),
+ literalValue_(lit) // copies |lit|
+ {}
+
+ VarOrConst(unsigned index, Type::Which which)
+ : type_(which), index_(index) {
+ // The |literalValue_| field remains unused and
+ // uninitialized for non-constant variables.
+ }
+
+ explicit VarOrConst(double constant)
+ : type_(Type::Double),
+ literalValue_(NumLit::Double, DoubleValue(constant)) {
+ // The index_ field is unused and uninitialized for
+ // constant doubles.
+ }
+ } varOrConst;
+ uint32_t funcDefIndex_;
+ uint32_t tableIndex_;
+ uint32_t ffiIndex_;
+ Scalar::Type viewType_;
+ AsmJSMathBuiltinFunction mathBuiltinFunc_;
+
+ // |varOrConst|, through |varOrConst.literalValue_|, has a
+ // non-trivial constructor and therefore MUST be placement-new'd
+ // into existence.
+ MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ U() : funcDefIndex_(0) {}
+ MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ } u;
+
+ friend class ModuleValidatorShared;
+ template <typename Unit>
+ friend class ModuleValidator;
+ friend class js::LifoAlloc;
+
+ explicit Global(Which which) : which_(which) {}
+
+ public:
+ Which which() const { return which_; }
+ Type varOrConstType() const {
+ MOZ_ASSERT(which_ == Variable || which_ == ConstantLiteral ||
+ which_ == ConstantImport);
+ return u.varOrConst.type_;
+ }
+ unsigned varOrConstIndex() const {
+ MOZ_ASSERT(which_ == Variable || which_ == ConstantImport);
+ return u.varOrConst.index_;
+ }
+ bool isConst() const {
+ return which_ == ConstantLiteral || which_ == ConstantImport;
+ }
+ NumLit constLiteralValue() const {
+ MOZ_ASSERT(which_ == ConstantLiteral);
+ return u.varOrConst.literalValue_;
+ }
+ uint32_t funcDefIndex() const {
+ MOZ_ASSERT(which_ == Function);
+ return u.funcDefIndex_;
+ }
+ uint32_t tableIndex() const {
+ MOZ_ASSERT(which_ == Table);
+ return u.tableIndex_;
+ }
+ unsigned ffiIndex() const {
+ MOZ_ASSERT(which_ == FFI);
+ return u.ffiIndex_;
+ }
+ Scalar::Type viewType() const {
+ MOZ_ASSERT(which_ == ArrayView || which_ == ArrayViewCtor);
+ return u.viewType_;
+ }
+ bool isMathFunction() const { return which_ == MathBuiltinFunction; }
+ AsmJSMathBuiltinFunction mathBuiltinFunction() const {
+ MOZ_ASSERT(which_ == MathBuiltinFunction);
+ return u.mathBuiltinFunc_;
+ }
+ };
+
+ struct MathBuiltin {
+ enum Kind { Function, Constant };
+ Kind kind;
+
+ union {
+ double cst;
+ AsmJSMathBuiltinFunction func;
+ } u;
+
+ MathBuiltin() : kind(Kind(-1)), u{} {}
+ explicit MathBuiltin(double cst) : kind(Constant) { u.cst = cst; }
+ explicit MathBuiltin(AsmJSMathBuiltinFunction func) : kind(Function) {
+ u.func = func;
+ }
+ };
+
+ struct ArrayView {
+ ArrayView(TaggedParserAtomIndex name, Scalar::Type type)
+ : name(name), type(type) {}
+
+ TaggedParserAtomIndex name;
+ Scalar::Type type;
+ };
+
+ protected:
+ class HashableSig {
+ uint32_t sigIndex_;
+ const TypeContext& types_;
+
+ public:
+ HashableSig(uint32_t sigIndex, const TypeContext& types)
+ : sigIndex_(sigIndex), types_(types) {}
+ uint32_t sigIndex() const { return sigIndex_; }
+ const FuncType& funcType() const { return types_[sigIndex_].funcType(); }
+
+ // Implement HashPolicy:
+ using Lookup = const FuncType&;
+ static HashNumber hash(Lookup l) { return l.hash(nullptr); }
+ static bool match(HashableSig lhs, Lookup rhs) {
+ return FuncType::strictlyEquals(lhs.funcType(), rhs);
+ }
+ };
+
+ class NamedSig : public HashableSig {
+ TaggedParserAtomIndex name_;
+
+ public:
+ NamedSig(TaggedParserAtomIndex name, uint32_t sigIndex,
+ const TypeContext& types)
+ : HashableSig(sigIndex, types), name_(name) {}
+ TaggedParserAtomIndex name() const { return name_; }
+
+ // Implement HashPolicy:
+ struct Lookup {
+ TaggedParserAtomIndex name;
+ const FuncType& funcType;
+ Lookup(TaggedParserAtomIndex name, const FuncType& funcType)
+ : name(name), funcType(funcType) {}
+ };
+ static HashNumber hash(Lookup l) {
+ return HashGeneric(TaggedParserAtomIndexHasher::hash(l.name),
+ l.funcType.hash(nullptr));
+ }
+ static bool match(NamedSig lhs, Lookup rhs) {
+ return lhs.name() == rhs.name &&
+ FuncType::strictlyEquals(lhs.funcType(), rhs.funcType);
+ }
+ };
+
+ using SigSet = HashSet<HashableSig, HashableSig>;
+ using FuncImportMap = HashMap<NamedSig, uint32_t, NamedSig>;
+ using GlobalMap =
+ HashMap<TaggedParserAtomIndex, Global*, TaggedParserAtomIndexHasher>;
+ using MathNameMap =
+ HashMap<TaggedParserAtomIndex, MathBuiltin, TaggedParserAtomIndexHasher>;
+ using ArrayViewVector = Vector<ArrayView>;
+
+ protected:
+ FrontendContext* fc_;
+ ParserAtomsTable& parserAtoms_;
+ FunctionNode* moduleFunctionNode_;
+ TaggedParserAtomIndex moduleFunctionName_;
+ TaggedParserAtomIndex globalArgumentName_;
+ TaggedParserAtomIndex importArgumentName_;
+ TaggedParserAtomIndex bufferArgumentName_;
+ MathNameMap standardLibraryMathNames_;
+
+ // Validation-internal state:
+ LifoAlloc validationLifo_;
+ Memory memory_;
+ FuncVector funcDefs_;
+ TableVector tables_;
+ GlobalMap globalMap_;
+ SigSet sigSet_;
+ FuncImportMap funcImportMap_;
+ ArrayViewVector arrayViews_;
+
+ // State used to build the AsmJSModule in finish():
+ CompilerEnvironment compilerEnv_;
+ ModuleEnvironment moduleEnv_;
+ MutableAsmJSMetadata asmJSMetadata_;
+
+ // Error reporting:
+ UniqueChars errorString_ = nullptr;
+ uint32_t errorOffset_ = UINT32_MAX;
+ bool errorOverRecursed_ = false;
+
+ protected:
+ ModuleValidatorShared(FrontendContext* fc, ParserAtomsTable& parserAtoms,
+ FunctionNode* moduleFunctionNode)
+ : fc_(fc),
+ parserAtoms_(parserAtoms),
+ moduleFunctionNode_(moduleFunctionNode),
+ moduleFunctionName_(FunctionName(moduleFunctionNode)),
+ standardLibraryMathNames_(fc),
+ validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
+ funcDefs_(fc),
+ tables_(fc),
+ globalMap_(fc),
+ sigSet_(fc),
+ funcImportMap_(fc),
+ arrayViews_(fc),
+ compilerEnv_(CompileMode::Once, Tier::Optimized, DebugEnabled::False),
+ moduleEnv_(FeatureArgs(), ModuleKind::AsmJS) {
+ compilerEnv_.computeParameters();
+ memory_.minLength = RoundUpToNextValidAsmJSHeapLength(0);
+ }
+
+ protected:
+ [[nodiscard]] bool initModuleEnvironment() { return moduleEnv_.init(); }
+
+ [[nodiscard]] bool addStandardLibraryMathInfo() {
+ static constexpr struct {
+ const char* name;
+ AsmJSMathBuiltinFunction func;
+ } functions[] = {
+ {"sin", AsmJSMathBuiltin_sin}, {"cos", AsmJSMathBuiltin_cos},
+ {"tan", AsmJSMathBuiltin_tan}, {"asin", AsmJSMathBuiltin_asin},
+ {"acos", AsmJSMathBuiltin_acos}, {"atan", AsmJSMathBuiltin_atan},
+ {"ceil", AsmJSMathBuiltin_ceil}, {"floor", AsmJSMathBuiltin_floor},
+ {"exp", AsmJSMathBuiltin_exp}, {"log", AsmJSMathBuiltin_log},
+ {"pow", AsmJSMathBuiltin_pow}, {"sqrt", AsmJSMathBuiltin_sqrt},
+ {"abs", AsmJSMathBuiltin_abs}, {"atan2", AsmJSMathBuiltin_atan2},
+ {"imul", AsmJSMathBuiltin_imul}, {"clz32", AsmJSMathBuiltin_clz32},
+ {"fround", AsmJSMathBuiltin_fround}, {"min", AsmJSMathBuiltin_min},
+ {"max", AsmJSMathBuiltin_max},
+ };
+
+ auto AddMathFunction = [this](const char* name,
+ AsmJSMathBuiltinFunction func) {
+ auto atom = parserAtoms_.internAscii(fc_, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ MathBuiltin builtin(func);
+ return this->standardLibraryMathNames_.putNew(atom, builtin);
+ };
+
+ for (const auto& info : functions) {
+ if (!AddMathFunction(info.name, info.func)) {
+ return false;
+ }
+ }
+
+ static constexpr struct {
+ const char* name;
+ double value;
+ } constants[] = {
+ {"E", M_E},
+ {"LN10", M_LN10},
+ {"LN2", M_LN2},
+ {"LOG2E", M_LOG2E},
+ {"LOG10E", M_LOG10E},
+ {"PI", M_PI},
+ {"SQRT1_2", M_SQRT1_2},
+ {"SQRT2", M_SQRT2},
+ };
+
+ auto AddMathConstant = [this](const char* name, double cst) {
+ auto atom = parserAtoms_.internAscii(fc_, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ MathBuiltin builtin(cst);
+ return this->standardLibraryMathNames_.putNew(atom, builtin);
+ };
+
+ for (const auto& info : constants) {
+ if (!AddMathConstant(info.name, info.value)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public:
+ FrontendContext* fc() const { return fc_; }
+ TaggedParserAtomIndex moduleFunctionName() const {
+ return moduleFunctionName_;
+ }
+ TaggedParserAtomIndex globalArgumentName() const {
+ return globalArgumentName_;
+ }
+ TaggedParserAtomIndex importArgumentName() const {
+ return importArgumentName_;
+ }
+ TaggedParserAtomIndex bufferArgumentName() const {
+ return bufferArgumentName_;
+ }
+ const ModuleEnvironment& env() { return moduleEnv_; }
+
+ void initModuleFunctionName(TaggedParserAtomIndex name) {
+ MOZ_ASSERT(!moduleFunctionName_);
+ moduleFunctionName_ = name;
+ }
+ [[nodiscard]] bool initGlobalArgumentName(TaggedParserAtomIndex n) {
+ globalArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->globalArgumentName = parserAtoms_.toNewUTF8CharsZ(fc_, n);
+ if (!asmJSMetadata_->globalArgumentName) {
+ return false;
+ }
+ }
+ return true;
+ }
+ [[nodiscard]] bool initImportArgumentName(TaggedParserAtomIndex n) {
+ importArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->importArgumentName = parserAtoms_.toNewUTF8CharsZ(fc_, n);
+ if (!asmJSMetadata_->importArgumentName) {
+ return false;
+ }
+ }
+ return true;
+ }
+ [[nodiscard]] bool initBufferArgumentName(TaggedParserAtomIndex n) {
+ bufferArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->bufferArgumentName = parserAtoms_.toNewUTF8CharsZ(fc_, n);
+ if (!asmJSMetadata_->bufferArgumentName) {
+ return false;
+ }
+ }
+ return true;
+ }
+ bool addGlobalVarInit(TaggedParserAtomIndex var, const NumLit& lit, Type type,
+ bool isConst) {
+ MOZ_ASSERT(type.isGlobalVarType());
+ MOZ_ASSERT(type == Type::canonicalize(Type::lit(lit)));
+
+ uint32_t index = moduleEnv_.globals.length();
+ if (!moduleEnv_.globals.emplaceBack(type.canonicalToValType(), !isConst,
+ index, ModuleKind::AsmJS)) {
+ return false;
+ }
+
+ Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable;
+ Global* global = validationLifo_.new_<Global>(which);
+ if (!global) {
+ return false;
+ }
+ if (isConst) {
+ new (&global->u.varOrConst) Global::U::VarOrConst(index, lit);
+ } else {
+ new (&global->u.varOrConst) Global::U::VarOrConst(index, type.which());
+ }
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Variable, nullptr);
+ g.pod.u.var.initKind_ = AsmJSGlobal::InitConstant;
+ g.pod.u.var.u.val_ = lit.value();
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addGlobalVarImport(TaggedParserAtomIndex var,
+ TaggedParserAtomIndex field, Type type,
+ bool isConst) {
+ MOZ_ASSERT(type.isGlobalVarType());
+
+ UniqueChars fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ uint32_t index = moduleEnv_.globals.length();
+ ValType valType = type.canonicalToValType();
+ if (!moduleEnv_.globals.emplaceBack(valType, !isConst, index,
+ ModuleKind::AsmJS)) {
+ return false;
+ }
+
+ Global::Which which = isConst ? Global::ConstantImport : Global::Variable;
+ Global* global = validationLifo_.new_<Global>(which);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.varOrConst) Global::U::VarOrConst(index, type.which());
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Variable, std::move(fieldChars));
+ g.pod.u.var.initKind_ = AsmJSGlobal::InitImport;
+ g.pod.u.var.u.importValType_ = valType.packed();
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addArrayView(TaggedParserAtomIndex var, Scalar::Type vt,
+ TaggedParserAtomIndex maybeField) {
+ UniqueChars fieldChars;
+ if (maybeField) {
+ fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, maybeField);
+ if (!fieldChars) {
+ return false;
+ }
+ }
+
+ if (!arrayViews_.append(ArrayView(var, vt))) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::ArrayView);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.viewType_) Scalar::Type(vt);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::ArrayView, std::move(fieldChars));
+ g.pod.u.viewType_ = vt;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addMathBuiltinFunction(TaggedParserAtomIndex var,
+ AsmJSMathBuiltinFunction func,
+ TaggedParserAtomIndex field) {
+ UniqueChars fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::MathBuiltinFunction);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.mathBuiltinFunc_) AsmJSMathBuiltinFunction(func);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::MathBuiltinFunction, std::move(fieldChars));
+ g.pod.u.mathBuiltinFunc_ = func;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+
+ private:
+ bool addGlobalDoubleConstant(TaggedParserAtomIndex var, double constant) {
+ Global* global = validationLifo_.new_<Global>(Global::ConstantLiteral);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.varOrConst) Global::U::VarOrConst(constant);
+ return globalMap_.putNew(var, global);
+ }
+
+ public:
+ bool addMathBuiltinConstant(TaggedParserAtomIndex var, double constant,
+ TaggedParserAtomIndex field) {
+ UniqueChars fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ if (!addGlobalDoubleConstant(var, constant)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Constant, std::move(fieldChars));
+ g.pod.u.constant.value_ = constant;
+ g.pod.u.constant.kind_ = AsmJSGlobal::MathConstant;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addGlobalConstant(TaggedParserAtomIndex var, double constant,
+ TaggedParserAtomIndex field) {
+ UniqueChars fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ if (!addGlobalDoubleConstant(var, constant)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Constant, std::move(fieldChars));
+ g.pod.u.constant.value_ = constant;
+ g.pod.u.constant.kind_ = AsmJSGlobal::GlobalConstant;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addArrayViewCtor(TaggedParserAtomIndex var, Scalar::Type vt,
+ TaggedParserAtomIndex field) {
+ UniqueChars fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.viewType_) Scalar::Type(vt);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::ArrayViewCtor, std::move(fieldChars));
+ g.pod.u.viewType_ = vt;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addFFI(TaggedParserAtomIndex var, TaggedParserAtomIndex field) {
+ UniqueChars fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ if (asmJSMetadata_->numFFIs == UINT32_MAX) {
+ return false;
+ }
+ uint32_t ffiIndex = asmJSMetadata_->numFFIs++;
+
+ Global* global = validationLifo_.new_<Global>(Global::FFI);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.ffiIndex_) uint32_t(ffiIndex);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::FFI, std::move(fieldChars));
+ g.pod.u.ffiIndex_ = ffiIndex;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addExportField(const Func& func, TaggedParserAtomIndex maybeField) {
+ // Record the field name of this export.
+ CacheableName fieldName;
+ if (maybeField) {
+ UniqueChars fieldChars = parserAtoms_.toNewUTF8CharsZ(fc_, maybeField);
+ if (!fieldChars) {
+ return false;
+ }
+ fieldName = CacheableName::fromUTF8Chars(std::move(fieldChars));
+ }
+
+ // Declare which function is exported which gives us an index into the
+ // module ExportVector.
+ uint32_t funcIndex = funcImportMap_.count() + func.funcDefIndex();
+ if (!moduleEnv_.exports.emplaceBack(std::move(fieldName), funcIndex,
+ DefinitionKind::Function)) {
+ return false;
+ }
+
+ // The exported function might have already been exported in which case
+ // the index will refer into the range of AsmJSExports.
+ return asmJSMetadata_->asmJSExports.emplaceBack(
+ funcIndex, func.srcBegin() - asmJSMetadata_->srcStart,
+ func.srcEnd() - asmJSMetadata_->srcStart);
+ }
+
+ bool defineFuncPtrTable(uint32_t tableIndex, Uint32Vector&& elems) {
+ Table& table = *tables_[tableIndex];
+ if (table.defined()) {
+ return false;
+ }
+
+ table.define();
+
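+ // Element indices were collected as function-definition indices; shift them
+ // into the module's function index space, where the funcImportMap_.count()
+ // imported functions come before all definitions.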
+ for (uint32_t& index : elems) {
+ index += funcImportMap_.count();
+ }
+
+ MutableElemSegment seg = js_new<ElemSegment>();
+ if (!seg) {
+ return false;
+ }
+ seg->elemType = RefType::func();
+ seg->tableIndex = tableIndex;
+ seg->offsetIfActive = Some(InitExpr(LitVal(uint32_t(0))));
+ seg->elemFuncIndices = std::move(elems);
+ return moduleEnv_.elemSegments.append(std::move(seg));
+ }
+
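+ // For example, a 4-byte constant access at offset 0x12340 needs at least
+ // 0x12344 bytes of heap, which is rounded up to a valid asm.js heap length
+ // and may raise memory_.minLength.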
+ bool tryConstantAccess(uint64_t start, uint64_t width) {
+ MOZ_ASSERT(UINT64_MAX - start > width);
+ uint64_t len = start + width;
+ if (len > uint64_t(INT32_MAX) + 1) {
+ return false;
+ }
+ len = RoundUpToNextValidAsmJSHeapLength(len);
+ if (len > memory_.minLength) {
+ memory_.minLength = len;
+ }
+ return true;
+ }
+
+ // Error handling.
+ bool hasAlreadyFailed() const { return !!errorString_; }
+
+ bool failOffset(uint32_t offset, const char* str) {
+ MOZ_ASSERT(!hasAlreadyFailed());
+ MOZ_ASSERT(errorOffset_ == UINT32_MAX);
+ MOZ_ASSERT(str);
+ errorOffset_ = offset;
+ errorString_ = DuplicateString(str);
+ return false;
+ }
+
+ bool fail(ParseNode* pn, const char* str) {
+ return failOffset(pn->pn_pos.begin, str);
+ }
+
+ bool failfVAOffset(uint32_t offset, const char* fmt, va_list ap)
+ MOZ_FORMAT_PRINTF(3, 0) {
+ MOZ_ASSERT(!hasAlreadyFailed());
+ MOZ_ASSERT(errorOffset_ == UINT32_MAX);
+ MOZ_ASSERT(fmt);
+ errorOffset_ = offset;
+ errorString_ = JS_vsmprintf(fmt, ap);
+ return false;
+ }
+
+ bool failfOffset(uint32_t offset, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ failfVAOffset(offset, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failf(ParseNode* pn, const char* fmt, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ failfVAOffset(pn->pn_pos.begin, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failNameOffset(uint32_t offset, const char* fmt,
+ TaggedParserAtomIndex name) {
+ // This function is invoked without the caller properly rooting its locals.
+ if (UniqueChars bytes = parserAtoms_.toPrintableString(name)) {
+ failfOffset(offset, fmt, bytes.get());
+ } else {
+ ReportOutOfMemory(fc_);
+ }
+ return false;
+ }
+
+ bool failName(ParseNode* pn, const char* fmt, TaggedParserAtomIndex name) {
+ return failNameOffset(pn->pn_pos.begin, fmt, name);
+ }
+
+ bool failOverRecursed() {
+ errorOverRecursed_ = true;
+ return false;
+ }
+
+ unsigned numArrayViews() const { return arrayViews_.length(); }
+ const ArrayView& arrayView(unsigned i) const { return arrayViews_[i]; }
+ unsigned numFuncDefs() const { return funcDefs_.length(); }
+ const Func& funcDef(unsigned i) const { return funcDefs_[i]; }
+ unsigned numFuncPtrTables() const { return tables_.length(); }
+ Table& table(unsigned i) const { return *tables_[i]; }
+
+ const Global* lookupGlobal(TaggedParserAtomIndex name) const {
+ if (GlobalMap::Ptr p = globalMap_.lookup(name)) {
+ return p->value();
+ }
+ return nullptr;
+ }
+
+ Func* lookupFuncDef(TaggedParserAtomIndex name) {
+ if (GlobalMap::Ptr p = globalMap_.lookup(name)) {
+ Global* value = p->value();
+ if (value->which() == Global::Function) {
+ return &funcDefs_[value->funcDefIndex()];
+ }
+ }
+ return nullptr;
+ }
+
+ bool lookupStandardLibraryMathName(TaggedParserAtomIndex name,
+ MathBuiltin* mathBuiltin) const {
+ if (MathNameMap::Ptr p = standardLibraryMathNames_.lookup(name)) {
+ *mathBuiltin = p->value();
+ return true;
+ }
+ return false;
+ }
+
+ bool startFunctionBodies() {
+ if (!arrayViews_.empty()) {
+ memory_.usage = MemoryUsage::Unshared;
+ } else {
+ memory_.usage = MemoryUsage::None;
+ }
+ return true;
+ }
+};
+
+// The ModuleValidator encapsulates the entire validation of an asm.js module.
+// Its lifetime spans the validation of the top-level components of an asm.js
+// module (all the globals), the emission of bytecode for all the functions in
+// the module, and the validation of the function pointer tables. It also
+// finishes the compilation of all the module's stubs.
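+//
+// A rough usage sketch (using only names defined in this file); the Check*
+// functions further below drive the validator as they walk the parse tree:
+//
+//   ModuleValidator<Unit> m(fc, parserAtoms, parser, moduleFunctionNode);
+//   if (!m.init()) {
+//     return false;  // OOM already reported
+//   }
+//   // ... CheckModuleGlobals(m), function bodies, function pointer tables ...
+//   SharedModule module = m.finish();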
+template <typename Unit>
+class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
+ private:
+ AsmJSParser<Unit>& parser_;
+
+ public:
+ ModuleValidator(FrontendContext* fc, ParserAtomsTable& parserAtoms,
+ AsmJSParser<Unit>& parser, FunctionNode* moduleFunctionNode)
+ : ModuleValidatorShared(fc, parserAtoms, moduleFunctionNode),
+ parser_(parser) {}
+
+ ~ModuleValidator() {
+ if (errorString_) {
+ MOZ_ASSERT(errorOffset_ != UINT32_MAX);
+ typeFailure(errorOffset_, errorString_.get());
+ }
+ if (errorOverRecursed_) {
+ ReportOverRecursed(fc_);
+ }
+ }
+
+ private:
+ // Helpers:
+ bool newSig(FuncType&& sig, uint32_t* sigIndex) {
+ if (moduleEnv_.types->length() >= MaxTypes) {
+ return failCurrentOffset("too many signatures");
+ }
+
+ *sigIndex = moduleEnv_.types->length();
+ return moduleEnv_.types->addType(std::move(sig));
+ }
+ bool declareSig(FuncType&& sig, uint32_t* sigIndex) {
+ SigSet::AddPtr p = sigSet_.lookupForAdd(sig);
+ if (p) {
+ *sigIndex = p->sigIndex();
+ MOZ_ASSERT(FuncType::strictlyEquals(
+ moduleEnv_.types->type(*sigIndex).funcType(), sig));
+ return true;
+ }
+
+ return newSig(std::move(sig), sigIndex) &&
+ sigSet_.add(p, HashableSig(*sigIndex, *moduleEnv_.types));
+ }
+
+ private:
+ void typeFailure(uint32_t offset, ...) {
+ va_list args;
+ va_start(args, offset);
+
+ auto& ts = tokenStream();
+ ErrorMetadata metadata;
+ if (ts.computeErrorMetadata(&metadata, AsVariant(offset))) {
+ if (ts.anyCharsAccess().options().throwOnAsmJSValidationFailureOption) {
+ ReportCompileErrorLatin1(fc_, std::move(metadata), nullptr,
+ JSMSG_USE_ASM_TYPE_FAIL, &args);
+ } else {
+ // asm.js type failure is indicated by calling one of the fail*
+ // functions below. These functions always return false to
+        // halt asm.js parsing. Whether normal parsing is attempted as a
+        // fallback depends on whether an exception is also set.
+ //
+ // If warning succeeds, no exception is set. If warning fails,
+ // an exception is set and execution will halt. Thus it's safe
+ // and correct to ignore the return value here.
+ (void)ts.compileWarning(std::move(metadata), nullptr,
+ JSMSG_USE_ASM_TYPE_FAIL, &args);
+ }
+ }
+
+ va_end(args);
+ }
+
+ public:
+ bool init() {
+ asmJSMetadata_ = js_new<AsmJSMetadata>();
+ if (!asmJSMetadata_) {
+ ReportOutOfMemory(fc_);
+ return false;
+ }
+
+ asmJSMetadata_->toStringStart =
+ moduleFunctionNode_->funbox()->extent().toStringStart;
+ asmJSMetadata_->srcStart = moduleFunctionNode_->body()->pn_pos.begin;
+ asmJSMetadata_->strict = parser_.pc_->sc()->strict() &&
+ !parser_.pc_->sc()->hasExplicitUseStrict();
+ asmJSMetadata_->shouldResistFingerprinting =
+ parser_.options().shouldResistFingerprinting();
+ asmJSMetadata_->source = do_AddRef(parser_.ss);
+
+ if (!initModuleEnvironment()) {
+ return false;
+ }
+ return addStandardLibraryMathInfo();
+ }
+
+ AsmJSParser<Unit>& parser() const { return parser_; }
+
+ auto& tokenStream() const { return parser_.tokenStream; }
+
+ bool shouldResistFingerprinting() const {
+ return asmJSMetadata_->shouldResistFingerprinting;
+ }
+
+ public:
+ bool addFuncDef(TaggedParserAtomIndex name, uint32_t firstUse, FuncType&& sig,
+ Func** func) {
+ uint32_t sigIndex;
+ if (!declareSig(std::move(sig), &sigIndex)) {
+ return false;
+ }
+
+ uint32_t funcDefIndex = funcDefs_.length();
+ if (funcDefIndex >= MaxFuncs) {
+ return failCurrentOffset("too many functions");
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::Function);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.funcDefIndex_) uint32_t(funcDefIndex);
+ if (!globalMap_.putNew(name, global)) {
+ return false;
+ }
+ if (!funcDefs_.emplaceBack(name, sigIndex, firstUse, funcDefIndex)) {
+ return false;
+ }
+ *func = &funcDefs_.back();
+ return true;
+ }
+ bool declareFuncPtrTable(FuncType&& sig, TaggedParserAtomIndex name,
+ uint32_t firstUse, uint32_t mask,
+ uint32_t* tableIndex) {
+ if (mask > MaxTableLength) {
+ return failCurrentOffset("function pointer table too big");
+ }
+
+ MOZ_ASSERT(moduleEnv_.tables.length() == tables_.length());
+ *tableIndex = moduleEnv_.tables.length();
+
+ uint32_t sigIndex;
+ if (!newSig(std::move(sig), &sigIndex)) {
+ return false;
+ }
+
+ MOZ_ASSERT(sigIndex >= moduleEnv_.asmJSSigToTableIndex.length());
+ if (!moduleEnv_.asmJSSigToTableIndex.resize(sigIndex + 1)) {
+ return false;
+ }
+
+ moduleEnv_.asmJSSigToTableIndex[sigIndex] = moduleEnv_.tables.length();
+ if (!moduleEnv_.tables.emplaceBack(RefType::func(), mask + 1, Nothing(),
+ /* initExpr */ Nothing(),
+ /*isAsmJS*/ true)) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::Table);
+ if (!global) {
+ return false;
+ }
+
+ new (&global->u.tableIndex_) uint32_t(*tableIndex);
+ if (!globalMap_.putNew(name, global)) {
+ return false;
+ }
+
+ Table* t = validationLifo_.new_<Table>(sigIndex, name, firstUse, mask);
+ return t && tables_.append(t);
+ }
+ bool declareImport(TaggedParserAtomIndex name, FuncType&& sig,
+ unsigned ffiIndex, uint32_t* importIndex) {
+ FuncImportMap::AddPtr p =
+ funcImportMap_.lookupForAdd(NamedSig::Lookup(name, sig));
+ if (p) {
+ *importIndex = p->value();
+ return true;
+ }
+
+ *importIndex = funcImportMap_.count();
+ MOZ_ASSERT(*importIndex == asmJSMetadata_->asmJSImports.length());
+
+ if (*importIndex >= MaxImports) {
+ return failCurrentOffset("too many imports");
+ }
+
+ if (!asmJSMetadata_->asmJSImports.emplaceBack(ffiIndex)) {
+ return false;
+ }
+
+ uint32_t sigIndex;
+ if (!declareSig(std::move(sig), &sigIndex)) {
+ return false;
+ }
+
+ return funcImportMap_.add(p, NamedSig(name, sigIndex, *moduleEnv_.types),
+ *importIndex);
+ }
+
+ // Error handling.
+ bool failCurrentOffset(const char* str) {
+ return failOffset(tokenStream().anyCharsAccess().currentToken().pos.begin,
+ str);
+ }
+
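+  // Finish validation and build the wasm Module: set up the memory descriptor
+  // (if the heap is used), populate the function index space (imports first,
+  // then definitions), mark exported functions, record function names and
+  // source extents, and drive a ModuleGenerator over the function bodies that
+  // were encoded during validation.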
+ SharedModule finish() {
+ MOZ_ASSERT(!moduleEnv_.usesMemory());
+ if (memory_.usage != MemoryUsage::None) {
+ Limits limits;
+ limits.shared = memory_.usage == MemoryUsage::Shared ? Shareable::True
+ : Shareable::False;
+ limits.initial = memory_.minPages();
+ limits.maximum = Nothing();
+ limits.indexType = IndexType::I32;
+ moduleEnv_.memory = Some(MemoryDesc(limits));
+ }
+ MOZ_ASSERT(moduleEnv_.funcs.empty());
+ if (!moduleEnv_.funcs.resize(funcImportMap_.count() + funcDefs_.length())) {
+ return nullptr;
+ }
+ for (FuncImportMap::Range r = funcImportMap_.all(); !r.empty();
+ r.popFront()) {
+ uint32_t funcIndex = r.front().value();
+ uint32_t funcTypeIndex = r.front().key().sigIndex();
+ MOZ_ASSERT(!moduleEnv_.funcs[funcIndex].type);
+ moduleEnv_.funcs[funcIndex] = FuncDesc(
+ &moduleEnv_.types->type(funcTypeIndex).funcType(), funcTypeIndex);
+ }
+ for (const Func& func : funcDefs_) {
+ uint32_t funcIndex = funcImportMap_.count() + func.funcDefIndex();
+ uint32_t funcTypeIndex = func.sigIndex();
+ MOZ_ASSERT(!moduleEnv_.funcs[funcIndex].type);
+ moduleEnv_.funcs[funcIndex] = FuncDesc(
+ &moduleEnv_.types->type(funcTypeIndex).funcType(), funcTypeIndex);
+ }
+ for (const Export& exp : moduleEnv_.exports) {
+ if (exp.kind() != DefinitionKind::Function) {
+ continue;
+ }
+ uint32_t funcIndex = exp.funcIndex();
+ moduleEnv_.declareFuncExported(funcIndex, /* eager */ true,
+ /* canRefFunc */ false);
+ }
+
+ moduleEnv_.numFuncImports = funcImportMap_.count();
+
+ MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
+ if (!asmJSMetadata_->asmJSFuncNames.resize(funcImportMap_.count())) {
+ return nullptr;
+ }
+ for (const Func& func : funcDefs_) {
+ CacheableChars funcName = parserAtoms_.toNewUTF8CharsZ(fc_, func.name());
+ if (!funcName ||
+ !asmJSMetadata_->asmJSFuncNames.emplaceBack(std::move(funcName))) {
+ return nullptr;
+ }
+ }
+
+ uint32_t endBeforeCurly =
+ tokenStream().anyCharsAccess().currentToken().pos.end;
+ asmJSMetadata_->srcLength = endBeforeCurly - asmJSMetadata_->srcStart;
+
+ TokenPos pos;
+ MOZ_ALWAYS_TRUE(
+ tokenStream().peekTokenPos(&pos, TokenStreamShared::SlashIsRegExp));
+ uint32_t endAfterCurly = pos.end;
+ asmJSMetadata_->srcLengthWithRightBrace =
+ endAfterCurly - asmJSMetadata_->srcStart;
+
+ ScriptedCaller scriptedCaller;
+ if (parser_.ss->filename()) {
+ scriptedCaller.line = 0; // unused
+ scriptedCaller.filename = DuplicateString(parser_.ss->filename());
+ if (!scriptedCaller.filename) {
+ return nullptr;
+ }
+ }
+
+ // The default options are fine for asm.js
+ SharedCompileArgs args =
+ CompileArgs::buildForAsmJS(std::move(scriptedCaller));
+ if (!args) {
+ ReportOutOfMemory(fc_);
+ return nullptr;
+ }
+
+ uint32_t codeSectionSize = 0;
+ for (const Func& func : funcDefs_) {
+ codeSectionSize += func.bytes().length();
+ }
+
+ moduleEnv_.codeSection.emplace();
+ moduleEnv_.codeSection->start = 0;
+ moduleEnv_.codeSection->size = codeSectionSize;
+
+ // asm.js does not have any wasm bytecode to save; view-source is
+ // provided through the ScriptSource.
+ SharedBytes bytes = js_new<ShareableBytes>();
+ if (!bytes) {
+ ReportOutOfMemory(fc_);
+ return nullptr;
+ }
+
+ ModuleGenerator mg(*args, &moduleEnv_, &compilerEnv_, nullptr, nullptr,
+ nullptr);
+ if (!mg.init(asmJSMetadata_.get())) {
+ return nullptr;
+ }
+
+ for (Func& func : funcDefs_) {
+ if (!mg.compileFuncDef(funcImportMap_.count() + func.funcDefIndex(),
+ func.line(), func.bytes().begin(),
+ func.bytes().end(),
+ std::move(func.callSiteLineNums()))) {
+ return nullptr;
+ }
+ }
+
+ if (!mg.finishFuncDefs()) {
+ return nullptr;
+ }
+
+ return mg.finishModule(*bytes);
+ }
+};
+
+/*****************************************************************************/
+// Numeric literal utilities
+
+static bool IsNumericNonFloatLiteral(ParseNode* pn) {
+ // Note: '-' is never rolled into the number; numbers are always positive
+ // and negations must be applied manually.
+ return pn->isKind(ParseNodeKind::NumberExpr) ||
+ (pn->isKind(ParseNodeKind::NegExpr) &&
+ UnaryKid(pn)->isKind(ParseNodeKind::NumberExpr));
+}
+
+static bool IsCallToGlobal(ModuleValidatorShared& m, ParseNode* pn,
+ const ModuleValidatorShared::Global** global) {
+ if (!pn->isKind(ParseNodeKind::CallExpr)) {
+ return false;
+ }
+
+ ParseNode* callee = CallCallee(pn);
+ if (!callee->isKind(ParseNodeKind::Name)) {
+ return false;
+ }
+
+ *global = m.lookupGlobal(callee->as<NameNode>().name());
+ return !!*global;
+}
+
+static bool IsCoercionCall(ModuleValidatorShared& m, ParseNode* pn,
+ Type* coerceTo, ParseNode** coercedExpr) {
+ const ModuleValidatorShared::Global* global;
+ if (!IsCallToGlobal(m, pn, &global)) {
+ return false;
+ }
+
+ if (CallArgListLength(pn) != 1) {
+ return false;
+ }
+
+ if (coercedExpr) {
+ *coercedExpr = CallArgList(pn);
+ }
+
+ if (global->isMathFunction() &&
+ global->mathBuiltinFunction() == AsmJSMathBuiltin_fround) {
+ *coerceTo = Type::Float;
+ return true;
+ }
+
+ return false;
+}
+
+static bool IsFloatLiteral(ModuleValidatorShared& m, ParseNode* pn) {
+ ParseNode* coercedExpr;
+ Type coerceTo;
+ if (!IsCoercionCall(m, pn, &coerceTo, &coercedExpr)) {
+ return false;
+ }
+ // Don't fold into || to avoid clang/memcheck bug (bug 1077031).
+ if (!coerceTo.isFloat()) {
+ return false;
+ }
+ return IsNumericNonFloatLiteral(coercedExpr);
+}
+
+static bool IsNumericLiteral(ModuleValidatorShared& m, ParseNode* pn) {
+ return IsNumericNonFloatLiteral(pn) || IsFloatLiteral(m, pn);
+}
+
+// The JS grammar treats -42 as -(42) (i.e., with separate grammar productions
+// for the unary - and the literal 42). However, the asm.js spec recognizes
+// -42 (modulo parens, so -(42) and -((42))) as a single literal, so fold the
+// two potential parse nodes into a single double value.
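+//
+// For example, the sources "-42", "-(42)" and "-((42))" all arrive here as a
+// NegExpr wrapping a NumberExpr, and all produce the double value -42.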
+static double ExtractNumericNonFloatValue(ParseNode* pn,
+ ParseNode** out = nullptr) {
+ MOZ_ASSERT(IsNumericNonFloatLiteral(pn));
+
+ if (pn->isKind(ParseNodeKind::NegExpr)) {
+ pn = UnaryKid(pn);
+ if (out) {
+ *out = pn;
+ }
+ return -NumberNodeValue(pn);
+ }
+
+ return NumberNodeValue(pn);
+}
+
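+// Illustrative classification examples for the function below (following the
+// checks it performs): 42 is a Fixnum, -7 a NegativeInt, 0x80000000 a
+// BigUnsigned, 0.5 and -0 are Doubles, fround(0.5) is a Float, and
+// 0x100000000 is OutOfRangeInt.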
+static NumLit ExtractNumericLiteral(ModuleValidatorShared& m, ParseNode* pn) {
+ MOZ_ASSERT(IsNumericLiteral(m, pn));
+
+ if (pn->isKind(ParseNodeKind::CallExpr)) {
+ // Float literals are explicitly coerced and thus the coerced literal may be
+ // any valid (non-float) numeric literal.
+ MOZ_ASSERT(CallArgListLength(pn) == 1);
+ pn = CallArgList(pn);
+ double d = ExtractNumericNonFloatValue(pn);
+ return NumLit(NumLit::Float, DoubleValue(d));
+ }
+
+ double d = ExtractNumericNonFloatValue(pn, &pn);
+
+ // The asm.js spec syntactically distinguishes any literal containing a
+ // decimal point or the literal -0 as having double type.
+ if (NumberNodeHasFrac(pn) || IsNegativeZero(d)) {
+ return NumLit(NumLit::Double, DoubleValue(d));
+ }
+
+ // The syntactic checks above rule out these double values.
+ MOZ_ASSERT(!IsNegativeZero(d));
+ MOZ_ASSERT(!std::isnan(d));
+
+ // Although doubles can only *precisely* represent 53-bit integers, they
+ // can *imprecisely* represent integers much bigger than an int64_t.
+ // Furthermore, d may be inf or -inf. In both cases, casting to an int64_t
+ // is undefined, so test against the integer bounds using doubles.
+ if (d < double(INT32_MIN) || d > double(UINT32_MAX)) {
+ return NumLit(NumLit::OutOfRangeInt, UndefinedValue());
+ }
+
+  // With the above syntactic and range limitations, d is definitely an
+  // integer in the range [INT32_MIN, UINT32_MAX].
+ int64_t i64 = int64_t(d);
+ if (i64 >= 0) {
+ if (i64 <= INT32_MAX) {
+ return NumLit(NumLit::Fixnum, Int32Value(i64));
+ }
+ MOZ_ASSERT(i64 <= UINT32_MAX);
+ return NumLit(NumLit::BigUnsigned, Int32Value(uint32_t(i64)));
+ }
+ MOZ_ASSERT(i64 >= INT32_MIN);
+ return NumLit(NumLit::NegativeInt, Int32Value(i64));
+}
+
+static inline bool IsLiteralInt(const NumLit& lit, uint32_t* u32) {
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::BigUnsigned:
+ case NumLit::NegativeInt:
+ *u32 = lit.toUint32();
+ return true;
+ case NumLit::Double:
+ case NumLit::Float:
+ case NumLit::OutOfRangeInt:
+ return false;
+ }
+ MOZ_CRASH("Bad literal type");
+}
+
+static inline bool IsLiteralInt(ModuleValidatorShared& m, ParseNode* pn,
+ uint32_t* u32) {
+ return IsNumericLiteral(m, pn) &&
+ IsLiteralInt(ExtractNumericLiteral(m, pn), u32);
+}
+
+/*****************************************************************************/
+
+namespace {
+
+using LabelVector = Vector<TaggedParserAtomIndex, 4, SystemAllocPolicy>;
+
+class MOZ_STACK_CLASS FunctionValidatorShared {
+ public:
+ struct Local {
+ Type type;
+ unsigned slot;
+ Local(Type t, unsigned slot) : type(t), slot(slot) {
+ MOZ_ASSERT(type.isCanonicalValType());
+ }
+ };
+
+ protected:
+ using LocalMap =
+ HashMap<TaggedParserAtomIndex, Local, TaggedParserAtomIndexHasher>;
+ using LabelMap =
+ HashMap<TaggedParserAtomIndex, uint32_t, TaggedParserAtomIndexHasher>;
+
+ // This is also a ModuleValidator<Unit>& after the appropriate static_cast<>.
+ ModuleValidatorShared& m_;
+
+ FunctionNode* fn_;
+ Bytes bytes_;
+ Encoder encoder_;
+ Uint32Vector callSiteLineNums_;
+ LocalMap locals_;
+
+ // Labels
+ LabelMap breakLabels_;
+ LabelMap continueLabels_;
+ Uint32Vector breakableStack_;
+ Uint32Vector continuableStack_;
+ uint32_t blockDepth_;
+
+ bool hasAlreadyReturned_;
+ Maybe<ValType> ret_;
+
+ private:
+ FunctionValidatorShared(ModuleValidatorShared& m, FunctionNode* fn,
+ FrontendContext* fc)
+ : m_(m),
+ fn_(fn),
+ encoder_(bytes_),
+ locals_(fc),
+ breakLabels_(fc),
+ continueLabels_(fc),
+ blockDepth_(0),
+ hasAlreadyReturned_(false) {}
+
+ protected:
+ template <typename Unit>
+ FunctionValidatorShared(ModuleValidator<Unit>& m, FunctionNode* fn,
+ FrontendContext* fc)
+ : FunctionValidatorShared(static_cast<ModuleValidatorShared&>(m), fn,
+ fc) {}
+
+ public:
+ ModuleValidatorShared& m() const { return m_; }
+
+ FrontendContext* fc() const { return m_.fc(); }
+ FunctionNode* fn() const { return fn_; }
+
+ void define(ModuleValidatorShared::Func* func, unsigned line) {
+ MOZ_ASSERT(!blockDepth_);
+ MOZ_ASSERT(breakableStack_.empty());
+ MOZ_ASSERT(continuableStack_.empty());
+ MOZ_ASSERT(breakLabels_.empty());
+ MOZ_ASSERT(continueLabels_.empty());
+ func->define(fn_, line, std::move(bytes_), std::move(callSiteLineNums_));
+ }
+
+ bool fail(ParseNode* pn, const char* str) { return m_.fail(pn, str); }
+
+ bool failf(ParseNode* pn, const char* fmt, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ m_.failfVAOffset(pn->pn_pos.begin, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failName(ParseNode* pn, const char* fmt, TaggedParserAtomIndex name) {
+ return m_.failName(pn, fmt, name);
+ }
+
+ /***************************************************** Local scope setup */
+
+ bool addLocal(ParseNode* pn, TaggedParserAtomIndex name, Type type) {
+ LocalMap::AddPtr p = locals_.lookupForAdd(name);
+ if (p) {
+ return failName(pn, "duplicate local name '%s' not allowed", name);
+ }
+ return locals_.add(p, name, Local(type, locals_.count()));
+ }
+
+ /****************************** For consistency of returns in a function */
+
+ bool hasAlreadyReturned() const { return hasAlreadyReturned_; }
+
+ Maybe<ValType> returnedType() const { return ret_; }
+
+ void setReturnedType(const Maybe<ValType>& ret) {
+ MOZ_ASSERT(!hasAlreadyReturned_);
+ ret_ = ret;
+ hasAlreadyReturned_ = true;
+ }
+
+ /**************************************************************** Labels */
+ private:
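+  // Write a br/br_if targeting the block whose absolute depth (as recorded on
+  // breakableStack_/continuableStack_) is `absolute`. Wasm branch operands are
+  // relative to the innermost enclosing block, hence the
+  // blockDepth_ - 1 - absolute conversion.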
+ bool writeBr(uint32_t absolute, Op op = Op::Br) {
+ MOZ_ASSERT(op == Op::Br || op == Op::BrIf);
+ MOZ_ASSERT(absolute < blockDepth_);
+ return encoder().writeOp(op) &&
+ encoder().writeVarU32(blockDepth_ - 1 - absolute);
+ }
+ void removeLabel(TaggedParserAtomIndex label, LabelMap* map) {
+ LabelMap::Ptr p = map->lookup(label);
+ MOZ_ASSERT(p);
+ map->remove(p);
+ }
+
+ public:
+ bool pushBreakableBlock() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ breakableStack_.append(blockDepth_++);
+ }
+ bool popBreakableBlock() {
+ MOZ_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End);
+ }
+
+ bool pushUnbreakableBlock(const LabelVector* labels = nullptr) {
+ if (labels) {
+ for (TaggedParserAtomIndex label : *labels) {
+ if (!breakLabels_.putNew(label, blockDepth_)) {
+ return false;
+ }
+ }
+ }
+ blockDepth_++;
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid));
+ }
+ bool popUnbreakableBlock(const LabelVector* labels = nullptr) {
+ if (labels) {
+ for (TaggedParserAtomIndex label : *labels) {
+ removeLabel(label, &breakLabels_);
+ }
+ }
+ --blockDepth_;
+ return encoder().writeOp(Op::End);
+ }
+
+ bool pushContinuableBlock() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ continuableStack_.append(blockDepth_++);
+ }
+ bool popContinuableBlock() {
+ MOZ_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End);
+ }
+
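+  // A loop is encoded as a void block wrapping a void loop: `break` branches
+  // to the outer block (recorded on breakableStack_) and `continue` branches
+  // to the loop header (recorded on continuableStack_), which is why popLoop
+  // emits two End opcodes.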
+ bool pushLoop() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ encoder().writeOp(Op::Loop) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ breakableStack_.append(blockDepth_++) &&
+ continuableStack_.append(blockDepth_++);
+ }
+ bool popLoop() {
+ MOZ_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_);
+ MOZ_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End) && encoder().writeOp(Op::End);
+ }
+
+ bool pushIf(size_t* typeAt) {
+ ++blockDepth_;
+ return encoder().writeOp(Op::If) && encoder().writePatchableFixedU7(typeAt);
+ }
+ bool switchToElse() {
+ MOZ_ASSERT(blockDepth_ > 0);
+ return encoder().writeOp(Op::Else);
+ }
+ void setIfType(size_t typeAt, TypeCode type) {
+ encoder().patchFixedU7(typeAt, uint8_t(type));
+ }
+ bool popIf() {
+ MOZ_ASSERT(blockDepth_ > 0);
+ --blockDepth_;
+ return encoder().writeOp(Op::End);
+ }
+ bool popIf(size_t typeAt, TypeCode type) {
+ MOZ_ASSERT(blockDepth_ > 0);
+ --blockDepth_;
+ if (!encoder().writeOp(Op::End)) {
+ return false;
+ }
+
+ setIfType(typeAt, type);
+ return true;
+ }
+
+ bool writeBreakIf() { return writeBr(breakableStack_.back(), Op::BrIf); }
+ bool writeContinueIf() { return writeBr(continuableStack_.back(), Op::BrIf); }
+ bool writeUnlabeledBreakOrContinue(bool isBreak) {
+ return writeBr(isBreak ? breakableStack_.back() : continuableStack_.back());
+ }
+ bool writeContinue() { return writeBr(continuableStack_.back()); }
+
+ bool addLabels(const LabelVector& labels, uint32_t relativeBreakDepth,
+ uint32_t relativeContinueDepth) {
+ for (TaggedParserAtomIndex label : labels) {
+ if (!breakLabels_.putNew(label, blockDepth_ + relativeBreakDepth)) {
+ return false;
+ }
+ if (!continueLabels_.putNew(label, blockDepth_ + relativeContinueDepth)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ void removeLabels(const LabelVector& labels) {
+ for (TaggedParserAtomIndex label : labels) {
+ removeLabel(label, &breakLabels_);
+ removeLabel(label, &continueLabels_);
+ }
+ }
+ bool writeLabeledBreakOrContinue(TaggedParserAtomIndex label, bool isBreak) {
+ LabelMap& map = isBreak ? breakLabels_ : continueLabels_;
+ if (LabelMap::Ptr p = map.lookup(label)) {
+ return writeBr(p->value());
+ }
+ MOZ_CRASH("nonexistent label");
+ }
+
+ /*************************************************** Read-only interface */
+
+ const Local* lookupLocal(TaggedParserAtomIndex name) const {
+ if (auto p = locals_.lookup(name)) {
+ return &p->value();
+ }
+ return nullptr;
+ }
+
+ const ModuleValidatorShared::Global* lookupGlobal(
+ TaggedParserAtomIndex name) const {
+ if (locals_.has(name)) {
+ return nullptr;
+ }
+ return m_.lookupGlobal(name);
+ }
+
+ size_t numLocals() const { return locals_.count(); }
+
+ /**************************************************** Encoding interface */
+
+ Encoder& encoder() { return encoder_; }
+
+ [[nodiscard]] bool writeInt32Lit(int32_t i32) {
+ return encoder().writeOp(Op::I32Const) && encoder().writeVarS32(i32);
+ }
+ [[nodiscard]] bool writeConstExpr(const NumLit& lit) {
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return writeInt32Lit(lit.toInt32());
+ case NumLit::Float:
+ return encoder().writeOp(Op::F32Const) &&
+ encoder().writeFixedF32(lit.toFloat());
+ case NumLit::Double:
+ return encoder().writeOp(Op::F64Const) &&
+ encoder().writeFixedF64(lit.toDouble());
+ case NumLit::OutOfRangeInt:
+ break;
+ }
+ MOZ_CRASH("unexpected literal type");
+ }
+};
+
+// Encapsulates the building of an asm bytecode function from asm.js function
+// source code, packing the asm.js code into the asm bytecode form that can
+// be decoded and compiled with a FunctionCompiler.
+template <typename Unit>
+class MOZ_STACK_CLASS FunctionValidator : public FunctionValidatorShared {
+ public:
+ FunctionValidator(ModuleValidator<Unit>& m, FunctionNode* fn)
+ : FunctionValidatorShared(m, fn, m.fc()) {}
+
+ public:
+ ModuleValidator<Unit>& m() const {
+ return static_cast<ModuleValidator<Unit>&>(FunctionValidatorShared::m());
+ }
+
+ [[nodiscard]] bool writeCall(ParseNode* pn, Op op) {
+ MOZ_ASSERT(op == Op::Call);
+ if (!encoder().writeOp(op)) {
+ return false;
+ }
+
+ return appendCallSiteLineNumber(pn);
+ }
+ [[nodiscard]] bool writeCall(ParseNode* pn, MozOp op) {
+ MOZ_ASSERT(op == MozOp::OldCallDirect || op == MozOp::OldCallIndirect);
+ if (!encoder().writeOp(op)) {
+ return false;
+ }
+
+ return appendCallSiteLineNumber(pn);
+ }
+ [[nodiscard]] bool prepareCall(ParseNode* pn) {
+ return appendCallSiteLineNumber(pn);
+ }
+
+ private:
+ [[nodiscard]] bool appendCallSiteLineNumber(ParseNode* node) {
+ const TokenStreamAnyChars& anyChars = m().tokenStream().anyCharsAccess();
+ auto lineToken = anyChars.lineToken(node->pn_pos.begin);
+ uint32_t lineNumber = anyChars.lineNumber(lineToken);
+ if (lineNumber > CallSiteDesc::MAX_LINE_OR_BYTECODE_VALUE) {
+ return fail(node, "line number exceeding implementation limits");
+ }
+ return callSiteLineNums_.append(lineNumber);
+ }
+};
+
+} /* anonymous namespace */
+
+/*****************************************************************************/
+// asm.js type-checking and code-generation algorithm
+
+static bool CheckIdentifier(ModuleValidatorShared& m, ParseNode* usepn,
+ TaggedParserAtomIndex name) {
+ if (name == TaggedParserAtomIndex::WellKnown::arguments() ||
+ name == TaggedParserAtomIndex::WellKnown::eval()) {
+ return m.failName(usepn, "'%s' is not an allowed identifier", name);
+ }
+ return true;
+}
+
+static bool CheckModuleLevelName(ModuleValidatorShared& m, ParseNode* usepn,
+ TaggedParserAtomIndex name) {
+ if (!CheckIdentifier(m, usepn, name)) {
+ return false;
+ }
+
+ if (name == m.moduleFunctionName() || name == m.globalArgumentName() ||
+ name == m.importArgumentName() || name == m.bufferArgumentName() ||
+ m.lookupGlobal(name)) {
+ return m.failName(usepn, "duplicate name '%s' not allowed", name);
+ }
+
+ return true;
+}
+
+static bool CheckFunctionHead(ModuleValidatorShared& m, FunctionNode* funNode) {
+ FunctionBox* funbox = funNode->funbox();
+ MOZ_ASSERT(!funbox->hasExprBody());
+
+ if (funbox->hasRest()) {
+ return m.fail(funNode, "rest args not allowed");
+ }
+ if (funbox->hasDestructuringArgs) {
+ return m.fail(funNode, "destructuring args not allowed");
+ }
+ return true;
+}
+
+static bool CheckArgument(ModuleValidatorShared& m, ParseNode* arg,
+ TaggedParserAtomIndex* name) {
+ *name = TaggedParserAtomIndex::null();
+
+ if (!arg->isKind(ParseNodeKind::Name)) {
+ return m.fail(arg, "argument is not a plain name");
+ }
+
+ TaggedParserAtomIndex argName = arg->as<NameNode>().name();
+ if (!CheckIdentifier(m, arg, argName)) {
+ return false;
+ }
+
+ *name = argName;
+ return true;
+}
+
+static bool CheckModuleArgument(ModuleValidatorShared& m, ParseNode* arg,
+ TaggedParserAtomIndex* name) {
+ if (!CheckArgument(m, arg, name)) {
+ return false;
+ }
+
+ if (!CheckModuleLevelName(m, arg, *name)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool CheckModuleArguments(ModuleValidatorShared& m,
+ FunctionNode* funNode) {
+ unsigned numFormals;
+ ParseNode* arg1 = FunctionFormalParametersList(funNode, &numFormals);
+ ParseNode* arg2 = arg1 ? NextNode(arg1) : nullptr;
+ ParseNode* arg3 = arg2 ? NextNode(arg2) : nullptr;
+
+ if (numFormals > 3) {
+    return m.fail(funNode, "asm.js modules take at most 3 arguments");
+ }
+
+ TaggedParserAtomIndex arg1Name;
+ if (arg1 && !CheckModuleArgument(m, arg1, &arg1Name)) {
+ return false;
+ }
+ if (!m.initGlobalArgumentName(arg1Name)) {
+ return false;
+ }
+
+ TaggedParserAtomIndex arg2Name;
+ if (arg2 && !CheckModuleArgument(m, arg2, &arg2Name)) {
+ return false;
+ }
+ if (!m.initImportArgumentName(arg2Name)) {
+ return false;
+ }
+
+ TaggedParserAtomIndex arg3Name;
+ if (arg3 && !CheckModuleArgument(m, arg3, &arg3Name)) {
+ return false;
+ }
+ if (!m.initBufferArgumentName(arg3Name)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool CheckPrecedingStatements(ModuleValidatorShared& m,
+ ParseNode* stmtList) {
+ MOZ_ASSERT(stmtList->isKind(ParseNodeKind::StatementList));
+
+ ParseNode* stmt = ListHead(stmtList);
+  for (unsigned i = 0, n = ListLength(stmtList); i < n;
+       i++, stmt = NextNode(stmt)) {
+ if (!IsIgnoredDirective(stmt)) {
+ return m.fail(stmt, "invalid asm.js statement");
+ }
+ }
+
+ return true;
+}
+
+static bool CheckGlobalVariableInitConstant(ModuleValidatorShared& m,
+ TaggedParserAtomIndex varName,
+ ParseNode* initNode, bool isConst) {
+ NumLit lit = ExtractNumericLiteral(m, initNode);
+ if (!lit.valid()) {
+ return m.fail(initNode,
+ "global initializer is out of representable integer range");
+ }
+
+ Type canonicalType = Type::canonicalize(Type::lit(lit));
+ if (!canonicalType.isGlobalVarType()) {
+ return m.fail(initNode, "global variable type not allowed");
+ }
+
+ return m.addGlobalVarInit(varName, lit, canonicalType, isConst);
+}
+
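+// asm.js spells argument/return type annotations with one of three coercion
+// forms: `x|0` for int, `+x` for double, and `fround(x)` for float, e.g.
+//
+//   function f(i, d, x) { i = i|0; d = +d; x = fround(x); ... }
+//
+// The helper below recognizes these shapes and, when requested, also returns
+// the coerced subexpression.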
+static bool CheckTypeAnnotation(ModuleValidatorShared& m,
+ ParseNode* coercionNode, Type* coerceTo,
+ ParseNode** coercedExpr = nullptr) {
+ switch (coercionNode->getKind()) {
+ case ParseNodeKind::BitOrExpr: {
+ ParseNode* rhs = BitwiseRight(coercionNode);
+ uint32_t i;
+ if (!IsLiteralInt(m, rhs, &i) || i != 0) {
+ return m.fail(rhs, "must use |0 for argument/return coercion");
+ }
+ *coerceTo = Type::Int;
+ if (coercedExpr) {
+ *coercedExpr = BitwiseLeft(coercionNode);
+ }
+ return true;
+ }
+ case ParseNodeKind::PosExpr: {
+ *coerceTo = Type::Double;
+ if (coercedExpr) {
+ *coercedExpr = UnaryKid(coercionNode);
+ }
+ return true;
+ }
+ case ParseNodeKind::CallExpr: {
+ if (IsCoercionCall(m, coercionNode, coerceTo, coercedExpr)) {
+ return true;
+ }
+ break;
+ }
+ default:;
+ }
+
+ return m.fail(coercionNode, "must be of the form +x, x|0 or fround(x)");
+}
+
+static bool CheckGlobalVariableInitImport(ModuleValidatorShared& m,
+ TaggedParserAtomIndex varName,
+ ParseNode* initNode, bool isConst) {
+ Type coerceTo;
+ ParseNode* coercedExpr;
+ if (!CheckTypeAnnotation(m, initNode, &coerceTo, &coercedExpr)) {
+ return false;
+ }
+
+ if (!coercedExpr->isKind(ParseNodeKind::DotExpr)) {
+ return m.failName(coercedExpr, "invalid import expression for global '%s'",
+ varName);
+ }
+
+ if (!coerceTo.isGlobalVarType()) {
+ return m.fail(initNode, "global variable type not allowed");
+ }
+
+ ParseNode* base = DotBase(coercedExpr);
+ TaggedParserAtomIndex field = DotMember(coercedExpr);
+
+ TaggedParserAtomIndex importName = m.importArgumentName();
+ if (!importName) {
+ return m.fail(coercedExpr,
+ "cannot import without an asm.js foreign parameter");
+ }
+ if (!IsUseOfName(base, importName)) {
+ return m.failName(coercedExpr, "base of import expression must be '%s'",
+ importName);
+ }
+
+ return m.addGlobalVarImport(varName, field, coerceTo, isConst);
+}
+
+static bool IsArrayViewCtorName(ModuleValidatorShared& m,
+ TaggedParserAtomIndex name,
+ Scalar::Type* type) {
+ if (name == TaggedParserAtomIndex::WellKnown::Int8Array()) {
+ *type = Scalar::Int8;
+ } else if (name == TaggedParserAtomIndex::WellKnown::Uint8Array()) {
+ *type = Scalar::Uint8;
+ } else if (name == TaggedParserAtomIndex::WellKnown::Int16Array()) {
+ *type = Scalar::Int16;
+ } else if (name == TaggedParserAtomIndex::WellKnown::Uint16Array()) {
+ *type = Scalar::Uint16;
+ } else if (name == TaggedParserAtomIndex::WellKnown::Int32Array()) {
+ *type = Scalar::Int32;
+ } else if (name == TaggedParserAtomIndex::WellKnown::Uint32Array()) {
+ *type = Scalar::Uint32;
+ } else if (name == TaggedParserAtomIndex::WellKnown::Float32Array()) {
+ *type = Scalar::Float32;
+ } else if (name == TaggedParserAtomIndex::WellKnown::Float64Array()) {
+ *type = Scalar::Float64;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+static bool CheckNewArrayViewArgs(ModuleValidatorShared& m, ParseNode* newExpr,
+ TaggedParserAtomIndex bufferName) {
+ ParseNode* ctorExpr = BinaryLeft(newExpr);
+ ParseNode* ctorArgs = BinaryRight(newExpr);
+ ParseNode* bufArg = ListHead(ctorArgs);
+ if (!bufArg || NextNode(bufArg) != nullptr) {
+ return m.fail(ctorExpr,
+ "array view constructor takes exactly one argument");
+ }
+
+ if (!IsUseOfName(bufArg, bufferName)) {
+ return m.failName(bufArg, "argument to array view constructor must be '%s'",
+ bufferName);
+ }
+
+ return true;
+}
+
+static bool CheckNewArrayView(ModuleValidatorShared& m,
+ TaggedParserAtomIndex varName,
+ ParseNode* newExpr) {
+ TaggedParserAtomIndex globalName = m.globalArgumentName();
+ if (!globalName) {
+ return m.fail(
+ newExpr, "cannot create array view without an asm.js global parameter");
+ }
+
+ TaggedParserAtomIndex bufferName = m.bufferArgumentName();
+ if (!bufferName) {
+ return m.fail(newExpr,
+ "cannot create array view without an asm.js heap parameter");
+ }
+
+ ParseNode* ctorExpr = BinaryLeft(newExpr);
+
+ TaggedParserAtomIndex field;
+ Scalar::Type type;
+ if (ctorExpr->isKind(ParseNodeKind::DotExpr)) {
+ ParseNode* base = DotBase(ctorExpr);
+
+ if (!IsUseOfName(base, globalName)) {
+      return m.failName(base, "expecting '%s.*Array'", globalName);
+ }
+
+ field = DotMember(ctorExpr);
+ if (!IsArrayViewCtorName(m, field, &type)) {
+ return m.fail(ctorExpr, "could not match typed array name");
+ }
+ } else {
+ if (!ctorExpr->isKind(ParseNodeKind::Name)) {
+ return m.fail(ctorExpr,
+ "expecting name of imported array view constructor");
+ }
+
+ TaggedParserAtomIndex globalName = ctorExpr->as<NameNode>().name();
+ const ModuleValidatorShared::Global* global = m.lookupGlobal(globalName);
+ if (!global) {
+ return m.failName(ctorExpr, "%s not found in module global scope",
+ globalName);
+ }
+
+ if (global->which() != ModuleValidatorShared::Global::ArrayViewCtor) {
+ return m.failName(ctorExpr,
+ "%s must be an imported array view constructor",
+ globalName);
+ }
+
+ type = global->viewType();
+ }
+
+ if (!CheckNewArrayViewArgs(m, newExpr, bufferName)) {
+ return false;
+ }
+
+ return m.addArrayView(varName, type, field);
+}
+
+static bool CheckGlobalMathImport(ModuleValidatorShared& m, ParseNode* initNode,
+ TaggedParserAtomIndex varName,
+ TaggedParserAtomIndex field) {
+ // Math builtin, with the form glob.Math.[[builtin]]
+ ModuleValidatorShared::MathBuiltin mathBuiltin;
+ if (!m.lookupStandardLibraryMathName(field, &mathBuiltin)) {
+ return m.failName(initNode, "'%s' is not a standard Math builtin", field);
+ }
+
+ switch (mathBuiltin.kind) {
+ case ModuleValidatorShared::MathBuiltin::Function:
+ return m.addMathBuiltinFunction(varName, mathBuiltin.u.func, field);
+ case ModuleValidatorShared::MathBuiltin::Constant:
+ return m.addMathBuiltinConstant(varName, mathBuiltin.u.cst, field);
+ default:
+ break;
+ }
+ MOZ_CRASH("unexpected or uninitialized math builtin type");
+}
+
+static bool CheckGlobalDotImport(ModuleValidatorShared& m,
+ TaggedParserAtomIndex varName,
+ ParseNode* initNode) {
+ ParseNode* base = DotBase(initNode);
+ TaggedParserAtomIndex field = DotMember(initNode);
+
+ if (base->isKind(ParseNodeKind::DotExpr)) {
+ ParseNode* global = DotBase(base);
+ TaggedParserAtomIndex math = DotMember(base);
+
+ TaggedParserAtomIndex globalName = m.globalArgumentName();
+ if (!globalName) {
+ return m.fail(
+ base, "import statement requires the module have a stdlib parameter");
+ }
+
+ if (!IsUseOfName(global, globalName)) {
+ if (global->isKind(ParseNodeKind::DotExpr)) {
+ return m.failName(base,
+ "imports can have at most two dot accesses "
+ "(e.g. %s.Math.sin)",
+ globalName);
+ }
+ return m.failName(base, "expecting %s.*", globalName);
+ }
+
+ if (math == TaggedParserAtomIndex::WellKnown::Math()) {
+ return CheckGlobalMathImport(m, initNode, varName, field);
+ }
+ return m.failName(base, "expecting %s.Math", globalName);
+ }
+
+ if (!base->isKind(ParseNodeKind::Name)) {
+ return m.fail(base, "expected name of variable or parameter");
+ }
+
+ auto baseName = base->as<NameNode>().name();
+ if (baseName == m.globalArgumentName()) {
+ if (field == TaggedParserAtomIndex::WellKnown::NaN()) {
+ return m.addGlobalConstant(varName, GenericNaN(), field);
+ }
+ if (field == TaggedParserAtomIndex::WellKnown::Infinity()) {
+ return m.addGlobalConstant(varName, PositiveInfinity<double>(), field);
+ }
+
+ Scalar::Type type;
+ if (IsArrayViewCtorName(m, field, &type)) {
+ return m.addArrayViewCtor(varName, type, field);
+ }
+
+ return m.failName(
+ initNode, "'%s' is not a standard constant or typed array name", field);
+ }
+
+ if (baseName != m.importArgumentName()) {
+ return m.fail(base, "expected global or import name");
+ }
+
+ return m.addFFI(varName, field);
+}
+
+static bool CheckModuleGlobal(ModuleValidatorShared& m, ParseNode* decl,
+ bool isConst) {
+ if (!decl->isKind(ParseNodeKind::AssignExpr)) {
+ return m.fail(decl, "module import needs initializer");
+ }
+ AssignmentNode* assignNode = &decl->as<AssignmentNode>();
+
+ ParseNode* var = assignNode->left();
+
+ if (!var->isKind(ParseNodeKind::Name)) {
+ return m.fail(var, "import variable is not a plain name");
+ }
+
+ TaggedParserAtomIndex varName = var->as<NameNode>().name();
+ if (!CheckModuleLevelName(m, var, varName)) {
+ return false;
+ }
+
+ ParseNode* initNode = assignNode->right();
+
+ if (IsNumericLiteral(m, initNode)) {
+ return CheckGlobalVariableInitConstant(m, varName, initNode, isConst);
+ }
+
+ if (initNode->isKind(ParseNodeKind::BitOrExpr) ||
+ initNode->isKind(ParseNodeKind::PosExpr) ||
+ initNode->isKind(ParseNodeKind::CallExpr)) {
+ return CheckGlobalVariableInitImport(m, varName, initNode, isConst);
+ }
+
+ if (initNode->isKind(ParseNodeKind::NewExpr)) {
+ return CheckNewArrayView(m, varName, initNode);
+ }
+
+ if (initNode->isKind(ParseNodeKind::DotExpr)) {
+ return CheckGlobalDotImport(m, varName, initNode);
+ }
+
+ return m.fail(initNode, "unsupported import expression");
+}
+
+template <typename Unit>
+static bool CheckModuleProcessingDirectives(ModuleValidator<Unit>& m) {
+ auto& ts = m.parser().tokenStream;
+ while (true) {
+ bool matched;
+ if (!ts.matchToken(&matched, TokenKind::String,
+ TokenStreamShared::SlashIsRegExp)) {
+ return false;
+ }
+ if (!matched) {
+ return true;
+ }
+
+ if (!IsIgnoredDirectiveName(ts.anyCharsAccess().currentToken().atom())) {
+ return m.failCurrentOffset("unsupported processing directive");
+ }
+
+ TokenKind tt;
+ if (!ts.getToken(&tt)) {
+ return false;
+ }
+ if (tt != TokenKind::Semi) {
+ return m.failCurrentOffset("expected semicolon after string literal");
+ }
+ }
+}
+
+template <typename Unit>
+static bool CheckModuleGlobals(ModuleValidator<Unit>& m) {
+ while (true) {
+ ParseNode* varStmt;
+ if (!ParseVarOrConstStatement(m.parser(), &varStmt)) {
+ return false;
+ }
+ if (!varStmt) {
+ break;
+ }
+ for (ParseNode* var = VarListHead(varStmt); var; var = NextNode(var)) {
+ if (!CheckModuleGlobal(m, var,
+ varStmt->isKind(ParseNodeKind::ConstDecl))) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool ArgFail(FunctionValidatorShared& f, TaggedParserAtomIndex argName,
+ ParseNode* stmt) {
+ return f.failName(stmt,
+ "expecting argument type declaration for '%s' of the "
+ "form 'arg = arg|0' or 'arg = +arg' or 'arg = fround(arg)'",
+ argName);
+}
+
+static bool CheckArgumentType(FunctionValidatorShared& f, ParseNode* stmt,
+ TaggedParserAtomIndex name, Type* type) {
+ if (!stmt || !IsExpressionStatement(stmt)) {
+ return ArgFail(f, name, stmt ? stmt : f.fn());
+ }
+
+ ParseNode* initNode = ExpressionStatementExpr(stmt);
+ if (!initNode->isKind(ParseNodeKind::AssignExpr)) {
+ return ArgFail(f, name, stmt);
+ }
+
+ ParseNode* argNode = BinaryLeft(initNode);
+ ParseNode* coercionNode = BinaryRight(initNode);
+
+ if (!IsUseOfName(argNode, name)) {
+ return ArgFail(f, name, stmt);
+ }
+
+ ParseNode* coercedExpr;
+ if (!CheckTypeAnnotation(f.m(), coercionNode, type, &coercedExpr)) {
+ return false;
+ }
+
+ if (!type->isArgType()) {
+ return f.failName(stmt, "invalid type for argument '%s'", name);
+ }
+
+ if (!IsUseOfName(coercedExpr, name)) {
+ return ArgFail(f, name, stmt);
+ }
+
+ return true;
+}
+
+static bool CheckProcessingDirectives(ModuleValidatorShared& m,
+ ParseNode** stmtIter) {
+ ParseNode* stmt = *stmtIter;
+
+ while (stmt && IsIgnoredDirective(stmt)) {
+ stmt = NextNode(stmt);
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+static bool CheckArguments(FunctionValidatorShared& f, ParseNode** stmtIter,
+ ValTypeVector* argTypes) {
+ ParseNode* stmt = *stmtIter;
+
+ unsigned numFormals;
+ ParseNode* argpn = FunctionFormalParametersList(f.fn(), &numFormals);
+
+ for (unsigned i = 0; i < numFormals;
+ i++, argpn = NextNode(argpn), stmt = NextNode(stmt)) {
+ TaggedParserAtomIndex name;
+ if (!CheckArgument(f.m(), argpn, &name)) {
+ return false;
+ }
+
+ Type type;
+ if (!CheckArgumentType(f, stmt, name, &type)) {
+ return false;
+ }
+
+ if (!argTypes->append(type.canonicalToValType())) {
+ return false;
+ }
+
+ if (!f.addLocal(argpn, name, type)) {
+ return false;
+ }
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+static bool IsLiteralOrConst(FunctionValidatorShared& f, ParseNode* pn,
+ NumLit* lit) {
+ if (pn->isKind(ParseNodeKind::Name)) {
+ const ModuleValidatorShared::Global* global =
+ f.lookupGlobal(pn->as<NameNode>().name());
+ if (!global ||
+ global->which() != ModuleValidatorShared::Global::ConstantLiteral) {
+ return false;
+ }
+
+ *lit = global->constLiteralValue();
+ return true;
+ }
+
+ if (!IsNumericLiteral(f.m(), pn)) {
+ return false;
+ }
+
+ *lit = ExtractNumericLiteral(f.m(), pn);
+ return true;
+}
+
+static bool CheckFinalReturn(FunctionValidatorShared& f,
+ ParseNode* lastNonEmptyStmt) {
+ if (!f.encoder().writeOp(Op::End)) {
+ return false;
+ }
+
+ if (!f.hasAlreadyReturned()) {
+ f.setReturnedType(Nothing());
+ return true;
+ }
+
+ if (!lastNonEmptyStmt->isKind(ParseNodeKind::ReturnStmt) &&
+ f.returnedType()) {
+ return f.fail(lastNonEmptyStmt,
+ "void incompatible with previous return type");
+ }
+
+ return true;
+}
+
+static bool CheckVariable(FunctionValidatorShared& f, ParseNode* decl,
+ ValTypeVector* types, Vector<NumLit>* inits) {
+ if (!decl->isKind(ParseNodeKind::AssignExpr)) {
+ return f.failName(
+ decl, "var '%s' needs explicit type declaration via an initial value",
+ decl->as<NameNode>().name());
+ }
+ AssignmentNode* assignNode = &decl->as<AssignmentNode>();
+
+ ParseNode* var = assignNode->left();
+
+ if (!var->isKind(ParseNodeKind::Name)) {
+ return f.fail(var, "local variable is not a plain name");
+ }
+
+ TaggedParserAtomIndex name = var->as<NameNode>().name();
+
+ if (!CheckIdentifier(f.m(), var, name)) {
+ return false;
+ }
+
+ ParseNode* initNode = assignNode->right();
+
+ NumLit lit;
+ if (!IsLiteralOrConst(f, initNode, &lit)) {
+ return f.failName(
+ var, "var '%s' initializer must be literal or const literal", name);
+ }
+
+ if (!lit.valid()) {
+ return f.failName(var, "var '%s' initializer out of range", name);
+ }
+
+ Type type = Type::canonicalize(Type::lit(lit));
+
+ return f.addLocal(var, name, type) &&
+ types->append(type.canonicalToValType()) && inits->append(lit);
+}
+
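+// Validates the leading `var` statements of a function body (e.g.
+// `var i = 0, d = 0.0, f = fround(0);`) and encodes them: the local
+// declarations are emitted via EncodeLocalEntries, and any initializer whose
+// bits are not all zero is materialized with a const followed by local.set.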
+static bool CheckVariables(FunctionValidatorShared& f, ParseNode** stmtIter) {
+ ParseNode* stmt = *stmtIter;
+
+ uint32_t firstVar = f.numLocals();
+
+ ValTypeVector types;
+ Vector<NumLit> inits(f.fc());
+
+ for (; stmt && stmt->isKind(ParseNodeKind::VarStmt);
+ stmt = NextNonEmptyStatement(stmt)) {
+ for (ParseNode* var = VarListHead(stmt); var; var = NextNode(var)) {
+ if (!CheckVariable(f, var, &types, &inits)) {
+ return false;
+ }
+ }
+ }
+
+ MOZ_ASSERT(f.encoder().empty());
+
+ if (!EncodeLocalEntries(f.encoder(), types)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < inits.length(); i++) {
+ NumLit lit = inits[i];
+ if (lit.isZeroBits()) {
+ continue;
+ }
+ if (!f.writeConstExpr(lit)) {
+ return false;
+ }
+ if (!f.encoder().writeOp(Op::LocalSet)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(firstVar + i)) {
+ return false;
+ }
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckExpr(FunctionValidator<Unit>& f, ParseNode* expr, Type* type);
+
+template <typename Unit>
+static bool CheckNumericLiteral(FunctionValidator<Unit>& f, ParseNode* num,
+ Type* type) {
+ NumLit lit = ExtractNumericLiteral(f.m(), num);
+ if (!lit.valid()) {
+ return f.fail(num, "numeric literal out of representable integer range");
+ }
+ *type = Type::lit(lit);
+ return f.writeConstExpr(lit);
+}
+
+static bool CheckVarRef(FunctionValidatorShared& f, ParseNode* varRef,
+ Type* type) {
+ TaggedParserAtomIndex name = varRef->as<NameNode>().name();
+
+ if (const FunctionValidatorShared::Local* local = f.lookupLocal(name)) {
+ if (!f.encoder().writeOp(Op::LocalGet)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(local->slot)) {
+ return false;
+ }
+ *type = local->type;
+ return true;
+ }
+
+ if (const ModuleValidatorShared::Global* global = f.lookupGlobal(name)) {
+ switch (global->which()) {
+ case ModuleValidatorShared::Global::ConstantLiteral:
+ *type = global->varOrConstType();
+ return f.writeConstExpr(global->constLiteralValue());
+ case ModuleValidatorShared::Global::ConstantImport:
+ case ModuleValidatorShared::Global::Variable: {
+ *type = global->varOrConstType();
+ return f.encoder().writeOp(Op::GlobalGet) &&
+ f.encoder().writeVarU32(global->varOrConstIndex());
+ }
+ case ModuleValidatorShared::Global::Function:
+ case ModuleValidatorShared::Global::FFI:
+ case ModuleValidatorShared::Global::MathBuiltinFunction:
+ case ModuleValidatorShared::Global::Table:
+ case ModuleValidatorShared::Global::ArrayView:
+ case ModuleValidatorShared::Global::ArrayViewCtor:
+ break;
+ }
+ return f.failName(varRef,
+ "'%s' may not be accessed by ordinary expressions", name);
+ }
+
+ return f.failName(varRef, "'%s' not found in local or asm.js module scope",
+ name);
+}
+
+static inline bool IsLiteralOrConstInt(FunctionValidatorShared& f,
+ ParseNode* pn, uint32_t* u32) {
+ NumLit lit;
+ if (!IsLiteralOrConst(f, pn, &lit)) {
+ return false;
+ }
+
+ return IsLiteralInt(lit, u32);
+}
+
+static const int32_t NoMask = -1;
+
+template <typename Unit>
+static bool CheckArrayAccess(FunctionValidator<Unit>& f, ParseNode* viewName,
+ ParseNode* indexExpr, Scalar::Type* viewType) {
+ if (!viewName->isKind(ParseNodeKind::Name)) {
+ return f.fail(viewName,
+ "base of array access must be a typed array view name");
+ }
+
+ const ModuleValidatorShared::Global* global =
+ f.lookupGlobal(viewName->as<NameNode>().name());
+ if (!global || global->which() != ModuleValidatorShared::Global::ArrayView) {
+ return f.fail(viewName,
+ "base of array access must be a typed array view name");
+ }
+
+ *viewType = global->viewType();
+
+ uint32_t index;
+ if (IsLiteralOrConstInt(f, indexExpr, &index)) {
+ uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
+ uint64_t width = TypedArrayElemSize(*viewType);
+ if (!f.m().tryConstantAccess(byteOffset, width)) {
+ return f.fail(indexExpr, "constant index out of range");
+ }
+
+ return f.writeInt32Lit(byteOffset);
+ }
+
+ // Mask off the low bits to account for the clearing effect of a right shift
+ // followed by the left shift implicit in the array access. E.g., H32[i>>2]
+ // loses the low two bits.
+ int32_t mask = ~(TypedArrayElemSize(*viewType) - 1);
+
+ if (indexExpr->isKind(ParseNodeKind::RshExpr)) {
+ ParseNode* shiftAmountNode = BitwiseRight(indexExpr);
+
+ uint32_t shift;
+ if (!IsLiteralInt(f.m(), shiftAmountNode, &shift)) {
+ return f.failf(shiftAmountNode, "shift amount must be constant");
+ }
+
+ unsigned requiredShift = TypedArrayShift(*viewType);
+ if (shift != requiredShift) {
+ return f.failf(shiftAmountNode, "shift amount must be %u", requiredShift);
+ }
+
+ ParseNode* pointerNode = BitwiseLeft(indexExpr);
+
+ Type pointerType;
+ if (!CheckExpr(f, pointerNode, &pointerType)) {
+ return false;
+ }
+
+ if (!pointerType.isIntish()) {
+ return f.failf(pointerNode, "%s is not a subtype of int",
+ pointerType.toChars());
+ }
+ } else {
+ // For legacy scalar access compatibility, accept Int8/Uint8 accesses
+ // with no shift.
+ if (TypedArrayShift(*viewType) != 0) {
+ return f.fail(
+ indexExpr,
+ "index expression isn't shifted; must be an Int8/Uint8 access");
+ }
+
+ MOZ_ASSERT(mask == NoMask);
+
+ ParseNode* pointerNode = indexExpr;
+
+ Type pointerType;
+ if (!CheckExpr(f, pointerNode, &pointerType)) {
+ return false;
+ }
+ if (!pointerType.isInt()) {
+ return f.failf(pointerNode, "%s is not a subtype of int",
+ pointerType.toChars());
+ }
+ }
+
+  // Don't generate the mask op if there is no need for it, which can happen
+  // for a shift of zero.
+ if (mask != NoMask) {
+ return f.writeInt32Lit(mask) && f.encoder().writeOp(Op::I32And);
+ }
+
+ return true;
+}
+
+static bool WriteArrayAccessFlags(FunctionValidatorShared& f,
+ Scalar::Type viewType) {
+ // asm.js only has naturally-aligned accesses.
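+  // For example, a Float64 view access encodes an alignment of log2(8) == 3.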
+ size_t align = TypedArrayElemSize(viewType);
+ MOZ_ASSERT(IsPowerOfTwo(align));
+ if (!f.encoder().writeFixedU8(CeilingLog2(align))) {
+ return false;
+ }
+
+ // asm.js doesn't have constant offsets, so just encode a 0.
+ if (!f.encoder().writeVarU32(0)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckLoadArray(FunctionValidator<Unit>& f, ParseNode* elem,
+ Type* type) {
+ Scalar::Type viewType;
+
+ if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType)) {
+ return false;
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ if (!f.encoder().writeOp(Op::I32Load8S)) return false;
+ break;
+ case Scalar::Uint8:
+ if (!f.encoder().writeOp(Op::I32Load8U)) return false;
+ break;
+ case Scalar::Int16:
+ if (!f.encoder().writeOp(Op::I32Load16S)) return false;
+ break;
+ case Scalar::Uint16:
+ if (!f.encoder().writeOp(Op::I32Load16U)) return false;
+ break;
+ case Scalar::Uint32:
+ case Scalar::Int32:
+ if (!f.encoder().writeOp(Op::I32Load)) return false;
+ break;
+ case Scalar::Float32:
+ if (!f.encoder().writeOp(Op::F32Load)) return false;
+ break;
+ case Scalar::Float64:
+ if (!f.encoder().writeOp(Op::F64Load)) return false;
+ break;
+ default:
+ MOZ_CRASH("unexpected scalar type");
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32:
+ *type = Type::Intish;
+ break;
+ case Scalar::Float32:
+ *type = Type::MaybeFloat;
+ break;
+ case Scalar::Float64:
+ *type = Type::MaybeDouble;
+ break;
+ default:
+ MOZ_CRASH("Unexpected array type");
+ }
+
+ return WriteArrayAccessFlags(f, viewType);
+}
+
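+// Validates and encodes a store `view[index] = rhs`. The Tee-store variants
+// used below leave the stored value on the stack, so the assignment
+// expression itself yields a value of the rhs type.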
+template <typename Unit>
+static bool CheckStoreArray(FunctionValidator<Unit>& f, ParseNode* lhs,
+ ParseNode* rhs, Type* type) {
+ Scalar::Type viewType;
+ if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32:
+ if (!rhsType.isIntish()) {
+ return f.failf(lhs, "%s is not a subtype of intish", rhsType.toChars());
+ }
+ break;
+ case Scalar::Float32:
+ if (!rhsType.isMaybeDouble() && !rhsType.isFloatish()) {
+ return f.failf(lhs, "%s is not a subtype of double? or floatish",
+ rhsType.toChars());
+ }
+ break;
+ case Scalar::Float64:
+ if (!rhsType.isMaybeFloat() && !rhsType.isMaybeDouble()) {
+ return f.failf(lhs, "%s is not a subtype of float? or double?",
+ rhsType.toChars());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected view type");
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ if (!f.encoder().writeOp(MozOp::I32TeeStore8)) {
+ return false;
+ }
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ if (!f.encoder().writeOp(MozOp::I32TeeStore16)) {
+ return false;
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (!f.encoder().writeOp(MozOp::I32TeeStore)) {
+ return false;
+ }
+ break;
+ case Scalar::Float32:
+ if (rhsType.isFloatish()) {
+ if (!f.encoder().writeOp(MozOp::F32TeeStore)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(MozOp::F64TeeStoreF32)) {
+ return false;
+ }
+ }
+ break;
+ case Scalar::Float64:
+ if (rhsType.isFloatish()) {
+ if (!f.encoder().writeOp(MozOp::F32TeeStoreF64)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(MozOp::F64TeeStore)) {
+ return false;
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected scalar type");
+ }
+
+ if (!WriteArrayAccessFlags(f, viewType)) {
+ return false;
+ }
+
+ *type = rhsType;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckAssignName(FunctionValidator<Unit>& f, ParseNode* lhs,
+ ParseNode* rhs, Type* type) {
+ TaggedParserAtomIndex name = lhs->as<NameNode>().name();
+
+ if (const FunctionValidatorShared::Local* lhsVar = f.lookupLocal(name)) {
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!f.encoder().writeOp(Op::LocalTee)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(lhsVar->slot)) {
+ return false;
+ }
+
+ if (!(rhsType <= lhsVar->type)) {
+ return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(),
+ lhsVar->type.toChars());
+ }
+ *type = rhsType;
+ return true;
+ }
+
+ if (const ModuleValidatorShared::Global* global = f.lookupGlobal(name)) {
+ if (global->which() != ModuleValidatorShared::Global::Variable) {
+ return f.failName(lhs, "'%s' is not a mutable variable", name);
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ Type globType = global->varOrConstType();
+ if (!(rhsType <= globType)) {
+ return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(),
+ globType.toChars());
+ }
+ if (!f.encoder().writeOp(MozOp::TeeGlobal)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(global->varOrConstIndex())) {
+ return false;
+ }
+
+ *type = rhsType;
+ return true;
+ }
+
+ return f.failName(lhs, "'%s' not found in local or asm.js module scope",
+ name);
+}
+
+template <typename Unit>
+static bool CheckAssign(FunctionValidator<Unit>& f, ParseNode* assign,
+ Type* type) {
+ MOZ_ASSERT(assign->isKind(ParseNodeKind::AssignExpr));
+
+ ParseNode* lhs = BinaryLeft(assign);
+ ParseNode* rhs = BinaryRight(assign);
+
+ if (lhs->getKind() == ParseNodeKind::ElemExpr) {
+ return CheckStoreArray(f, lhs, rhs, type);
+ }
+
+ if (lhs->getKind() == ParseNodeKind::Name) {
+ return CheckAssignName(f, lhs, rhs, type);
+ }
+
+ return f.fail(
+ assign,
+ "left-hand side of assignment must be a variable or array access");
+}
+
+template <typename Unit>
+static bool CheckMathIMul(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 2) {
+ return f.fail(call, "Math.imul must be passed 2 arguments");
+ }
+
+ ParseNode* lhs = CallArgList(call);
+ ParseNode* rhs = NextNode(lhs);
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!lhsType.isIntish()) {
+ return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
+ }
+ if (!rhsType.isIntish()) {
+ return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
+ }
+
+ *type = Type::Signed;
+ return f.encoder().writeOp(Op::I32Mul);
+}
+
+template <typename Unit>
+static bool CheckMathClz32(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 1) {
+ return f.fail(call, "Math.clz32 must be passed 1 argument");
+ }
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (!argType.isIntish()) {
+ return f.failf(arg, "%s is not a subtype of intish", argType.toChars());
+ }
+
+ *type = Type::Fixnum;
+ return f.encoder().writeOp(Op::I32Clz);
+}
+
+template <typename Unit>
+static bool CheckMathAbs(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 1) {
+ return f.fail(call, "Math.abs must be passed 1 argument");
+ }
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (argType.isSigned()) {
+ *type = Type::Unsigned;
+ return f.encoder().writeOp(MozOp::I32Abs);
+ }
+
+ if (argType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Abs);
+ }
+
+ if (argType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Abs);
+ }
+
+ return f.failf(call, "%s is not a subtype of signed, float? or double?",
+ argType.toChars());
+}
+
+template <typename Unit>
+static bool CheckMathSqrt(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 1) {
+ return f.fail(call, "Math.sqrt must be passed 1 argument");
+ }
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (argType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Sqrt);
+ }
+
+ if (argType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Sqrt);
+ }
+
+ return f.failf(call, "%s is neither a subtype of double? nor float?",
+ argType.toChars());
+}
+
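+// Math.min/Math.max with more than two arguments is lowered to a left-to-right
+// chain of binary min/max operations, e.g. Math.min(a, b, c) becomes
+// min(min(a, b), c).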
+template <typename Unit>
+static bool CheckMathMinMax(FunctionValidator<Unit>& f, ParseNode* callNode,
+ bool isMax, Type* type) {
+ if (CallArgListLength(callNode) < 2) {
+ return f.fail(callNode, "Math.min/max must be passed at least 2 arguments");
+ }
+
+ ParseNode* firstArg = CallArgList(callNode);
+ Type firstType;
+ if (!CheckExpr(f, firstArg, &firstType)) {
+ return false;
+ }
+
+ Op op = Op::Limit;
+ MozOp mozOp = MozOp::Limit;
+ if (firstType.isMaybeDouble()) {
+ *type = Type::Double;
+ firstType = Type::MaybeDouble;
+ op = isMax ? Op::F64Max : Op::F64Min;
+ } else if (firstType.isMaybeFloat()) {
+ *type = Type::Float;
+ firstType = Type::MaybeFloat;
+ op = isMax ? Op::F32Max : Op::F32Min;
+ } else if (firstType.isSigned()) {
+ *type = Type::Signed;
+ firstType = Type::Signed;
+ mozOp = isMax ? MozOp::I32Max : MozOp::I32Min;
+ } else {
+ return f.failf(firstArg, "%s is not a subtype of double?, float? or signed",
+ firstType.toChars());
+ }
+
+ unsigned numArgs = CallArgListLength(callNode);
+ ParseNode* nextArg = NextNode(firstArg);
+ for (unsigned i = 1; i < numArgs; i++, nextArg = NextNode(nextArg)) {
+ Type nextType;
+ if (!CheckExpr(f, nextArg, &nextType)) {
+ return false;
+ }
+ if (!(nextType <= firstType)) {
+ return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(),
+ firstType.toChars());
+ }
+
+ if (op != Op::Limit) {
+ if (!f.encoder().writeOp(op)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(mozOp)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+using CheckArgType = bool (*)(FunctionValidatorShared& f, ParseNode* argNode,
+ Type type);
+
+template <CheckArgType checkArg, typename Unit>
+static bool CheckCallArgs(FunctionValidator<Unit>& f, ParseNode* callNode,
+ ValTypeVector* args) {
+ ParseNode* argNode = CallArgList(callNode);
+ for (unsigned i = 0; i < CallArgListLength(callNode);
+ i++, argNode = NextNode(argNode)) {
+ Type type;
+ if (!CheckExpr(f, argNode, &type)) {
+ return false;
+ }
+
+ if (!checkArg(f, argNode, type)) {
+ return false;
+ }
+
+ if (!args->append(Type::canonicalize(type).canonicalToValType())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool CheckSignatureAgainstExisting(ModuleValidatorShared& m,
+ ParseNode* usepn, const FuncType& sig,
+ const FuncType& existing) {
+ if (!FuncType::strictlyEquals(sig, existing)) {
+ return m.failf(usepn, "incompatible argument types to function");
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFunctionSignature(ModuleValidator<Unit>& m, ParseNode* usepn,
+ FuncType&& sig, TaggedParserAtomIndex name,
+ ModuleValidatorShared::Func** func) {
+ if (sig.args().length() > MaxParams) {
+ return m.failf(usepn, "too many parameters");
+ }
+
+ ModuleValidatorShared::Func* existing = m.lookupFuncDef(name);
+ if (!existing) {
+ if (!CheckModuleLevelName(m, usepn, name)) {
+ return false;
+ }
+ return m.addFuncDef(name, usepn->pn_pos.begin, std::move(sig), func);
+ }
+
+ const FuncType& existingSig =
+ m.env().types->type(existing->sigIndex()).funcType();
+
+ if (!CheckSignatureAgainstExisting(m, usepn, sig, existingSig)) {
+ return false;
+ }
+
+ *func = existing;
+ return true;
+}
+
+static bool CheckIsArgType(FunctionValidatorShared& f, ParseNode* argNode,
+ Type type) {
+ if (!type.isArgType()) {
+ return f.failf(argNode, "%s is not a subtype of int, float, or double",
+ type.toChars());
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckInternalCall(FunctionValidator<Unit>& f, ParseNode* callNode,
+ TaggedParserAtomIndex calleeName, Type ret,
+ Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsArgType>(f, callNode, &args)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ Maybe<ValType> retType = ret.canonicalToReturnType();
+ if (retType && !results.append(retType.ref())) {
+ return false;
+ }
+
+ FuncType sig(std::move(args), std::move(results));
+
+ ModuleValidatorShared::Func* callee;
+ if (!CheckFunctionSignature(f.m(), callNode, std::move(sig), calleeName,
+ &callee)) {
+ return false;
+ }
+
+ if (!f.writeCall(callNode, MozOp::OldCallDirect)) {
+ return false;
+ }
+
+ if (!f.encoder().writeVarU32(callee->funcDefIndex())) {
+ return false;
+ }
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFuncPtrTableAgainstExisting(ModuleValidator<Unit>& m,
+ ParseNode* usepn,
+ TaggedParserAtomIndex name,
+ FuncType&& sig, unsigned mask,
+ uint32_t* tableIndex) {
+ if (const ModuleValidatorShared::Global* existing = m.lookupGlobal(name)) {
+ if (existing->which() != ModuleValidatorShared::Global::Table) {
+ return m.failName(usepn, "'%s' is not a function-pointer table", name);
+ }
+
+ ModuleValidatorShared::Table& table = m.table(existing->tableIndex());
+ if (mask != table.mask()) {
+ return m.failf(usepn, "mask does not match previous value (%u)",
+ table.mask());
+ }
+
+ if (!CheckSignatureAgainstExisting(
+ m, usepn, sig, m.env().types->type(table.sigIndex()).funcType())) {
+ return false;
+ }
+
+ *tableIndex = existing->tableIndex();
+ return true;
+ }
+
+ if (!CheckModuleLevelName(m, usepn, name)) {
+ return false;
+ }
+
+ if (!m.declareFuncPtrTable(std::move(sig), name, usepn->pn_pos.begin, mask,
+ tableIndex)) {
+ return false;
+ }
+
+ return true;
+}
+
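+// Illustrative example: a function-pointer call has the shape
+// `ftable[(i | 0) & 3](x | 0) | 0`. The literal mask (3 here) must be a power
+// of two minus one and must agree with the mask recorded for the table, and
+// the index expression must be intish.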
+template <typename Unit>
+static bool CheckFuncPtrCall(FunctionValidator<Unit>& f, ParseNode* callNode,
+ Type ret, Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ ParseNode* callee = CallCallee(callNode);
+ ParseNode* tableNode = ElemBase(callee);
+ ParseNode* indexExpr = ElemIndex(callee);
+
+ if (!tableNode->isKind(ParseNodeKind::Name)) {
+ return f.fail(tableNode, "expecting name of function-pointer array");
+ }
+
+ TaggedParserAtomIndex name = tableNode->as<NameNode>().name();
+ if (const ModuleValidatorShared::Global* existing = f.lookupGlobal(name)) {
+ if (existing->which() != ModuleValidatorShared::Global::Table) {
+ return f.failName(
+ tableNode, "'%s' is not the name of a function-pointer array", name);
+ }
+ }
+
+ if (!indexExpr->isKind(ParseNodeKind::BitAndExpr)) {
+ return f.fail(indexExpr,
+ "function-pointer table index expression needs & mask");
+ }
+
+ ParseNode* indexNode = BitwiseLeft(indexExpr);
+ ParseNode* maskNode = BitwiseRight(indexExpr);
+
+ uint32_t mask;
+ if (!IsLiteralInt(f.m(), maskNode, &mask) || mask == UINT32_MAX ||
+ !IsPowerOfTwo(mask + 1)) {
+ return f.fail(maskNode,
+ "function-pointer table index mask value must be a power of "
+ "two minus 1");
+ }
+
+ Type indexType;
+ if (!CheckExpr(f, indexNode, &indexType)) {
+ return false;
+ }
+
+ if (!indexType.isIntish()) {
+ return f.failf(indexNode, "%s is not a subtype of intish",
+ indexType.toChars());
+ }
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsArgType>(f, callNode, &args)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ Maybe<ValType> retType = ret.canonicalToReturnType();
+ if (retType && !results.append(retType.ref())) {
+ return false;
+ }
+
+ FuncType sig(std::move(args), std::move(results));
+
+ uint32_t tableIndex;
+ if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, std::move(sig),
+ mask, &tableIndex)) {
+ return false;
+ }
+
+ if (!f.writeCall(callNode, MozOp::OldCallIndirect)) {
+ return false;
+ }
+
+ // Call signature
+ if (!f.encoder().writeVarU32(f.m().table(tableIndex).sigIndex())) {
+ return false;
+ }
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+static bool CheckIsExternType(FunctionValidatorShared& f, ParseNode* argNode,
+ Type type) {
+ if (!type.isExtern()) {
+ return f.failf(argNode, "%s is not a subtype of extern", type.toChars());
+ }
+ return true;
+}
+
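+// Illustrative example: given an FFI import `var log = foreign.log;`, the call
+// `log(x | 0) | 0` declares an import with signature (i32) -> i32 and lowers
+// to a plain call; FFI calls can never be coerced to float.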
+template <typename Unit>
+static bool CheckFFICall(FunctionValidator<Unit>& f, ParseNode* callNode,
+ unsigned ffiIndex, Type ret, Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ TaggedParserAtomIndex calleeName =
+ CallCallee(callNode)->as<NameNode>().name();
+
+ if (ret.isFloat()) {
+ return f.fail(callNode, "FFI calls can't return float");
+ }
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ Maybe<ValType> retType = ret.canonicalToReturnType();
+ if (retType && !results.append(retType.ref())) {
+ return false;
+ }
+
+ FuncType sig(std::move(args), std::move(results));
+
+ uint32_t importIndex;
+ if (!f.m().declareImport(calleeName, std::move(sig), ffiIndex,
+ &importIndex)) {
+ return false;
+ }
+
+ if (!f.writeCall(callNode, Op::Call)) {
+ return false;
+ }
+
+ if (!f.encoder().writeVarU32(importIndex)) {
+ return false;
+ }
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+static bool CheckFloatCoercionArg(FunctionValidatorShared& f,
+ ParseNode* inputNode, Type inputType) {
+ if (inputType.isMaybeDouble()) {
+ return f.encoder().writeOp(Op::F32DemoteF64);
+ }
+ if (inputType.isSigned()) {
+ return f.encoder().writeOp(Op::F32ConvertI32S);
+ }
+ if (inputType.isUnsigned()) {
+ return f.encoder().writeOp(Op::F32ConvertI32U);
+ }
+ if (inputType.isFloatish()) {
+ return true;
+ }
+
+ return f.failf(inputNode,
+ "%s is not a subtype of signed, unsigned, double? or floatish",
+ inputType.toChars());
+}
+
+template <typename Unit>
+static bool CheckCoercedCall(FunctionValidator<Unit>& f, ParseNode* call,
+ Type ret, Type* type);
+
+template <typename Unit>
+static bool CheckCoercionArg(FunctionValidator<Unit>& f, ParseNode* arg,
+ Type expected, Type* type) {
+ MOZ_ASSERT(expected.isCanonicalValType());
+
+ if (arg->isKind(ParseNodeKind::CallExpr)) {
+ return CheckCoercedCall(f, arg, expected, type);
+ }
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (expected.isFloat()) {
+ if (!CheckFloatCoercionArg(f, arg, argType)) {
+ return false;
+ }
+ } else {
+ MOZ_CRASH("not call coercions");
+ }
+
+ *type = Type::ret(expected);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckMathFRound(FunctionValidator<Unit>& f, ParseNode* callNode,
+ Type* type) {
+ if (CallArgListLength(callNode) != 1) {
+ return f.fail(callNode, "Math.fround must be passed 1 argument");
+ }
+
+ ParseNode* argNode = CallArgList(callNode);
+ Type argType;
+ if (!CheckCoercionArg(f, argNode, Type::Float, &argType)) {
+ return false;
+ }
+
+ MOZ_ASSERT(argType == Type::Float);
+ *type = Type::Float;
+ return true;
+}
+
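+// Illustrative example: `+Math.sin(+x)` takes the double-only MozOp path
+// (native or fdlibm sine depending on fingerprinting resistance), whereas
+// `Math.sin(fround(x))` is rejected because sin has no float variant.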
+template <typename Unit>
+static bool CheckMathBuiltinCall(FunctionValidator<Unit>& f,
+ ParseNode* callNode,
+ AsmJSMathBuiltinFunction func, Type* type) {
+ unsigned arity = 0;
+ Op f32 = Op::Limit;
+ Op f64 = Op::Limit;
+ MozOp mozf64 = MozOp::Limit;
+ switch (func) {
+ case AsmJSMathBuiltin_imul:
+ return CheckMathIMul(f, callNode, type);
+ case AsmJSMathBuiltin_clz32:
+ return CheckMathClz32(f, callNode, type);
+ case AsmJSMathBuiltin_abs:
+ return CheckMathAbs(f, callNode, type);
+ case AsmJSMathBuiltin_sqrt:
+ return CheckMathSqrt(f, callNode, type);
+ case AsmJSMathBuiltin_fround:
+ return CheckMathFRound(f, callNode, type);
+ case AsmJSMathBuiltin_min:
+ return CheckMathMinMax(f, callNode, /* isMax = */ false, type);
+ case AsmJSMathBuiltin_max:
+ return CheckMathMinMax(f, callNode, /* isMax = */ true, type);
+ case AsmJSMathBuiltin_ceil:
+ arity = 1;
+ f64 = Op::F64Ceil;
+ f32 = Op::F32Ceil;
+ break;
+ case AsmJSMathBuiltin_floor:
+ arity = 1;
+ f64 = Op::F64Floor;
+ f32 = Op::F32Floor;
+ break;
+ case AsmJSMathBuiltin_sin:
+ arity = 1;
+ if (!f.m().shouldResistFingerprinting()) {
+ mozf64 = MozOp::F64SinNative;
+ } else {
+ mozf64 = MozOp::F64SinFdlibm;
+ }
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_cos:
+ arity = 1;
+ if (!f.m().shouldResistFingerprinting()) {
+ mozf64 = MozOp::F64CosNative;
+ } else {
+ mozf64 = MozOp::F64CosFdlibm;
+ }
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_tan:
+ arity = 1;
+ if (!f.m().shouldResistFingerprinting()) {
+ mozf64 = MozOp::F64TanNative;
+ } else {
+ mozf64 = MozOp::F64TanFdlibm;
+ }
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_asin:
+ arity = 1;
+ mozf64 = MozOp::F64Asin;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_acos:
+ arity = 1;
+ mozf64 = MozOp::F64Acos;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_atan:
+ arity = 1;
+ mozf64 = MozOp::F64Atan;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_exp:
+ arity = 1;
+ mozf64 = MozOp::F64Exp;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_log:
+ arity = 1;
+ mozf64 = MozOp::F64Log;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_pow:
+ arity = 2;
+ mozf64 = MozOp::F64Pow;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_atan2:
+ arity = 2;
+ mozf64 = MozOp::F64Atan2;
+ f32 = Op::Unreachable;
+ break;
+ default:
+ MOZ_CRASH("unexpected mathBuiltin function");
+ }
+
+ unsigned actualArity = CallArgListLength(callNode);
+ if (actualArity != arity) {
+ return f.failf(callNode, "call passed %u arguments, expected %u",
+ actualArity, arity);
+ }
+
+ if (!f.prepareCall(callNode)) {
+ return false;
+ }
+
+ Type firstType;
+ ParseNode* argNode = CallArgList(callNode);
+ if (!CheckExpr(f, argNode, &firstType)) {
+ return false;
+ }
+
+ if (!firstType.isMaybeFloat() && !firstType.isMaybeDouble()) {
+ return f.fail(
+ argNode,
+ "arguments to math call should be a subtype of double? or float?");
+ }
+
+ bool opIsDouble = firstType.isMaybeDouble();
+ if (!opIsDouble && f32 == Op::Unreachable) {
+ return f.fail(callNode, "math builtin cannot be used as float");
+ }
+
+ if (arity == 2) {
+ Type secondType;
+ argNode = NextNode(argNode);
+ if (!CheckExpr(f, argNode, &secondType)) {
+ return false;
+ }
+
+ if (firstType.isMaybeDouble() && !secondType.isMaybeDouble()) {
+ return f.fail(
+ argNode,
+ "both arguments to math builtin call should be the same type");
+ }
+ if (firstType.isMaybeFloat() && !secondType.isMaybeFloat()) {
+ return f.fail(
+ argNode,
+ "both arguments to math builtin call should be the same type");
+ }
+ }
+
+ if (opIsDouble) {
+ if (f64 != Op::Limit) {
+ if (!f.encoder().writeOp(f64)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(mozf64)) {
+ return false;
+ }
+ }
+ } else {
+ if (!f.encoder().writeOp(f32)) {
+ return false;
+ }
+ }
+
+ *type = opIsDouble ? Type::Double : Type::Floatish;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckUncoercedCall(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::CallExpr));
+
+ const ModuleValidatorShared::Global* global;
+ if (IsCallToGlobal(f.m(), expr, &global) && global->isMathFunction()) {
+ return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), type);
+ }
+
+ return f.fail(
+ expr,
+ "all function calls must be calls to standard lib math functions,"
+ " ignored (via f(); or comma-expression), coerced to signed (via f()|0),"
+ " coerced to float (via fround(f())), or coerced to double (via +f())");
+}
+
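+// Illustrative example: coercing a `signed` value to double appends
+// f64.convert_i32_s after the already-emitted operand, and coercing a float
+// value (e.g. the result of `fround(...)`) appends f64.promote_f32.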
+static bool CoerceResult(FunctionValidatorShared& f, ParseNode* expr,
+ Type expected, Type actual, Type* type) {
+ MOZ_ASSERT(expected.isCanonical());
+
+ // At this point, the bytecode resembles this:
+ // | the thing we wanted to coerce | current position |>
+ switch (expected.which()) {
+ case Type::Void:
+ if (!actual.isVoid()) {
+ if (!f.encoder().writeOp(Op::Drop)) {
+ return false;
+ }
+ }
+ break;
+ case Type::Int:
+ if (!actual.isIntish()) {
+ return f.failf(expr, "%s is not a subtype of intish", actual.toChars());
+ }
+ break;
+ case Type::Float:
+ if (!CheckFloatCoercionArg(f, expr, actual)) {
+ return false;
+ }
+ break;
+ case Type::Double:
+ if (actual.isMaybeDouble()) {
+ // No conversion necessary.
+ } else if (actual.isMaybeFloat()) {
+ if (!f.encoder().writeOp(Op::F64PromoteF32)) {
+ return false;
+ }
+ } else if (actual.isSigned()) {
+ if (!f.encoder().writeOp(Op::F64ConvertI32S)) {
+ return false;
+ }
+ } else if (actual.isUnsigned()) {
+ if (!f.encoder().writeOp(Op::F64ConvertI32U)) {
+ return false;
+ }
+ } else {
+ return f.failf(
+ expr, "%s is not a subtype of double?, float?, signed or unsigned",
+ actual.toChars());
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected uncoerced result type");
+ }
+
+ *type = Type::ret(expected);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckCoercedMathBuiltinCall(FunctionValidator<Unit>& f,
+ ParseNode* callNode,
+ AsmJSMathBuiltinFunction func, Type ret,
+ Type* type) {
+ Type actual;
+ if (!CheckMathBuiltinCall(f, callNode, func, &actual)) {
+ return false;
+ }
+ return CoerceResult(f, callNode, ret, actual, type);
+}
+
+template <typename Unit>
+static bool CheckCoercedCall(FunctionValidator<Unit>& f, ParseNode* call,
+ Type ret, Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ AutoCheckRecursionLimit recursion(f.fc());
+ if (!recursion.checkDontReport(f.fc())) {
+ return f.m().failOverRecursed();
+ }
+
+ if (IsNumericLiteral(f.m(), call)) {
+ NumLit lit = ExtractNumericLiteral(f.m(), call);
+ if (!f.writeConstExpr(lit)) {
+ return false;
+ }
+ return CoerceResult(f, call, ret, Type::lit(lit), type);
+ }
+
+ ParseNode* callee = CallCallee(call);
+
+ if (callee->isKind(ParseNodeKind::ElemExpr)) {
+ return CheckFuncPtrCall(f, call, ret, type);
+ }
+
+ if (!callee->isKind(ParseNodeKind::Name)) {
+ return f.fail(callee, "unexpected callee expression type");
+ }
+
+ TaggedParserAtomIndex calleeName = callee->as<NameNode>().name();
+
+ if (const ModuleValidatorShared::Global* global =
+ f.lookupGlobal(calleeName)) {
+ switch (global->which()) {
+ case ModuleValidatorShared::Global::FFI:
+ return CheckFFICall(f, call, global->ffiIndex(), ret, type);
+ case ModuleValidatorShared::Global::MathBuiltinFunction:
+ return CheckCoercedMathBuiltinCall(
+ f, call, global->mathBuiltinFunction(), ret, type);
+ case ModuleValidatorShared::Global::ConstantLiteral:
+ case ModuleValidatorShared::Global::ConstantImport:
+ case ModuleValidatorShared::Global::Variable:
+ case ModuleValidatorShared::Global::Table:
+ case ModuleValidatorShared::Global::ArrayView:
+ case ModuleValidatorShared::Global::ArrayViewCtor:
+        return f.failName(callee, "'%s' is not a callable function",
+                          calleeName);
+ case ModuleValidatorShared::Global::Function:
+ break;
+ }
+ }
+
+ return CheckInternalCall(f, call, calleeName, ret, type);
+}
+
+template <typename Unit>
+static bool CheckPos(FunctionValidator<Unit>& f, ParseNode* pos, Type* type) {
+ MOZ_ASSERT(pos->isKind(ParseNodeKind::PosExpr));
+ ParseNode* operand = UnaryKid(pos);
+
+ if (operand->isKind(ParseNodeKind::CallExpr)) {
+ return CheckCoercedCall(f, operand, Type::Double, type);
+ }
+
+ Type actual;
+ if (!CheckExpr(f, operand, &actual)) {
+ return false;
+ }
+
+ return CoerceResult(f, operand, Type::Double, actual, type);
+}
+
+template <typename Unit>
+static bool CheckNot(FunctionValidator<Unit>& f, ParseNode* expr, Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::NotExpr));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (!operandType.isInt()) {
+ return f.failf(operand, "%s is not a subtype of int",
+ operandType.toChars());
+ }
+
+ *type = Type::Int;
+ return f.encoder().writeOp(Op::I32Eqz);
+}
+
+template <typename Unit>
+static bool CheckNeg(FunctionValidator<Unit>& f, ParseNode* expr, Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::NegExpr));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (operandType.isInt()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(MozOp::I32Neg);
+ }
+
+ if (operandType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Neg);
+ }
+
+ if (operandType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Neg);
+ }
+
+ return f.failf(operand, "%s is not a subtype of int, float? or double?",
+ operandType.toChars());
+}
+
+template <typename Unit>
+static bool CheckCoerceToInt(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::BitNotExpr));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (operandType.isMaybeDouble() || operandType.isMaybeFloat()) {
+ *type = Type::Signed;
+ Op opcode =
+ operandType.isMaybeDouble() ? Op::I32TruncF64S : Op::I32TruncF32S;
+ return f.encoder().writeOp(opcode);
+ }
+
+ if (!operandType.isIntish()) {
+ return f.failf(operand, "%s is not a subtype of double?, float? or intish",
+ operandType.toChars());
+ }
+
+ *type = Type::Signed;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckBitNot(FunctionValidator<Unit>& f, ParseNode* neg,
+ Type* type) {
+ MOZ_ASSERT(neg->isKind(ParseNodeKind::BitNotExpr));
+ ParseNode* operand = UnaryKid(neg);
+
+ if (operand->isKind(ParseNodeKind::BitNotExpr)) {
+ return CheckCoerceToInt(f, operand, type);
+ }
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (!operandType.isIntish()) {
+ return f.failf(operand, "%s is not a subtype of intish",
+ operandType.toChars());
+ }
+
+ if (!f.encoder().writeOp(MozOp::I32BitNot)) {
+ return false;
+ }
+
+ *type = Type::Signed;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckAsExprStatement(FunctionValidator<Unit>& f,
+ ParseNode* exprStmt);
+
+template <typename Unit>
+static bool CheckComma(FunctionValidator<Unit>& f, ParseNode* comma,
+ Type* type) {
+ MOZ_ASSERT(comma->isKind(ParseNodeKind::CommaExpr));
+ ParseNode* operands = ListHead(comma);
+
+ // The block depth isn't taken into account here, because a comma list can't
+  // contain breaks, continues, or nested control flow structures.
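+  // Illustrative example: in `(f(), x | 0)` the call `f()` is checked as a
+  // statement coerced to void, and the comma expression takes the type of
+  // `x | 0`.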
+ if (!f.encoder().writeOp(Op::Block)) {
+ return false;
+ }
+
+ size_t typeAt;
+ if (!f.encoder().writePatchableFixedU7(&typeAt)) {
+ return false;
+ }
+
+ ParseNode* pn = operands;
+ for (; NextNode(pn); pn = NextNode(pn)) {
+ if (!CheckAsExprStatement(f, pn)) {
+ return false;
+ }
+ }
+
+ if (!CheckExpr(f, pn, type)) {
+ return false;
+ }
+
+ f.encoder().patchFixedU7(typeAt, uint8_t(type->toWasmBlockSignatureType()));
+
+ return f.encoder().writeOp(Op::End);
+}
+
+template <typename Unit>
+static bool CheckConditional(FunctionValidator<Unit>& f, ParseNode* ternary,
+ Type* type) {
+ MOZ_ASSERT(ternary->isKind(ParseNodeKind::ConditionalExpr));
+
+ ParseNode* cond = TernaryKid1(ternary);
+ ParseNode* thenExpr = TernaryKid2(ternary);
+ ParseNode* elseExpr = TernaryKid3(ternary);
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ size_t typeAt;
+ if (!f.pushIf(&typeAt)) {
+ return false;
+ }
+
+ Type thenType;
+ if (!CheckExpr(f, thenExpr, &thenType)) {
+ return false;
+ }
+
+ if (!f.switchToElse()) {
+ return false;
+ }
+
+ Type elseType;
+ if (!CheckExpr(f, elseExpr, &elseType)) {
+ return false;
+ }
+
+ if (thenType.isInt() && elseType.isInt()) {
+ *type = Type::Int;
+ } else if (thenType.isDouble() && elseType.isDouble()) {
+ *type = Type::Double;
+ } else if (thenType.isFloat() && elseType.isFloat()) {
+ *type = Type::Float;
+ } else {
+ return f.failf(
+ ternary,
+ "then/else branches of conditional must both produce int, float, "
+ "double, current types are %s and %s",
+ thenType.toChars(), elseType.toChars());
+ }
+
+ if (!f.popIf(typeAt, type->toWasmBlockSignatureType())) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool IsValidIntMultiplyConstant(ModuleValidator<Unit>& m,
+ ParseNode* expr) {
+ if (!IsNumericLiteral(m, expr)) {
+ return false;
+ }
+
+ NumLit lit = ExtractNumericLiteral(m, expr);
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ if (Abs(lit.toInt32()) < (uint32_t(1) << 20)) {
+ return true;
+ }
+ return false;
+ case NumLit::BigUnsigned:
+ case NumLit::Double:
+ case NumLit::Float:
+ case NumLit::OutOfRangeInt:
+ return false;
+ }
+
+ MOZ_CRASH("Bad literal");
+}
+
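+// Illustrative example: `(x * 3) | 0` is accepted because one operand is a
+// small int literal, while `(x * y) | 0` on two int locals is rejected and
+// must be written with `Math.imul` instead.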
+template <typename Unit>
+static bool CheckMultiply(FunctionValidator<Unit>& f, ParseNode* star,
+ Type* type) {
+ MOZ_ASSERT(star->isKind(ParseNodeKind::MulExpr));
+ ParseNode* lhs = MultiplyLeft(star);
+ ParseNode* rhs = MultiplyRight(star);
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (lhsType.isInt() && rhsType.isInt()) {
+ if (!IsValidIntMultiplyConstant(f.m(), lhs) &&
+ !IsValidIntMultiplyConstant(f.m(), rhs)) {
+ return f.fail(
+ star,
+ "one arg to int multiply must be a small (-2^20, 2^20) int literal");
+ }
+ *type = Type::Intish;
+ return f.encoder().writeOp(Op::I32Mul);
+ }
+
+ if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Mul);
+ }
+
+ if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Mul);
+ }
+
+ return f.fail(
+ star, "multiply operands must be both int, both double? or both float?");
+}
+
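+// Illustrative example: in `((x | 0) + (y | 0)) | 0`, adding two ints yields
+// intish, so the sum must be re-coerced with `| 0` before further use; chains
+// of more than 2^20 additions or subtractions without an intervening coercion
+// are rejected.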
+template <typename Unit>
+static bool CheckAddOrSub(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type, unsigned* numAddOrSubOut = nullptr) {
+ AutoCheckRecursionLimit recursion(f.fc());
+ if (!recursion.checkDontReport(f.fc())) {
+ return f.m().failOverRecursed();
+ }
+
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::AddExpr) ||
+ expr->isKind(ParseNodeKind::SubExpr));
+ ParseNode* lhs = AddSubLeft(expr);
+ ParseNode* rhs = AddSubRight(expr);
+
+ Type lhsType, rhsType;
+ unsigned lhsNumAddOrSub, rhsNumAddOrSub;
+
+ if (lhs->isKind(ParseNodeKind::AddExpr) ||
+ lhs->isKind(ParseNodeKind::SubExpr)) {
+ if (!CheckAddOrSub(f, lhs, &lhsType, &lhsNumAddOrSub)) {
+ return false;
+ }
+ if (lhsType == Type::Intish) {
+ lhsType = Type::Int;
+ }
+ } else {
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ lhsNumAddOrSub = 0;
+ }
+
+ if (rhs->isKind(ParseNodeKind::AddExpr) ||
+ rhs->isKind(ParseNodeKind::SubExpr)) {
+ if (!CheckAddOrSub(f, rhs, &rhsType, &rhsNumAddOrSub)) {
+ return false;
+ }
+ if (rhsType == Type::Intish) {
+ rhsType = Type::Int;
+ }
+ } else {
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+ rhsNumAddOrSub = 0;
+ }
+
+ unsigned numAddOrSub = lhsNumAddOrSub + rhsNumAddOrSub + 1;
+ if (numAddOrSub > (1 << 20)) {
+ return f.fail(expr, "too many + or - without intervening coercion");
+ }
+
+ if (lhsType.isInt() && rhsType.isInt()) {
+ if (!f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::AddExpr) ? Op::I32Add : Op::I32Sub)) {
+ return false;
+ }
+ *type = Type::Intish;
+ } else if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ if (!f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::AddExpr) ? Op::F64Add : Op::F64Sub)) {
+ return false;
+ }
+ *type = Type::Double;
+ } else if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ if (!f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::AddExpr) ? Op::F32Add : Op::F32Sub)) {
+ return false;
+ }
+ *type = Type::Floatish;
+ } else {
+ return f.failf(
+ expr,
+ "operands to + or - must both be int, float? or double?, got %s and %s",
+ lhsType.toChars(), rhsType.toChars());
+ }
+
+ if (numAddOrSubOut) {
+ *numAddOrSubOut = numAddOrSub;
+ }
+ return true;
+}
+
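+// Illustrative example: `((x | 0) / (y | 0)) | 0` selects i32.div_s and
+// `((x >>> 0) % (y >>> 0)) | 0` selects i32.rem_u; int operands must first be
+// made signed (`| 0`) or unsigned (`>>> 0`) so the right opcode can be chosen.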
+template <typename Unit>
+static bool CheckDivOrMod(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::DivExpr) ||
+ expr->isKind(ParseNodeKind::ModExpr));
+
+ ParseNode* lhs = DivOrModLeft(expr);
+ ParseNode* rhs = DivOrModRight(expr);
+
+ Type lhsType, rhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ *type = Type::Double;
+ if (expr->isKind(ParseNodeKind::DivExpr)) {
+ return f.encoder().writeOp(Op::F64Div);
+ }
+ return f.encoder().writeOp(MozOp::F64Mod);
+ }
+
+ if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ if (expr->isKind(ParseNodeKind::DivExpr)) {
+ return f.encoder().writeOp(Op::F32Div);
+ }
+ return f.fail(expr, "modulo cannot receive float arguments");
+ }
+
+ if (lhsType.isSigned() && rhsType.isSigned()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::DivExpr) ? Op::I32DivS : Op::I32RemS);
+ }
+
+ if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::DivExpr) ? Op::I32DivU : Op::I32RemU);
+ }
+
+ return f.failf(
+ expr,
+ "arguments to / or %% must both be double?, float?, signed, or unsigned; "
+ "%s and %s are given",
+ lhsType.toChars(), rhsType.toChars());
+}
+
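+// Illustrative example: `(x | 0) < (y | 0)` emits i32.lt_s,
+// `(x >>> 0) < (y >>> 0)` emits i32.lt_u, and `+a < +b` emits f64.lt; operands
+// of mixed signedness or mixed numeric type are rejected.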
+template <typename Unit>
+static bool CheckComparison(FunctionValidator<Unit>& f, ParseNode* comp,
+ Type* type) {
+ MOZ_ASSERT(comp->isKind(ParseNodeKind::LtExpr) ||
+ comp->isKind(ParseNodeKind::LeExpr) ||
+ comp->isKind(ParseNodeKind::GtExpr) ||
+ comp->isKind(ParseNodeKind::GeExpr) ||
+ comp->isKind(ParseNodeKind::EqExpr) ||
+ comp->isKind(ParseNodeKind::NeExpr));
+
+ ParseNode* lhs = ComparisonLeft(comp);
+ ParseNode* rhs = ComparisonRight(comp);
+
+ Type lhsType, rhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!(lhsType.isSigned() && rhsType.isSigned()) &&
+ !(lhsType.isUnsigned() && rhsType.isUnsigned()) &&
+ !(lhsType.isDouble() && rhsType.isDouble()) &&
+ !(lhsType.isFloat() && rhsType.isFloat())) {
+ return f.failf(comp,
+ "arguments to a comparison must both be signed, unsigned, "
+ "floats or doubles; "
+ "%s and %s are given",
+ lhsType.toChars(), rhsType.toChars());
+ }
+
+ Op stmt;
+ if (lhsType.isSigned() && rhsType.isSigned()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::I32Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::I32Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::I32LtS;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::I32LeS;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::I32GtS;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::I32GeS;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::I32Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::I32Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::I32LtU;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::I32LeU;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::I32GtU;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::I32GeU;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isDouble()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::F64Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::F64Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::F64Lt;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::F64Le;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::F64Gt;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::F64Ge;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isFloat()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::F32Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::F32Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::F32Lt;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::F32Le;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::F32Gt;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::F32Ge;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ *type = Type::Int;
+ return f.encoder().writeOp(stmt);
+}
+
+template <typename Unit>
+static bool CheckBitwise(FunctionValidator<Unit>& f, ParseNode* bitwise,
+ Type* type) {
+ ParseNode* lhs = BitwiseLeft(bitwise);
+ ParseNode* rhs = BitwiseRight(bitwise);
+
+ int32_t identityElement;
+ bool onlyOnRight;
+ switch (bitwise->getKind()) {
+ case ParseNodeKind::BitOrExpr:
+ identityElement = 0;
+ onlyOnRight = false;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::BitAndExpr:
+ identityElement = -1;
+ onlyOnRight = false;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::BitXorExpr:
+ identityElement = 0;
+ onlyOnRight = false;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::LshExpr:
+ identityElement = 0;
+ onlyOnRight = true;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::RshExpr:
+ identityElement = 0;
+ onlyOnRight = true;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::UrshExpr:
+ identityElement = 0;
+ onlyOnRight = true;
+ *type = Type::Unsigned;
+ break;
+ default:
+ MOZ_CRASH("not a bitwise op");
+ }
+
+ uint32_t i;
+ if (!onlyOnRight && IsLiteralInt(f.m(), lhs, &i) &&
+ i == uint32_t(identityElement)) {
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+ if (!rhsType.isIntish()) {
+ return f.failf(bitwise, "%s is not a subtype of intish",
+ rhsType.toChars());
+ }
+ return true;
+ }
+
+ if (IsLiteralInt(f.m(), rhs, &i) && i == uint32_t(identityElement)) {
+ if (bitwise->isKind(ParseNodeKind::BitOrExpr) &&
+ lhs->isKind(ParseNodeKind::CallExpr)) {
+ return CheckCoercedCall(f, lhs, Type::Int, type);
+ }
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ if (!lhsType.isIntish()) {
+ return f.failf(bitwise, "%s is not a subtype of intish",
+ lhsType.toChars());
+ }
+ return true;
+ }
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!lhsType.isIntish()) {
+ return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
+ }
+ if (!rhsType.isIntish()) {
+ return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
+ }
+
+ switch (bitwise->getKind()) {
+ case ParseNodeKind::BitOrExpr:
+ if (!f.encoder().writeOp(Op::I32Or)) return false;
+ break;
+ case ParseNodeKind::BitAndExpr:
+ if (!f.encoder().writeOp(Op::I32And)) return false;
+ break;
+ case ParseNodeKind::BitXorExpr:
+ if (!f.encoder().writeOp(Op::I32Xor)) return false;
+ break;
+ case ParseNodeKind::LshExpr:
+ if (!f.encoder().writeOp(Op::I32Shl)) return false;
+ break;
+ case ParseNodeKind::RshExpr:
+ if (!f.encoder().writeOp(Op::I32ShrS)) return false;
+ break;
+ case ParseNodeKind::UrshExpr:
+ if (!f.encoder().writeOp(Op::I32ShrU)) return false;
+ break;
+ default:
+ MOZ_CRASH("not a bitwise op");
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckExpr(FunctionValidator<Unit>& f, ParseNode* expr, Type* type) {
+ AutoCheckRecursionLimit recursion(f.fc());
+ if (!recursion.checkDontReport(f.fc())) {
+ return f.m().failOverRecursed();
+ }
+
+ if (IsNumericLiteral(f.m(), expr)) {
+ return CheckNumericLiteral(f, expr, type);
+ }
+
+ switch (expr->getKind()) {
+ case ParseNodeKind::Name:
+ return CheckVarRef(f, expr, type);
+ case ParseNodeKind::ElemExpr:
+ return CheckLoadArray(f, expr, type);
+ case ParseNodeKind::AssignExpr:
+ return CheckAssign(f, expr, type);
+ case ParseNodeKind::PosExpr:
+ return CheckPos(f, expr, type);
+ case ParseNodeKind::NotExpr:
+ return CheckNot(f, expr, type);
+ case ParseNodeKind::NegExpr:
+ return CheckNeg(f, expr, type);
+ case ParseNodeKind::BitNotExpr:
+ return CheckBitNot(f, expr, type);
+ case ParseNodeKind::CommaExpr:
+ return CheckComma(f, expr, type);
+ case ParseNodeKind::ConditionalExpr:
+ return CheckConditional(f, expr, type);
+ case ParseNodeKind::MulExpr:
+ return CheckMultiply(f, expr, type);
+ case ParseNodeKind::CallExpr:
+ return CheckUncoercedCall(f, expr, type);
+
+ case ParseNodeKind::AddExpr:
+ case ParseNodeKind::SubExpr:
+ return CheckAddOrSub(f, expr, type);
+
+ case ParseNodeKind::DivExpr:
+ case ParseNodeKind::ModExpr:
+ return CheckDivOrMod(f, expr, type);
+
+ case ParseNodeKind::LtExpr:
+ case ParseNodeKind::LeExpr:
+ case ParseNodeKind::GtExpr:
+ case ParseNodeKind::GeExpr:
+ case ParseNodeKind::EqExpr:
+ case ParseNodeKind::NeExpr:
+ return CheckComparison(f, expr, type);
+
+ case ParseNodeKind::BitOrExpr:
+ case ParseNodeKind::BitAndExpr:
+ case ParseNodeKind::BitXorExpr:
+ case ParseNodeKind::LshExpr:
+ case ParseNodeKind::RshExpr:
+ case ParseNodeKind::UrshExpr:
+ return CheckBitwise(f, expr, type);
+
+ default:;
+ }
+
+ return f.fail(expr, "unsupported expression");
+}
+
+template <typename Unit>
+static bool CheckStatement(FunctionValidator<Unit>& f, ParseNode* stmt);
+
+template <typename Unit>
+static bool CheckAsExprStatement(FunctionValidator<Unit>& f, ParseNode* expr) {
+ if (expr->isKind(ParseNodeKind::CallExpr)) {
+ Type ignored;
+ return CheckCoercedCall(f, expr, Type::Void, &ignored);
+ }
+
+ Type resultType;
+ if (!CheckExpr(f, expr, &resultType)) {
+ return false;
+ }
+
+ if (!resultType.isVoid()) {
+ if (!f.encoder().writeOp(Op::Drop)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckExprStatement(FunctionValidator<Unit>& f,
+ ParseNode* exprStmt) {
+ MOZ_ASSERT(exprStmt->isKind(ParseNodeKind::ExpressionStmt));
+ return CheckAsExprStatement(f, UnaryKid(exprStmt));
+}
+
+template <typename Unit>
+static bool CheckLoopConditionOnEntry(FunctionValidator<Unit>& f,
+ ParseNode* cond) {
+ uint32_t maybeLit;
+ if (IsLiteralInt(f.m(), cond, &maybeLit) && maybeLit) {
+ return true;
+ }
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ if (!f.encoder().writeOp(Op::I32Eqz)) {
+ return false;
+ }
+
+ // brIf (i32.eqz $f) $out
+ if (!f.writeBreakIf()) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckWhile(FunctionValidator<Unit>& f, ParseNode* whileStmt,
+ const LabelVector* labels = nullptr) {
+ MOZ_ASSERT(whileStmt->isKind(ParseNodeKind::WhileStmt));
+ ParseNode* cond = BinaryLeft(whileStmt);
+ ParseNode* body = BinaryRight(whileStmt);
+
+ // A while loop `while(#cond) #body` is equivalent to:
+ // (block $after_loop
+ // (loop $top
+ // (brIf $after_loop (i32.eq 0 #cond))
+ // #body
+ // (br $top)
+ // )
+ // )
+ if (labels && !f.addLabels(*labels, 0, 1)) {
+ return false;
+ }
+
+ if (!f.pushLoop()) {
+ return false;
+ }
+
+ if (!CheckLoopConditionOnEntry(f, cond)) {
+ return false;
+ }
+ if (!CheckStatement(f, body)) {
+ return false;
+ }
+ if (!f.writeContinue()) {
+ return false;
+ }
+
+ if (!f.popLoop()) {
+ return false;
+ }
+ if (labels) {
+ f.removeLabels(*labels);
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFor(FunctionValidator<Unit>& f, ParseNode* forStmt,
+ const LabelVector* labels = nullptr) {
+ MOZ_ASSERT(forStmt->isKind(ParseNodeKind::ForStmt));
+ ParseNode* forHead = BinaryLeft(forStmt);
+ ParseNode* body = BinaryRight(forStmt);
+
+ if (!forHead->isKind(ParseNodeKind::ForHead)) {
+ return f.fail(forHead, "unsupported for-loop statement");
+ }
+
+ ParseNode* maybeInit = TernaryKid1(forHead);
+ ParseNode* maybeCond = TernaryKid2(forHead);
+ ParseNode* maybeInc = TernaryKid3(forHead);
+
+ // A for-loop `for (#init; #cond; #inc) #body` is equivalent to:
+ // (block // depth X
+ // (#init)
+ // (block $after_loop // depth X+1 (block)
+ // (loop $loop_top // depth X+2 (loop)
+  //      (brIf $after_loop (eq 0 #cond))
+ // (block $after_body #body) // depth X+3
+ // #inc
+ // (br $loop_top)
+ // )
+ // )
+ // )
+ // A break in the body should break out to $after_loop, i.e. depth + 1.
+ // A continue in the body should break out to $after_body, i.e. depth + 3.
+ if (labels && !f.addLabels(*labels, 1, 3)) {
+ return false;
+ }
+
+ if (!f.pushUnbreakableBlock()) {
+ return false;
+ }
+
+ if (maybeInit && !CheckAsExprStatement(f, maybeInit)) {
+ return false;
+ }
+
+ {
+ if (!f.pushLoop()) {
+ return false;
+ }
+
+ if (maybeCond && !CheckLoopConditionOnEntry(f, maybeCond)) {
+ return false;
+ }
+
+ {
+ // Continuing in the body should just break out to the increment.
+ if (!f.pushContinuableBlock()) {
+ return false;
+ }
+ if (!CheckStatement(f, body)) {
+ return false;
+ }
+ if (!f.popContinuableBlock()) {
+ return false;
+ }
+ }
+
+ if (maybeInc && !CheckAsExprStatement(f, maybeInc)) {
+ return false;
+ }
+
+ if (!f.writeContinue()) {
+ return false;
+ }
+ if (!f.popLoop()) {
+ return false;
+ }
+ }
+
+ if (!f.popUnbreakableBlock()) {
+ return false;
+ }
+
+ if (labels) {
+ f.removeLabels(*labels);
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckDoWhile(FunctionValidator<Unit>& f, ParseNode* whileStmt,
+ const LabelVector* labels = nullptr) {
+ MOZ_ASSERT(whileStmt->isKind(ParseNodeKind::DoWhileStmt));
+ ParseNode* body = BinaryLeft(whileStmt);
+ ParseNode* cond = BinaryRight(whileStmt);
+
+ // A do-while loop `do { #body } while (#cond)` is equivalent to:
+ // (block $after_loop // depth X
+ // (loop $top // depth X+1
+ // (block #body) // depth X+2
+ // (brIf #cond $top)
+ // )
+ // )
+ // A break should break out of the entire loop, i.e. at depth 0.
+ // A continue should break out to the condition, i.e. at depth 2.
+ if (labels && !f.addLabels(*labels, 0, 2)) {
+ return false;
+ }
+
+ if (!f.pushLoop()) {
+ return false;
+ }
+
+ {
+ // An unlabeled continue in the body should break out to the condition.
+ if (!f.pushContinuableBlock()) {
+ return false;
+ }
+ if (!CheckStatement(f, body)) {
+ return false;
+ }
+ if (!f.popContinuableBlock()) {
+ return false;
+ }
+ }
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ if (!f.writeContinueIf()) {
+ return false;
+ }
+
+ if (!f.popLoop()) {
+ return false;
+ }
+ if (labels) {
+ f.removeLabels(*labels);
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckStatementList(FunctionValidator<Unit>& f, ParseNode*,
+ const LabelVector* = nullptr);
+
+template <typename Unit>
+static bool CheckLabel(FunctionValidator<Unit>& f, ParseNode* labeledStmt) {
+ MOZ_ASSERT(labeledStmt->isKind(ParseNodeKind::LabelStmt));
+
+ LabelVector labels;
+ ParseNode* innermost = labeledStmt;
+ do {
+ if (!labels.append(LabeledStatementLabel(innermost))) {
+ return false;
+ }
+ innermost = LabeledStatementStatement(innermost);
+ } while (innermost->getKind() == ParseNodeKind::LabelStmt);
+
+ switch (innermost->getKind()) {
+ case ParseNodeKind::ForStmt:
+ return CheckFor(f, innermost, &labels);
+ case ParseNodeKind::DoWhileStmt:
+ return CheckDoWhile(f, innermost, &labels);
+ case ParseNodeKind::WhileStmt:
+ return CheckWhile(f, innermost, &labels);
+ case ParseNodeKind::StatementList:
+ return CheckStatementList(f, innermost, &labels);
+ default:
+ break;
+ }
+
+ if (!f.pushUnbreakableBlock(&labels)) {
+ return false;
+ }
+
+ if (!CheckStatement(f, innermost)) {
+ return false;
+ }
+
+ if (!f.popUnbreakableBlock(&labels)) {
+ return false;
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckIf(FunctionValidator<Unit>& f, ParseNode* ifStmt) {
+ uint32_t numIfEnd = 1;
+
+recurse:
+ MOZ_ASSERT(ifStmt->isKind(ParseNodeKind::IfStmt));
+ ParseNode* cond = TernaryKid1(ifStmt);
+ ParseNode* thenStmt = TernaryKid2(ifStmt);
+ ParseNode* elseStmt = TernaryKid3(ifStmt);
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ size_t typeAt;
+ if (!f.pushIf(&typeAt)) {
+ return false;
+ }
+
+ f.setIfType(typeAt, TypeCode::BlockVoid);
+
+ if (!CheckStatement(f, thenStmt)) {
+ return false;
+ }
+
+ if (elseStmt) {
+ if (!f.switchToElse()) {
+ return false;
+ }
+
+ if (elseStmt->isKind(ParseNodeKind::IfStmt)) {
+ ifStmt = elseStmt;
+ if (numIfEnd++ == UINT32_MAX) {
+ return false;
+ }
+ goto recurse;
+ }
+
+ if (!CheckStatement(f, elseStmt)) {
+ return false;
+ }
+ }
+
+ for (uint32_t i = 0; i != numIfEnd; ++i) {
+ if (!f.popIf()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool CheckCaseExpr(FunctionValidatorShared& f, ParseNode* caseExpr,
+ int32_t* value) {
+ if (!IsNumericLiteral(f.m(), caseExpr)) {
+ return f.fail(caseExpr,
+ "switch case expression must be an integer literal");
+ }
+
+ NumLit lit = ExtractNumericLiteral(f.m(), caseExpr);
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ *value = lit.toInt32();
+ break;
+ case NumLit::OutOfRangeInt:
+ case NumLit::BigUnsigned:
+ return f.fail(caseExpr, "switch case expression out of integer range");
+ case NumLit::Double:
+ case NumLit::Float:
+ return f.fail(caseExpr,
+ "switch case expression must be an integer literal");
+ }
+
+ return true;
+}
+
+static bool CheckDefaultAtEnd(FunctionValidatorShared& f, ParseNode* stmt) {
+ for (; stmt; stmt = NextNode(stmt)) {
+ if (IsDefaultCase(stmt) && NextNode(stmt) != nullptr) {
+ return f.fail(stmt, "default label must be at the end");
+ }
+ }
+
+ return true;
+}
+
+static bool CheckSwitchRange(FunctionValidatorShared& f, ParseNode* stmt,
+ int32_t* low, int32_t* high,
+ uint32_t* tableLength) {
+ if (IsDefaultCase(stmt)) {
+ *low = 0;
+ *high = -1;
+ *tableLength = 0;
+ return true;
+ }
+
+ int32_t i = 0;
+ if (!CheckCaseExpr(f, CaseExpr(stmt), &i)) {
+ return false;
+ }
+
+ *low = *high = i;
+
+ ParseNode* initialStmt = stmt;
+ for (stmt = NextNode(stmt); stmt && !IsDefaultCase(stmt);
+ stmt = NextNode(stmt)) {
+ int32_t i = 0;
+ if (!CheckCaseExpr(f, CaseExpr(stmt), &i)) {
+ return false;
+ }
+
+ *low = std::min(*low, i);
+ *high = std::max(*high, i);
+ }
+
+ int64_t i64 = (int64_t(*high) - int64_t(*low)) + 1;
+ if (i64 > MaxBrTableElems) {
+ return f.fail(
+ initialStmt,
+ "all switch statements generate tables; this table would be too big");
+ }
+
+ *tableLength = uint32_t(i64);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckSwitchExpr(FunctionValidator<Unit>& f, ParseNode* switchExpr) {
+ Type exprType;
+ if (!CheckExpr(f, switchExpr, &exprType)) {
+ return false;
+ }
+ if (!exprType.isSigned()) {
+ return f.failf(switchExpr, "%s is not a subtype of signed",
+ exprType.toChars());
+ }
+ return true;
+}
+
+// A switch will be constructed as:
+// - the default block wrapping all the other blocks, to be able to break
+// out of the switch with an unlabeled break statement. It has two statements
+// (an inner block and the default expr). asm.js rules require default to be at
+// the end, so the default block always encloses all the cases blocks.
+// - one block per case between low and high; undefined cases just jump to the
+// default case. Each of these blocks contains two statements: the next case's
+// block and the possibly empty statement list comprising the case body. The
+// last block pushed is the first case, so the (relative) branch targets match
+// the sequential order of cases.
+// - one block for the br_table, so that the first break goes to the first
+// case's block.
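+//
+// Illustrative example: for `switch (x | 0) { case 1: ... case 3: ... default:
+// ... }`, low=1, high=3 and tableLength=3. Case 1 gets relative depth 0, case
+// 3 gets depth 1, and the missing value 2 maps to the default depth 2, so the
+// br_table entries are 0, 2, 1 followed by the default depth 2.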
+template <typename Unit>
+static bool CheckSwitch(FunctionValidator<Unit>& f, ParseNode* switchStmt) {
+ MOZ_ASSERT(switchStmt->isKind(ParseNodeKind::SwitchStmt));
+
+ ParseNode* switchExpr = BinaryLeft(switchStmt);
+ ParseNode* switchBody = BinaryRight(switchStmt);
+
+ if (switchBody->is<LexicalScopeNode>()) {
+ LexicalScopeNode* scope = &switchBody->as<LexicalScopeNode>();
+ if (!scope->isEmptyScope()) {
+ return f.fail(scope, "switch body may not contain lexical declarations");
+ }
+ switchBody = scope->scopeBody();
+ }
+
+ ParseNode* stmt = ListHead(switchBody);
+ if (!stmt) {
+ if (!CheckSwitchExpr(f, switchExpr)) {
+ return false;
+ }
+ if (!f.encoder().writeOp(Op::Drop)) {
+ return false;
+ }
+ return true;
+ }
+
+ if (!CheckDefaultAtEnd(f, stmt)) {
+ return false;
+ }
+
+ int32_t low = 0, high = 0;
+ uint32_t tableLength = 0;
+ if (!CheckSwitchRange(f, stmt, &low, &high, &tableLength)) {
+ return false;
+ }
+
+ static const uint32_t CASE_NOT_DEFINED = UINT32_MAX;
+
+ Uint32Vector caseDepths;
+ if (!caseDepths.appendN(CASE_NOT_DEFINED, tableLength)) {
+ return false;
+ }
+
+ uint32_t numCases = 0;
+ for (ParseNode* s = stmt; s && !IsDefaultCase(s); s = NextNode(s)) {
+ int32_t caseValue = ExtractNumericLiteral(f.m(), CaseExpr(s)).toInt32();
+
+ MOZ_ASSERT(caseValue >= low);
+ unsigned i = caseValue - low;
+ if (caseDepths[i] != CASE_NOT_DEFINED) {
+ return f.fail(s, "no duplicate case labels");
+ }
+
+ MOZ_ASSERT(numCases != CASE_NOT_DEFINED);
+ caseDepths[i] = numCases++;
+ }
+
+ // Open the wrapping breakable default block.
+ if (!f.pushBreakableBlock()) {
+ return false;
+ }
+
+ // Open all the case blocks.
+ for (uint32_t i = 0; i < numCases; i++) {
+ if (!f.pushUnbreakableBlock()) {
+ return false;
+ }
+ }
+
+ // Open the br_table block.
+ if (!f.pushUnbreakableBlock()) {
+ return false;
+ }
+
+ // The default block is the last one.
+ uint32_t defaultDepth = numCases;
+
+ // Subtract lowest case value, so that all the cases start from 0.
+ if (low) {
+ if (!CheckSwitchExpr(f, switchExpr)) {
+ return false;
+ }
+ if (!f.writeInt32Lit(low)) {
+ return false;
+ }
+ if (!f.encoder().writeOp(Op::I32Sub)) {
+ return false;
+ }
+ } else {
+ if (!CheckSwitchExpr(f, switchExpr)) {
+ return false;
+ }
+ }
+
+ // Start the br_table block.
+ if (!f.encoder().writeOp(Op::BrTable)) {
+ return false;
+ }
+
+  // Write the number of br_table entries (tableLength); the default target's
+  // depth is written separately below.
+ if (!f.encoder().writeVarU32(tableLength)) {
+ return false;
+ }
+
+ // Each case value describes the relative depth to the actual block. When
+ // a case is not explicitly defined, it goes to the default.
+ for (size_t i = 0; i < tableLength; i++) {
+ uint32_t target =
+ caseDepths[i] == CASE_NOT_DEFINED ? defaultDepth : caseDepths[i];
+ if (!f.encoder().writeVarU32(target)) {
+ return false;
+ }
+ }
+
+ // Write the default depth.
+ if (!f.encoder().writeVarU32(defaultDepth)) {
+ return false;
+ }
+
+ // Our br_table is done. Close its block, write the cases down in order.
+ if (!f.popUnbreakableBlock()) {
+ return false;
+ }
+
+ for (; stmt && !IsDefaultCase(stmt); stmt = NextNode(stmt)) {
+ if (!CheckStatement(f, CaseBody(stmt))) {
+ return false;
+ }
+ if (!f.popUnbreakableBlock()) {
+ return false;
+ }
+ }
+
+ // Write the default block.
+ if (stmt && IsDefaultCase(stmt)) {
+ if (!CheckStatement(f, CaseBody(stmt))) {
+ return false;
+ }
+ }
+
+ // Close the wrapping block.
+ return f.popBreakableBlock();
+}
+
+static bool CheckReturnType(FunctionValidatorShared& f, ParseNode* usepn,
+ Type ret) {
+ Maybe<ValType> type = ret.canonicalToReturnType();
+
+ if (!f.hasAlreadyReturned()) {
+ f.setReturnedType(type);
+ return true;
+ }
+
+ if (f.returnedType() != type) {
+ return f.failf(usepn, "%s incompatible with previous return of type %s",
+ ToString(type, nullptr).get(),
+ ToString(f.returnedType(), nullptr).get());
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckReturn(FunctionValidator<Unit>& f, ParseNode* returnStmt) {
+ ParseNode* expr = ReturnExpr(returnStmt);
+
+ if (!expr) {
+ if (!CheckReturnType(f, returnStmt, Type::Void)) {
+ return false;
+ }
+ } else {
+ Type type;
+ if (!CheckExpr(f, expr, &type)) {
+ return false;
+ }
+
+ if (!type.isReturnType()) {
+ return f.failf(expr, "%s is not a valid return type", type.toChars());
+ }
+
+ if (!CheckReturnType(f, expr, Type::canonicalize(type))) {
+ return false;
+ }
+ }
+
+ return f.encoder().writeOp(Op::Return);
+}
+
+template <typename Unit>
+static bool CheckStatementList(FunctionValidator<Unit>& f, ParseNode* stmtList,
+ const LabelVector* labels /*= nullptr */) {
+ MOZ_ASSERT(stmtList->isKind(ParseNodeKind::StatementList));
+
+ if (!f.pushUnbreakableBlock(labels)) {
+ return false;
+ }
+
+ for (ParseNode* stmt = ListHead(stmtList); stmt; stmt = NextNode(stmt)) {
+ if (!CheckStatement(f, stmt)) {
+ return false;
+ }
+ }
+
+ return f.popUnbreakableBlock(labels);
+}
+
+template <typename Unit>
+static bool CheckLexicalScope(FunctionValidator<Unit>& f, ParseNode* node) {
+ LexicalScopeNode* lexicalScope = &node->as<LexicalScopeNode>();
+ if (!lexicalScope->isEmptyScope()) {
+ return f.fail(lexicalScope, "cannot have 'let' or 'const' declarations");
+ }
+
+ return CheckStatement(f, lexicalScope->scopeBody());
+}
+
+static bool CheckBreakOrContinue(FunctionValidatorShared& f, bool isBreak,
+ ParseNode* stmt) {
+ if (TaggedParserAtomIndex maybeLabel = LoopControlMaybeLabel(stmt)) {
+ return f.writeLabeledBreakOrContinue(maybeLabel, isBreak);
+ }
+ return f.writeUnlabeledBreakOrContinue(isBreak);
+}
+
+template <typename Unit>
+static bool CheckStatement(FunctionValidator<Unit>& f, ParseNode* stmt) {
+ AutoCheckRecursionLimit recursion(f.fc());
+ if (!recursion.checkDontReport(f.fc())) {
+ return f.m().failOverRecursed();
+ }
+
+ switch (stmt->getKind()) {
+ case ParseNodeKind::EmptyStmt:
+ return true;
+ case ParseNodeKind::ExpressionStmt:
+ return CheckExprStatement(f, stmt);
+ case ParseNodeKind::WhileStmt:
+ return CheckWhile(f, stmt);
+ case ParseNodeKind::ForStmt:
+ return CheckFor(f, stmt);
+ case ParseNodeKind::DoWhileStmt:
+ return CheckDoWhile(f, stmt);
+ case ParseNodeKind::LabelStmt:
+ return CheckLabel(f, stmt);
+ case ParseNodeKind::IfStmt:
+ return CheckIf(f, stmt);
+ case ParseNodeKind::SwitchStmt:
+ return CheckSwitch(f, stmt);
+ case ParseNodeKind::ReturnStmt:
+ return CheckReturn(f, stmt);
+ case ParseNodeKind::StatementList:
+ return CheckStatementList(f, stmt);
+ case ParseNodeKind::BreakStmt:
+ return CheckBreakOrContinue(f, true, stmt);
+ case ParseNodeKind::ContinueStmt:
+ return CheckBreakOrContinue(f, false, stmt);
+ case ParseNodeKind::LexicalScope:
+ return CheckLexicalScope(f, stmt);
+ default:;
+ }
+
+ return f.fail(stmt, "unexpected statement kind");
+}
+
+template <typename Unit>
+static bool ParseFunction(ModuleValidator<Unit>& m, FunctionNode** funNodeOut,
+ unsigned* line) {
+ auto& tokenStream = m.tokenStream();
+
+ tokenStream.consumeKnownToken(TokenKind::Function,
+ TokenStreamShared::SlashIsRegExp);
+
+ auto& anyChars = tokenStream.anyCharsAccess();
+ uint32_t toStringStart = anyChars.currentToken().pos.begin;
+ *line = anyChars.lineNumber(anyChars.lineToken(toStringStart));
+
+ TokenKind tk;
+ if (!tokenStream.getToken(&tk, TokenStreamShared::SlashIsRegExp)) {
+ return false;
+ }
+ if (tk == TokenKind::Mul) {
+ return m.failCurrentOffset("unexpected generator function");
+ }
+ if (!TokenKindIsPossibleIdentifier(tk)) {
+ return false; // The regular parser will throw a SyntaxError, no need to
+ // m.fail.
+ }
+
+ TaggedParserAtomIndex name = m.parser().bindingIdentifier(YieldIsName);
+ if (!name) {
+ return false;
+ }
+
+ FunctionNode* funNode = m.parser().handler_.newFunction(
+ FunctionSyntaxKind::Statement, m.parser().pos());
+ if (!funNode) {
+ return false;
+ }
+
+ ParseContext* outerpc = m.parser().pc_;
+ Directives directives(outerpc);
+ FunctionFlags flags(FunctionFlags::INTERPRETED_NORMAL);
+ FunctionBox* funbox = m.parser().newFunctionBox(
+ funNode, name, flags, toStringStart, directives,
+ GeneratorKind::NotGenerator, FunctionAsyncKind::SyncFunction);
+ if (!funbox) {
+ return false;
+ }
+ funbox->initWithEnclosingParseContext(outerpc, FunctionSyntaxKind::Statement);
+
+ Directives newDirectives = directives;
+ SourceParseContext funpc(&m.parser(), funbox, &newDirectives);
+ if (!funpc.init()) {
+ return false;
+ }
+
+ if (!m.parser().functionFormalParametersAndBody(
+ InAllowed, YieldIsName, &funNode, FunctionSyntaxKind::Statement)) {
+ if (anyChars.hadError() || directives == newDirectives) {
+ return false;
+ }
+
+ return m.fail(funNode, "encountered new directive in function");
+ }
+
+ MOZ_ASSERT(!anyChars.hadError());
+ MOZ_ASSERT(directives == newDirectives);
+
+ *funNodeOut = funNode;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFunction(ModuleValidator<Unit>& m) {
+ // asm.js modules can be quite large when represented as parse trees so pop
+ // the backing LifoAlloc after parsing/compiling each function. Release the
+ // parser's lifo memory after the last use of a parse node.
+ frontend::ParserBase::Mark mark = m.parser().mark();
+ auto releaseMark =
+ mozilla::MakeScopeExit([&m, &mark] { m.parser().release(mark); });
+
+ FunctionNode* funNode = nullptr;
+ unsigned line = 0;
+ if (!ParseFunction(m, &funNode, &line)) {
+ return false;
+ }
+
+ if (!CheckFunctionHead(m, funNode)) {
+ return false;
+ }
+
+ FunctionValidator<Unit> f(m, funNode);
+
+ ParseNode* stmtIter = ListHead(FunctionStatementList(funNode));
+
+ if (!CheckProcessingDirectives(m, &stmtIter)) {
+ return false;
+ }
+
+ ValTypeVector args;
+ if (!CheckArguments(f, &stmtIter, &args)) {
+ return false;
+ }
+
+ if (!CheckVariables(f, &stmtIter)) {
+ return false;
+ }
+
+ ParseNode* lastNonEmptyStmt = nullptr;
+ for (; stmtIter; stmtIter = NextNonEmptyStatement(stmtIter)) {
+ lastNonEmptyStmt = stmtIter;
+ if (!CheckStatement(f, stmtIter)) {
+ return false;
+ }
+ }
+
+ if (!CheckFinalReturn(f, lastNonEmptyStmt)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ if (f.returnedType()) {
+ if (!results.append(f.returnedType().ref())) {
+ return false;
+ }
+ }
+
+ FuncType sig(std::move(args), std::move(results));
+
+ ModuleValidatorShared::Func* func = nullptr;
+ if (!CheckFunctionSignature(m, funNode, std::move(sig), FunctionName(funNode),
+ &func)) {
+ return false;
+ }
+
+ if (func->defined()) {
+ return m.failName(funNode, "function '%s' already defined",
+ FunctionName(funNode));
+ }
+
+ f.define(func, line);
+
+ return true;
+}
+
+static bool CheckAllFunctionsDefined(ModuleValidatorShared& m) {
+ for (unsigned i = 0; i < m.numFuncDefs(); i++) {
+ const ModuleValidatorShared::Func& f = m.funcDef(i);
+ if (!f.defined()) {
+ return m.failNameOffset(f.firstUse(), "missing definition of function %s",
+ f.name());
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFunctions(ModuleValidator<Unit>& m) {
+ while (true) {
+ TokenKind tk;
+ if (!PeekToken(m.parser(), &tk)) {
+ return false;
+ }
+
+ if (tk != TokenKind::Function) {
+ break;
+ }
+
+ if (!CheckFunction(m)) {
+ return false;
+ }
+ }
+
+ return CheckAllFunctionsDefined(m);
+}
+
+template <typename Unit>
+static bool CheckFuncPtrTable(ModuleValidator<Unit>& m, ParseNode* decl) {
+ if (!decl->isKind(ParseNodeKind::AssignExpr)) {
+ return m.fail(decl, "function-pointer table must have initializer");
+ }
+ AssignmentNode* assignNode = &decl->as<AssignmentNode>();
+
+ ParseNode* var = assignNode->left();
+
+ if (!var->isKind(ParseNodeKind::Name)) {
+ return m.fail(var, "function-pointer table name is not a plain name");
+ }
+
+ ParseNode* arrayLiteral = assignNode->right();
+
+ if (!arrayLiteral->isKind(ParseNodeKind::ArrayExpr)) {
+ return m.fail(
+ var, "function-pointer table's initializer must be an array literal");
+ }
+
+ unsigned length = ListLength(arrayLiteral);
+
+ if (!IsPowerOfTwo(length)) {
+ return m.failf(arrayLiteral,
+ "function-pointer table length must be a power of 2 (is %u)",
+ length);
+ }
+
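+  // Illustrative: a table of length 8 gets mask 7, so a call through the
+  // table is written tbl[i & 7](...) and can never index out of bounds.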
+ unsigned mask = length - 1;
+
+ Uint32Vector elemFuncDefIndices;
+ const FuncType* sig = nullptr;
+ for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
+ if (!elem->isKind(ParseNodeKind::Name)) {
+ return m.fail(
+ elem, "function-pointer table's elements must be names of functions");
+ }
+
+ TaggedParserAtomIndex funcName = elem->as<NameNode>().name();
+ const ModuleValidatorShared::Func* func = m.lookupFuncDef(funcName);
+ if (!func) {
+ return m.fail(
+ elem, "function-pointer table's elements must be names of functions");
+ }
+
+ const FuncType& funcSig = m.env().types->type(func->sigIndex()).funcType();
+ if (sig) {
+ if (!FuncType::strictlyEquals(*sig, funcSig)) {
+ return m.fail(elem, "all functions in table must have same signature");
+ }
+ } else {
+ sig = &funcSig;
+ }
+
+ if (!elemFuncDefIndices.append(func->funcDefIndex())) {
+ return false;
+ }
+ }
+
+ FuncType copy;
+ if (!copy.clone(*sig)) {
+ return false;
+ }
+
+ uint32_t tableIndex;
+ if (!CheckFuncPtrTableAgainstExisting(m, var, var->as<NameNode>().name(),
+ std::move(copy), mask, &tableIndex)) {
+ return false;
+ }
+
+ if (!m.defineFuncPtrTable(tableIndex, std::move(elemFuncDefIndices))) {
+ return m.fail(var, "duplicate function-pointer definition");
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFuncPtrTables(ModuleValidator<Unit>& m) {
+ while (true) {
+ ParseNode* varStmt;
+ if (!ParseVarOrConstStatement(m.parser(), &varStmt)) {
+ return false;
+ }
+ if (!varStmt) {
+ break;
+ }
+ for (ParseNode* var = VarListHead(varStmt); var; var = NextNode(var)) {
+ if (!CheckFuncPtrTable(m, var)) {
+ return false;
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < m.numFuncPtrTables(); i++) {
+ ModuleValidatorShared::Table& table = m.table(i);
+ if (!table.defined()) {
+ return m.failNameOffset(table.firstUse(),
+ "function-pointer table %s wasn't defined",
+ table.name());
+ }
+ }
+
+ return true;
+}
+
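+// The module's export statement names either a single function or an object
+// literal of functions; illustrative forms:
+//
+//   return f;
+//   return { add: f, sub: g };
+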
+static bool CheckModuleExportFunction(
+ ModuleValidatorShared& m, ParseNode* pn,
+ TaggedParserAtomIndex maybeFieldName = TaggedParserAtomIndex::null()) {
+ if (!pn->isKind(ParseNodeKind::Name)) {
+ return m.fail(pn, "expected name of exported function");
+ }
+
+ TaggedParserAtomIndex funcName = pn->as<NameNode>().name();
+ const ModuleValidatorShared::Func* func = m.lookupFuncDef(funcName);
+ if (!func) {
+ return m.failName(pn, "function '%s' not found", funcName);
+ }
+
+ return m.addExportField(*func, maybeFieldName);
+}
+
+static bool CheckModuleExportObject(ModuleValidatorShared& m,
+ ParseNode* object) {
+ MOZ_ASSERT(object->isKind(ParseNodeKind::ObjectExpr));
+
+ for (ParseNode* pn = ListHead(object); pn; pn = NextNode(pn)) {
+ if (!IsNormalObjectField(pn)) {
+ return m.fail(pn,
+ "only normal object properties may be used in the export "
+ "object literal");
+ }
+
+ TaggedParserAtomIndex fieldName = ObjectNormalFieldName(pn);
+
+ ParseNode* initNode = ObjectNormalFieldInitializer(pn);
+ if (!initNode->isKind(ParseNodeKind::Name)) {
+ return m.fail(
+ initNode,
+ "initializer of exported object literal must be name of function");
+ }
+
+ if (!CheckModuleExportFunction(m, initNode, fieldName)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckModuleReturn(ModuleValidator<Unit>& m) {
+ TokenKind tk;
+ if (!GetToken(m.parser(), &tk)) {
+ return false;
+ }
+ auto& ts = m.parser().tokenStream;
+ if (tk != TokenKind::Return) {
+ return m.failCurrentOffset(
+ (tk == TokenKind::RightCurly || tk == TokenKind::Eof)
+ ? "expecting return statement"
+ : "invalid asm.js. statement");
+ }
+ ts.anyCharsAccess().ungetToken();
+
+ ParseNode* returnStmt = m.parser().statementListItem(YieldIsName);
+ if (!returnStmt) {
+ return false;
+ }
+
+ ParseNode* returnExpr = ReturnExpr(returnStmt);
+ if (!returnExpr) {
+ return m.fail(returnStmt, "export statement must return something");
+ }
+
+ if (returnExpr->isKind(ParseNodeKind::ObjectExpr)) {
+ if (!CheckModuleExportObject(m, returnExpr)) {
+ return false;
+ }
+ } else {
+ if (!CheckModuleExportFunction(m, returnExpr)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckModuleEnd(ModuleValidator<Unit>& m) {
+ TokenKind tk;
+ if (!GetToken(m.parser(), &tk)) {
+ return false;
+ }
+
+ if (tk != TokenKind::Eof && tk != TokenKind::RightCurly) {
+ return m.failCurrentOffset(
+ "top-level export (return) must be the last statement");
+ }
+
+ m.parser().tokenStream.anyCharsAccess().ungetToken();
+ return true;
+}
+
+template <typename Unit>
+static SharedModule CheckModule(FrontendContext* fc,
+ ParserAtomsTable& parserAtoms,
+ AsmJSParser<Unit>& parser, ParseNode* stmtList,
+ unsigned* time) {
+ int64_t before = PRMJ_Now();
+
+ FunctionNode* moduleFunctionNode = parser.pc_->functionBox()->functionNode;
+
+ ModuleValidator<Unit> m(fc, parserAtoms, parser, moduleFunctionNode);
+ if (!m.init()) {
+ return nullptr;
+ }
+
+ if (!CheckFunctionHead(m, moduleFunctionNode)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleArguments(m, moduleFunctionNode)) {
+ return nullptr;
+ }
+
+ if (!CheckPrecedingStatements(m, stmtList)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleProcessingDirectives(m)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleGlobals(m)) {
+ return nullptr;
+ }
+
+ if (!m.startFunctionBodies()) {
+ return nullptr;
+ }
+
+ if (!CheckFunctions(m)) {
+ return nullptr;
+ }
+
+ if (!CheckFuncPtrTables(m)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleReturn(m)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleEnd(m)) {
+ return nullptr;
+ }
+
+ SharedModule module = m.finish();
+ if (!module) {
+ return nullptr;
+ }
+
+ *time = (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC;
+ return module;
+}
+
+/*****************************************************************************/
+// Link-time validation
+
+static bool LinkFail(JSContext* cx, const char* str) {
+ WarnNumberASCII(cx, JSMSG_USE_ASM_LINK_FAIL, str);
+ return false;
+}
+
+static bool IsMaybeWrappedScriptedProxy(JSObject* obj) {
+ JSObject* unwrapped = UncheckedUnwrap(obj);
+ return unwrapped && IsScriptedProxy(unwrapped);
+}
+
+static bool GetDataProperty(JSContext* cx, HandleValue objVal,
+ Handle<JSAtom*> field, MutableHandleValue v) {
+ if (!objVal.isObject()) {
+ return LinkFail(cx, "accessing property of non-object");
+ }
+
+ RootedObject obj(cx, &objVal.toObject());
+ if (IsMaybeWrappedScriptedProxy(obj)) {
+ return LinkFail(cx, "accessing property of a Proxy");
+ }
+
+ RootedId id(cx, AtomToId(field));
+ Rooted<mozilla::Maybe<PropertyDescriptor>> desc(cx);
+ RootedObject holder(cx);
+ if (!GetPropertyDescriptor(cx, obj, id, &desc, &holder)) {
+ return false;
+ }
+
+ if (!desc.isSome()) {
+ return LinkFail(cx, "property not present on object");
+ }
+
+ if (!desc->isDataDescriptor()) {
+ return LinkFail(cx, "property is not a data property");
+ }
+
+ v.set(desc->value());
+ return true;
+}
+
+static bool GetDataProperty(JSContext* cx, HandleValue objVal,
+ const char* fieldChars, MutableHandleValue v) {
+ Rooted<JSAtom*> field(cx,
+ AtomizeUTF8Chars(cx, fieldChars, strlen(fieldChars)));
+ if (!field) {
+ return false;
+ }
+
+ return GetDataProperty(cx, objVal, field, v);
+}
+
+static bool GetDataProperty(JSContext* cx, HandleValue objVal,
+ const ImmutableTenuredPtr<PropertyName*>& field,
+ MutableHandleValue v) {
+ Handle<PropertyName*> fieldHandle = field;
+ return GetDataProperty(cx, objVal, fieldHandle, v);
+}
+
+static bool HasObjectValueOfMethodPure(JSObject* obj, JSContext* cx) {
+ Value v;
+ if (!GetPropertyPure(cx, obj, NameToId(cx->names().valueOf), &v)) {
+ return false;
+ }
+
+ JSFunction* fun;
+ if (!IsFunctionObject(v, &fun)) {
+ return false;
+ }
+
+ return IsSelfHostedFunctionWithName(fun, cx->names().Object_valueOf);
+}
+
+static bool HasPureCoercion(JSContext* cx, HandleValue v) {
+ // Ideally, we'd reject all non-primitives, but Emscripten has a bug that
+ // generates code that passes functions for some imports. To avoid breaking
+ // all the code that contains this bug, we make an exception for functions
+ // that don't have user-defined valueOf or toString, for their coercions
+ // are not observable and coercion via ToNumber/ToInt32 definitely produces
+ // NaN/0. We should remove this special case later once most apps have been
+ // built with newer Emscripten.
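+  //
+  // Illustrative (hypothetical) asm.js import that reaches this exception:
+  //
+  //   var gi = foreign.someFun | 0;   // ToInt32 coercion of an import
+  //
+  // where foreign.someFun is a function with only the default valueOf and
+  // toString (and no Symbol.toPrimitive), so ToInt32 yields 0 without
+  // running user code.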
+ return v.toObject().is<JSFunction>() &&
+ HasNoToPrimitiveMethodPure(&v.toObject(), cx) &&
+ HasObjectValueOfMethodPure(&v.toObject(), cx) &&
+ HasNativeMethodPure(&v.toObject(), cx->names().toString, fun_toString,
+ cx);
+}
+
+static bool ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue importVal,
+ Maybe<LitValPOD>* val) {
+ switch (global.varInitKind()) {
+ case AsmJSGlobal::InitConstant:
+ val->emplace(global.varInitVal());
+ return true;
+
+ case AsmJSGlobal::InitImport: {
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, importVal, global.field(), &v)) {
+ return false;
+ }
+
+ if (!v.isPrimitive() && !HasPureCoercion(cx, v)) {
+ return LinkFail(cx, "Imported values must be primitives");
+ }
+
+ switch (global.varInitImportType().kind()) {
+ case ValType::I32: {
+ int32_t i32;
+ if (!ToInt32(cx, v, &i32)) {
+ return false;
+ }
+ val->emplace(uint32_t(i32));
+ return true;
+ }
+ case ValType::I64:
+ MOZ_CRASH("int64");
+ case ValType::V128:
+ MOZ_CRASH("v128");
+ case ValType::F32: {
+ float f;
+ if (!RoundFloat32(cx, v, &f)) {
+ return false;
+ }
+ val->emplace(f);
+ return true;
+ }
+ case ValType::F64: {
+ double d;
+ if (!ToNumber(cx, v, &d)) {
+ return false;
+ }
+ val->emplace(d);
+ return true;
+ }
+ case ValType::Ref: {
+ MOZ_CRASH("not available in asm.js");
+ }
+ }
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+static bool ValidateFFI(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue importVal,
+ MutableHandle<FunctionVector> ffis) {
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, importVal, global.field(), &v)) {
+ return false;
+ }
+
+ if (!IsFunctionObject(v)) {
+ return LinkFail(cx, "FFI imports must be functions");
+ }
+
+ ffis[global.ffiIndex()].set(&v.toObject().as<JSFunction>());
+ return true;
+}
+
+static bool ValidateArrayView(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue globalVal) {
+ if (!global.field()) {
+ return true;
+ }
+
+ if (Scalar::isBigIntType(global.viewType())) {
+ return LinkFail(cx, "bad typed array constructor");
+ }
+
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, global.field(), &v)) {
+ return false;
+ }
+
+ bool tac = IsTypedArrayConstructor(v, global.viewType());
+ if (!tac) {
+ return LinkFail(cx, "bad typed array constructor");
+ }
+
+ return true;
+}
+
+static InlinableNative ToInlinableNative(AsmJSMathBuiltinFunction func) {
+ switch (func) {
+ case AsmJSMathBuiltin_sin:
+ return InlinableNative::MathSin;
+ case AsmJSMathBuiltin_cos:
+ return InlinableNative::MathCos;
+ case AsmJSMathBuiltin_tan:
+ return InlinableNative::MathTan;
+ case AsmJSMathBuiltin_asin:
+ return InlinableNative::MathASin;
+ case AsmJSMathBuiltin_acos:
+ return InlinableNative::MathACos;
+ case AsmJSMathBuiltin_atan:
+ return InlinableNative::MathATan;
+ case AsmJSMathBuiltin_ceil:
+ return InlinableNative::MathCeil;
+ case AsmJSMathBuiltin_floor:
+ return InlinableNative::MathFloor;
+ case AsmJSMathBuiltin_exp:
+ return InlinableNative::MathExp;
+ case AsmJSMathBuiltin_log:
+ return InlinableNative::MathLog;
+ case AsmJSMathBuiltin_pow:
+ return InlinableNative::MathPow;
+ case AsmJSMathBuiltin_sqrt:
+ return InlinableNative::MathSqrt;
+ case AsmJSMathBuiltin_abs:
+ return InlinableNative::MathAbs;
+ case AsmJSMathBuiltin_atan2:
+ return InlinableNative::MathATan2;
+ case AsmJSMathBuiltin_imul:
+ return InlinableNative::MathImul;
+ case AsmJSMathBuiltin_fround:
+ return InlinableNative::MathFRound;
+ case AsmJSMathBuiltin_min:
+ return InlinableNative::MathMin;
+ case AsmJSMathBuiltin_max:
+ return InlinableNative::MathMax;
+ case AsmJSMathBuiltin_clz32:
+ return InlinableNative::MathClz32;
+ }
+ MOZ_CRASH("Invalid asm.js math builtin function");
+}
+
+static bool ValidateMathBuiltinFunction(JSContext* cx,
+ const AsmJSMetadata& metadata,
+ const AsmJSGlobal& global,
+ HandleValue globalVal) {
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, cx->names().Math, &v)) {
+ return false;
+ }
+
+ if (!GetDataProperty(cx, v, global.field(), &v)) {
+ return false;
+ }
+
+ InlinableNative native = ToInlinableNative(global.mathBuiltinFunction());
+
+ JSFunction* fun;
+ if (!IsFunctionObject(v, &fun) || !fun->hasJitInfo() ||
+ fun->jitInfo()->type() != JSJitInfo::InlinableNative ||
+ fun->jitInfo()->inlinableNative != native) {
+ return LinkFail(cx, "bad Math.* builtin function");
+ }
+ if (fun->realm()->behaviors().shouldResistFingerprinting() !=
+ metadata.shouldResistFingerprinting) {
+ return LinkFail(cx,
+ "Math.* builtin function and asm.js module have a "
+ "different resist fingerprinting mode");
+ }
+
+ return true;
+}
+
+static bool ValidateConstant(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue globalVal) {
+ RootedValue v(cx, globalVal);
+
+ if (global.constantKind() == AsmJSGlobal::MathConstant) {
+ if (!GetDataProperty(cx, v, cx->names().Math, &v)) {
+ return false;
+ }
+ }
+
+ if (!GetDataProperty(cx, v, global.field(), &v)) {
+ return false;
+ }
+
+ if (!v.isNumber()) {
+ return LinkFail(cx, "math / global constant value needs to be a number");
+ }
+
+ // NaN != NaN
+ if (std::isnan(global.constantValue())) {
+ if (!std::isnan(v.toNumber())) {
+ return LinkFail(cx, "global constant value needs to be NaN");
+ }
+ } else {
+ if (v.toNumber() != global.constantValue()) {
+ return LinkFail(cx, "global constant value mismatch");
+ }
+ }
+
+ return true;
+}
+
+static bool CheckBuffer(JSContext* cx, const AsmJSMetadata& metadata,
+ HandleValue bufferVal,
+ MutableHandle<ArrayBufferObject*> buffer) {
+ if (!bufferVal.isObject()) {
+ return LinkFail(cx, "buffer must be an object");
+ }
+ JSObject* bufferObj = &bufferVal.toObject();
+
+ if (metadata.usesSharedMemory()) {
+ if (!bufferObj->is<SharedArrayBufferObject>()) {
+ return LinkFail(
+ cx, "shared views can only be constructed onto SharedArrayBuffer");
+ }
+ return LinkFail(cx, "Unable to prepare SharedArrayBuffer for asm.js use");
+ }
+
+ if (!bufferObj->is<ArrayBufferObject>()) {
+ return LinkFail(cx,
+ "unshared views can only be constructed onto ArrayBuffer");
+ }
+
+ buffer.set(&bufferObj->as<ArrayBufferObject>());
+
+ size_t memoryLength = buffer->byteLength();
+
+ if (!IsValidAsmJSHeapLength(memoryLength)) {
+ UniqueChars msg;
+ if (memoryLength > MaxHeapLength) {
+ msg = JS_smprintf("ArrayBuffer byteLength 0x%" PRIx64
+ " is not a valid heap length - it is too long."
+ " The longest valid length is 0x%" PRIx64,
+ uint64_t(memoryLength), MaxHeapLength);
+ } else {
+ msg = JS_smprintf("ArrayBuffer byteLength 0x%" PRIx64
+ " is not a valid heap length. The next "
+ "valid length is 0x%" PRIx64,
+ uint64_t(memoryLength),
+ RoundUpToNextValidAsmJSHeapLength(memoryLength));
+ }
+ if (!msg) {
+ return false;
+ }
+ return LinkFail(cx, msg.get());
+ }
+
+ // This check is sufficient without considering the size of the loaded datum
+ // because heap loads and stores start on an aligned boundary and the heap
+ // byteLength has larger alignment.
+ uint64_t minMemoryLength =
+ metadata.usesMemory() ? metadata.memory->initialLength32() : 0;
+ MOZ_ASSERT((minMemoryLength - 1) <= INT32_MAX);
+ if (memoryLength < minMemoryLength) {
+ UniqueChars msg(JS_smprintf("ArrayBuffer byteLength of 0x%" PRIx64
+ " is less than 0x%" PRIx64 " (the "
+ "size implied "
+ "by const heap accesses).",
+ uint64_t(memoryLength), minMemoryLength));
+ if (!msg) {
+ return false;
+ }
+ return LinkFail(cx, msg.get());
+ }
+
+ // ArrayBuffer lengths in SpiderMonkey used to be restricted to <= INT32_MAX,
+ // but that has since been relaxed for the benefit of wasm. We keep the old
+ // limit for asm.js so as to avoid having to worry about whether the asm.js
+ // implementation is safe for larger heaps.
+ if (memoryLength >= INT32_MAX) {
+ UniqueChars msg(
+ JS_smprintf("ArrayBuffer byteLength 0x%" PRIx64
+ " is too large for asm.js (implementation limit).",
+ uint64_t(memoryLength)));
+ if (!msg) {
+ return false;
+ }
+ return LinkFail(cx, msg.get());
+ }
+
+ if (!buffer->prepareForAsmJS()) {
+ return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
+ }
+
+ MOZ_ASSERT(buffer->isPreparedForAsmJS());
+ return true;
+}
+
+static bool GetImports(JSContext* cx, const AsmJSMetadata& metadata,
+ HandleValue globalVal, HandleValue importVal,
+ ImportValues* imports) {
+ Rooted<FunctionVector> ffis(cx, FunctionVector(cx));
+ if (!ffis.resize(metadata.numFFIs)) {
+ return false;
+ }
+
+ for (const AsmJSGlobal& global : metadata.asmJSGlobals) {
+ switch (global.which()) {
+ case AsmJSGlobal::Variable: {
+ Maybe<LitValPOD> litVal;
+ if (!ValidateGlobalVariable(cx, global, importVal, &litVal)) {
+ return false;
+ }
+ if (!imports->globalValues.append(Val(litVal->asLitVal()))) {
+ return false;
+ }
+ break;
+ }
+ case AsmJSGlobal::FFI:
+ if (!ValidateFFI(cx, global, importVal, &ffis)) {
+ return false;
+ }
+ break;
+ case AsmJSGlobal::ArrayView:
+ case AsmJSGlobal::ArrayViewCtor:
+ if (!ValidateArrayView(cx, global, globalVal)) {
+ return false;
+ }
+ break;
+ case AsmJSGlobal::MathBuiltinFunction:
+ if (!ValidateMathBuiltinFunction(cx, metadata, global, globalVal)) {
+ return false;
+ }
+ break;
+ case AsmJSGlobal::Constant:
+ if (!ValidateConstant(cx, global, globalVal)) {
+ return false;
+ }
+ break;
+ }
+ }
+
+ for (const AsmJSImport& import : metadata.asmJSImports) {
+ if (!imports->funcs.append(ffis[import.ffiIndex()])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool TryInstantiate(JSContext* cx, CallArgs args, const Module& module,
+ const AsmJSMetadata& metadata,
+ MutableHandle<WasmInstanceObject*> instanceObj,
+ MutableHandleObject exportObj) {
+ HandleValue globalVal = args.get(0);
+ HandleValue importVal = args.get(1);
+ HandleValue bufferVal = args.get(2);
+
+ // Re-check HasPlatformSupport(cx) since this varies per-thread and
+ // 'module' may have been produced on a parser thread.
+ if (!HasPlatformSupport(cx)) {
+ return LinkFail(cx, "no platform support");
+ }
+
+ Rooted<ImportValues> imports(cx);
+
+ if (module.metadata().usesMemory()) {
+ RootedArrayBufferObject buffer(cx);
+ if (!CheckBuffer(cx, metadata, bufferVal, &buffer)) {
+ return false;
+ }
+
+ imports.get().memory =
+ WasmMemoryObject::create(cx, buffer, /* isHuge= */ false, nullptr);
+ if (!imports.get().memory) {
+ return false;
+ }
+ }
+
+ if (!GetImports(cx, metadata, globalVal, importVal, imports.address())) {
+ return false;
+ }
+
+ if (!module.instantiate(cx, imports.get(), nullptr, instanceObj)) {
+ return false;
+ }
+
+ exportObj.set(&instanceObj->exportsObj());
+ return true;
+}
+
+static bool HandleInstantiationFailure(JSContext* cx, CallArgs args,
+ const AsmJSMetadata& metadata) {
+ using js::frontend::FunctionSyntaxKind;
+
+ Rooted<JSAtom*> name(cx, args.callee().as<JSFunction>().explicitName());
+
+ if (cx->isExceptionPending()) {
+ return false;
+ }
+
+ ScriptSource* source = metadata.maybeScriptSource();
+
+ // Source discarding is allowed to affect JS semantics because it is never
+ // enabled for normal JS content.
+ bool haveSource;
+ if (!ScriptSource::loadSource(cx, source, &haveSource)) {
+ return false;
+ }
+ if (!haveSource) {
+ JS_ReportErrorASCII(cx,
+ "asm.js link failure with source discarding enabled");
+ return false;
+ }
+
+ uint32_t begin = metadata.toStringStart;
+ uint32_t end = metadata.srcEndAfterCurly();
+ Rooted<JSLinearString*> src(cx, source->substringDontDeflate(cx, begin, end));
+ if (!src) {
+ return false;
+ }
+
+ JS::CompileOptions options(cx);
+ options.setMutedErrors(source->mutedErrors())
+ .setFile(source->filename())
+ .setNoScriptRval(false);
+ options.asmJSOption = AsmJSOption::DisabledByLinker;
+
+ // The exported function inherits an implicit strict context if the module
+ // also inherited it somehow.
+ if (metadata.strict) {
+ options.setForceStrictMode();
+ }
+
+ AutoStableStringChars linearChars(cx);
+ if (!linearChars.initTwoByte(cx, src)) {
+ return false;
+ }
+
+ SourceText<char16_t> srcBuf;
+ if (!srcBuf.initMaybeBorrowed(cx, linearChars)) {
+ return false;
+ }
+
+ FunctionSyntaxKind syntaxKind = FunctionSyntaxKind::Statement;
+
+ RootedFunction fun(cx, frontend::CompileStandaloneFunction(
+ cx, options, srcBuf, Nothing(), syntaxKind));
+ if (!fun) {
+ return false;
+ }
+
+ fun->initEnvironment(&cx->global()->lexicalEnvironment());
+
+ // Call the function we just recompiled.
+ args.setCallee(ObjectValue(*fun));
+ return InternalCallOrConstruct(
+ cx, args, args.isConstructing() ? CONSTRUCT : NO_CONSTRUCT);
+}
+
+static const Module& AsmJSModuleFunctionToModule(JSFunction* fun) {
+ MOZ_ASSERT(IsAsmJSModule(fun));
+ const Value& v = fun->getExtendedSlot(FunctionExtended::ASMJS_MODULE_SLOT);
+ return v.toObject().as<WasmModuleObject>().module();
+}
+
+// Implements the semantics of an asm.js module function that has been
+// successfully validated.
+bool js::InstantiateAsmJS(JSContext* cx, unsigned argc, JS::Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ JSFunction* callee = &args.callee().as<JSFunction>();
+ const Module& module = AsmJSModuleFunctionToModule(callee);
+ const AsmJSMetadata& metadata = module.metadata().asAsmJS();
+
+ Rooted<WasmInstanceObject*> instanceObj(cx);
+ RootedObject exportObj(cx);
+ if (!TryInstantiate(cx, args, module, metadata, &instanceObj, &exportObj)) {
+ // Link-time validation checks failed, so reparse the entire asm.js
+ // module from scratch to get normal interpreted bytecode which we can
+ // simply Invoke. Very slow.
+ return HandleInstantiationFailure(cx, args, metadata);
+ }
+
+ args.rval().set(ObjectValue(*exportObj));
+ return true;
+}
+
+/*****************************************************************************/
+// Top-level js::CompileAsmJS
+
+static bool NoExceptionPending(FrontendContext* fc) { return !fc->hadErrors(); }
+
+static bool SuccessfulValidation(frontend::ParserBase& parser,
+ unsigned compilationTime) {
+ unsigned errNum = js::SupportDifferentialTesting()
+ ? JSMSG_USE_ASM_TYPE_OK_NO_TIME
+ : JSMSG_USE_ASM_TYPE_OK;
+
+ char timeChars[20];
+ SprintfLiteral(timeChars, "%u", compilationTime);
+
+ return parser.warningNoOffset(errNum, timeChars);
+}
+
+static bool TypeFailureWarning(frontend::ParserBase& parser, const char* str) {
+ if (parser.options().throwOnAsmJSValidationFailureOption) {
+ parser.errorNoOffset(JSMSG_USE_ASM_TYPE_FAIL, str ? str : "");
+ return false;
+ }
+
+ // Per the asm.js standard convention, whether failure sets a pending
+ // exception determines whether to attempt non-asm.js reparsing, so ignore
+ // the return value below.
+ (void)parser.warningNoOffset(JSMSG_USE_ASM_TYPE_FAIL, str ? str : "");
+ return false;
+}
+
+// asm.js requires Ion to be available on the current hardware/OS and to be
+// enabled for wasm, since asm.js compilation goes via wasm.
+static bool IsAsmJSCompilerAvailable(JSContext* cx) {
+ return HasPlatformSupport(cx) && WasmCompilerForAsmJSAvailable(cx);
+}
+
+static bool EstablishPreconditions(frontend::ParserBase& parser) {
+ switch (parser.options().asmJSOption) {
+ case AsmJSOption::DisabledByAsmJSPref:
+ return TypeFailureWarning(
+ parser, "Asm.js optimizer disabled by 'asmjs' runtime option");
+ case AsmJSOption::DisabledByLinker:
+ return TypeFailureWarning(
+ parser,
+ "Asm.js optimizer disabled by linker (instantiation failure)");
+ case AsmJSOption::DisabledByNoWasmCompiler:
+ return TypeFailureWarning(parser,
+ "Asm.js optimizer disabled because no suitable "
+ "wasm compiler is available");
+ case AsmJSOption::DisabledByDebugger:
+ return TypeFailureWarning(
+ parser, "Asm.js optimizer disabled because debugger is active");
+ case AsmJSOption::Enabled:
+ break;
+ }
+
+ if (parser.pc_->isGenerator()) {
+ return TypeFailureWarning(parser,
+ "Asm.js optimizer disabled in generator context");
+ }
+
+ if (parser.pc_->isAsync()) {
+ return TypeFailureWarning(parser,
+ "Asm.js optimizer disabled in async context");
+ }
+
+ if (parser.pc_->isArrowFunction()) {
+ return TypeFailureWarning(
+ parser, "Asm.js optimizer disabled in arrow function context");
+ }
+
+ // Class constructors are also methods
+ if (parser.pc_->isMethod() || parser.pc_->isGetterOrSetter()) {
+ return TypeFailureWarning(
+ parser,
+ "Asm.js optimizer disabled in class constructor or method context");
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool DoCompileAsmJS(FrontendContext* fc, ParserAtomsTable& parserAtoms,
+ AsmJSParser<Unit>& parser, ParseNode* stmtList,
+ bool* validated) {
+ *validated = false;
+
+ // Various conditions disable asm.js optimizations.
+ if (!EstablishPreconditions(parser)) {
+ return NoExceptionPending(fc);
+ }
+
+ // "Checking" parses, validates and compiles, producing a fully compiled
+ // WasmModuleObject as result.
+ unsigned time;
+ SharedModule module = CheckModule(fc, parserAtoms, parser, stmtList, &time);
+ if (!module) {
+ return NoExceptionPending(fc);
+ }
+
+ // Finished! Save the ref-counted module on the FunctionBox. When JSFunctions
+ // are eventually allocated we will create an asm.js constructor for it.
+ FunctionBox* funbox = parser.pc_->functionBox();
+ MOZ_ASSERT(funbox->isInterpreted());
+ if (!funbox->setAsmJSModule(module)) {
+ return NoExceptionPending(fc);
+ }
+
+ // Success! Write to the console with a "warning" message indicating
+ // total compilation time.
+ *validated = true;
+ SuccessfulValidation(parser, time);
+ return NoExceptionPending(fc);
+}
+
+bool js::CompileAsmJS(FrontendContext* fc, ParserAtomsTable& parserAtoms,
+ AsmJSParser<char16_t>& parser, ParseNode* stmtList,
+ bool* validated) {
+ return DoCompileAsmJS(fc, parserAtoms, parser, stmtList, validated);
+}
+
+bool js::CompileAsmJS(FrontendContext* fc, ParserAtomsTable& parserAtoms,
+ AsmJSParser<Utf8Unit>& parser, ParseNode* stmtList,
+ bool* validated) {
+ return DoCompileAsmJS(fc, parserAtoms, parser, stmtList, validated);
+}
+
+/*****************************************************************************/
+// asm.js testing functions
+
+bool js::IsAsmJSModuleNative(Native native) {
+ return native == InstantiateAsmJS;
+}
+
+bool js::IsAsmJSModule(JSFunction* fun) {
+ return fun->maybeNative() == InstantiateAsmJS;
+}
+
+bool js::IsAsmJSFunction(JSFunction* fun) {
+ return fun->kind() == FunctionFlags::AsmJS;
+}
+
+bool js::IsAsmJSStrictModeModuleOrFunction(JSFunction* fun) {
+ if (IsAsmJSModule(fun)) {
+ return AsmJSModuleFunctionToModule(fun).metadata().asAsmJS().strict;
+ }
+
+ if (IsAsmJSFunction(fun)) {
+ return ExportedFunctionToInstance(fun).metadata().asAsmJS().strict;
+ }
+
+ return false;
+}
+
+bool js::IsAsmJSCompilationAvailable(JSContext* cx) {
+ return cx->options().asmJS() && IsAsmJSCompilerAvailable(cx);
+}
+
+bool js::IsAsmJSCompilationAvailable(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ bool available = IsAsmJSCompilationAvailable(cx);
+ args.rval().set(BooleanValue(available));
+ return true;
+}
+
+static JSFunction* MaybeWrappedNativeFunction(const Value& v) {
+ if (!v.isObject()) {
+ return nullptr;
+ }
+
+ return v.toObject().maybeUnwrapIf<JSFunction>();
+}
+
+bool js::IsAsmJSModule(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ bool rval = false;
+ if (JSFunction* fun = MaybeWrappedNativeFunction(args.get(0))) {
+ rval = IsAsmJSModule(fun);
+ }
+
+ args.rval().set(BooleanValue(rval));
+ return true;
+}
+
+bool js::IsAsmJSFunction(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ bool rval = false;
+ if (JSFunction* fun = MaybeWrappedNativeFunction(args.get(0))) {
+ rval = IsAsmJSFunction(fun);
+ }
+
+ args.rval().set(BooleanValue(rval));
+ return true;
+}
+
+/*****************************************************************************/
+// asm.js toString/toSource support
+
+JSString* js::AsmJSModuleToString(JSContext* cx, HandleFunction fun,
+ bool isToSource) {
+ MOZ_ASSERT(IsAsmJSModule(fun));
+
+ const AsmJSMetadata& metadata =
+ AsmJSModuleFunctionToModule(fun).metadata().asAsmJS();
+ uint32_t begin = metadata.toStringStart;
+ uint32_t end = metadata.srcEndAfterCurly();
+ ScriptSource* source = metadata.maybeScriptSource();
+
+ JSStringBuilder out(cx);
+
+ if (isToSource && fun->isLambda() && !out.append("(")) {
+ return nullptr;
+ }
+
+ bool haveSource;
+ if (!ScriptSource::loadSource(cx, source, &haveSource)) {
+ return nullptr;
+ }
+
+ if (!haveSource) {
+ if (!out.append("function ")) {
+ return nullptr;
+ }
+ if (fun->explicitName() && !out.append(fun->explicitName())) {
+ return nullptr;
+ }
+ if (!out.append("() {\n [native code]\n}")) {
+ return nullptr;
+ }
+ } else {
+ Rooted<JSLinearString*> src(cx, source->substring(cx, begin, end));
+ if (!src) {
+ return nullptr;
+ }
+
+ if (!out.append(src)) {
+ return nullptr;
+ }
+ }
+
+ if (isToSource && fun->isLambda() && !out.append(")")) {
+ return nullptr;
+ }
+
+ return out.finishString();
+}
+
+JSString* js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun) {
+ MOZ_ASSERT(IsAsmJSFunction(fun));
+
+ const AsmJSMetadata& metadata =
+ ExportedFunctionToInstance(fun).metadata().asAsmJS();
+ const AsmJSExport& f =
+ metadata.lookupAsmJSExport(ExportedFunctionToFuncIndex(fun));
+
+ uint32_t begin = metadata.srcStart + f.startOffsetInModule();
+ uint32_t end = metadata.srcStart + f.endOffsetInModule();
+
+ ScriptSource* source = metadata.maybeScriptSource();
+ JSStringBuilder out(cx);
+
+ if (!out.append("function ")) {
+ return nullptr;
+ }
+
+ bool haveSource;
+ if (!ScriptSource::loadSource(cx, source, &haveSource)) {
+ return nullptr;
+ }
+
+ if (!haveSource) {
+ // asm.js functions can't be anonymous
+ MOZ_ASSERT(fun->explicitName());
+ if (!out.append(fun->explicitName())) {
+ return nullptr;
+ }
+ if (!out.append("() {\n [native code]\n}")) {
+ return nullptr;
+ }
+ } else {
+ Rooted<JSLinearString*> src(cx, source->substring(cx, begin, end));
+ if (!src) {
+ return nullptr;
+ }
+ if (!out.append(src)) {
+ return nullptr;
+ }
+ }
+
+ return out.finishString();
+}
+
+bool js::IsValidAsmJSHeapLength(size_t length) {
+ if (length < MinHeapLength) {
+ return false;
+ }
+
+ // The heap length is limited by what a wasm memory32 can handle.
+ if (length > MaxMemoryBytes(IndexType::I32)) {
+ return false;
+ }
+
+ return wasm::IsValidARMImmediate(length);
+}
diff --git a/js/src/wasm/AsmJS.h b/js/src/wasm/AsmJS.h
new file mode 100644
index 0000000000..7caa1bf09a
--- /dev/null
+++ b/js/src/wasm/AsmJS.h
@@ -0,0 +1,116 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_AsmJS_h
+#define wasm_AsmJS_h
+
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <stdint.h> // uint32_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+#include "js/CallArgs.h" // JSNative
+
+struct JS_PUBLIC_API JSContext;
+class JS_PUBLIC_API JSFunction;
+
+namespace JS {
+
+class JS_PUBLIC_API Value;
+
+template <typename T>
+class Handle;
+
+} // namespace JS
+
+namespace js {
+
+class FrontendContext;
+
+namespace frontend {
+
+class ParserAtomsTable;
+class ParseContext;
+class ParseNode;
+
+template <class ParseHandler, typename CharT>
+class Parser;
+class FullParseHandler;
+
+} // namespace frontend
+
+template <typename Unit>
+using AsmJSParser = frontend::Parser<frontend::FullParseHandler, Unit>;
+
+// This function takes over parsing of a function starting with "use asm". The
+// return value indicates whether an error was reported which the caller should
+// propagate. If no error was reported, the function may still fail to validate
+// as asm.js. In this case, the parser.tokenStream has been advanced an
+// indeterminate amount and the entire function should be reparsed from the
+// beginning.
+
+[[nodiscard]] extern bool CompileAsmJS(FrontendContext* fc,
+ frontend::ParserAtomsTable& parserAtoms,
+ AsmJSParser<mozilla::Utf8Unit>& parser,
+ frontend::ParseNode* stmtList,
+ bool* validated);
+
+[[nodiscard]] extern bool CompileAsmJS(FrontendContext* fc,
+ frontend::ParserAtomsTable& parserAtoms,
+ AsmJSParser<char16_t>& parser,
+ frontend::ParseNode* stmtList,
+ bool* validated);
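+
+// Usage sketch (illustrative only; not a copy of the real caller):
+//
+//   bool validated;
+//   if (!CompileAsmJS(fc, parserAtoms, parser, stmtList, &validated)) {
+//     return false;  // an error was reported; propagate it
+//   }
+//   if (!validated) {
+//     // Not asm.js after all: rewind and reparse as ordinary JavaScript.
+//   }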
+
+// asm.js module/export queries:
+
+extern bool IsAsmJSModuleNative(JSNative native);
+
+extern bool IsAsmJSModule(JSFunction* fun);
+
+extern bool IsAsmJSFunction(JSFunction* fun);
+
+extern bool IsAsmJSStrictModeModuleOrFunction(JSFunction* fun);
+
+extern bool InstantiateAsmJS(JSContext* cx, unsigned argc, JS::Value* vp);
+
+// asm.js testing natives:
+
+extern bool IsAsmJSCompilationAvailable(JSContext* cx, unsigned argc,
+ JS::Value* vp);
+
+extern bool IsAsmJSCompilationAvailable(JSContext* cx);
+
+extern bool IsAsmJSModule(JSContext* cx, unsigned argc, JS::Value* vp);
+
+extern bool IsAsmJSFunction(JSContext* cx, unsigned argc, JS::Value* vp);
+
+// asm.js toString/toSource support:
+
+extern JSString* AsmJSFunctionToString(JSContext* cx,
+ JS::Handle<JSFunction*> fun);
+
+extern JSString* AsmJSModuleToString(JSContext* cx, JS::Handle<JSFunction*> fun,
+ bool isToSource);
+
+// asm.js heap:
+
+extern bool IsValidAsmJSHeapLength(size_t length);
+
+} // namespace js
+
+#endif // wasm_AsmJS_h
diff --git a/js/src/wasm/GenerateIntrinsics.py b/js/src/wasm/GenerateIntrinsics.py
new file mode 100644
index 0000000000..f822722dcd
--- /dev/null
+++ b/js/src/wasm/GenerateIntrinsics.py
@@ -0,0 +1,86 @@
+from collections import OrderedDict
+
+import buildconfig
+import six
+import yaml
+from mozbuild.preprocessor import Preprocessor
+
+HEADER_TEMPLATE = """\
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef %(includeguard)s
+#define %(includeguard)s
+
+/* This file is generated by wasm/GenerateIntrinsics.py. Do not edit! */
+
+%(contents)s
+
+#endif // %(includeguard)s
+"""
+
+
+def generate_header(c_out, includeguard, contents):
+ c_out.write(
+ HEADER_TEMPLATE
+ % {
+ "includeguard": includeguard,
+ "contents": contents,
+ }
+ )
+
+
+def load_yaml(yaml_path):
+ # First invoke preprocessor.py so that we can use #ifdef JS_SIMULATOR in
+ # the YAML file.
+ pp = Preprocessor()
+ pp.context.update(buildconfig.defines["ALLDEFINES"])
+ pp.out = six.StringIO()
+ pp.do_filter("substitution")
+ pp.do_include(yaml_path)
+ contents = pp.out.getvalue()
+
+ # Load into an OrderedDict to ensure order is preserved. Note: Python 3.7+
+ # also preserves ordering for normal dictionaries.
+ # Code based on https://stackoverflow.com/a/21912744.
+ class OrderedLoader(yaml.Loader):
+ pass
+
+ def construct_mapping(loader, node):
+ loader.flatten_mapping(node)
+ return OrderedDict(loader.construct_pairs(node))
+
+ tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
+ OrderedLoader.add_constructor(tag, construct_mapping)
+ return yaml.load(contents, OrderedLoader)
+
+
+def main(c_out, yaml_path):
+ data = load_yaml(yaml_path)
+
+    # Iterate over all defined intrinsics
+ contents = "#define FOR_EACH_INTRINSIC(M) \\\n"
+ for i in range(len(data)):
+ op = data[i]
+ sa = op["symbolic_address"]
+ contents += (
+ f" M({op['op']}, \"{op['export']}\", "
+ f"{sa['name']}, {sa['type']}, {op['entry']}, {i})\\\n"
+ )
+ contents += "\n"
+
+ for op in data:
+ # Define DECLARE_INTRINSIC_SAS_PARAM_VALTYPES_<op> as:
+ # `{ValType::I32, ValType::I32, ...}`.
+ contents += (
+ f"#define DECLARE_INTRINSIC_SAS_PARAM_VALTYPES_{op['op']} "
+ f"{{ValType::{', ValType::'.join(op['params'])}}}\n"
+ )
+ # Define DECLARE_INTRINSIC_PARAM_TYPES_<op> as:
+ # `<num_types>, {_PTR, _I32, ..., _PTR, _END}`.
+ sas_types = f"{{_PTR{''.join(', _' + p for p in op['params'])}, _PTR, _END}}"
+ num_types = len(op["params"]) + 2
+ contents += f"#define DECLARE_INTRINSIC_PARAM_TYPES_{op['op']} {num_types}, {sas_types}\n"
+
+ generate_header(c_out, "wasm_WasmIntrinsicGenerated_h", contents)
diff --git a/js/src/wasm/WasmBCClass-inl.h b/js/src/wasm/WasmBCClass-inl.h
new file mode 100644
index 0000000000..56d174506e
--- /dev/null
+++ b/js/src/wasm/WasmBCClass-inl.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for the Wasm baseline compiler: inline
+// BaseCompiler methods that don't fit into any other particular group.
+
+#ifndef wasm_wasm_baseline_object_inl_h
+#define wasm_wasm_baseline_object_inl_h
+
+namespace js {
+namespace wasm {
+
+const FuncType& BaseCompiler::funcType() const {
+ return *moduleEnv_.funcs[func_.index].type;
+}
+
+bool BaseCompiler::usesMemory() const { return moduleEnv_.usesMemory(); }
+
+bool BaseCompiler::usesSharedMemory() const {
+ return moduleEnv_.usesSharedMemory();
+}
+
+const Local& BaseCompiler::localFromSlot(uint32_t slot, MIRType type) {
+ MOZ_ASSERT(localInfo_[slot].type == type);
+ return localInfo_[slot];
+}
+
+BytecodeOffset BaseCompiler::bytecodeOffset() const {
+ return iter_.bytecodeOffset();
+}
+
+bool BaseCompiler::isMem32() const {
+ return moduleEnv_.memory->indexType() == IndexType::I32;
+}
+
+bool BaseCompiler::isMem64() const {
+ return moduleEnv_.memory->indexType() == IndexType::I64;
+}
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_object_inl_h
diff --git a/js/src/wasm/WasmBCClass.h b/js/src/wasm/WasmBCClass.h
new file mode 100644
index 0000000000..13b5ff094c
--- /dev/null
+++ b/js/src/wasm/WasmBCClass.h
@@ -0,0 +1,1745 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for the Wasm baseline compiler: the compiler
+// object and its supporting types.
+
+#ifndef wasm_wasm_baseline_object_h
+#define wasm_wasm_baseline_object_h
+
+#include "wasm/WasmBCDefs.h"
+#include "wasm/WasmBCFrame.h"
+#include "wasm/WasmBCRegDefs.h"
+#include "wasm/WasmBCStk.h"
+
+namespace js {
+namespace wasm {
+
+// Container for a piece of out-of-line code, the slow path that supports an
+// operation.
+class OutOfLineCode;
+
+// Part of the inter-bytecode state for the boolean-evaluation-for-control
+// optimization.
+struct BranchState;
+
+// Representation of wasm local variables.
+using Local = BaseStackFrame::Local;
+
+// Bitset used for simple bounds check elimination. Capping this at 64 locals
+// makes sense; even 32 locals would probably be OK in practice.
+//
+// For more information about BCE, see the block comment in WasmBCMemory.cpp.
+using BCESet = uint64_t;
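+
+// Illustrative: if bit i of a BCESet is set, local i currently holds a value
+// that is already known to be in bounds, so a test such as
+//
+//   (bceSafe_ & (BCESet(1) << localIndex)) != 0
+//
+// lets a memory access indexed by that local omit its bounds check.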
+
+// Information stored in the control node for generating exception handling
+// landing pads.
+struct CatchInfo {
+ uint32_t tagIndex; // Index for the associated exception.
+ NonAssertingLabel label; // The entry label for the handler.
+
+ explicit CatchInfo(uint32_t tagIndex_) : tagIndex(tagIndex_) {}
+};
+
+using CatchInfoVector = Vector<CatchInfo, 1, SystemAllocPolicy>;
+
+// Control node, representing labels and stack heights at join points.
+struct Control {
+ NonAssertingLabel label; // The "exit" label
+ NonAssertingLabel otherLabel; // Used for the "else" branch of if-then-else
+ // and to allow delegate to jump to catches.
+ StackHeight stackHeight; // From BaseStackFrame
+ uint32_t stackSize; // Value stack height
+ BCESet bceSafeOnEntry; // Bounds check info flowing into the item
+ BCESet bceSafeOnExit; // Bounds check info flowing out of the item
+ bool deadOnArrival; // deadCode_ was set on entry to the region
+ bool deadThenBranch; // deadCode_ was set on exit from "then"
+ size_t tryNoteIndex; // For tracking try branch code ranges.
+ CatchInfoVector catchInfos; // Used for try-catch handlers.
+
+ Control()
+ : stackHeight(StackHeight::Invalid()),
+ stackSize(UINT32_MAX),
+ bceSafeOnEntry(0),
+ bceSafeOnExit(~BCESet(0)),
+ deadOnArrival(false),
+ deadThenBranch(false),
+ tryNoteIndex(0) {}
+};
+
+// A vector of Nothing values, used for reading opcodes.
+class BaseNothingVector {
+ Nothing unused_;
+
+ public:
+ bool resize(size_t length) { return true; }
+ Nothing& operator[](size_t) { return unused_; }
+ Nothing& back() { return unused_; }
+ size_t length() const { return 0; }
+ bool append(Nothing& nothing) { return true; }
+};
+
+// The baseline compiler tracks values on a stack of its own -- it needs to scan
+// that stack for spilling -- and thus has no need for the values maintained by
+// the iterator.
+struct BaseCompilePolicy {
+ using Value = Nothing;
+ using ValueVector = BaseNothingVector;
+
+ // The baseline compiler uses the iterator's control stack, attaching
+ // its own control information.
+ using ControlItem = Control;
+};
+
+using BaseOpIter = OpIter<BaseCompilePolicy>;
+
+// Latent operation for boolean-evaluation-for-control optimization.
+enum class LatentOp { None, Compare, Eqz };
+
+// Encapsulate the checking needed for a memory access.
+struct AccessCheck {
+ AccessCheck()
+ : omitBoundsCheck(false),
+ omitAlignmentCheck(false),
+ onlyPointerAlignment(false) {}
+
+  // If `omitAlignmentCheck` is true then we need to check neither the
+  // pointer nor the offset. Otherwise, if `onlyPointerAlignment` is true
+  // then we need to check only the pointer. Otherwise, check the sum of
+  // the pointer and the offset.
+
+ bool omitBoundsCheck;
+ bool omitAlignmentCheck;
+ bool onlyPointerAlignment;
+};
+
+// Encapsulate all the information about a function call.
+struct FunctionCall {
+ FunctionCall()
+ : restoreRegisterStateAndRealm(false),
+ usesSystemAbi(false),
+#ifdef JS_CODEGEN_ARM
+ hardFP(true),
+#endif
+ frameAlignAdjustment(0),
+ stackArgAreaSize(0) {
+ }
+
+ WasmABIArgGenerator abi;
+ bool restoreRegisterStateAndRealm;
+ bool usesSystemAbi;
+#ifdef JS_CODEGEN_ARM
+ bool hardFP;
+#endif
+ size_t frameAlignAdjustment;
+ size_t stackArgAreaSize;
+};
+
+enum class PreBarrierKind {
+ // No pre-write barrier is required because the previous value is undefined.
+ None,
+ // Perform a pre-write barrier to mark the previous value if an incremental
+ // GC is underway.
+ Normal,
+};
+
+enum class PostBarrierKind {
+ // Remove an existing store buffer entry if the new value does not require
+ // one. This is required to preserve invariants with HeapPtr when used for
+ // movable storage.
+ Precise,
+ // Add a store buffer entry if the new value requires it, but do not attempt
+ // to remove a pre-existing entry.
+ Imprecise,
+};
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Wasm baseline compiler proper.
+//
+// This is a struct and not a class because there is no real benefit to hiding
+// anything, and because many static functions that are wrappers for masm
+// methods need to reach into it and would otherwise have to be declared as
+// friends.
+//
+// (Members generally have a '_' suffix but some don't because they are
+// referenced everywhere and it would be tedious to spell that out.)
+
+struct BaseCompiler final {
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Private types
+
+ using LabelVector = Vector<NonAssertingLabel, 8, SystemAllocPolicy>;
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Read-only and write-once members.
+
+ // Static compilation environment.
+ const ModuleEnvironment& moduleEnv_;
+ const CompilerEnvironment& compilerEnv_;
+ const FuncCompileInput& func_;
+ const ValTypeVector& locals_;
+
+  // Information about the locations of locals; this is set up during
+  // initialization and is read-only after that.
+ BaseStackFrame::LocalVector localInfo_;
+
+ // On specific platforms we sometimes need to use specific registers.
+ const SpecificRegs specific_;
+
+  // SigD and SigF are single-entry parameter lists for f64 and f32; they are
+  // created during initialization.
+ ValTypeVector SigD_;
+ ValTypeVector SigF_;
+
+  // Where to go to return, bound as compilation ends.
+ NonAssertingLabel returnLabel_;
+
+ // Prologue and epilogue offsets, initialized during prologue and epilogue
+ // generation and only used by the caller.
+ FuncOffsets offsets_;
+
+ // We call this address from the breakable point when the breakpoint handler
+ // is not null.
+ NonAssertingLabel debugTrapStub_;
+ uint32_t previousBreakablePoint_;
+
+ // BaselineCompileFunctions() "lends" us the StkVector to use in this
+ // BaseCompiler object, and that is installed in |stk_| in our constructor.
+ // This is so as to avoid having to malloc/free the vector's contents at
+ // each creation/destruction of a BaseCompiler object. It does however mean
+ // that we need to hold on to a reference to BaselineCompileFunctions()'s
+ // vector, so we can swap (give) its contents back when this BaseCompiler
+ // object is destroyed. This significantly reduces the heap turnover of the
+ // baseline compiler. See bug 1532592.
+ StkVector& stkSource_;
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Output-only data structures.
+
+ // Bump allocator for temporary memory, used for the value stack and
+ // out-of-line code blobs. Bump-allocated memory is not freed until the end
+ // of the compilation.
+ TempAllocator::Fallible alloc_;
+
+ // Machine code emitter.
+ MacroAssembler& masm;
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Compilation state.
+
+ // Decoder for this function, used for misc error reporting.
+ Decoder& decoder_;
+
+ // Opcode reader.
+ BaseOpIter iter_;
+
+ // Register allocator.
+ BaseRegAlloc ra;
+
+ // Stack frame abstraction.
+ BaseStackFrame fr;
+
+  // Latent out-of-line support code for some operations; code for these will
+  // be emitted at the end of compilation.
+ Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
+
+ // Stack map state. This keeps track of live pointer slots and allows precise
+ // stack maps to be generated at safe points.
+ StackMapGenerator stackMapGenerator_;
+
+ // Wasm value stack. This maps values on the wasm stack to values in the
+ // running code and their locations.
+ //
+ // The value stack facilitates on-the-fly register allocation and the use of
+ // immediates in instructions. It tracks latent constants, latent references
+ // to locals, register contents, and values that have been flushed to the CPU
+ // stack.
+ //
+ // The stack can be flushed to the CPU stack using sync().
+ //
+ // The stack is a StkVector rather than a StkVector& since constantly
+ // dereferencing a StkVector& has been shown to add 0.5% or more to the
+ // compiler's dynamic instruction count.
+ StkVector stk_;
+
+ // Flag indicating that the compiler is currently in a dead code region.
+ bool deadCode_;
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // State for bounds check elimination.
+
+  // Locals that have been bounds checked and not updated since the check.
+ BCESet bceSafe_;
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // State for boolean-evaluation-for-control.
+
+ // Latent operation for branch (seen next)
+ LatentOp latentOp_;
+
+  // Operand type, if latentOp_ != LatentOp::None
+ ValType latentType_;
+
+ // Comparison operator, if latentOp_ == Compare, int types
+ Assembler::Condition latentIntCmp_;
+
+ // Comparison operator, if latentOp_ == Compare, float types
+ Assembler::DoubleCondition latentDoubleCmp_;
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Main compilation API.
+ //
+ // A client will create a compiler object, and then call init(),
+ // emitFunction(), and finish() in that order.
+
+ BaseCompiler(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ const FuncCompileInput& func, const ValTypeVector& locals,
+ const RegisterOffsets& trapExitLayout,
+ size_t trapExitLayoutNumWords, Decoder& decoder,
+ StkVector& stkSource, TempAllocator* alloc, MacroAssembler* masm,
+ StackMaps* stackMaps);
+ ~BaseCompiler();
+
+ [[nodiscard]] bool init();
+ [[nodiscard]] bool emitFunction();
+ [[nodiscard]] FuncOffsets finish();
+
+ //////////////////////////////////////////////////////////////////////////////
+ //
+ // Sundry accessor abstractions and convenience predicates.
+ //
+  // Implementations are in WasmBCClass-inl.h.
+
+ inline const FuncType& funcType() const;
+ inline bool usesMemory() const;
+ inline bool usesSharedMemory() const;
+ inline bool isMem32() const;
+ inline bool isMem64() const;
+
+ // The casts are used by some of the ScratchRegister implementations.
+ operator MacroAssembler&() const { return masm; }
+ operator BaseRegAlloc&() { return ra; }
+
+ //////////////////////////////////////////////////////////////////////////////
+ //
+ // Locals.
+ //
+  // Implementations are in WasmBCClass-inl.h.
+
+ // Assert that the local at the given index has the given type, and return a
+ // reference to the Local.
+ inline const Local& localFromSlot(uint32_t slot, MIRType type);
+
+ //////////////////////////////////////////////////////////////////////////////
+ //
+ // Out of line code management.
+
+ [[nodiscard]] OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool);
+ [[nodiscard]] bool generateOutOfLineCode();
+
+ /////////////////////////////////////////////////////////////////////////////
+ //
+ // Layering in the compiler (briefly).
+ //
+ // At the lowest layers are abstractions for registers (managed by the
+ // BaseRegAlloc and the wrappers below) and the stack frame (managed by the
+ // BaseStackFrame).
+ //
+ // The registers and frame are in turn used by the value abstraction, which is
+ // implemented by the Stk type and backed by the value stack. Values may be
+ // stored in registers, in the frame, or may be latent constants, and the
+ // value stack handles storage mostly transparently in its push and pop
+ // routines.
+ //
+ // In turn, the pop routines bring values into registers so that we can
+ // compute on them, and the push routines move values to the stack (where they
+ // may still reside in registers until the registers are needed or the value
+ // must be in memory).
+ //
+ // Routines for managing parameters and results (for blocks or calls) may also
+ // manipulate the stack directly.
+ //
+ // At the top are the code generators: methods that use the poppers and
+ // pushers and other utilities to move values into place, and that emit code
+ // to compute on those values or change control flow.
+
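+  // As a concrete (illustrative, not normative) sketch of that shape, a
+  // typical binary-operation emitter reads roughly like:
+  //
+  //   RegI32 rhs = popI32();    // pop operands into registers
+  //   RegI32 lhs = popI32();
+  //   masm.add32(rhs, lhs);     // compute in place
+  //   freeI32(rhs);             // release the consumed register
+  //   pushI32(lhs);             // push the result back on the value stack
+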
+ /////////////////////////////////////////////////////////////////////////////
+ //
+ // Register management. These are simply strongly-typed wrappers that
+ // delegate to the register allocator.
+
+ inline bool isAvailableI32(RegI32 r);
+ inline bool isAvailableI64(RegI64 r);
+ inline bool isAvailableRef(RegRef r);
+ inline bool isAvailablePtr(RegPtr r);
+ inline bool isAvailableF32(RegF32 r);
+ inline bool isAvailableF64(RegF64 r);
+#ifdef ENABLE_WASM_SIMD
+ inline bool isAvailableV128(RegV128 r);
+#endif
+
+ // Allocate any register
+ [[nodiscard]] inline RegI32 needI32();
+ [[nodiscard]] inline RegI64 needI64();
+ [[nodiscard]] inline RegRef needRef();
+ [[nodiscard]] inline RegPtr needPtr();
+ [[nodiscard]] inline RegF32 needF32();
+ [[nodiscard]] inline RegF64 needF64();
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] inline RegV128 needV128();
+#endif
+
+ // Allocate a specific register
+ inline void needI32(RegI32 specific);
+ inline void needI64(RegI64 specific);
+ inline void needRef(RegRef specific);
+ inline void needPtr(RegPtr specific);
+ inline void needF32(RegF32 specific);
+ inline void needF64(RegF64 specific);
+#ifdef ENABLE_WASM_SIMD
+ inline void needV128(RegV128 specific);
+#endif
+
+ template <typename RegType>
+ inline RegType need();
+
+ // Just a shorthand.
+ inline void need2xI32(RegI32 r0, RegI32 r1);
+ inline void need2xI64(RegI64 r0, RegI64 r1);
+
+ // Get a register but do not sync the stack to free one up. This will crash
+ // if no register is available.
+ inline void needI32NoSync(RegI32 r);
+
+#if defined(JS_CODEGEN_ARM)
+ // Allocate a specific register pair (even-odd register numbers).
+ [[nodiscard]] inline RegI64 needI64Pair();
+#endif
+
+ inline void freeAny(AnyReg r);
+ inline void freeI32(RegI32 r);
+ inline void freeI64(RegI64 r);
+ inline void freeRef(RegRef r);
+ inline void freePtr(RegPtr r);
+ inline void freeF32(RegF32 r);
+ inline void freeF64(RegF64 r);
+#ifdef ENABLE_WASM_SIMD
+ inline void freeV128(RegV128 r);
+#endif
+
+ template <typename RegType>
+ inline void free(RegType r);
+
+ // Free r if it is not invalid.
+ inline void maybeFree(RegI32 r);
+ inline void maybeFree(RegI64 r);
+ inline void maybeFree(RegF32 r);
+ inline void maybeFree(RegF64 r);
+ inline void maybeFree(RegRef r);
+ inline void maybeFree(RegPtr r);
+#ifdef ENABLE_WASM_SIMD
+ inline void maybeFree(RegV128 r);
+#endif
+
+ // On 64-bit systems, `except` must equal r and this is a no-op. On 32-bit
+ // systems, `except` must equal the high or low part of a pair and the other
+ // part of the pair is freed.
+ inline void freeI64Except(RegI64 r, RegI32 except);
+
+ // Return the 32-bit low part of the 64-bit register, do not free anything.
+ inline RegI32 fromI64(RegI64 r);
+
+ // If r is valid, return fromI64(r), otherwise an invalid RegI32.
+ inline RegI32 maybeFromI64(RegI64 r);
+
+#ifdef JS_PUNBOX64
+ // On 64-bit systems, reinterpret r as 64-bit.
+ inline RegI64 fromI32(RegI32 r);
+#endif
+
+ // Widen r to 64 bits; this may allocate another register to form a pair.
+ // Note this does not generate code for sign/zero extension.
+ inline RegI64 widenI32(RegI32 r);
+
+ // Narrow r to 32 bits; this may free part of a pair. Note this does not
+ // generate code to canonicalize the value on 64-bit systems.
+ inline RegI32 narrowI64(RegI64 r);
+ inline RegI32 narrowRef(RegRef r);
+
+ // Return the 32-bit low part of r.
+ inline RegI32 lowPart(RegI64 r);
+
+ // On 64-bit systems, return an invalid register. On 32-bit systems, return
+ // the high part of a pair.
+ inline RegI32 maybeHighPart(RegI64 r);
+
+ // On 64-bit systems, do nothing. On 32-bit systems, clear the high register.
+ inline void maybeClearHighPart(RegI64 r);
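+
+ // Illustrative sketch (assumed usage): a narrowing operation such as
+ // i32.wrap_i64 can reuse the low half of the popped 64-bit register instead
+ // of allocating a fresh one:
+ //
+ //   RegI64 rs = popI64();
+ //   RegI32 rd = narrowI64(rs);  // frees the high half on 32-bit systems
+ //   pushI32(rd);                // (a real emitter must also canonicalize
+ //                               // rd on 64-bit systems, per the note above)
+ //
+ // Conversely, widenI32() produces an i64 register without emitting any
+ // sign/zero-extension code; the caller emits that explicitly if needed.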
+
+ //////////////////////////////////////////////////////////////////////////////
+ //
+ // Values and value stack: Low-level methods for moving Stk values of specific
+ // kinds to registers.
+
+ inline void loadConstI32(const Stk& src, RegI32 dest);
+ inline void loadMemI32(const Stk& src, RegI32 dest);
+ inline void loadLocalI32(const Stk& src, RegI32 dest);
+ inline void loadRegisterI32(const Stk& src, RegI32 dest);
+ inline void loadConstI64(const Stk& src, RegI64 dest);
+ inline void loadMemI64(const Stk& src, RegI64 dest);
+ inline void loadLocalI64(const Stk& src, RegI64 dest);
+ inline void loadRegisterI64(const Stk& src, RegI64 dest);
+ inline void loadConstRef(const Stk& src, RegRef dest);
+ inline void loadMemRef(const Stk& src, RegRef dest);
+ inline void loadLocalRef(const Stk& src, RegRef dest);
+ inline void loadRegisterRef(const Stk& src, RegRef dest);
+ inline void loadConstF64(const Stk& src, RegF64 dest);
+ inline void loadMemF64(const Stk& src, RegF64 dest);
+ inline void loadLocalF64(const Stk& src, RegF64 dest);
+ inline void loadRegisterF64(const Stk& src, RegF64 dest);
+ inline void loadConstF32(const Stk& src, RegF32 dest);
+ inline void loadMemF32(const Stk& src, RegF32 dest);
+ inline void loadLocalF32(const Stk& src, RegF32 dest);
+ inline void loadRegisterF32(const Stk& src, RegF32 dest);
+#ifdef ENABLE_WASM_SIMD
+ inline void loadConstV128(const Stk& src, RegV128 dest);
+ inline void loadMemV128(const Stk& src, RegV128 dest);
+ inline void loadLocalV128(const Stk& src, RegV128 dest);
+ inline void loadRegisterV128(const Stk& src, RegV128 dest);
+#endif
+
+ //////////////////////////////////////////////////////////////////////////
+ //
+ // Values and value stack: Mid-level routines for moving Stk values of any
+ // kind to registers.
+
+ inline void loadI32(const Stk& src, RegI32 dest);
+ inline void loadI64(const Stk& src, RegI64 dest);
+#if !defined(JS_PUNBOX64)
+ inline void loadI64Low(const Stk& src, RegI32 dest);
+ inline void loadI64High(const Stk& src, RegI32 dest);
+#endif
+ inline void loadF64(const Stk& src, RegF64 dest);
+ inline void loadF32(const Stk& src, RegF32 dest);
+#ifdef ENABLE_WASM_SIMD
+ inline void loadV128(const Stk& src, RegV128 dest);
+#endif
+ inline void loadRef(const Stk& src, RegRef dest);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Value stack: stack management.
+
+ // Flush all local and register value stack elements to memory.
+ inline void sync();
+
+ // Save a register on the value stack temporarily.
+ void saveTempPtr(const RegPtr& r);
+
+ // Restore a temporarily saved register from the value stack.
+ void restoreTempPtr(const RegPtr& r);
+
+ // This is an optimization used to avoid calling sync for setLocal: if the
+ // local does not exist unresolved on the value stack then we can skip the
+ // sync.
+ inline bool hasLocal(uint32_t slot);
+
+ // Sync the local if necessary. (This currently syncs everything if a sync is
+ // needed at all.)
+ inline void syncLocal(uint32_t slot);
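+
+ // Illustrative flow (a sketch only) for a local.set-style operation:
+ //
+ //   syncLocal(slot);  // no-op unless the local's old value is still
+ //                     // referenced, unresolved, on the value stack
+ //   // ... then overwrite the local's frame slot with the new value ...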
+
+ // Return the amount of execution stack consumed by the top numval
+ // values on the value stack.
+ inline size_t stackConsumed(size_t numval);
+
+ // Drop one value off the stack, possibly also moving the physical stack
+ // pointer.
+ inline void dropValue();
+
+#ifdef DEBUG
+ // Check that we're not leaking registers by comparing the
+ // state of the stack + available registers with the set of
+ // all available registers.
+
+ // Call this between opcodes.
+ void performRegisterLeakCheck();
+
+ // This can be called at any point, really, but typically just after
+ // performRegisterLeakCheck().
+ void assertStackInvariants() const;
+
+ // Count the number of memory references on the value stack.
+ inline size_t countMemRefsOnStk();
+
+ // Print the stack to stderr.
+ void showStack(const char* who) const;
+#endif
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Value stack: pushers of values.
+
+ // Push a register onto the value stack.
+ inline void pushAny(AnyReg r);
+ inline void pushI32(RegI32 r);
+ inline void pushI64(RegI64 r);
+ inline void pushRef(RegRef r);
+ inline void pushPtr(RegPtr r);
+ inline void pushF64(RegF64 r);
+ inline void pushF32(RegF32 r);
+#ifdef ENABLE_WASM_SIMD
+ inline void pushV128(RegV128 r);
+#endif
+
+ // Template variation of the foregoing, for use by templated emitters.
+ template <typename RegType>
+ inline void push(RegType item);
+
+ // Push a constant value onto the stack. pushI32 can also take uint32_t, and
+ // pushI64 can take uint64_t; the semantics are the same. Appropriate sign
+ // extension for a 32-bit value on a 64-bit architecture happens when the
+ // value is popped, see the definition of moveImm32.
+ inline void pushI32(int32_t v);
+ inline void pushI64(int64_t v);
+ inline void pushRef(intptr_t v);
+ inline void pushPtr(intptr_t v);
+ inline void pushF64(double v);
+ inline void pushF32(float v);
+#ifdef ENABLE_WASM_SIMD
+ inline void pushV128(V128 v);
+#endif
+ inline void pushConstRef(intptr_t v);
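+
+ // For instance, pushI32(int32_t(-1)) and pushI32(uint32_t(0xFFFFFFFF))
+ // create the same constant stack entry; how the bits land in a 64-bit
+ // register is decided by moveImm32() when the value is popped.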
+
+ // Push the local slot onto the stack. The slot will not be read here; it
+ // will be read when it is consumed, or when a side effect to the slot forces
+ // its value to be saved.
+ inline void pushLocalI32(uint32_t slot);
+ inline void pushLocalI64(uint32_t slot);
+ inline void pushLocalRef(uint32_t slot);
+ inline void pushLocalF64(uint32_t slot);
+ inline void pushLocalF32(uint32_t slot);
+#ifdef ENABLE_WASM_SIMD
+ inline void pushLocalV128(uint32_t slot);
+#endif
+
+ // Push a U32 as an I64, zero-extending it in the process.
+ inline void pushU32AsI64(RegI32 rs);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Value stack: poppers and peekers of values.
+
+ // Pop some value off the stack.
+ inline AnyReg popAny();
+ inline AnyReg popAny(AnyReg specific);
+
+ // Call only from other popI32() variants. v must be the stack top. May pop
+ // the CPU stack.
+ inline void popI32(const Stk& v, RegI32 dest);
+
+ [[nodiscard]] inline RegI32 popI32();
+ inline RegI32 popI32(RegI32 specific);
+
+#ifdef ENABLE_WASM_SIMD
+ // Call only from other popV128() variants. v must be the stack top. May pop
+ // the CPU stack.
+ inline void popV128(const Stk& v, RegV128 dest);
+
+ [[nodiscard]] inline RegV128 popV128();
+ inline RegV128 popV128(RegV128 specific);
+#endif
+
+ // Call only from other popI64() variants. v must be the stack top. May pop
+ // the CPU stack.
+ inline void popI64(const Stk& v, RegI64 dest);
+
+ [[nodiscard]] inline RegI64 popI64();
+ inline RegI64 popI64(RegI64 specific);
+
+ // Call only from other popRef() variants. v must be the stack top. May pop
+ // the CPU stack.
+ inline void popRef(const Stk& v, RegRef dest);
+
+ inline RegRef popRef(RegRef specific);
+ [[nodiscard]] inline RegRef popRef();
+
+ // Call only from other popPtr() variants. v must be the stack top. May pop
+ // the CPU stack.
+ inline void popPtr(const Stk& v, RegPtr dest);
+
+ inline RegPtr popPtr(RegPtr specific);
+ [[nodiscard]] inline RegPtr popPtr();
+
+ // Call only from other popF64() variants. v must be the stack top. May pop
+ // the CPU stack.
+ inline void popF64(const Stk& v, RegF64 dest);
+
+ [[nodiscard]] inline RegF64 popF64();
+ inline RegF64 popF64(RegF64 specific);
+
+ // Call only from other popF32() variants. v must be the stack top. May pop
+ // the CPU stack.
+ inline void popF32(const Stk& v, RegF32 dest);
+
+ [[nodiscard]] inline RegF32 popF32();
+ inline RegF32 popF32(RegF32 specific);
+
+ // Templated variation of the foregoing, for use by templated emitters.
+ template <typename RegType>
+ inline RegType pop();
+
+ // Constant poppers will return true and pop the value if the stack top is a
+ // constant of the appropriate type; otherwise pop nothing and return false.
+ [[nodiscard]] inline bool hasConst() const;
+ [[nodiscard]] inline bool popConst(int32_t* c);
+ [[nodiscard]] inline bool popConst(int64_t* c);
+ [[nodiscard]] inline bool peekConst(int32_t* c);
+ [[nodiscard]] inline bool peekConst(int64_t* c);
+ [[nodiscard]] inline bool peek2xConst(int32_t* c0, int32_t* c1);
+ [[nodiscard]] inline bool popConstPositivePowerOfTwo(int32_t* c,
+ uint_fast8_t* power,
+ int32_t cutoff);
+ [[nodiscard]] inline bool popConstPositivePowerOfTwo(int64_t* c,
+ uint_fast8_t* power,
+ int64_t cutoff);
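+
+ // Illustrative sketch (assumed usage): an unsigned-division emitter can
+ // strength-reduce when the divisor is a positive power of two still sitting
+ // on the stack as a constant:
+ //
+ //   int32_t c;
+ //   uint_fast8_t power;
+ //   if (popConstPositivePowerOfTwo(&c, &power, 0)) {
+ //     RegI32 r = popI32();
+ //     masm.rshift32(Imm32(power), r);  // unsigned x / 2^power == x >> power
+ //     pushI32(r);
+ //   }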
+
+ // Shorthand: Pop r1, then r0.
+ inline void pop2xI32(RegI32* r0, RegI32* r1);
+ inline void pop2xI64(RegI64* r0, RegI64* r1);
+ inline void pop2xF32(RegF32* r0, RegF32* r1);
+ inline void pop2xF64(RegF64* r0, RegF64* r1);
+#ifdef ENABLE_WASM_SIMD
+ inline void pop2xV128(RegV128* r0, RegV128* r1);
+#endif
+ inline void pop2xRef(RegRef* r0, RegRef* r1);
+
+ // Pop to a specific register
+ inline RegI32 popI32ToSpecific(RegI32 specific);
+ inline RegI64 popI64ToSpecific(RegI64 specific);
+
+#ifdef JS_CODEGEN_ARM
+ // Pop an I64 as a valid register pair.
+ inline RegI64 popI64Pair();
+#endif
+
+ // Pop an I64 but narrow it and return the narrowed part.
+ inline RegI32 popI64ToI32();
+ inline RegI32 popI64ToSpecificI32(RegI32 specific);
+
+ // Pop the stack until it has the desired size, but do not move the physical
+ // stack pointer.
+ inline void popValueStackTo(uint32_t stackSize);
+
+ // Pop the given number of elements off the value stack, but do not move
+ // the physical stack pointer.
+ inline void popValueStackBy(uint32_t items);
+
+ // Peek into the stack at relativeDepth from the top.
+ inline Stk& peek(uint32_t relativeDepth);
+
+ // Peek the reference value at the specified depth and load it into a
+ // register.
+ inline void peekRefAt(uint32_t depth, RegRef dest);
+
+ // Peek at the value on the top of the stack and return true if it is a Local
+ // of any type.
+ [[nodiscard]] inline bool peekLocal(uint32_t* local);
+
+ ////////////////////////////////////////////////////////////////////////////
+ //
+ // Block parameters and results.
+ //
+ // Blocks may have multiple parameters and multiple results. Blocks can also
+ // be the target of branches: the entry for loops, and the exit for
+ // non-loops.
+ //
+ // Passing multiple values to a non-branch target (i.e., the entry of a
+ // "block") falls out naturally: any items on the value stack can flow
+ // directly from one block to another.
+ //
+ // However, for branch targets, we need to allocate well-known locations for
+ // the branch values. The approach taken in the baseline compiler is to
+ // allocate registers to the top N values (currently N=1), and then stack
+ // locations for the rest.
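+ //
+ // For example, with the current N=1 policy, a branch target whose result
+ // type is (f64, i32) would expect the i32 -- the most recently pushed value
+ // -- in a well-known register and the f64 in a well-known stack location.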
+ //
+
+ // Types of result registers that interest us for result-manipulating
+ // functions.
+ enum class ResultRegKind {
+ // General and floating result registers.
+ All,
+
+ // General result registers only.
+ OnlyGPRs
+ };
+
+ // This is a flag ultimately intended for popBlockResults() that specifies how
+ // the CPU stack should be handled after the result values have been
+ // processed.
+ enum class ContinuationKind {
+ // Adjust the stack for a fallthrough: do nothing.
+ Fallthrough,
+
+ // Adjust the stack for a jump: make the stack conform to the
+ // expected stack at the target
+ Jump
+ };
+
+ // TODO: It's definitely disputable whether the result register management is
+ // hot enough to warrant inlining at the outermost level.
+
+ inline void needResultRegisters(ResultType type, ResultRegKind which);
+#ifdef JS_64BIT
+ inline void widenInt32ResultRegisters(ResultType type);
+#endif
+ inline void freeResultRegisters(ResultType type, ResultRegKind which);
+ inline void needIntegerResultRegisters(ResultType type);
+ inline void freeIntegerResultRegisters(ResultType type);
+ inline void needResultRegisters(ResultType type);
+ inline void freeResultRegisters(ResultType type);
+ void assertResultRegistersAvailable(ResultType type);
+ inline void captureResultRegisters(ResultType type);
+ inline void captureCallResultRegisters(ResultType type);
+
+ void popRegisterResults(ABIResultIter& iter);
+ void popStackResults(ABIResultIter& iter, StackHeight stackBase);
+
+ void popBlockResults(ResultType type, StackHeight stackBase,
+ ContinuationKind kind);
+
+ // This function is similar to popBlockResults, but additionally handles the
+ // implicit exception pointer that is pushed to the value stack on entry to
+ // a catch handler by dropping it appropriately.
+ void popCatchResults(ResultType type, StackHeight stackBase);
+
+ Stk captureStackResult(const ABIResult& result, StackHeight resultsBase,
+ uint32_t stackResultBytes);
+
+ [[nodiscard]] bool pushResults(ResultType type, StackHeight resultsBase);
+ [[nodiscard]] bool pushBlockResults(ResultType type);
+
+ // A combination of popBlockResults + pushBlockResults, used when entering a
+ // block with a control-flow join (loops) or split (if) to shuffle the
+ // fallthrough block parameters into the locations expected by the
+ // continuation.
+ //
+ // This function should only be called when entering a block with a
+ // control-flow join at the entry, where there are no live temporaries in
+ // the current block.
+ [[nodiscard]] bool topBlockParams(ResultType type);
+
+ // A combination of popBlockResults + pushBlockResults, used before branches
+ // where we don't know the target (br_if / br_table). If and when the branch
+ // is taken, the stack results will be shuffled down into place. For br_if
+ // that has fallthrough, the parameters for the untaken branch flow through to
+ // the continuation.
+ [[nodiscard]] bool topBranchParams(ResultType type, StackHeight* height);
+
+ // Conditional branches with fallthrough are preceded by a topBranchParams, so
+ // we know that there are no stack results that need to be materialized. In
+ // that case, we can just shuffle the whole block down before popping the
+ // stack.
+ void shuffleStackResultsBeforeBranch(StackHeight srcHeight,
+ StackHeight destHeight, ResultType type);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Stack maps
+
+ // Various methods for creating a stackmap. Stackmaps are indexed by the
+ // lowest address of the instruction immediately *after* the instruction of
+ // interest. In practice that means either: the return point of a call, the
+ // instruction immediately after a trap instruction (the "resume"
+ // instruction), or the instruction immediately following a no-op (when
+ // debugging is enabled).
+
+ // Create a vanilla stackmap.
+ [[nodiscard]] bool createStackMap(const char* who);
+
+ // Create a stackmap as vanilla, but for a custom assembler offset.
+ [[nodiscard]] bool createStackMap(const char* who,
+ CodeOffset assemblerOffset);
+
+ // Create a stackmap as vanilla, and note the presence of a ref-typed
+ // DebugFrame on the stack.
+ [[nodiscard]] bool createStackMap(
+ const char* who, HasDebugFrameWithLiveRefs debugFrameWithLiveRefs);
+
+ // The most general stackmap construction.
+ [[nodiscard]] bool createStackMap(
+ const char* who, const ExitStubMapVector& extras,
+ uint32_t assemblerOffset,
+ HasDebugFrameWithLiveRefs debugFrameWithLiveRefs);
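+
+ // Illustrative sketch (assumed usage): since the map is keyed to the return
+ // point, stackmap creation immediately follows the call it describes:
+ //
+ //   CodeOffset raOffset = callDefinition(funcIndex, baselineCall);
+ //   if (!createStackMap("call definition", raOffset)) {
+ //     return false;
+ //   }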
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Control stack
+
+ inline void initControl(Control& item, ResultType params);
+ inline Control& controlItem();
+ inline Control& controlItem(uint32_t relativeDepth);
+ inline Control& controlOutermost();
+ inline LabelKind controlKind(uint32_t relativeDepth);
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Debugger API
+
+ // Insert a breakpoint almost anywhere. This will create a call, with all the
+ // overhead that entails.
+ void insertBreakablePoint(CallSiteDesc::Kind kind);
+
+ // Insert code at the end of a function for breakpoint filtering.
+ void insertBreakpointStub();
+
+ // Debugger API used at the return point: shuffle register return values off
+ // to memory for the debugger to see; and get them back again.
+ void saveRegisterReturnValues(const ResultType& resultType);
+ void restoreRegisterReturnValues(const ResultType& resultType);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Function prologue and epilogue.
+
+ // Set up and tear down frame, execute prologue and epilogue.
+ [[nodiscard]] bool beginFunction();
+ [[nodiscard]] bool endFunction();
+
+ // Move return values to memory before returning, as appropriate
+ void popStackReturnValues(const ResultType& resultType);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Calls.
+
+ void beginCall(FunctionCall& call, UseABI useABI,
+ RestoreRegisterStateAndRealm restoreRegisterStateAndRealm);
+ void endCall(FunctionCall& call, size_t stackSpace);
+ void startCallArgs(size_t stackArgAreaSizeUnaligned, FunctionCall* call);
+ ABIArg reservePointerArgument(FunctionCall* call);
+ void passArg(ValType type, const Stk& arg, FunctionCall* call);
+ CodeOffset callDefinition(uint32_t funcIndex, const FunctionCall& call);
+ CodeOffset callSymbolic(SymbolicAddress callee, const FunctionCall& call);
+
+ // Precondition for the call*() methods: sync()
+
+ bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
+ const Stk& indexVal, const FunctionCall& call,
+ CodeOffset* fastCallOffset, CodeOffset* slowCallOffset);
+ CodeOffset callImport(unsigned instanceDataOffset, const FunctionCall& call);
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ void callRef(const Stk& calleeRef, const FunctionCall& call,
+ CodeOffset* fastCallOffset, CodeOffset* slowCallOffset);
+#endif
+ CodeOffset builtinCall(SymbolicAddress builtin, const FunctionCall& call);
+ CodeOffset builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
+ const ABIArg& instanceArg,
+ const FunctionCall& call);
+ [[nodiscard]] bool pushCallResults(const FunctionCall& call, ResultType type,
+ const StackResultsLoc& loc);
+
+ // Helpers to pick up the returned value from the return register.
+ inline RegI32 captureReturnedI32();
+ inline RegI64 captureReturnedI64();
+ inline RegF32 captureReturnedF32(const FunctionCall& call);
+ inline RegF64 captureReturnedF64(const FunctionCall& call);
+#ifdef ENABLE_WASM_SIMD
+ inline RegV128 captureReturnedV128(const FunctionCall& call);
+#endif
+ inline RegRef captureReturnedRef();
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Register-to-register moves. These emit nothing if src == dest.
+
+ inline void moveI32(RegI32 src, RegI32 dest);
+ inline void moveI64(RegI64 src, RegI64 dest);
+ inline void moveRef(RegRef src, RegRef dest);
+ inline void movePtr(RegPtr src, RegPtr dest);
+ inline void moveF64(RegF64 src, RegF64 dest);
+ inline void moveF32(RegF32 src, RegF32 dest);
+#ifdef ENABLE_WASM_SIMD
+ inline void moveV128(RegV128 src, RegV128 dest);
+#endif
+
+ template <typename RegType>
+ inline void move(RegType src, RegType dest);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Immediate-to-register moves.
+ //
+ // The compiler depends on moveImm32() clearing the high bits of a 64-bit
+ // register on 64-bit systems except MIPS64 And LoongArch64 where high bits
+ // register on 64-bit systems, except on MIPS64 and LoongArch64 where the
+ // high bits are sign-extended from the lower bits; see the doc block
+ // "64-bit GPRs carrying
+
+ inline void moveImm32(int32_t v, RegI32 dest);
+ inline void moveImm64(int64_t v, RegI64 dest);
+ inline void moveImmRef(intptr_t v, RegRef dest);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Sundry low-level code generators.
+
+ // Check the interrupt flag, trap if it is set.
+ [[nodiscard]] bool addInterruptCheck();
+
+ // Check that the value is not zero, trap if it is.
+ void checkDivideByZero(RegI32 rhs);
+ void checkDivideByZero(RegI64 r);
+
+ // Check that a signed division will not overflow; if it would, either trap
+ // or flush the result to zero, according to `zeroOnOverflow`.
+ void checkDivideSignedOverflow(RegI32 rhs, RegI32 srcDest, Label* done,
+ bool zeroOnOverflow);
+ void checkDivideSignedOverflow(RegI64 rhs, RegI64 srcDest, Label* done,
+ bool zeroOnOverflow);
+
+ // Emit a jump table to be used by tableSwitch()
+ void jumpTable(const LabelVector& labels, Label* theTable);
+
+ // Emit a table switch, `theTable` is the jump table.
+ void tableSwitch(Label* theTable, RegI32 switchValue, Label* dispatchCode);
+
+ // Compare i64 and set an i32 boolean result according to the condition.
+ inline void cmp64Set(Assembler::Condition cond, RegI64 lhs, RegI64 rhs,
+ RegI32 dest);
+
+ // Round floating to integer.
+ [[nodiscard]] inline bool supportsRoundInstruction(RoundingMode mode);
+ inline void roundF32(RoundingMode roundingMode, RegF32 f0);
+ inline void roundF64(RoundingMode roundingMode, RegF64 f0);
+
+ // These are just wrappers around assembler functions, but without
+ // type-specific names, and using our register abstractions for better type
+ // discipline.
+ inline void branchTo(Assembler::DoubleCondition c, RegF64 lhs, RegF64 rhs,
+ Label* l);
+ inline void branchTo(Assembler::DoubleCondition c, RegF32 lhs, RegF32 rhs,
+ Label* l);
+ inline void branchTo(Assembler::Condition c, RegI32 lhs, RegI32 rhs,
+ Label* l);
+ inline void branchTo(Assembler::Condition c, RegI32 lhs, Imm32 rhs, Label* l);
+ inline void branchTo(Assembler::Condition c, RegI64 lhs, RegI64 rhs,
+ Label* l);
+ inline void branchTo(Assembler::Condition c, RegI64 lhs, Imm64 rhs, Label* l);
+ inline void branchTo(Assembler::Condition c, RegRef lhs, ImmWord rhs,
+ Label* l);
+
+#ifdef JS_CODEGEN_X86
+ // Store r in instance scratch storage after first loading the instance from
+ // the frame into the regForInstance. regForInstance must be neither of the
+ // registers in r.
+ void stashI64(RegPtr regForInstance, RegI64 r);
+
+ // Load r from the instance scratch storage after first loading the instance
+ // from the frame into the regForInstance. regForInstance can be one of the
+ // registers in r.
+ void unstashI64(RegPtr regForInstance, RegI64 r);
+#endif
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Code generators for actual operations.
+
+ template <typename RegType, typename IntType>
+ void quotientOrRemainder(RegType rs, RegType rsd, RegType reserved,
+ IsUnsigned isUnsigned, ZeroOnOverflow zeroOnOverflow,
+ bool isConst, IntType c,
+ void (*operate)(MacroAssembler&, RegType, RegType,
+ RegType, IsUnsigned));
+
+ [[nodiscard]] bool truncateF32ToI32(RegF32 src, RegI32 dest,
+ TruncFlags flags);
+ [[nodiscard]] bool truncateF64ToI32(RegF64 src, RegI32 dest,
+ TruncFlags flags);
+
+#ifndef RABALDR_FLOAT_TO_I64_CALLOUT
+ [[nodiscard]] RegF64 needTempForFloatingToI64(TruncFlags flags);
+ [[nodiscard]] bool truncateF32ToI64(RegF32 src, RegI64 dest, TruncFlags flags,
+ RegF64 temp);
+ [[nodiscard]] bool truncateF64ToI64(RegF64 src, RegI64 dest, TruncFlags flags,
+ RegF64 temp);
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+ [[nodiscard]] RegI32 needConvertI64ToFloatTemp(ValType to, bool isUnsigned);
+ void convertI64ToF32(RegI64 src, bool isUnsigned, RegF32 dest, RegI32 temp);
+ void convertI64ToF64(RegI64 src, bool isUnsigned, RegF64 dest, RegI32 temp);
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Global variable access.
+
+ Address addressOfGlobalVar(const GlobalDesc& global, RegPtr tmp);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Table access.
+
+ Address addressOfTableField(uint32_t tableIndex, uint32_t fieldOffset,
+ RegPtr instance);
+ void loadTableLength(uint32_t tableIndex, RegPtr instance, RegI32 length);
+ void loadTableElements(uint32_t tableIndex, RegPtr instance, RegPtr elements);
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Heap access.
+
+ void bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check,
+ uint32_t local);
+ void bceLocalIsUpdated(uint32_t local);
+
+ // Fold offsets into ptr and bounds check as necessary. The instance will be
+ // valid in cases where it's needed.
+ template <typename RegIndexType>
+ void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegIndexType ptr);
+
+ void branchAddNoOverflow(uint64_t offset, RegI32 ptr, Label* ok);
+ void branchTestLowZero(RegI32 ptr, Imm32 mask, Label* ok);
+ void boundsCheck4GBOrLargerAccess(RegPtr instance, RegI32 ptr, Label* ok);
+ void boundsCheckBelow4GBAccess(RegPtr instance, RegI32 ptr, Label* ok);
+
+ void branchAddNoOverflow(uint64_t offset, RegI64 ptr, Label* ok);
+ void branchTestLowZero(RegI64 ptr, Imm32 mask, Label* ok);
+ void boundsCheck4GBOrLargerAccess(RegPtr instance, RegI64 ptr, Label* ok);
+ void boundsCheckBelow4GBAccess(RegPtr instance, RegI64 ptr, Label* ok);
+
+#if defined(WASM_HAS_HEAPREG)
+ template <typename RegIndexType>
+ BaseIndex prepareAtomicMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check, RegPtr instance,
+ RegIndexType ptr);
+#else
+ // Some consumers depend on the returned Address not incorporating instance,
+ // as instance may be the scratch register.
+ template <typename RegIndexType>
+ Address prepareAtomicMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check, RegPtr instance,
+ RegIndexType ptr);
+#endif
+
+ template <typename RegIndexType>
+ void computeEffectiveAddress(MemoryAccessDesc* access);
+
+ [[nodiscard]] bool needInstanceForAccess(const AccessCheck& check);
+
+ // ptr and dest may be the same iff dest is I32.
+ // This may destroy ptr even if ptr and dest are not the same.
+ void executeLoad(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI32 ptr, AnyReg dest, RegI32 temp);
+ void load(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
+ RegI32 ptr, AnyReg dest, RegI32 temp);
+#ifdef ENABLE_WASM_MEMORY64
+ void load(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
+ RegI64 ptr, AnyReg dest, RegI64 temp);
+#endif
+
+ template <typename RegType>
+ void doLoadCommon(MemoryAccessDesc* access, AccessCheck check, ValType type);
+
+ void loadCommon(MemoryAccessDesc* access, AccessCheck check, ValType type);
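+
+ // Rough illustrative flow for an i32 load from a MemoryAccessDesc* `access`
+ // (a sketch only; the real sequencing lives in doLoadCommon and the
+ // platform-specific executeLoad, and `temp` stands for a platform-dependent
+ // scratch register):
+ //
+ //   AccessCheck check;
+ //   RegI32 rp = popMemoryAccess<RegI32>(access, &check);
+ //   RegPtr instance = maybeLoadInstanceForAccess(check);
+ //   RegI32 rv = needI32();
+ //   load(access, &check, instance, rp, AnyReg(rv), temp);
+ //   pushI32(rv);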
+
+ // ptr and src must not be the same register.
+ // This may destroy ptr and src.
+ void executeStore(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI32 ptr, AnyReg src, RegI32 temp);
+ void store(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
+ RegI32 ptr, AnyReg src, RegI32 temp);
+#ifdef ENABLE_WASM_MEMORY64
+ void store(MemoryAccessDesc* access, AccessCheck* check, RegPtr instance,
+ RegI64 ptr, AnyReg src, RegI64 temp);
+#endif
+
+ template <typename RegType>
+ void doStoreCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType resultType);
+
+ void storeCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType resultType);
+
+ void atomicLoad(MemoryAccessDesc* access, ValType type);
+#if !defined(JS_64BIT)
+ template <typename RegIndexType>
+ void atomicLoad64(MemoryAccessDesc* desc);
+#endif
+
+ void atomicStore(MemoryAccessDesc* access, ValType type);
+
+ void atomicRMW(MemoryAccessDesc* access, ValType type, AtomicOp op);
+ template <typename RegIndexType>
+ void atomicRMW32(MemoryAccessDesc* access, ValType type, AtomicOp op);
+ template <typename RegIndexType>
+ void atomicRMW64(MemoryAccessDesc* access, ValType type, AtomicOp op);
+
+ void atomicXchg(MemoryAccessDesc* access, ValType type);
+ template <typename RegIndexType>
+ void atomicXchg64(MemoryAccessDesc* access, WantResult wantResult);
+ template <typename RegIndexType>
+ void atomicXchg32(MemoryAccessDesc* access, ValType type);
+
+ void atomicCmpXchg(MemoryAccessDesc* access, ValType type);
+ template <typename RegIndexType>
+ void atomicCmpXchg32(MemoryAccessDesc* access, ValType type);
+ template <typename RegIndexType>
+ void atomicCmpXchg64(MemoryAccessDesc* access, ValType type);
+
+ template <typename RegType>
+ RegType popConstMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
+ template <typename RegType>
+ RegType popMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
+
+ void pushHeapBase();
+
+ ////////////////////////////////////////////////////////////////////////////
+ //
+ // Platform-specific popping and register targeting.
+
+ // The simple popping methods pop values into targeted registers; the caller
+ // can free registers using standard functions. These are always called
+ // popXForY where X says something about types and Y something about the
+ // operation being targeted.
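+ //
+ // For example, popI32RhsForShift exists because some targets require the
+ // variable shift count in a specific register (cl on x86/x64) while others
+ // accept any register; the platform-specific implementations hide that
+ // difference from the generic emitters.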
+
+ RegI32 needRotate64Temp();
+ void popAndAllocateForDivAndRemI32(RegI32* r0, RegI32* r1, RegI32* reserved);
+ void popAndAllocateForMulI64(RegI64* r0, RegI64* r1, RegI32* temp);
+#ifndef RABALDR_INT_DIV_I64_CALLOUT
+ void popAndAllocateForDivAndRemI64(RegI64* r0, RegI64* r1, RegI64* reserved,
+ IsRemainder isRemainder);
+#endif
+ RegI32 popI32RhsForShift();
+ RegI32 popI32RhsForShiftI64();
+ RegI64 popI64RhsForShift();
+ RegI32 popI32RhsForRotate();
+ RegI64 popI64RhsForRotate();
+ void popI32ForSignExtendI64(RegI64* r0);
+ void popI64ForSignExtendI64(RegI64* r0);
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Sundry helpers.
+
+ // Retrieve the current bytecodeOffset.
+ inline BytecodeOffset bytecodeOffset() const;
+
+ // Generate a trap instruction for the current bytecodeOffset.
+ inline void trap(Trap t) const;
+
+ // Abstracted helper for throwing, used for throw, rethrow, and rethrowing
+ // at the end of a series of catch blocks (if none matched the exception).
+ [[nodiscard]] bool throwFrom(RegRef exn);
+
+ // Load the specified tag object from the Instance.
+ void loadTag(RegPtr instanceData, uint32_t tagIndex, RegRef tagDst);
+
+ // Load the pending exception state from the Instance and then reset it.
+ void consumePendingException(RegRef* exnDst, RegRef* tagDst);
+
+ [[nodiscard]] bool startTryNote(size_t* tryNoteIndex);
+ void finishTryNote(size_t tryNoteIndex);
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Barriers support.
+
+ // This emits a GC pre-write barrier. The pre-barrier is needed when we
+ // replace a member field with a new value, and the previous field value
+ // might have no other referents, and incremental GC is ongoing. The field
+ // might belong to an object or be a stack slot or a register or a heap
+ // allocated value.
+ //
+ // let obj = { field: previousValue };
+ // obj.field = newValue; // previousValue must be marked with a pre-barrier.
+ //
+ // The `valueAddr` is the address of the location that we are about to
+ // update. This function preserves that register.
+ void emitPreBarrier(RegPtr valueAddr);
+
+ // This emits a GC post-write barrier. The post-barrier is needed when we
+ // replace a member field with a new value, the new value is in the nursery,
+ // and the containing object is a tenured object. The field must then be
+ // added to the store buffer so that the nursery can be correctly collected.
+ // The field might belong to an object or be a stack slot or a register or a
+ // heap allocated value.
+ //
+ // For the difference between 'precise' and 'imprecise', look at the
+ // documentation on PostBarrierKind.
+ //
+ // `object` is a pointer to the object that contains the field. It is used, if
+ // present, to skip adding a store buffer entry when the containing object is
+ // in the nursery. This register is preserved by this function.
+ // `valueAddr` is the address of the location that we are writing to. This
+ // register is consumed by this function.
+ // `prevValue` is the value that existed in the field before `value` was
+ // stored. This register is consumed by this function.
+ // `value` is the value that was stored in the field. This register is
+ // preserved by this function.
+ [[nodiscard]] bool emitPostBarrierImprecise(const Maybe<RegRef>& object,
+ RegPtr valueAddr, RegRef value);
+ [[nodiscard]] bool emitPostBarrierPrecise(const Maybe<RegRef>& object,
+ RegPtr valueAddr, RegRef prevValue,
+ RegRef value);
+
+ // Emits a store to a JS object pointer at the address `valueAddr`, which is
+ // inside the GC cell `object`.
+ //
+ // Preserves `object` and `value`. Consumes `valueAddr`.
+ [[nodiscard]] bool emitBarrieredStore(const Maybe<RegRef>& object,
+ RegPtr valueAddr, RegRef value,
+ PreBarrierKind preBarrierKind,
+ PostBarrierKind postBarrierKind);
+
+ // Emits a store of nullptr to a JS object pointer at the address valueAddr.
+ // Preserves `valueAddr`.
+ void emitBarrieredClear(RegPtr valueAddr);
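+
+ // Illustrative sketch (assumed usage) of a ref-typed field update, where
+ // `valueAddr` points at the field inside `object` and `value` holds the new
+ // reference:
+ //
+ //   if (!emitBarrieredStore(object, valueAddr, value, preBarrierKind,
+ //                           postBarrierKind)) {
+ //     return false;
+ //   }
+ //   // valueAddr has been consumed; object and value are still live.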
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Machinery for optimized conditional branches. See comments in the
+ // implementation.
+
+ void setLatentCompare(Assembler::Condition compareOp, ValType operandType);
+ void setLatentCompare(Assembler::DoubleCondition compareOp,
+ ValType operandType);
+ void setLatentEqz(ValType operandType);
+ bool hasLatentOp() const;
+ void resetLatentOp();
+ // Jump to the given branch, passing results, if the condition `cond` holds
+ // between `lhs` and `rhs`.
+ template <typename Cond, typename Lhs, typename Rhs>
+ [[nodiscard]] bool jumpConditionalWithResults(BranchState* b, Cond cond,
+ Lhs lhs, Rhs rhs);
+#ifdef ENABLE_WASM_GC
+ // Jump to the given branch, passing results, if the WasmGcObject `object`
+ // is a subtype of `destType`.
+ [[nodiscard]] bool jumpConditionalWithResults(BranchState* b, RegRef object,
+ RefType sourceType,
+ RefType destType,
+ bool onSuccess);
+#endif
+ template <typename Cond>
+ [[nodiscard]] bool sniffConditionalControlCmp(Cond compareOp,
+ ValType operandType);
+ [[nodiscard]] bool sniffConditionalControlEqz(ValType operandType);
+ void emitBranchSetup(BranchState* b);
+ [[nodiscard]] bool emitBranchPerform(BranchState* b);
+
+ //////////////////////////////////////////////////////////////////////
+
+ [[nodiscard]] bool emitBody();
+ [[nodiscard]] bool emitBlock();
+ [[nodiscard]] bool emitLoop();
+ [[nodiscard]] bool emitIf();
+ [[nodiscard]] bool emitElse();
+ // Used for common setup for catch and catch_all.
+ void emitCatchSetup(LabelKind kind, Control& tryCatch,
+ const ResultType& resultType);
+ // Helper function used to generate landing pad code for the special
+ // case in which `delegate` jumps to a function's body block.
+ [[nodiscard]] bool emitBodyDelegateThrowPad();
+
+ [[nodiscard]] bool emitTry();
+ [[nodiscard]] bool emitCatch();
+ [[nodiscard]] bool emitCatchAll();
+ [[nodiscard]] bool emitDelegate();
+ [[nodiscard]] bool emitThrow();
+ [[nodiscard]] bool emitRethrow();
+ [[nodiscard]] bool emitEnd();
+ [[nodiscard]] bool emitBr();
+ [[nodiscard]] bool emitBrIf();
+ [[nodiscard]] bool emitBrTable();
+ [[nodiscard]] bool emitDrop();
+ [[nodiscard]] bool emitReturn();
+
+ // A flag passed to emitCallArgs, describing how the value stack is laid out.
+ enum class CalleeOnStack {
+ // After the arguments to the call, there is a callee pushed onto value
+ // stack. This is only the case for callIndirect. To get the arguments to
+ // the call, emitCallArgs has to reach one element deeper into the value
+ // stack, to skip the callee.
+ True,
+
+ // No callee on the stack.
+ False
+ };
+
+ [[nodiscard]] bool emitCallArgs(const ValTypeVector& argTypes,
+ const StackResultsLoc& results,
+ FunctionCall* baselineCall,
+ CalleeOnStack calleeOnStack);
+
+ [[nodiscard]] bool emitCall();
+ [[nodiscard]] bool emitCallIndirect();
+ [[nodiscard]] bool emitUnaryMathBuiltinCall(SymbolicAddress callee,
+ ValType operandType);
+ [[nodiscard]] bool emitGetLocal();
+ [[nodiscard]] bool emitSetLocal();
+ [[nodiscard]] bool emitTeeLocal();
+ [[nodiscard]] bool emitGetGlobal();
+ [[nodiscard]] bool emitSetGlobal();
+ [[nodiscard]] RegPtr maybeLoadInstanceForAccess(const AccessCheck& check);
+ [[nodiscard]] RegPtr maybeLoadInstanceForAccess(const AccessCheck& check,
+ RegPtr specific);
+ [[nodiscard]] bool emitLoad(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitStore(ValType resultType, Scalar::Type viewType);
+ [[nodiscard]] bool emitSelect(bool typed);
+
+ template <bool isSetLocal>
+ [[nodiscard]] bool emitSetOrTeeLocal(uint32_t slot);
+
+ [[nodiscard]] bool endBlock(ResultType type);
+ [[nodiscard]] bool endIfThen(ResultType type);
+ [[nodiscard]] bool endIfThenElse(ResultType type);
+ [[nodiscard]] bool endTryCatch(ResultType type);
+
+ void doReturn(ContinuationKind kind);
+ void pushReturnValueOfCall(const FunctionCall& call, MIRType type);
+
+ [[nodiscard]] bool pushStackResultsForCall(const ResultType& type,
+ RegPtr temp, StackResultsLoc* loc);
+ void popStackResultsAfterCall(const StackResultsLoc& results,
+ uint32_t stackArgBytes);
+
+ void emitCompareI32(Assembler::Condition compareOp, ValType compareType);
+ void emitCompareI64(Assembler::Condition compareOp, ValType compareType);
+ void emitCompareF32(Assembler::DoubleCondition compareOp,
+ ValType compareType);
+ void emitCompareF64(Assembler::DoubleCondition compareOp,
+ ValType compareType);
+ void emitCompareRef(Assembler::Condition compareOp, ValType compareType);
+
+ template <typename CompilerType>
+ inline CompilerType& selectCompiler();
+
+ template <typename SourceType, typename DestType>
+ inline void emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
+ DestType rd));
+
+ template <typename SourceType, typename DestType, typename TempType>
+ inline void emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
+ DestType rd, TempType temp));
+
+ template <typename SourceType, typename DestType, typename ImmType>
+ inline void emitUnop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
+ SourceType, DestType));
+
+ template <typename CompilerType, typename RegType>
+ inline void emitUnop(void (*op)(CompilerType& compiler, RegType rsd));
+
+ template <typename RegType, typename TempType>
+ inline void emitUnop(void (*op)(BaseCompiler& bc, RegType rsd, TempType rt),
+ TempType (*getSpecializedTemp)(BaseCompiler& bc));
+
+ template <typename CompilerType, typename RhsType, typename LhsDestType>
+ inline void emitBinop(void (*op)(CompilerType& masm, RhsType src,
+ LhsDestType srcDest));
+
+ template <typename RhsDestType, typename LhsType>
+ inline void emitBinop(void (*op)(MacroAssembler& masm, RhsDestType src,
+ LhsType srcDest, RhsDestOp));
+
+ template <typename RhsType, typename LhsDestType, typename TempType>
+ inline void emitBinop(void (*)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType temp));
+
+ template <typename RhsType, typename LhsDestType, typename TempType1,
+ typename TempType2>
+ inline void emitBinop(void (*)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType1 temp1,
+ TempType2 temp2));
+
+ template <typename RhsType, typename LhsDestType, typename ImmType>
+ inline void emitBinop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
+ RhsType, LhsDestType));
+
+ template <typename RhsType, typename LhsDestType, typename ImmType,
+ typename TempType1, typename TempType2>
+ inline void emitBinop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType, RhsType,
+ LhsDestType, TempType1 temp1,
+ TempType2 temp2));
+
+ template <typename CompilerType1, typename CompilerType2, typename RegType,
+ typename ImmType>
+ inline void emitBinop(void (*op)(CompilerType1& compiler1, RegType rs,
+ RegType rd),
+ void (*opConst)(CompilerType2& compiler2, ImmType c,
+ RegType rd),
+ RegType (BaseCompiler::*rhsPopper)() = nullptr);
+
+ template <typename CompilerType, typename ValType>
+ inline void emitTernary(void (*op)(CompilerType&, ValType src0, ValType src1,
+ ValType srcDest));
+
+ template <typename CompilerType, typename ValType>
+ inline void emitTernary(void (*op)(CompilerType&, ValType src0, ValType src1,
+ ValType srcDest, ValType temp));
+
+ template <typename CompilerType, typename ValType>
+ inline void emitTernaryResultLast(void (*op)(CompilerType&, ValType src0,
+ ValType src1, ValType srcDest));
+
+ template <typename R>
+ [[nodiscard]] inline bool emitInstanceCallOp(
+ const SymbolicAddressSignature& fn, R reader);
+
+ template <typename A1, typename R>
+ [[nodiscard]] inline bool emitInstanceCallOp(
+ const SymbolicAddressSignature& fn, R reader);
+
+ template <typename A1, typename A2, typename R>
+ [[nodiscard]] inline bool emitInstanceCallOp(
+ const SymbolicAddressSignature& fn, R reader);
+
+ void emitMultiplyI64();
+ void emitQuotientI32();
+ void emitQuotientU32();
+ void emitRemainderI32();
+ void emitRemainderU32();
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ [[nodiscard]] bool emitDivOrModI64BuiltinCall(SymbolicAddress callee,
+ ValType operandType);
+#else
+ void emitQuotientI64();
+ void emitQuotientU64();
+ void emitRemainderI64();
+ void emitRemainderU64();
+#endif
+ void emitRotrI64();
+ void emitRotlI64();
+ void emitEqzI32();
+ void emitEqzI64();
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF32ToI32();
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF64ToI32();
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ [[nodiscard]] bool emitConvertFloatingToInt64Callout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType);
+#else
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF32ToI64();
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF64ToI64();
+#endif
+ void emitExtendI64_8();
+ void emitExtendI64_16();
+ void emitExtendI64_32();
+ void emitExtendI32ToI64();
+ void emitExtendU32ToI64();
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ [[nodiscard]] bool emitConvertInt64ToFloatingCallout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType);
+#else
+ void emitConvertU64ToF32();
+ void emitConvertU64ToF64();
+#endif
+ void emitRound(RoundingMode roundingMode, ValType operandType);
+
+ // Generate a call to the instance function denoted by `builtin`, passing as
+ // args the top elements of the compiler's value stack and optionally an
+ // Instance* too. The relationship between the top of stack and arg
+ // ordering is as follows. If the value stack looks like this:
+ //
+ // A <- least recently pushed
+ // B
+ // C <- most recently pushed
+ //
+ // then the called function is expected to have signature [if an Instance*
+ // is also to be passed]:
+ //
+ // static Instance::foo(Instance*, A, B, C)
+ //
+ // and the SymbolicAddressSignature::argTypes array will be
+ //
+ // {_PTR, _A, _B, _C, _END} // _PTR is for the Instance*
+ //
+ // (see WasmBuiltins.cpp). In short, the most recently pushed value is the
+ // rightmost argument to the function.
+ [[nodiscard]] bool emitInstanceCall(const SymbolicAddressSignature& builtin);
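+
+ // For example (illustrative only, with a hypothetical signature name): to
+ // call a builtin taking (Instance*, i32, i32), push the arguments in order
+ // and then emit the call:
+ //
+ //   pushI32(firstArg);   // pushed first, so it becomes the leftmost arg
+ //   pushI32(secondArg);  // most recently pushed, so the rightmost arg
+ //   if (!emitInstanceCall(SASigSomeBuiltin)) {
+ //     return false;
+ //   }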
+
+ [[nodiscard]] bool emitMemoryGrow();
+ [[nodiscard]] bool emitMemorySize();
+
+ [[nodiscard]] bool emitRefFunc();
+ [[nodiscard]] bool emitRefNull();
+ [[nodiscard]] bool emitRefIsNull();
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ [[nodiscard]] bool emitRefAsNonNull();
+ [[nodiscard]] bool emitBrOnNull();
+ [[nodiscard]] bool emitBrOnNonNull();
+ [[nodiscard]] bool emitCallRef();
+#endif
+
+ [[nodiscard]] bool emitAtomicCmpXchg(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitAtomicLoad(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitAtomicRMW(ValType type, Scalar::Type viewType,
+ AtomicOp op);
+ [[nodiscard]] bool emitAtomicStore(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitWait(ValType type, uint32_t byteSize);
+ [[nodiscard]] bool atomicWait(ValType type, MemoryAccessDesc* access);
+ [[nodiscard]] bool emitWake();
+ [[nodiscard]] bool atomicWake(MemoryAccessDesc* access);
+ [[nodiscard]] bool emitFence();
+ [[nodiscard]] bool emitAtomicXchg(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitMemInit();
+ [[nodiscard]] bool emitMemCopy();
+ [[nodiscard]] bool memCopyCall();
+ void memCopyInlineM32();
+ [[nodiscard]] bool emitTableCopy();
+ [[nodiscard]] bool emitDataOrElemDrop(bool isData);
+ [[nodiscard]] bool emitMemFill();
+ [[nodiscard]] bool memFillCall();
+ void memFillInlineM32();
+ [[nodiscard]] bool emitTableInit();
+ [[nodiscard]] bool emitTableFill();
+ [[nodiscard]] bool emitMemDiscard();
+ [[nodiscard]] bool emitTableGet();
+ [[nodiscard]] bool emitTableGrow();
+ [[nodiscard]] bool emitTableSet();
+ [[nodiscard]] bool emitTableSize();
+
+ void emitTableBoundsCheck(uint32_t tableIndex, RegI32 index, RegPtr instance);
+ [[nodiscard]] bool emitTableGetAnyRef(uint32_t tableIndex);
+ [[nodiscard]] bool emitTableSetAnyRef(uint32_t tableIndex);
+
+#ifdef ENABLE_WASM_GC
+ [[nodiscard]] bool emitStructNew();
+ [[nodiscard]] bool emitStructNewDefault();
+ [[nodiscard]] bool emitStructGet(FieldWideningOp wideningOp);
+ [[nodiscard]] bool emitStructSet();
+ [[nodiscard]] bool emitArrayNew();
+ [[nodiscard]] bool emitArrayNewFixed();
+ [[nodiscard]] bool emitArrayNewDefault();
+ [[nodiscard]] bool emitArrayNewData();
+ [[nodiscard]] bool emitArrayNewElem();
+ [[nodiscard]] bool emitArrayGet(FieldWideningOp wideningOp);
+ [[nodiscard]] bool emitArraySet();
+ [[nodiscard]] bool emitArrayLen(bool decodeIgnoredTypeIndex);
+ [[nodiscard]] bool emitArrayCopy();
+ [[nodiscard]] bool emitRefTestV5();
+ [[nodiscard]] bool emitRefCastV5();
+ [[nodiscard]] bool emitBrOnCastV5(bool onSuccess);
+ [[nodiscard]] bool emitBrOnCastHeapV5(bool onSuccess, bool nullable);
+ [[nodiscard]] bool emitRefAsStructV5();
+ [[nodiscard]] bool emitBrOnNonStructV5();
+ [[nodiscard]] bool emitRefTest(bool nullable);
+ [[nodiscard]] bool emitRefCast(bool nullable);
+ [[nodiscard]] bool emitBrOnCastCommon(bool onSuccess,
+ uint32_t labelRelativeDepth,
+ const ResultType& labelType,
+ RefType sourceType, RefType destType);
+ [[nodiscard]] bool emitBrOnCast();
+ [[nodiscard]] bool emitExternInternalize();
+ [[nodiscard]] bool emitExternExternalize();
+
+ // Utility classes/methods to add trap information related to
+ // null pointer dereferences/accesses.
+ struct NoNullCheck {
+ static void emitNullCheck(BaseCompiler*, RegRef) {}
+ static void emitTrapSite(BaseCompiler*) {}
+ };
+ struct SignalNullCheck {
+ static void emitNullCheck(BaseCompiler* bc, RegRef rp);
+ static void emitTrapSite(BaseCompiler* bc);
+ };
+
+ // Load a pointer to the TypeDefInstanceData for a given type index
+ RegPtr loadTypeDefInstanceData(uint32_t typeIndex);
+ // Load a pointer to the SuperTypeVector for a given type index
+ RegPtr loadSuperTypeVector(uint32_t typeIndex);
+
+ RegPtr emitGcArrayGetData(RegRef rp);
+ template <typename NullCheckPolicy>
+ RegI32 emitGcArrayGetNumElements(RegRef rp);
+ void emitGcArrayBoundsCheck(RegI32 index, RegI32 numElements);
+ template <typename T, typename NullCheckPolicy>
+ void emitGcGet(FieldType type, FieldWideningOp wideningOp, const T& src);
+ template <typename T, typename NullCheckPolicy>
+ void emitGcSetScalar(const T& dst, FieldType type, AnyReg value);
+
+ // Common code for both old and new ref.test instructions.
+ void emitRefTestCommon(RefType sourceType, RefType destType);
+ // Common code for both old and new ref.cast instructions.
+ void emitRefCastCommon(RefType sourceType, RefType destType);
+
+ // Allocate registers and branch if the given object is a subtype of the given
+ // heap type.
+ void branchGcRefType(RegRef object, RefType sourceType, RefType destType,
+ Label* label, bool onSuccess);
+
+ // Write `value` to wasm struct `object`, at `areaBase + areaOffset`. The
+ // caller must decide on the in- vs out-of-lineness before the call and set
+ // the latter two accordingly; this routine does not take that into account.
+ // The value in `object` is unmodified, but `areaBase` and `value` may get
+ // trashed.
+ template <typename NullCheckPolicy>
+ [[nodiscard]] bool emitGcStructSet(RegRef object, RegPtr areaBase,
+ uint32_t areaOffset, FieldType fieldType,
+ AnyReg value,
+ PreBarrierKind preBarrierKind);
+
+ [[nodiscard]] bool emitGcArraySet(RegRef object, RegPtr data, RegI32 index,
+ const ArrayType& array, AnyReg value,
+ PreBarrierKind preBarrierKind);
+#endif // ENABLE_WASM_GC
+
+#ifdef ENABLE_WASM_SIMD
+ void emitVectorAndNot();
+# ifdef ENABLE_WASM_RELAXED_SIMD
+ void emitDotI8x16I7x16AddS();
+# endif
+
+ void loadSplat(MemoryAccessDesc* access);
+ void loadZero(MemoryAccessDesc* access);
+ void loadExtend(MemoryAccessDesc* access, Scalar::Type viewType);
+ void loadLane(MemoryAccessDesc* access, uint32_t laneIndex);
+ void storeLane(MemoryAccessDesc* access, uint32_t laneIndex);
+
+ [[nodiscard]] bool emitLoadSplat(Scalar::Type viewType);
+ [[nodiscard]] bool emitLoadZero(Scalar::Type viewType);
+ [[nodiscard]] bool emitLoadExtend(Scalar::Type viewType);
+ [[nodiscard]] bool emitLoadLane(uint32_t laneSize);
+ [[nodiscard]] bool emitStoreLane(uint32_t laneSize);
+ [[nodiscard]] bool emitVectorShuffle();
+ [[nodiscard]] bool emitVectorLaneSelect();
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ [[nodiscard]] bool emitVectorShiftRightI64x2();
+# endif
+#endif
+ [[nodiscard]] bool emitIntrinsic();
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_object_h
diff --git a/js/src/wasm/WasmBCCodegen-inl.h b/js/src/wasm/WasmBCCodegen-inl.h
new file mode 100644
index 0000000000..03d5b9f078
--- /dev/null
+++ b/js/src/wasm/WasmBCCodegen-inl.h
@@ -0,0 +1,524 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: inline methods in the
+// compiler for basic code generation.
+
+#ifndef wasm_wasm_baseline_codegen_inl_h
+#define wasm_wasm_baseline_codegen_inl_h
+
+// The templates for register management must be defined by the time we use the
+// templated emitters, below.
+#include "wasm/WasmBCRegMgmt-inl.h"
+
+namespace js {
+namespace wasm {
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Register-to-register moves.
+
+void BaseCompiler::moveI32(RegI32 src, RegI32 dest) {
+ if (src != dest) {
+ masm.move32(src, dest);
+ }
+}
+
+void BaseCompiler::moveI64(RegI64 src, RegI64 dest) {
+ if (src != dest) {
+ masm.move64(src, dest);
+ }
+}
+
+void BaseCompiler::moveRef(RegRef src, RegRef dest) {
+ if (src != dest) {
+ masm.movePtr(src, dest);
+ }
+}
+
+void BaseCompiler::movePtr(RegPtr src, RegPtr dest) {
+ if (src != dest) {
+ masm.movePtr(src, dest);
+ }
+}
+
+void BaseCompiler::moveF64(RegF64 src, RegF64 dest) {
+ if (src != dest) {
+ masm.moveDouble(src, dest);
+ }
+}
+
+void BaseCompiler::moveF32(RegF32 src, RegF32 dest) {
+ if (src != dest) {
+ masm.moveFloat32(src, dest);
+ }
+}
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::moveV128(RegV128 src, RegV128 dest) {
+ if (src != dest) {
+ masm.moveSimd128(src, dest);
+ }
+}
+#endif
+
+template <>
+inline void BaseCompiler::move<RegI32>(RegI32 src, RegI32 dest) {
+ moveI32(src, dest);
+}
+
+template <>
+inline void BaseCompiler::move<RegI64>(RegI64 src, RegI64 dest) {
+ moveI64(src, dest);
+}
+
+template <>
+inline void BaseCompiler::move<RegF32>(RegF32 src, RegF32 dest) {
+ moveF32(src, dest);
+}
+
+template <>
+inline void BaseCompiler::move<RegF64>(RegF64 src, RegF64 dest) {
+ moveF64(src, dest);
+}
+
+template <>
+inline void BaseCompiler::move<RegRef>(RegRef src, RegRef dest) {
+ moveRef(src, dest);
+}
+
+template <>
+inline void BaseCompiler::move<RegPtr>(RegPtr src, RegPtr dest) {
+ movePtr(src, dest);
+}
+
+#ifdef ENABLE_WASM_SIMD
+template <>
+inline void BaseCompiler::move<RegV128>(RegV128 src, RegV128 dest) {
+ moveV128(src, dest);
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Constant loads.
+
+void BaseCompiler::moveImm32(int32_t v, RegI32 dest) {
+ masm.move32(Imm32(v), dest);
+}
+
+void BaseCompiler::moveImm64(int64_t v, RegI64 dest) {
+ masm.move64(Imm64(v), dest);
+}
+
+void BaseCompiler::moveImmRef(intptr_t v, RegRef dest) {
+ masm.movePtr(ImmWord(v), dest);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Calls.
+
+RegI32 BaseCompiler::captureReturnedI32() {
+ RegI32 r = RegI32(ReturnReg);
+ MOZ_ASSERT(isAvailableI32(r));
+ needI32(r);
+#if defined(JS_64BIT)
+ masm.widenInt32(r);
+#endif
+ return r;
+}
+
+RegI64 BaseCompiler::captureReturnedI64() {
+ RegI64 r = RegI64(ReturnReg64);
+ MOZ_ASSERT(isAvailableI64(r));
+ needI64(r);
+ return r;
+}
+
+RegF32 BaseCompiler::captureReturnedF32(const FunctionCall& call) {
+ RegF32 r = RegF32(ReturnFloat32Reg);
+ MOZ_ASSERT(isAvailableF32(r));
+ needF32(r);
+#if defined(JS_CODEGEN_ARM)
+ if (call.usesSystemAbi && !call.hardFP) {
+ masm.ma_vxfer(ReturnReg, r);
+ }
+#endif
+ return r;
+}
+
+RegF64 BaseCompiler::captureReturnedF64(const FunctionCall& call) {
+ RegF64 r = RegF64(ReturnDoubleReg);
+ MOZ_ASSERT(isAvailableF64(r));
+ needF64(r);
+#if defined(JS_CODEGEN_ARM)
+ if (call.usesSystemAbi && !call.hardFP) {
+ masm.ma_vxfer(ReturnReg64.low, ReturnReg64.high, r);
+ }
+#endif
+ return r;
+}
+
+#ifdef ENABLE_WASM_SIMD
+RegV128 BaseCompiler::captureReturnedV128(const FunctionCall& call) {
+ RegV128 r = RegV128(ReturnSimd128Reg);
+ MOZ_ASSERT(isAvailableV128(r));
+ needV128(r);
+ return r;
+}
+#endif
+
+RegRef BaseCompiler::captureReturnedRef() {
+ RegRef r = RegRef(ReturnReg);
+ MOZ_ASSERT(isAvailableRef(r));
+ needRef(r);
+ return r;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Miscellaneous.
+
+void BaseCompiler::trap(Trap t) const { masm.wasmTrap(t, bytecodeOffset()); }
+
+void BaseCompiler::cmp64Set(Assembler::Condition cond, RegI64 lhs, RegI64 rhs,
+ RegI32 dest) {
+#if defined(JS_PUNBOX64)
+ masm.cmpPtrSet(cond, lhs.reg, rhs.reg, dest);
+#else
+ // TODO / OPTIMIZE (Bug 1316822): This is pretty branchy; we should be
+ // able to do better.
+ Label done, condTrue;
+ masm.branch64(cond, lhs, rhs, &condTrue);
+ moveImm32(0, dest);
+ masm.jump(&done);
+ masm.bind(&condTrue);
+ moveImm32(1, dest);
+ masm.bind(&done);
+#endif
+}
+
+[[nodiscard]] bool BaseCompiler::supportsRoundInstruction(RoundingMode mode) {
+ return Assembler::HasRoundInstruction(mode);
+}
+
+void BaseCompiler::roundF32(RoundingMode roundingMode, RegF32 f0) {
+ masm.nearbyIntFloat32(roundingMode, f0, f0);
+}
+
+void BaseCompiler::roundF64(RoundingMode roundingMode, RegF64 f0) {
+ masm.nearbyIntDouble(roundingMode, f0, f0);
+}
+
+void BaseCompiler::branchTo(Assembler::DoubleCondition c, RegF64 lhs,
+ RegF64 rhs, Label* l) {
+ masm.branchDouble(c, lhs, rhs, l);
+}
+
+void BaseCompiler::branchTo(Assembler::DoubleCondition c, RegF32 lhs,
+ RegF32 rhs, Label* l) {
+ masm.branchFloat(c, lhs, rhs, l);
+}
+
+void BaseCompiler::branchTo(Assembler::Condition c, RegI32 lhs, RegI32 rhs,
+ Label* l) {
+ masm.branch32(c, lhs, rhs, l);
+}
+
+void BaseCompiler::branchTo(Assembler::Condition c, RegI32 lhs, Imm32 rhs,
+ Label* l) {
+ masm.branch32(c, lhs, rhs, l);
+}
+
+void BaseCompiler::branchTo(Assembler::Condition c, RegI64 lhs, RegI64 rhs,
+ Label* l) {
+ masm.branch64(c, lhs, rhs, l);
+}
+
+void BaseCompiler::branchTo(Assembler::Condition c, RegI64 lhs, Imm64 rhs,
+ Label* l) {
+ masm.branch64(c, lhs, rhs, l);
+}
+
+void BaseCompiler::branchTo(Assembler::Condition c, RegRef lhs, ImmWord rhs,
+ Label* l) {
+ masm.branchPtr(c, lhs, rhs, l);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Templated emitters
+
+template <>
+inline BaseCompiler& BaseCompiler::selectCompiler<BaseCompiler>() {
+ return *this;
+}
+
+template <>
+inline MacroAssembler& BaseCompiler::selectCompiler<MacroAssembler>() {
+ return masm;
+}
+
+template <typename SourceType, typename DestType>
+void BaseCompiler::emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
+ DestType rd)) {
+ SourceType rs = pop<SourceType>();
+ DestType rd = need<DestType>();
+ op(masm, rs, rd);
+ free(rs);
+ push(rd);
+}
+
+// Specialize narrowing reuse. Consumers may assume that rs.reg==rd on 64-bit
+// platforms, or rs.low==rd on 32-bit platforms.
+template <>
+inline void BaseCompiler::emitUnop(void (*op)(MacroAssembler& masm, RegI64 rs,
+ RegI32 rd)) {
+ RegI64 rs = pop<RegI64>();
+ RegI32 rd = fromI64(rs);
+ op(masm, rs, rd);
+ freeI64Except(rs, rd);
+ push(rd);
+}
+
+template <typename CompilerType, typename RegType>
+void BaseCompiler::emitUnop(void (*op)(CompilerType& compiler, RegType rsd)) {
+ RegType rsd = pop<RegType>();
+ op(selectCompiler<CompilerType>(), rsd);
+ push(rsd);
+}
+
+template <typename RegType, typename TempType>
+void BaseCompiler::emitUnop(void (*op)(BaseCompiler& bc, RegType rsd,
+ TempType rt),
+ TempType (*getSpecializedTemp)(BaseCompiler& bc)) {
+ RegType rsd = pop<RegType>();
+ TempType temp = getSpecializedTemp(*this);
+ op(*this, rsd, temp);
+ maybeFree(temp);
+ push(rsd);
+}
+
+template <typename SourceType, typename DestType, typename TempType>
+void BaseCompiler::emitUnop(void (*op)(MacroAssembler& masm, SourceType rs,
+ DestType rd, TempType temp)) {
+ SourceType rs = pop<SourceType>();
+ DestType rd = need<DestType>();
+ TempType temp = need<TempType>();
+ op(masm, rs, rd, temp);
+ free(rs);
+ free(temp);
+ push(rd);
+}
+
+template <typename SourceType, typename DestType, typename ImmType>
+void BaseCompiler::emitUnop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType, SourceType,
+ DestType)) {
+ SourceType rs = pop<SourceType>();
+ DestType rd = need<DestType>();
+ op(masm, immediate, rs, rd);
+ free(rs);
+ push(rd);
+}
+
+template <typename CompilerType, typename RhsType, typename LhsDestType>
+void BaseCompiler::emitBinop(void (*op)(CompilerType& masm, RhsType src,
+ LhsDestType srcDest)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ op(selectCompiler<CompilerType>(), rs, rsd);
+ free(rs);
+ push(rsd);
+}
+
+template <typename CompilerType, typename ValType>
+void BaseCompiler::emitTernary(void (*op)(CompilerType&, ValType src0,
+ ValType src1, ValType srcDest)) {
+ ValType src2 = pop<ValType>();
+ ValType src1 = pop<ValType>();
+ ValType srcDest = pop<ValType>();
+ op(selectCompiler<CompilerType>(), src1, src2, srcDest);
+ free(src2);
+ free(src1);
+ push(srcDest);
+}
+
+template <typename CompilerType, typename ValType>
+void BaseCompiler::emitTernary(void (*op)(CompilerType&, ValType src0,
+ ValType src1, ValType srcDest,
+ ValType temp)) {
+ ValType src2 = pop<ValType>();
+ ValType src1 = pop<ValType>();
+ ValType srcDest = pop<ValType>();
+ ValType temp = need<ValType>();
+ op(selectCompiler<CompilerType>(), src1, src2, srcDest, temp);
+ free(temp);
+ free(src2);
+ free(src1);
+ push(srcDest);
+}
+
+template <typename CompilerType, typename ValType>
+void BaseCompiler::emitTernaryResultLast(void (*op)(CompilerType&, ValType src0,
+ ValType src1,
+ ValType srcDest)) {
+ ValType srcDest = pop<ValType>();
+ ValType src2 = pop<ValType>();
+ ValType src1 = pop<ValType>();
+ op(selectCompiler<CompilerType>(), src1, src2, srcDest);
+ free(src2);
+ free(src1);
+ push(srcDest);
+}
+
+template <typename RhsDestType, typename LhsType>
+void BaseCompiler::emitBinop(void (*op)(MacroAssembler& masm, RhsDestType src,
+ LhsType srcDest, RhsDestOp)) {
+ RhsDestType rsd = pop<RhsDestType>();
+ LhsType rs = pop<LhsType>();
+ op(masm, rsd, rs, RhsDestOp::True);
+ free(rs);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename TempType>
+void BaseCompiler::emitBinop(void (*op)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType temp)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ TempType temp = need<TempType>();
+ op(masm, rs, rsd, temp);
+ free(rs);
+ free(temp);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename TempType1,
+ typename TempType2>
+void BaseCompiler::emitBinop(void (*op)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType1 temp1,
+ TempType2 temp2)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ TempType1 temp1 = need<TempType1>();
+ TempType2 temp2 = need<TempType2>();
+ op(masm, rs, rsd, temp1, temp2);
+ free(rs);
+ free(temp1);
+ free(temp2);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename ImmType>
+void BaseCompiler::emitBinop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType, RhsType,
+ LhsDestType)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ op(masm, immediate, rs, rsd);
+ free(rs);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename ImmType,
+ typename TempType1, typename TempType2>
+void BaseCompiler::emitBinop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType, RhsType,
+ LhsDestType, TempType1 temp1,
+ TempType2 temp2)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ TempType1 temp1 = need<TempType1>();
+ TempType2 temp2 = need<TempType2>();
+ op(masm, immediate, rs, rsd, temp1, temp2);
+ free(rs);
+ free(temp1);
+ free(temp2);
+ push(rsd);
+}
+
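+// Binop with an immediate-operand fast path: if the right-hand operand on the
+// value stack is a constant, pop it as an immediate and emit via |opConst|;
+// otherwise pop it into a register (optionally via |rhsPopper|, which lets a
+// caller force the operand into a platform-specific register, e.g. a shift
+// count) and emit via |op|.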
+template <typename CompilerType1, typename CompilerType2, typename RegType,
+ typename ImmType>
+void BaseCompiler::emitBinop(void (*op)(CompilerType1& compiler, RegType rs,
+ RegType rsd),
+ void (*opConst)(CompilerType2& compiler, ImmType c,
+ RegType rsd),
+ RegType (BaseCompiler::*rhsPopper)()) {
+ ImmType c;
+ if (popConst(&c)) {
+ RegType rsd = pop<RegType>();
+ opConst(selectCompiler<CompilerType2>(), c, rsd);
+ push(rsd);
+ } else {
+ RegType rs = rhsPopper ? (this->*rhsPopper)() : pop<RegType>();
+ RegType rsd = pop<RegType>();
+ op(selectCompiler<CompilerType1>(), rs, rsd);
+ free(rs);
+ push(rsd);
+ }
+}
+
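+// Emit an instance call for an opcode whose immediates are read by |reader|
+// and then pushed, in the same order, as the trailing arguments of the
+// instance call.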
+template <typename R>
+bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
+ R reader) {
+ if (!reader()) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ return emitInstanceCall(fn);
+}
+
+template <typename A1, typename R>
+bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
+ R reader) {
+ A1 arg = 0;
+ if (!reader(&arg)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ push(arg);
+ return emitInstanceCall(fn);
+}
+
+template <typename A1, typename A2, typename R>
+bool BaseCompiler::emitInstanceCallOp(const SymbolicAddressSignature& fn,
+ R reader) {
+ A1 arg1 = 0;
+ A2 arg2 = 0;
+ if (!reader(&arg1, &arg2)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ // Note order of arguments must be the same as for the reader.
+ push(arg1);
+ push(arg2);
+ return emitInstanceCall(fn);
+}
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_codegen_inl_h
diff --git a/js/src/wasm/WasmBCDefs.h b/js/src/wasm/WasmBCDefs.h
new file mode 100644
index 0000000000..d3bd547460
--- /dev/null
+++ b/js/src/wasm/WasmBCDefs.h
@@ -0,0 +1,201 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: common configuration
+// and simple definitions; all include directives.
+
+#ifndef wasm_wasm_baseline_defs_h
+#define wasm_wasm_baseline_defs_h
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/ScopeExit.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "jit/AtomicOp.h"
+#include "jit/IonTypes.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Label.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#if defined(JS_CODEGEN_ARM)
+# include "jit/arm/Assembler-arm.h"
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+# include "jit/x86-shared/Architecture-x86-shared.h"
+# include "jit/x86-shared/Assembler-x86-shared.h"
+#endif
+#if defined(JS_CODEGEN_MIPS64)
+# include "jit/mips-shared/Assembler-mips-shared.h"
+# include "jit/mips64/Assembler-mips64.h"
+#endif
+#if defined(JS_CODEGEN_LOONG64)
+# include "jit/loong64/Assembler-loong64.h"
+#endif
+#if defined(JS_CODEGEN_RISCV64)
+# include "jit/riscv64/Assembler-riscv64.h"
+#endif
+#include "js/ScalarType.h"
+#include "util/Memory.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmDebugFrame.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+namespace js {
+namespace wasm {
+
+using HandleNaNSpecially = bool;
+using InvertBranch = bool;
+using IsKnownNotZero = bool;
+using IsUnsigned = bool;
+using IsRemainder = bool;
+using NeedsBoundsCheck = bool;
+using WantResult = bool;
+using ZeroOnOverflow = bool;
+
+class BaseStackFrame;
+
+// Two flags, useABI and restoreRegisterStateAndRealm, control how calls are
+// made.
+//
+// UseABI::Wasm implies that the Instance/Heap/Global registers are nonvolatile,
+// except when RestoreRegisterStateAndRealm::True is also set, when they are
+// volatile.
+//
+// UseABI::Builtin implies that the Instance/Heap/Global registers are volatile.
+// In this case, we require RestoreRegisterStateAndRealm::False. The calling
+// convention is otherwise like UseABI::Wasm.
+//
+// UseABI::System implies that the Instance/Heap/Global registers are volatile.
+// Additionally, the parameter passing mechanism may be slightly different from
+// the UseABI::Wasm convention.
+//
+// When the Instance/Heap/Global registers are not volatile, the baseline
+// compiler will restore the Instance register from its save slot before the
+// call, since the baseline compiler uses the Instance register for other
+// things.
+//
+// When those registers are volatile, the baseline compiler will reload them
+// after the call (it will restore the Instance register from the save slot and
+// load the other two from the Instance data).
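+//
+// For example (illustrative, not exhaustive): a plain intra-module call can
+// use UseABI::Wasm with RestoreRegisterStateAndRealm::False, whereas a call
+// that may cross an instance or realm boundary, such as a call to an import,
+// would use RestoreRegisterStateAndRealm::True so that the register state and
+// realm are restored after the call.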
+
+enum class UseABI { Wasm, Builtin, System };
+enum class RestoreRegisterStateAndRealm { False = false, True = true };
+enum class RhsDestOp { True = true };
+
+// Compiler configuration.
+//
+// The following internal configuration #defines are used. The configuration is
+// partly below in this file, partly in WasmBCRegDefs.h.
+//
+// RABALDR_PIN_INSTANCE
+// InstanceReg is not allocatable and always holds the current Instance*,
+// except in known contexts where it could have been clobbered, such as after
+// certain calls.
+//
+// RABALDR_ZERO_EXTENDS
+// The canonical representation of a 32-bit value in a 64-bit register is
+// zero-extended. For 64-bit platforms only. See comment block "64-bit GPRs
+// carrying 32-bit values" in MacroAssembler.h.
+//
+// RABALDR_CHUNKY_STACK
+// The platform must allocate the CPU stack in chunks and not word-at-a-time
+// due to SP alignment requirements (ARM64 for now).
+//
+// RABALDR_INT_DIV_I64_CALLOUT
+// The platform calls out to the runtime to divide i64/u64.
+//
+// RABALDR_I64_TO_FLOAT_CALLOUT
+// The platform calls out to the runtime for i64 -> fXX conversions.
+//
+// RABALDR_FLOAT_TO_I64_CALLOUT
+// The platform calls out to the runtime for fXX -> i64 conversions.
+//
+// RABALDR_SCRATCH_<TypeName>
+//   The baseline compiler has its own scratch registers for the given type;
+//   it does not use the MacroAssembler's scratch. This is really an
+//   anachronism: the baseline compiler should never use the MacroAssembler's
+//   scratches.
+//
+// RABALDR_SCRATCH_F32_ALIASES_F64
+// On a platform where the baseline compiler has its own F32 and F64
+// scratches, these are the same register.
+
+#ifdef JS_CODEGEN_X64
+# define RABALDR_ZERO_EXTENDS
+# define RABALDR_PIN_INSTANCE
+#endif
+
+#ifdef JS_CODEGEN_ARM64
+# define RABALDR_CHUNKY_STACK
+# define RABALDR_ZERO_EXTENDS
+# define RABALDR_PIN_INSTANCE
+#endif
+
+#ifdef JS_CODEGEN_X86
+# define RABALDR_INT_DIV_I64_CALLOUT
+#endif
+
+#ifdef JS_CODEGEN_ARM
+# define RABALDR_INT_DIV_I64_CALLOUT
+# define RABALDR_I64_TO_FLOAT_CALLOUT
+# define RABALDR_FLOAT_TO_I64_CALLOUT
+#endif
+
+#ifdef JS_CODEGEN_MIPS64
+# define RABALDR_PIN_INSTANCE
+#endif
+
+#ifdef JS_CODEGEN_LOONG64
+# define RABALDR_PIN_INSTANCE
+#endif
+
+#ifdef JS_CODEGEN_RISCV64
+# define RABALDR_PIN_INSTANCE
+#endif
+
+// Max number of pushes onto the value stack for any opcode or emitter that
+// does not push a variable, unbounded number of values (anything with
+// multiple results). This also includes intermediate pushes, such as values
+// pushed as parameters for builtin calls.
+//
+// This limit is set quite high on purpose, so as to avoid brittleness. The
+// true max value is likely no more than four or five.
+
+static constexpr size_t MaxPushesPerOpcode = 10;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_defs_h
diff --git a/js/src/wasm/WasmBCFrame.cpp b/js/src/wasm/WasmBCFrame.cpp
new file mode 100644
index 0000000000..454f732823
--- /dev/null
+++ b/js/src/wasm/WasmBCFrame.cpp
@@ -0,0 +1,544 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBCFrame.h"
+
+#include "wasm/WasmBaselineCompile.h" // For BaseLocalIter
+#include "wasm/WasmBCClass.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "wasm/WasmBCClass-inl.h"
+#include "wasm/WasmBCCodegen-inl.h"
+#include "wasm/WasmBCRegDefs-inl.h"
+#include "wasm/WasmBCRegMgmt-inl.h"
+#include "wasm/WasmBCStkMgmt-inl.h"
+
+namespace js {
+namespace wasm {
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// BaseLocalIter methods.
+
+BaseLocalIter::BaseLocalIter(const ValTypeVector& locals,
+ const ArgTypeVector& args, bool debugEnabled)
+ : locals_(locals),
+ args_(args),
+ argsIter_(args_),
+ index_(0),
+ frameSize_(0),
+ nextFrameSize_(debugEnabled ? DebugFrame::offsetOfFrame() : 0),
+ frameOffset_(INT32_MAX),
+ stackResultPointerOffset_(INT32_MAX),
+ mirType_(MIRType::Undefined),
+ done_(false) {
+ MOZ_ASSERT(args.lengthWithoutStackResults() <= locals.length());
+ settle();
+}
+
+int32_t BaseLocalIter::pushLocal(size_t nbytes) {
+ MOZ_ASSERT(nbytes % 4 == 0 && nbytes <= 16);
+ nextFrameSize_ = AlignBytes(frameSize_, nbytes) + nbytes;
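+  // For example, with frameSize_ == 12 and nbytes == 8: AlignBytes(12, 8) is
+  // 16, so nextFrameSize_ becomes 24, and 24 is the new local's base offset
+  // from Frame.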
+ return nextFrameSize_; // Locals grow down so capture base address.
+}
+
+void BaseLocalIter::settle() {
+ MOZ_ASSERT(!done_);
+ frameSize_ = nextFrameSize_;
+
+ if (!argsIter_.done()) {
+ mirType_ = argsIter_.mirType();
+ MIRType concreteType = mirType_;
+ switch (mirType_) {
+ case MIRType::StackResults:
+ // The pointer to stack results is handled like any other argument:
+ // either addressed in place if it is passed on the stack, or we spill
+ // it in the frame if it's in a register.
+ MOZ_ASSERT(args_.isSyntheticStackResultPointerArg(index_));
+ concreteType = MIRType::Pointer;
+ [[fallthrough]];
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::RefOrNull:
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+#endif
+ if (argsIter_->argInRegister()) {
+ frameOffset_ = pushLocal(MIRTypeToSize(concreteType));
+ } else {
+ frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
+ }
+ break;
+ default:
+ MOZ_CRASH("Argument type");
+ }
+ if (mirType_ == MIRType::StackResults) {
+ stackResultPointerOffset_ = frameOffset();
+ // Advance past the synthetic stack result pointer argument and fall
+ // through to the next case.
+ argsIter_++;
+ frameSize_ = nextFrameSize_;
+ MOZ_ASSERT(argsIter_.done());
+ } else {
+ return;
+ }
+ }
+
+ if (index_ < locals_.length()) {
+ switch (locals_[index_].kind()) {
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+#endif
+ case ValType::Ref:
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the
+ // debugger must be made aware that AnyRef != Pointer.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ mirType_ = locals_[index_].toMIRType();
+ frameOffset_ = pushLocal(MIRTypeToSize(mirType_));
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Unexpected local type");
+ }
+ return;
+ }
+
+ done_ = true;
+}
+
+void BaseLocalIter::operator++(int) {
+ MOZ_ASSERT(!done_);
+ index_++;
+ if (!argsIter_.done()) {
+ argsIter_++;
+ }
+ settle();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Stack map methods.
+
+bool BaseCompiler::createStackMap(const char* who) {
+ const ExitStubMapVector noExtras;
+ return stackMapGenerator_.createStackMap(who, noExtras, masm.currentOffset(),
+ HasDebugFrameWithLiveRefs::No, stk_);
+}
+
+bool BaseCompiler::createStackMap(const char* who, CodeOffset assemblerOffset) {
+ const ExitStubMapVector noExtras;
+ return stackMapGenerator_.createStackMap(who, noExtras,
+ assemblerOffset.offset(),
+ HasDebugFrameWithLiveRefs::No, stk_);
+}
+
+bool BaseCompiler::createStackMap(
+ const char* who, HasDebugFrameWithLiveRefs debugFrameWithLiveRefs) {
+ const ExitStubMapVector noExtras;
+ return stackMapGenerator_.createStackMap(who, noExtras, masm.currentOffset(),
+ debugFrameWithLiveRefs, stk_);
+}
+
+bool BaseCompiler::createStackMap(
+ const char* who, const ExitStubMapVector& extras, uint32_t assemblerOffset,
+ HasDebugFrameWithLiveRefs debugFrameWithLiveRefs) {
+ return stackMapGenerator_.createStackMap(who, extras, assemblerOffset,
+ debugFrameWithLiveRefs, stk_);
+}
+
+bool MachineStackTracker::cloneTo(MachineStackTracker* dst) {
+ MOZ_ASSERT(dst->vec_.empty());
+ if (!dst->vec_.appendAll(vec_)) {
+ return false;
+ }
+ dst->numPtrs_ = numPtrs_;
+ return true;
+}
+
+bool StackMapGenerator::generateStackmapEntriesForTrapExit(
+ const ArgTypeVector& args, ExitStubMapVector* extras) {
+ return GenerateStackmapEntriesForTrapExit(args, trapExitLayout_,
+ trapExitLayoutNumWords_, extras);
+}
+
+bool StackMapGenerator::createStackMap(
+ const char* who, const ExitStubMapVector& extras, uint32_t assemblerOffset,
+ HasDebugFrameWithLiveRefs debugFrameWithLiveRefs, const StkVector& stk) {
+ size_t countedPointers = machineStackTracker.numPtrs() + memRefsOnStk;
+#ifndef DEBUG
+ // An important optimization. If there are obviously no pointers, as
+ // we expect in the majority of cases, exit quickly.
+ if (countedPointers == 0 &&
+ debugFrameWithLiveRefs == HasDebugFrameWithLiveRefs::No) {
+ // We can skip creating the map if there are no |true| elements in
+ // |extras|.
+ bool extrasHasRef = false;
+ for (bool b : extras) {
+ if (b) {
+ extrasHasRef = true;
+ break;
+ }
+ }
+ if (!extrasHasRef) {
+ return true;
+ }
+ }
+#else
+ // In the debug case, create the stackmap regardless, and cross-check
+ // the pointer-counting below. We expect the final map to have
+ // |countedPointers| in total. This doesn't include those in the
+ // DebugFrame, but they do not appear in the map's bitmap. Note that
+ // |countedPointers| is debug-only from this point onwards.
+ for (bool b : extras) {
+ countedPointers += (b ? 1 : 0);
+ }
+#endif
+
+ // Start with the frame-setup map, and add operand-stack information to
+ // that. augmentedMst holds live data only within individual calls to
+ // createStackMap.
+ augmentedMst.clear();
+ if (!machineStackTracker.cloneTo(&augmentedMst)) {
+ return false;
+ }
+
+ // At this point, augmentedMst only contains entries covering the
+ // incoming argument area (if any) and for the area allocated by this
+ // function's prologue. We now need to calculate how far the machine's
+ // stack pointer is below where it was at the start of the body. But we
+ // must take care not to include any words pushed as arguments to an
+ // upcoming function call, since those words "belong" to the stackmap of
+ // the callee, not to the stackmap of this function. Note however that
+ // any alignment padding pushed prior to pushing the args *does* belong to
+ // this function.
+ //
+ // That padding is taken into account at the point where
+ // framePushedExcludingOutboundCallArgs is set, viz, in startCallArgs(),
+ // and comprises two components:
+ //
+ // * call->frameAlignAdjustment
+ // * the padding applied to the stack arg area itself. That is:
+ // StackArgAreaSize(argTys) - StackArgAreaSizeUnpadded(argTys)
+ Maybe<uint32_t> framePushedExcludingArgs;
+ if (framePushedAtEntryToBody.isNothing()) {
+ // Still in the prologue. framePushedExcludingArgs remains Nothing.
+ MOZ_ASSERT(framePushedExcludingOutboundCallArgs.isNothing());
+ } else {
+ // In the body.
+ MOZ_ASSERT(masm_.framePushed() >= framePushedAtEntryToBody.value());
+ if (framePushedExcludingOutboundCallArgs.isSome()) {
+ // In the body, and we've potentially pushed some args onto the stack.
+ // We must ignore them when sizing the stackmap.
+ MOZ_ASSERT(masm_.framePushed() >=
+ framePushedExcludingOutboundCallArgs.value());
+ MOZ_ASSERT(framePushedExcludingOutboundCallArgs.value() >=
+ framePushedAtEntryToBody.value());
+ framePushedExcludingArgs =
+ Some(framePushedExcludingOutboundCallArgs.value());
+ } else {
+ // In the body, but not with call args on the stack. The stackmap
+ // must be sized so as to extend all the way "down" to
+ // masm_.framePushed().
+ framePushedExcludingArgs = Some(masm_.framePushed());
+ }
+ }
+
+ if (framePushedExcludingArgs.isSome()) {
+ uint32_t bodyPushedBytes =
+ framePushedExcludingArgs.value() - framePushedAtEntryToBody.value();
+ MOZ_ASSERT(0 == bodyPushedBytes % sizeof(void*));
+ if (!augmentedMst.pushNonGCPointers(bodyPushedBytes / sizeof(void*))) {
+ return false;
+ }
+ }
+
+ // Scan the operand stack, marking pointers in the just-added new
+ // section.
+ MOZ_ASSERT_IF(framePushedAtEntryToBody.isNothing(), stk.empty());
+ MOZ_ASSERT_IF(framePushedExcludingArgs.isNothing(), stk.empty());
+
+ for (const Stk& v : stk) {
+#ifndef DEBUG
+ // We don't track roots in registers, per rationale below, so if this
+ // doesn't hold, something is seriously wrong, and we're likely to get a
+ // GC-related crash.
+ MOZ_RELEASE_ASSERT(v.kind() != Stk::RegisterRef);
+ if (v.kind() != Stk::MemRef) {
+ continue;
+ }
+#else
+ // Take the opportunity to check everything we reasonably can about
+ // operand stack elements.
+ switch (v.kind()) {
+ case Stk::MemI32:
+ case Stk::MemI64:
+ case Stk::MemF32:
+ case Stk::MemF64:
+ case Stk::ConstI32:
+ case Stk::ConstI64:
+ case Stk::ConstF32:
+ case Stk::ConstF64:
+# ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ case Stk::ConstV128:
+# endif
+ // All of these have uninteresting type.
+ continue;
+ case Stk::LocalI32:
+ case Stk::LocalI64:
+ case Stk::LocalF32:
+ case Stk::LocalF64:
+# ifdef ENABLE_WASM_SIMD
+ case Stk::LocalV128:
+# endif
+ // These also have uninteresting type. Check that they live in the
+ // section of stack set up by beginFunction(). The unguarded use of
+ // |value()| here is safe due to the assertion above this loop.
+ MOZ_ASSERT(v.offs() <= framePushedAtEntryToBody.value());
+ continue;
+ case Stk::RegisterI32:
+ case Stk::RegisterI64:
+ case Stk::RegisterF32:
+ case Stk::RegisterF64:
+# ifdef ENABLE_WASM_SIMD
+ case Stk::RegisterV128:
+# endif
+ // These also have uninteresting type, but more to the point: all
+ // registers holding live values should have been flushed to the
+ // machine stack immediately prior to the instruction to which this
+ // stackmap pertains. So these can't happen.
+ MOZ_CRASH("createStackMap: operand stack has Register-non-Ref");
+ case Stk::MemRef:
+ // This is the only case we care about. We'll handle it after the
+ // switch.
+ break;
+ case Stk::LocalRef:
+ // We need the stackmap to mention this pointer, but it should
+ // already be in the machineStackTracker section created by
+ // beginFunction().
+ MOZ_ASSERT(v.offs() <= framePushedAtEntryToBody.value());
+ continue;
+ case Stk::ConstRef:
+ // This can currently only be a null pointer.
+ MOZ_ASSERT(v.refval() == 0);
+ continue;
+ case Stk::RegisterRef:
+ // This can't happen, per rationale above.
+ MOZ_CRASH("createStackMap: operand stack contains RegisterRef");
+ default:
+ MOZ_CRASH("createStackMap: unknown operand stack element");
+ }
+#endif
+ // v.offs() holds masm.framePushed() at the point immediately after it
+ // was pushed on the stack. Since it's still on the stack,
+ // masm.framePushed() can't be less.
+ MOZ_ASSERT(v.offs() <= framePushedExcludingArgs.value());
+ uint32_t offsFromMapLowest = framePushedExcludingArgs.value() - v.offs();
+ MOZ_ASSERT(0 == offsFromMapLowest % sizeof(void*));
+ augmentedMst.setGCPointer(offsFromMapLowest / sizeof(void*));
+ }
+
+ // Create the final StackMap. The initial map is zeroed out, so there's
+ // no need to write zero bits in it.
+ const uint32_t extraWords = extras.length();
+ const uint32_t augmentedMstWords = augmentedMst.length();
+ const uint32_t numMappedWords = extraWords + augmentedMstWords;
+ StackMap* stackMap = StackMap::create(numMappedWords);
+ if (!stackMap) {
+ return false;
+ }
+
+ {
+ // First the exit stub extra words, if any.
+ uint32_t i = 0;
+ for (bool b : extras) {
+ if (b) {
+ stackMap->setBit(i);
+ }
+ i++;
+ }
+ }
+ {
+ // Followed by the "main" part of the map.
+ //
+ // This is really just a bit-array copy, so it is reasonable to ask
+ // whether the representation of MachineStackTracker could be made more
+ // similar to that of StackMap, so that the copy could be done with
+ // `memcpy`. Unfortunately it's not so simple; see comment on `class
+ // MachineStackTracker` for details.
+ MachineStackTracker::Iter iter(augmentedMst);
+ while (true) {
+ size_t i = iter.get();
+ if (i == MachineStackTracker::Iter::FINISHED) {
+ break;
+ }
+ stackMap->setBit(extraWords + i);
+ }
+ }
+
+ stackMap->setExitStubWords(extraWords);
+
+  // Record in the map how far down from the highest address the Frame* is.
+ // Take the opportunity to check that we haven't marked any part of the
+ // Frame itself as a pointer.
+ stackMap->setFrameOffsetFromTop(numStackArgWords +
+ sizeof(Frame) / sizeof(void*));
+#ifdef DEBUG
+ for (uint32_t i = 0; i < sizeof(Frame) / sizeof(void*); i++) {
+ MOZ_ASSERT(stackMap->getBit(stackMap->header.numMappedWords -
+ stackMap->header.frameOffsetFromTop + i) == 0);
+ }
+#endif
+
+ // Note the presence of a DebugFrame with live pointers, if any.
+ if (debugFrameWithLiveRefs != HasDebugFrameWithLiveRefs::No) {
+ stackMap->setHasDebugFrameWithLiveRefs();
+ }
+
+ // Add the completed map to the running collection thereof.
+ if (!stackMaps_->add((uint8_t*)(uintptr_t)assemblerOffset, stackMap)) {
+ stackMap->destroy();
+ return false;
+ }
+
+#ifdef DEBUG
+ {
+ // Crosscheck the map pointer counting.
+ uint32_t nw = stackMap->header.numMappedWords;
+ uint32_t np = 0;
+ for (uint32_t i = 0; i < nw; i++) {
+ np += stackMap->getBit(i);
+ }
+ MOZ_ASSERT(size_t(np) == countedPointers);
+ }
+#endif
+
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Stack frame methods.
+
+void BaseStackFrame::zeroLocals(BaseRegAlloc* ra) {
+ MOZ_ASSERT(varLow_ != UINT32_MAX);
+
+ if (varLow_ == varHigh_) {
+ return;
+ }
+
+ static const uint32_t wordSize = sizeof(void*);
+
+  // The adjustments to 'low' by the size of the item being stored compensate
+ // for the fact that locals offsets are the offsets from Frame to the bytes
+ // directly "above" the locals in the locals area. See comment at Local.
+
+ // On 64-bit systems we may have 32-bit alignment for the local area as it
+ // may be preceded by parameters and prologue/debug data.
+
+ uint32_t low = varLow_;
+ if (low % wordSize) {
+ masm.store32(Imm32(0), Address(sp_, localOffset(low + 4)));
+ low += 4;
+ }
+ MOZ_ASSERT(low % wordSize == 0);
+
+ const uint32_t high = AlignBytes(varHigh_, wordSize);
+
+ // An UNROLL_LIMIT of 16 is chosen so that we only need an 8-bit signed
+ // immediate to represent the offset in the store instructions in the loop
+ // on x64.
+
+ const uint32_t UNROLL_LIMIT = 16;
+ const uint32_t initWords = (high - low) / wordSize;
+ const uint32_t tailWords = initWords % UNROLL_LIMIT;
+ const uint32_t loopHigh = high - (tailWords * wordSize);
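+  // For example, with wordSize == 8, low == 0, and high == 320: initWords is
+  // 40, tailWords is 8, and loopHigh is 256, so the unrolled loop below runs
+  // twice (storing 2 * 16 words) and the tail stores the remaining 8 words.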
+
+ // With only one word to initialize, just store an immediate zero.
+
+ if (initWords == 1) {
+ masm.storePtr(ImmWord(0), Address(sp_, localOffset(low + wordSize)));
+ return;
+ }
+
+ // For other cases, it's best to have a zero in a register.
+ //
+ // One can do more here with SIMD registers (store 16 bytes at a time) or
+ // with instructions like STRD on ARM (store 8 bytes at a time), but that's
+ // for another day.
+
+ RegI32 zero = ra->needI32();
+ masm.mov(ImmWord(0), zero);
+
+ // For the general case we want to have a loop body of UNROLL_LIMIT stores
+ // and then a tail of less than UNROLL_LIMIT stores. When initWords is less
+ // than 2*UNROLL_LIMIT the loop trip count is at most 1 and there is no
+ // benefit to having the pointer calculations and the compare-and-branch.
+ // So we completely unroll when we have initWords < 2 * UNROLL_LIMIT. (In
+ // this case we'll end up using 32-bit offsets on x64 for up to half of the
+ // stores, though.)
+
+ // Fully-unrolled case.
+
+ if (initWords < 2 * UNROLL_LIMIT) {
+ for (uint32_t i = low; i < high; i += wordSize) {
+ masm.storePtr(zero, Address(sp_, localOffset(i + wordSize)));
+ }
+ ra->freeI32(zero);
+ return;
+ }
+
+ // Unrolled loop with a tail. Stores will use negative offsets. That's OK
+ // for x86 and ARM, at least.
+
+ // Compute pointer to the highest-addressed slot on the frame.
+ RegI32 p = ra->needI32();
+ masm.computeEffectiveAddress(Address(sp_, localOffset(low + wordSize)), p);
+
+ // Compute pointer to the lowest-addressed slot on the frame that will be
+ // initialized by the loop body.
+ RegI32 lim = ra->needI32();
+ masm.computeEffectiveAddress(Address(sp_, localOffset(loopHigh + wordSize)),
+ lim);
+
+ // The loop body. Eventually we'll have p == lim and exit the loop.
+ Label again;
+ masm.bind(&again);
+ for (uint32_t i = 0; i < UNROLL_LIMIT; ++i) {
+ masm.storePtr(zero, Address(p, -(wordSize * i)));
+ }
+ masm.subPtr(Imm32(UNROLL_LIMIT * wordSize), p);
+ masm.branchPtr(Assembler::LessThan, lim, p, &again);
+
+ // The tail.
+ for (uint32_t i = 0; i < tailWords; ++i) {
+ masm.storePtr(zero, Address(p, -(wordSize * i)));
+ }
+
+ ra->freeI32(p);
+ ra->freeI32(lim);
+ ra->freeI32(zero);
+}
+
+} // namespace wasm
+} // namespace js
diff --git a/js/src/wasm/WasmBCFrame.h b/js/src/wasm/WasmBCFrame.h
new file mode 100644
index 0000000000..089d3d5e84
--- /dev/null
+++ b/js/src/wasm/WasmBCFrame.h
@@ -0,0 +1,1397 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: CPU stack frame,
+// stack maps, and associated logic.
+
+#ifndef wasm_wasm_baseline_frame_h
+#define wasm_wasm_baseline_frame_h
+
+#include "wasm/WasmBaselineCompile.h" // For BaseLocalIter
+#include "wasm/WasmBCDefs.h"
+#include "wasm/WasmBCRegDefs.h"
+#include "wasm/WasmBCStk.h"
+#include "wasm/WasmConstants.h" // For MaxFrameSize
+
+// [SMDOC] Wasm baseline compiler's stack frame.
+//
+// For background, see "Wasm's ABIs" in WasmFrame.h, the following should never
+// be in conflict with that.
+//
+// The stack frame has four parts ("below" means at lower addresses):
+//
+// - the Frame element;
+// - the Local area, including the DebugFrame element and possibly a spilled
+// pointer to stack results, if any; allocated below the header with various
+// forms of alignment;
+// - the Dynamic area, comprising the temporary storage the compiler uses for
+// register spilling, allocated below the Local area;
+// - the Arguments area, comprising memory allocated for outgoing calls,
+// allocated below the Dynamic area.
+//
+// +==============================+
+// | Incoming stack arg |
+// | ... |
+// ------------- +==============================+
+// | Frame (fixed size) |
+// ------------- +==============================+ <-------------------- FP
+// ^ | DebugFrame (optional) | ^ ^ ^^
+// localSize | Register arg local | | | ||
+// | | ... | | | framePushed
+// | | Register stack result ptr?| | | ||
+// | | Non-arg local | | | ||
+// | | ... | | | ||
+// | | (padding) | | | ||
+// | | Instance pointer | | | ||
+// | +------------------------------+ | | ||
+// v | (padding) | | v ||
+// ------------- +==============================+ currentStackHeight ||
+// ^ | Dynamic (variable size) | | ||
+// dynamicSize | ... | | ||
+// v | ... | v ||
+// ------------- | (free space, sometimes) | --------- v|
+// +==============================+ <----- SP not-during calls
+// | Arguments (sometimes) | |
+// | ... | v
+// +==============================+ <----- SP during calls
+//
+// The Frame is addressed off the stack pointer. masm.framePushed() is always
+// correct, and masm.getStackPointer() + masm.framePushed() always addresses the
+// Frame, with the DebugFrame optionally below it.
+//
+// The Local area (including the DebugFrame and, if needed, the spilled value of
+// the stack results area pointer) is laid out by BaseLocalIter and is allocated
+// and deallocated by standard prologue and epilogue functions that manipulate
+// the stack pointer, but it is accessed via BaseStackFrame.
+//
+// The Dynamic area is maintained by and accessed via BaseStackFrame. On some
+// systems (such as ARM64), the Dynamic memory may be allocated in chunks
+// because the SP needs a specific alignment, and in this case there will
+// normally be some free space directly above the SP. The stack height does not
+// include the free space, it reflects the logically used space only.
+//
+// The Dynamic area is where space for stack results is allocated when calling
+// functions that return results on the stack. If a function has stack results,
+// a pointer to the low address of the stack result area is passed as an
+// additional argument, according to the usual ABI. See
+// ABIResultIter::HasStackResults.
+//
+// The Arguments area is allocated and deallocated via BaseStackFrame (see
+// comments later) but is accessed directly off the stack pointer.
+
+namespace js {
+namespace wasm {
+
+using namespace js::jit;
+
+// Abstraction of the height of the stack frame, to avoid type confusion.
+
+class StackHeight {
+ friend class BaseStackFrameAllocator;
+
+ uint32_t height;
+
+ public:
+ explicit StackHeight(uint32_t h) : height(h) {}
+ static StackHeight Invalid() { return StackHeight(UINT32_MAX); }
+ bool isValid() const { return height != UINT32_MAX; }
+ bool operator==(StackHeight rhs) const {
+ MOZ_ASSERT(isValid() && rhs.isValid());
+ return height == rhs.height;
+ }
+ bool operator!=(StackHeight rhs) const { return !(*this == rhs); }
+};
+
+// Abstraction for where multi-value results go on the machine stack.
+
+class StackResultsLoc {
+ uint32_t bytes_;
+ size_t count_;
+ Maybe<uint32_t> height_;
+
+ public:
+  StackResultsLoc() : bytes_(0), count_(0) {}
+ StackResultsLoc(uint32_t bytes, size_t count, uint32_t height)
+ : bytes_(bytes), count_(count), height_(Some(height)) {
+ MOZ_ASSERT(bytes != 0);
+ MOZ_ASSERT(count != 0);
+ MOZ_ASSERT(height != 0);
+ }
+
+ uint32_t bytes() const { return bytes_; }
+ uint32_t count() const { return count_; }
+ uint32_t height() const { return height_.value(); }
+
+ bool hasStackResults() const { return bytes() != 0; }
+ StackResults stackResults() const {
+ return hasStackResults() ? StackResults::HasStackResults
+ : StackResults::NoStackResults;
+ }
+};
+
+// Abstraction of the baseline compiler's stack frame (except for the Frame /
+// DebugFrame parts). See comments above for more. Remember, "below" on the
+// stack means at lower addresses.
+//
+// The abstraction is split into two parts: BaseStackFrameAllocator is
+// responsible for allocating and deallocating space on the stack and for
+// performing computations that are affected by how the allocation is performed;
+// BaseStackFrame then provides a pleasant interface for stack frame management.
+
+class BaseStackFrameAllocator {
+ MacroAssembler& masm;
+
+#ifdef RABALDR_CHUNKY_STACK
+ // On platforms that require the stack pointer to be aligned on a boundary
+ // greater than the typical stack item (eg, ARM64 requires 16-byte alignment
+ // but items are 8 bytes), allocate stack memory in chunks, and use a
+ // separate stack height variable to track the effective stack pointer
+ // within the allocated area. Effectively, there's a variable amount of
+ // free space directly above the stack pointer. See diagram above.
+
+ // The following must be true in order for the stack height to be
+ // predictable at control flow joins:
+ //
+ // - The Local area is always aligned according to WasmStackAlignment, ie,
+ // masm.framePushed() % WasmStackAlignment is zero after allocating
+ // locals.
+ //
+ // - ChunkSize is always a multiple of WasmStackAlignment.
+ //
+ // - Pushing and popping are always in units of ChunkSize (hence preserving
+ // alignment).
+ //
+ // - The free space on the stack (masm.framePushed() - currentStackHeight_)
+ // is a predictable (nonnegative) amount.
+
+  // As an optimization, we pre-allocate some space on the stack; the size of
+  // this allocation is InitialChunk, and it must be a multiple of ChunkSize.
+ // It is allocated as part of the function prologue and deallocated as part
+ // of the epilogue, along with the locals.
+ //
+ // If ChunkSize is too large then we risk overflowing the stack on simple
+ // recursions with few live values where stack overflow should not be a
+ // risk; if it is too small we spend too much time adjusting the stack
+ // pointer.
+ //
+ // Good values for ChunkSize are the subject of future empirical analysis;
+ // eight words is just an educated guess.
+
+ static constexpr uint32_t ChunkSize = 8 * sizeof(void*);
+ static constexpr uint32_t InitialChunk = ChunkSize;
+
+ // The current logical height of the frame is
+ // currentStackHeight_ = localSize_ + dynamicSize
+ // where dynamicSize is not accounted for explicitly and localSize_ also
+ // includes size for the DebugFrame.
+ //
+ // The allocated size of the frame, provided by masm.framePushed(), is usually
+ // larger than currentStackHeight_, notably at the beginning of execution when
+ // we've allocated InitialChunk extra space.
+
+ uint32_t currentStackHeight_;
+#endif
+
+ // Size of the Local area in bytes (stable after BaseCompiler::init() has
+ // called BaseStackFrame::setupLocals(), which in turn calls
+ // BaseStackFrameAllocator::setLocalSize()), always rounded to the proper
+ // stack alignment. The Local area is then allocated in beginFunction(),
+ // following the allocation of the Header. See onFixedStackAllocated()
+ // below.
+
+ uint32_t localSize_;
+
+ protected:
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Initialization
+
+ explicit BaseStackFrameAllocator(MacroAssembler& masm)
+ : masm(masm),
+#ifdef RABALDR_CHUNKY_STACK
+ currentStackHeight_(0),
+#endif
+ localSize_(UINT32_MAX) {
+ }
+
+ protected:
+ //////////////////////////////////////////////////////////////////////
+ //
+ // The Local area - the static part of the frame.
+
+ // Record the size of the Local area, once it is known.
+
+ void setLocalSize(uint32_t localSize) {
+ MOZ_ASSERT(localSize == AlignBytes(localSize, sizeof(void*)),
+ "localSize_ should be aligned to at least a pointer");
+ MOZ_ASSERT(localSize_ == UINT32_MAX);
+ localSize_ = localSize;
+ }
+
+ // Record the current stack height, after it has become stable in
+ // beginFunction(). See also BaseStackFrame::onFixedStackAllocated().
+
+ void onFixedStackAllocated() {
+ MOZ_ASSERT(localSize_ != UINT32_MAX);
+#ifdef RABALDR_CHUNKY_STACK
+ currentStackHeight_ = localSize_;
+#endif
+ }
+
+ public:
+ // The fixed amount of memory, in bytes, allocated on the stack below the
+ // Header for purposes such as locals and other fixed values. Includes all
+ // necessary alignment, and on ARM64 also the initial chunk for the working
+ // stack memory.
+
+ uint32_t fixedAllocSize() const {
+ MOZ_ASSERT(localSize_ != UINT32_MAX);
+#ifdef RABALDR_CHUNKY_STACK
+ return localSize_ + InitialChunk;
+#else
+ return localSize_;
+#endif
+ }
+
+#ifdef RABALDR_CHUNKY_STACK
+ // The allocated frame size is frequently larger than the logical stack
+ // height; we round up to a chunk boundary, and special case the initial
+ // chunk.
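+  //
+  // For example, on a 64-bit chunky platform ChunkSize is 64 bytes; with
+  // fixedAllocSize() == 192, a logicalHeight of 200 is rounded up to
+  // 192 + AlignBytes(200 - 192, 64) == 256.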
+ uint32_t framePushedForHeight(uint32_t logicalHeight) {
+ if (logicalHeight <= fixedAllocSize()) {
+ return fixedAllocSize();
+ }
+ return fixedAllocSize() +
+ AlignBytes(logicalHeight - fixedAllocSize(), ChunkSize);
+ }
+#endif
+
+ protected:
+ //////////////////////////////////////////////////////////////////////
+ //
+ // The Dynamic area - the dynamic part of the frame, for spilling and saving
+ // intermediate values.
+
+ // Offset off of sp_ for the slot at stack area location `offset`.
+
+ int32_t stackOffset(int32_t offset) {
+ MOZ_ASSERT(offset > 0);
+ return masm.framePushed() - offset;
+ }
+
+ uint32_t computeHeightWithStackResults(StackHeight stackBase,
+ uint32_t stackResultBytes) {
+ MOZ_ASSERT(stackResultBytes);
+ MOZ_ASSERT(currentStackHeight() >= stackBase.height);
+ return stackBase.height + stackResultBytes;
+ }
+
+#ifdef RABALDR_CHUNKY_STACK
+ void pushChunkyBytes(uint32_t bytes) {
+ checkChunkyInvariants();
+ uint32_t freeSpace = masm.framePushed() - currentStackHeight_;
+ if (freeSpace < bytes) {
+ uint32_t bytesToReserve = AlignBytes(bytes - freeSpace, ChunkSize);
+ MOZ_ASSERT(bytesToReserve + freeSpace >= bytes);
+ masm.reserveStack(bytesToReserve);
+ }
+ currentStackHeight_ += bytes;
+ checkChunkyInvariants();
+ }
+
+ void popChunkyBytes(uint32_t bytes) {
+ checkChunkyInvariants();
+ currentStackHeight_ -= bytes;
+ // Sometimes, popChunkyBytes() is used to pop a larger area, as when we drop
+ // values consumed by a call, and we may need to drop several chunks. But
+ // never drop the initial chunk. Crucially, the amount we drop is always an
+ // integral number of chunks.
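+    //
+    // For example, with ChunkSize == 64, fixedAllocSize() == 192, and
+    // masm.framePushed() == 320: if currentStackHeight_ drops from 310 to
+    // 210, the free space is 110 bytes, the target allocation is
+    // framePushedForHeight(210) == 256, and we free 64 bytes (one chunk).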
+ uint32_t freeSpace = masm.framePushed() - currentStackHeight_;
+ if (freeSpace >= ChunkSize) {
+ uint32_t targetAllocSize = framePushedForHeight(currentStackHeight_);
+ uint32_t amountToFree = masm.framePushed() - targetAllocSize;
+ MOZ_ASSERT(amountToFree % ChunkSize == 0);
+ if (amountToFree) {
+ masm.freeStack(amountToFree);
+ }
+ }
+ checkChunkyInvariants();
+ }
+#endif
+
+ uint32_t currentStackHeight() const {
+#ifdef RABALDR_CHUNKY_STACK
+ return currentStackHeight_;
+#else
+ return masm.framePushed();
+#endif
+ }
+
+ private:
+#ifdef RABALDR_CHUNKY_STACK
+ void checkChunkyInvariants() {
+ MOZ_ASSERT(masm.framePushed() >= fixedAllocSize());
+ MOZ_ASSERT(masm.framePushed() >= currentStackHeight_);
+ MOZ_ASSERT(masm.framePushed() == fixedAllocSize() ||
+ masm.framePushed() - currentStackHeight_ < ChunkSize);
+ MOZ_ASSERT((masm.framePushed() - localSize_) % ChunkSize == 0);
+ }
+#endif
+
+ // For a given stack height, return the appropriate size of the allocated
+ // frame.
+
+ uint32_t framePushedForHeight(StackHeight stackHeight) {
+#ifdef RABALDR_CHUNKY_STACK
+ // A more complicated adjustment is needed.
+ return framePushedForHeight(stackHeight.height);
+#else
+ // The allocated frame size equals the stack height.
+ return stackHeight.height;
+#endif
+ }
+
+ public:
+ // The current height of the stack area, not necessarily zero-based, in a
+ // type-safe way.
+
+ StackHeight stackHeight() const { return StackHeight(currentStackHeight()); }
+
+ // Set the frame height to a previously recorded value.
+
+ void setStackHeight(StackHeight amount) {
+#ifdef RABALDR_CHUNKY_STACK
+ currentStackHeight_ = amount.height;
+ masm.setFramePushed(framePushedForHeight(amount));
+ checkChunkyInvariants();
+#else
+ masm.setFramePushed(amount.height);
+#endif
+ }
+
+ // The current height of the dynamic part of the stack area (ie, the backing
+ // store for the evaluation stack), zero-based.
+
+ uint32_t dynamicHeight() const { return currentStackHeight() - localSize_; }
+
+ // Before branching to an outer control label, pop the execution stack to
+ // the level expected by that region, but do not update masm.framePushed()
+ // as that will happen as compilation leaves the block.
+ //
+ // Note these operate directly on the stack pointer register.
+
+ void popStackBeforeBranch(StackHeight destStackHeight,
+ uint32_t stackResultBytes) {
+ uint32_t framePushedHere = masm.framePushed();
+ StackHeight heightThere =
+ StackHeight(destStackHeight.height + stackResultBytes);
+ uint32_t framePushedThere = framePushedForHeight(heightThere);
+ if (framePushedHere > framePushedThere) {
+ masm.addToStackPtr(Imm32(framePushedHere - framePushedThere));
+ }
+ }
+
+ void popStackBeforeBranch(StackHeight destStackHeight, ResultType type) {
+ popStackBeforeBranch(destStackHeight,
+ ABIResultIter::MeasureStackBytes(type));
+ }
+
+ // Given that there are |stackParamSize| bytes on the dynamic stack
+ // corresponding to the stack results, return the stack height once these
+ // parameters are popped.
+
+ StackHeight stackResultsBase(uint32_t stackParamSize) {
+ return StackHeight(currentStackHeight() - stackParamSize);
+ }
+
+ // For most of WebAssembly, adjacent instructions have fallthrough control
+ // flow between them, which allows us to simply thread the current stack
+ // height through the compiler. There are two exceptions to this rule: when
+ // leaving a block via dead code, and when entering the "else" arm of an "if".
+ // In these cases, the stack height is the block entry height, plus any stack
+ // values (results in the block exit case, parameters in the else entry case).
+
+ void resetStackHeight(StackHeight destStackHeight, ResultType type) {
+ uint32_t height = destStackHeight.height;
+ height += ABIResultIter::MeasureStackBytes(type);
+ setStackHeight(StackHeight(height));
+ }
+
+ // Return offset of stack result.
+
+ uint32_t locateStackResult(const ABIResult& result, StackHeight stackBase,
+ uint32_t stackResultBytes) {
+ MOZ_ASSERT(result.onStack());
+ MOZ_ASSERT(result.stackOffset() + result.size() <= stackResultBytes);
+ uint32_t end = computeHeightWithStackResults(stackBase, stackResultBytes);
+ return end - result.stackOffset();
+ }
+
+ public:
+ //////////////////////////////////////////////////////////////////////
+ //
+ // The Argument area - for outgoing calls.
+ //
+ // We abstract these operations as an optimization: we can merge the freeing
+ // of the argument area and dropping values off the stack after a call. But
+ // they always amount to manipulating the real stack pointer by some amount.
+ //
+ // Note that we do not update currentStackHeight_ for this; the frame does
+ // not know about outgoing arguments. But we do update framePushed(), so we
+ // can still index into the frame below the outgoing arguments area.
+
+ // This is always equivalent to a masm.reserveStack() call.
+
+ void allocArgArea(size_t argSize) {
+ if (argSize) {
+ masm.reserveStack(argSize);
+ }
+ }
+
+ // This frees the argument area allocated by allocArgArea(), and `argSize`
+ // must be equal to the `argSize` argument to allocArgArea(). In addition
+ // we drop some values from the frame, corresponding to the values that were
+ // consumed by the call.
+
+ void freeArgAreaAndPopBytes(size_t argSize, size_t dropSize) {
+#ifdef RABALDR_CHUNKY_STACK
+ // Freeing the outgoing arguments and freeing the consumed values have
+ // different semantics here, which is why the operation is split.
+ if (argSize) {
+ masm.freeStack(argSize);
+ }
+ popChunkyBytes(dropSize);
+#else
+ if (argSize + dropSize) {
+ masm.freeStack(argSize + dropSize);
+ }
+#endif
+ }
+};
+
+class BaseStackFrame final : public BaseStackFrameAllocator {
+ MacroAssembler& masm;
+
+ // The largest observed value of masm.framePushed(), ie, the size of the
+ // stack frame. Read this for its true value only when code generation is
+ // finished.
+ uint32_t maxFramePushed_;
+
+ // Patch point where we check for stack overflow.
+ CodeOffset stackAddOffset_;
+
+ // Low byte offset of pointer to stack results, if any.
+ Maybe<int32_t> stackResultsPtrOffset_;
+
+ // The offset of instance pointer.
+ uint32_t instancePointerOffset_;
+
+ // Low byte offset of local area for true locals (not parameters).
+ uint32_t varLow_;
+
+ // High byte offset + 1 of local area for true locals.
+ uint32_t varHigh_;
+
+ // The stack pointer, cached for brevity.
+ RegisterOrSP sp_;
+
+ public:
+ explicit BaseStackFrame(MacroAssembler& masm)
+ : BaseStackFrameAllocator(masm),
+ masm(masm),
+ maxFramePushed_(0),
+ stackAddOffset_(0),
+ instancePointerOffset_(UINT32_MAX),
+ varLow_(UINT32_MAX),
+ varHigh_(UINT32_MAX),
+ sp_(masm.getStackPointer()) {}
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Stack management and overflow checking
+
+ // This must be called once beginFunction has allocated space for the Header
+ // (the Frame and DebugFrame) and the Local area, and will record the current
+ // frame size for internal use by the stack abstractions.
+
+ void onFixedStackAllocated() {
+ maxFramePushed_ = masm.framePushed();
+ BaseStackFrameAllocator::onFixedStackAllocated();
+ }
+
+ // We won't know until after we've generated code how big the frame will be
+  // (we may need arbitrary spill slots and outgoing param slots), so emit a
+ // patchable add that is patched in endFunction().
+ //
+ // Note the platform scratch register may be used by branchPtr(), so
+ // generally tmp must be something else.
+
+ void checkStack(Register tmp, BytecodeOffset trapOffset) {
+ stackAddOffset_ = masm.sub32FromStackPtrWithPatch(tmp);
+ Label ok;
+ masm.branchPtr(Assembler::Below,
+ Address(InstanceReg, wasm::Instance::offsetOfStackLimit()),
+ tmp, &ok);
+ masm.wasmTrap(Trap::StackOverflow, trapOffset);
+ masm.bind(&ok);
+ }
+
+ void patchCheckStack() {
+ masm.patchSub32FromStackPtr(stackAddOffset_,
+ Imm32(int32_t(maxFramePushed_)));
+ }
+
+ // Very large frames are implausible, probably an attack.
+
+ bool checkStackHeight() { return maxFramePushed_ <= MaxFrameSize; }
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Local area
+
+ struct Local {
+ // Type of the value.
+ const MIRType type;
+
+ // Byte offset from Frame "into" the locals, ie positive for true locals
+ // and negative for incoming args that read directly from the arg area.
+ // It assumes the stack is growing down and that locals are on the stack
+ // at lower addresses than Frame, and is the offset from Frame of the
+ // lowest-addressed byte of the local.
+ const int32_t offs;
+
+ Local(MIRType type, int32_t offs) : type(type), offs(offs) {}
+
+ bool isStackArgument() const { return offs < 0; }
+ };
+
+ // Profiling shows that the number of parameters and locals frequently
+ // touches or exceeds 8. So 16 seems like a reasonable starting point.
+ using LocalVector = Vector<Local, 16, SystemAllocPolicy>;
+
+ // Initialize `localInfo` based on the types of `locals` and `args`.
+ [[nodiscard]] bool setupLocals(const ValTypeVector& locals,
+ const ArgTypeVector& args, bool debugEnabled,
+ LocalVector* localInfo) {
+ if (!localInfo->reserve(locals.length())) {
+ return false;
+ }
+
+ DebugOnly<uint32_t> index = 0;
+ BaseLocalIter i(locals, args, debugEnabled);
+ for (; !i.done() && i.index() < args.lengthWithoutStackResults(); i++) {
+ MOZ_ASSERT(i.isArg());
+ MOZ_ASSERT(i.index() == index);
+ localInfo->infallibleEmplaceBack(i.mirType(), i.frameOffset());
+ index++;
+ }
+
+ varLow_ = i.frameSize();
+ for (; !i.done(); i++) {
+ MOZ_ASSERT(!i.isArg());
+ MOZ_ASSERT(i.index() == index);
+ localInfo->infallibleEmplaceBack(i.mirType(), i.frameOffset());
+ index++;
+ }
+ varHigh_ = i.frameSize();
+
+ // Reserve an additional stack slot for the instance pointer.
+ const uint32_t pointerAlignedVarHigh = AlignBytes(varHigh_, sizeof(void*));
+ const uint32_t localSize = pointerAlignedVarHigh + sizeof(void*);
+ instancePointerOffset_ = localSize;
+
+ setLocalSize(AlignBytes(localSize, WasmStackAlignment));
+
+ if (args.hasSyntheticStackResultPointerArg()) {
+ stackResultsPtrOffset_ = Some(i.stackResultPointerOffset());
+ }
+
+ return true;
+ }
+
+ void zeroLocals(BaseRegAlloc* ra);
+
+ Address addressOfLocal(const Local& local, uint32_t additionalOffset = 0) {
+ if (local.isStackArgument()) {
+ return Address(FramePointer,
+ stackArgumentOffsetFromFp(local) + additionalOffset);
+ }
+ return Address(sp_, localOffsetFromSp(local) + additionalOffset);
+ }
+
+ void loadLocalI32(const Local& src, RegI32 dest) {
+ masm.load32(addressOfLocal(src), dest);
+ }
+
+#ifndef JS_PUNBOX64
+ void loadLocalI64Low(const Local& src, RegI32 dest) {
+ masm.load32(addressOfLocal(src, INT64LOW_OFFSET), dest);
+ }
+
+ void loadLocalI64High(const Local& src, RegI32 dest) {
+ masm.load32(addressOfLocal(src, INT64HIGH_OFFSET), dest);
+ }
+#endif
+
+ void loadLocalI64(const Local& src, RegI64 dest) {
+ masm.load64(addressOfLocal(src), dest);
+ }
+
+ void loadLocalRef(const Local& src, RegRef dest) {
+ masm.loadPtr(addressOfLocal(src), dest);
+ }
+
+ void loadLocalF64(const Local& src, RegF64 dest) {
+ masm.loadDouble(addressOfLocal(src), dest);
+ }
+
+ void loadLocalF32(const Local& src, RegF32 dest) {
+ masm.loadFloat32(addressOfLocal(src), dest);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void loadLocalV128(const Local& src, RegV128 dest) {
+ masm.loadUnalignedSimd128(addressOfLocal(src), dest);
+ }
+#endif
+
+ void storeLocalI32(RegI32 src, const Local& dest) {
+ masm.store32(src, addressOfLocal(dest));
+ }
+
+ void storeLocalI64(RegI64 src, const Local& dest) {
+ masm.store64(src, addressOfLocal(dest));
+ }
+
+ void storeLocalRef(RegRef src, const Local& dest) {
+ masm.storePtr(src, addressOfLocal(dest));
+ }
+
+ void storeLocalF64(RegF64 src, const Local& dest) {
+ masm.storeDouble(src, addressOfLocal(dest));
+ }
+
+ void storeLocalF32(RegF32 src, const Local& dest) {
+ masm.storeFloat32(src, addressOfLocal(dest));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void storeLocalV128(RegV128 src, const Local& dest) {
+ masm.storeUnalignedSimd128(src, addressOfLocal(dest));
+ }
+#endif
+
+ // Offset off of sp_ for `local`.
+ int32_t localOffsetFromSp(const Local& local) {
+ MOZ_ASSERT(!local.isStackArgument());
+ return localOffset(local.offs);
+ }
+
+ // Offset off of frame pointer for `stack argument`.
+ int32_t stackArgumentOffsetFromFp(const Local& local) {
+ MOZ_ASSERT(local.isStackArgument());
+ return -local.offs;
+ }
+
+ // The incoming stack result area pointer is for stack results of the function
+ // being compiled.
+ void loadIncomingStackResultAreaPtr(RegPtr reg) {
+ const int32_t offset = stackResultsPtrOffset_.value();
+ Address src = offset < 0 ? Address(FramePointer, -offset)
+ : Address(sp_, stackOffset(offset));
+ masm.loadPtr(src, reg);
+ }
+
+ void storeIncomingStackResultAreaPtr(RegPtr reg) {
+ // If we get here, that means the pointer to the stack results area was
+ // passed in as a register, and therefore it will be spilled below the
+ // frame, so the offset is a positive height.
+ MOZ_ASSERT(stackResultsPtrOffset_.value() > 0);
+ masm.storePtr(reg,
+ Address(sp_, stackOffset(stackResultsPtrOffset_.value())));
+ }
+
+ void loadInstancePtr(Register dst) {
+ masm.loadPtr(Address(sp_, stackOffset(instancePointerOffset_)), dst);
+ }
+
+ void storeInstancePtr(Register instance) {
+ masm.storePtr(instance, Address(sp_, stackOffset(instancePointerOffset_)));
+ }
+
+ int32_t getInstancePtrOffset() { return stackOffset(instancePointerOffset_); }
+
+ // An outgoing stack result area pointer is for stack results of callees of
+ // the function being compiled.
+ void computeOutgoingStackResultAreaPtr(const StackResultsLoc& results,
+ RegPtr dest) {
+ MOZ_ASSERT(results.height() <= masm.framePushed());
+ uint32_t offsetFromSP = masm.framePushed() - results.height();
+ masm.moveStackPtrTo(dest);
+ if (offsetFromSP) {
+ masm.addPtr(Imm32(offsetFromSP), dest);
+ }
+ }
+
+ private:
+ // Offset off of sp_ for a local with offset `offset` from Frame.
+ int32_t localOffset(int32_t offset) { return masm.framePushed() - offset; }
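+ // Purely illustrative numbers: if masm.framePushed() is 64 and a local's
+ // Frame offset is 16, that local is addressed at sp_ + 48.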
+
+ public:
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Dynamic area
+
+ static constexpr size_t StackSizeOfPtr = ABIResult::StackSizeOfPtr;
+ static constexpr size_t StackSizeOfInt64 = ABIResult::StackSizeOfInt64;
+ static constexpr size_t StackSizeOfFloat = ABIResult::StackSizeOfFloat;
+ static constexpr size_t StackSizeOfDouble = ABIResult::StackSizeOfDouble;
+#ifdef ENABLE_WASM_SIMD
+ static constexpr size_t StackSizeOfV128 = ABIResult::StackSizeOfV128;
+#endif
+
+ // Pushes the register `r` to the stack. This pushes the full 64-bit width on
+ // 64-bit systems, and 32-bits otherwise.
+ uint32_t pushGPR(Register r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfPtr);
+ masm.storePtr(r, Address(sp_, stackOffset(currentStackHeight())));
+#else
+ masm.Push(r);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfPtr == currentStackHeight());
+ return currentStackHeight();
+ }
+
+ uint32_t pushFloat32(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfFloat);
+ masm.storeFloat32(r, Address(sp_, stackOffset(currentStackHeight())));
+#else
+ masm.Push(r);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfFloat == currentStackHeight());
+ return currentStackHeight();
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ uint32_t pushV128(RegV128 r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+# ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfV128);
+# else
+ masm.adjustStack(-(int)StackSizeOfV128);
+# endif
+ masm.storeUnalignedSimd128(r,
+ Address(sp_, stackOffset(currentStackHeight())));
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfV128 == currentStackHeight());
+ return currentStackHeight();
+ }
+#endif
+
+ uint32_t pushDouble(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfDouble);
+ masm.storeDouble(r, Address(sp_, stackOffset(currentStackHeight())));
+#else
+ masm.Push(r);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfDouble == currentStackHeight());
+ return currentStackHeight();
+ }
+
+ // Pops the stack into the register `r`. This pops the full 64-bit width on
+ // 64-bit systems, and 32-bits otherwise.
+ void popGPR(Register r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ masm.loadPtr(Address(sp_, stackOffset(currentStackHeight())), r);
+ popChunkyBytes(StackSizeOfPtr);
+#else
+ masm.Pop(r);
+#endif
+ MOZ_ASSERT(stackBefore - StackSizeOfPtr == currentStackHeight());
+ }
+
+ void popFloat32(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ masm.loadFloat32(Address(sp_, stackOffset(currentStackHeight())), r);
+ popChunkyBytes(StackSizeOfFloat);
+#else
+ masm.Pop(r);
+#endif
+ MOZ_ASSERT(stackBefore - StackSizeOfFloat == currentStackHeight());
+ }
+
+ void popDouble(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ masm.loadDouble(Address(sp_, stackOffset(currentStackHeight())), r);
+ popChunkyBytes(StackSizeOfDouble);
+#else
+ masm.Pop(r);
+#endif
+ MOZ_ASSERT(stackBefore - StackSizeOfDouble == currentStackHeight());
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void popV128(RegV128 r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+ masm.loadUnalignedSimd128(Address(sp_, stackOffset(currentStackHeight())),
+ r);
+# ifdef RABALDR_CHUNKY_STACK
+ popChunkyBytes(StackSizeOfV128);
+# else
+ masm.adjustStack((int)StackSizeOfV128);
+# endif
+ MOZ_ASSERT(stackBefore - StackSizeOfV128 == currentStackHeight());
+ }
+#endif
+
+ void popBytes(size_t bytes) {
+ if (bytes > 0) {
+#ifdef RABALDR_CHUNKY_STACK
+ popChunkyBytes(bytes);
+#else
+ masm.freeStack(bytes);
+#endif
+ }
+ }
+
+ void loadStackI32(int32_t offset, RegI32 dest) {
+ masm.load32(Address(sp_, stackOffset(offset)), dest);
+ }
+
+ void loadStackI64(int32_t offset, RegI64 dest) {
+ masm.load64(Address(sp_, stackOffset(offset)), dest);
+ }
+
+#ifndef JS_PUNBOX64
+ void loadStackI64Low(int32_t offset, RegI32 dest) {
+ masm.load32(Address(sp_, stackOffset(offset - INT64LOW_OFFSET)), dest);
+ }
+
+ void loadStackI64High(int32_t offset, RegI32 dest) {
+ masm.load32(Address(sp_, stackOffset(offset - INT64HIGH_OFFSET)), dest);
+ }
+#endif
+
+ void loadStackRef(int32_t offset, RegRef dest) {
+ masm.loadPtr(Address(sp_, stackOffset(offset)), dest);
+ }
+
+ void loadStackF64(int32_t offset, RegF64 dest) {
+ masm.loadDouble(Address(sp_, stackOffset(offset)), dest);
+ }
+
+ void loadStackF32(int32_t offset, RegF32 dest) {
+ masm.loadFloat32(Address(sp_, stackOffset(offset)), dest);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void loadStackV128(int32_t offset, RegV128 dest) {
+ masm.loadUnalignedSimd128(Address(sp_, stackOffset(offset)), dest);
+ }
+#endif
+
+ uint32_t prepareStackResultArea(StackHeight stackBase,
+ uint32_t stackResultBytes) {
+ uint32_t end = computeHeightWithStackResults(stackBase, stackResultBytes);
+ if (currentStackHeight() < end) {
+ uint32_t bytes = end - currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(bytes);
+#else
+ masm.reserveStack(bytes);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ }
+ return end;
+ }
+
+ void finishStackResultArea(StackHeight stackBase, uint32_t stackResultBytes) {
+ uint32_t end = computeHeightWithStackResults(stackBase, stackResultBytes);
+ MOZ_ASSERT(currentStackHeight() >= end);
+ popBytes(currentStackHeight() - end);
+ }
+
+ // |srcHeight| and |destHeight| are stack heights *including* |bytes|.
+ void shuffleStackResultsTowardFP(uint32_t srcHeight, uint32_t destHeight,
+ uint32_t bytes, Register temp) {
+ MOZ_ASSERT(destHeight < srcHeight);
+ MOZ_ASSERT(bytes % sizeof(uint32_t) == 0);
+ uint32_t destOffset = stackOffset(destHeight) + bytes;
+ uint32_t srcOffset = stackOffset(srcHeight) + bytes;
+ while (bytes >= sizeof(intptr_t)) {
+ destOffset -= sizeof(intptr_t);
+ srcOffset -= sizeof(intptr_t);
+ bytes -= sizeof(intptr_t);
+ masm.loadPtr(Address(sp_, srcOffset), temp);
+ masm.storePtr(temp, Address(sp_, destOffset));
+ }
+ if (bytes) {
+ MOZ_ASSERT(bytes == sizeof(uint32_t));
+ destOffset -= sizeof(uint32_t);
+ srcOffset -= sizeof(uint32_t);
+ masm.load32(Address(sp_, srcOffset), temp);
+ masm.store32(temp, Address(sp_, destOffset));
+ }
+ }
+
+ // Unlike the overload that operates on raw heights, |srcHeight| and
+ // |destHeight| are stack heights *not including* |bytes|.
+ void shuffleStackResultsTowardFP(StackHeight srcHeight,
+ StackHeight destHeight, uint32_t bytes,
+ Register temp) {
+ MOZ_ASSERT(srcHeight.isValid());
+ MOZ_ASSERT(destHeight.isValid());
+ uint32_t src = computeHeightWithStackResults(srcHeight, bytes);
+ uint32_t dest = computeHeightWithStackResults(destHeight, bytes);
+ MOZ_ASSERT(src <= currentStackHeight());
+ MOZ_ASSERT(dest <= currentStackHeight());
+ shuffleStackResultsTowardFP(src, dest, bytes, temp);
+ }
+
+ // |srcHeight| and |destHeight| are stack heights *including* |bytes|.
+ void shuffleStackResultsTowardSP(uint32_t srcHeight, uint32_t destHeight,
+ uint32_t bytes, Register temp) {
+ MOZ_ASSERT(destHeight > srcHeight);
+ MOZ_ASSERT(bytes % sizeof(uint32_t) == 0);
+ uint32_t destOffset = stackOffset(destHeight);
+ uint32_t srcOffset = stackOffset(srcHeight);
+ while (bytes >= sizeof(intptr_t)) {
+ masm.loadPtr(Address(sp_, srcOffset), temp);
+ masm.storePtr(temp, Address(sp_, destOffset));
+ destOffset += sizeof(intptr_t);
+ srcOffset += sizeof(intptr_t);
+ bytes -= sizeof(intptr_t);
+ }
+ if (bytes) {
+ MOZ_ASSERT(bytes == sizeof(uint32_t));
+ masm.load32(Address(sp_, srcOffset), temp);
+ masm.store32(temp, Address(sp_, destOffset));
+ }
+ }
+
+ // Copy results from the top of the current stack frame to an area of memory,
+ // and pop the stack accordingly. `dest` is the address of the low byte of
+ // that memory.
+ void popStackResultsToMemory(Register dest, uint32_t bytes, Register temp) {
+ MOZ_ASSERT(bytes <= currentStackHeight());
+ MOZ_ASSERT(bytes % sizeof(uint32_t) == 0);
+ uint32_t bytesToPop = bytes;
+ uint32_t srcOffset = stackOffset(currentStackHeight());
+ uint32_t destOffset = 0;
+ while (bytes >= sizeof(intptr_t)) {
+ masm.loadPtr(Address(sp_, srcOffset), temp);
+ masm.storePtr(temp, Address(dest, destOffset));
+ destOffset += sizeof(intptr_t);
+ srcOffset += sizeof(intptr_t);
+ bytes -= sizeof(intptr_t);
+ }
+ if (bytes) {
+ MOZ_ASSERT(bytes == sizeof(uint32_t));
+ masm.load32(Address(sp_, srcOffset), temp);
+ masm.store32(temp, Address(dest, destOffset));
+ }
+ popBytes(bytesToPop);
+ }
+
+ void allocArgArea(size_t argSize) {
+ if (argSize) {
+ BaseStackFrameAllocator::allocArgArea(argSize);
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ }
+ }
+
+ private:
+ void store32BitsToStack(int32_t imm, uint32_t destHeight, Register temp) {
+ masm.move32(Imm32(imm), temp);
+ masm.store32(temp, Address(sp_, stackOffset(destHeight)));
+ }
+
+ void store64BitsToStack(int64_t imm, uint32_t destHeight, Register temp) {
+#ifdef JS_PUNBOX64
+ masm.move64(Imm64(imm), Register64(temp));
+ masm.store64(Register64(temp), Address(sp_, stackOffset(destHeight)));
+#else
+ union {
+ int64_t i64;
+ int32_t i32[2];
+ } bits = {.i64 = imm};
+ static_assert(sizeof(bits) == 8);
+ store32BitsToStack(bits.i32[0], destHeight, temp);
+ store32BitsToStack(bits.i32[1], destHeight - sizeof(int32_t), temp);
+#endif
+ }
+
+ public:
+ void storeImmediatePtrToStack(intptr_t imm, uint32_t destHeight,
+ Register temp) {
+#ifdef JS_PUNBOX64
+ static_assert(StackSizeOfPtr == 8);
+ store64BitsToStack(imm, destHeight, temp);
+#else
+ static_assert(StackSizeOfPtr == 4);
+ store32BitsToStack(int32_t(imm), destHeight, temp);
+#endif
+ }
+
+ void storeImmediateI64ToStack(int64_t imm, uint32_t destHeight,
+ Register temp) {
+ store64BitsToStack(imm, destHeight, temp);
+ }
+
+ void storeImmediateF32ToStack(float imm, uint32_t destHeight, Register temp) {
+ union {
+ int32_t i32;
+ float f32;
+ } bits = {.f32 = imm};
+ static_assert(sizeof(bits) == 4);
+ // Do not store 4 bytes if StackSizeOfFloat == 8. It's probably OK to do
+ // so, but it costs little to store something predictable.
+ if (StackSizeOfFloat == 4) {
+ store32BitsToStack(bits.i32, destHeight, temp);
+ } else {
+ store64BitsToStack(uint32_t(bits.i32), destHeight, temp);
+ }
+ }
+
+ void storeImmediateF64ToStack(double imm, uint32_t destHeight,
+ Register temp) {
+ union {
+ int64_t i64;
+ double f64;
+ } bits = {.f64 = imm};
+ static_assert(sizeof(bits) == 8);
+ store64BitsToStack(bits.i64, destHeight, temp);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void storeImmediateV128ToStack(V128 imm, uint32_t destHeight, Register temp) {
+ union {
+ int32_t i32[4];
+ uint8_t bytes[16];
+ } bits{};
+ static_assert(sizeof(bits) == 16);
+ memcpy(bits.bytes, imm.bytes, 16);
+ for (unsigned i = 0; i < 4; i++) {
+ store32BitsToStack(bits.i32[i], destHeight - i * sizeof(int32_t), temp);
+ }
+ }
+#endif
+};
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// MachineStackTracker, used for stack-slot pointerness tracking.
+
+// An expensive operation in stack-map creation is copying of the
+// MachineStackTracker (MST) into the final StackMap. This is done in
+// StackMapGenerator::createStackMap. Given that this is basically a
+// bit-array copy, it is reasonable to ask whether the two classes could have
+// a more similar representation, so that the copy could then be done with
+// `memcpy`.
+//
+// Although in principle feasible, the following complications exist, and so for
+// the moment, this has not been done.
+//
+// * StackMap is optimised for compact size (storage) since there will be
+// many, so it uses a true bitmap. MST is intended to be fast and simple,
+// and only one exists at once (per compilation thread). Doing this would
+// require MST to use a true bitmap, and hence ..
+//
+// * .. the copying can't be a straight memcpy, since StackMap has entries for
+// words not covered by MST. Hence the copy would need to shift bits in
+// each byte left or right (statistically speaking, in 7 cases out of 8) in
+// order to ensure no "holes" in the resulting bitmap.
+//
+// * Furthermore the copying would need to logically invert the direction of
+// the stacks. For MST, index zero in the vector corresponds to the highest
+// address in the stack. For StackMap, bit index zero corresponds to the
+// lowest address in the stack.
+//
+// * Finally, StackMap is a variable-length structure whose size must be known
+// at creation time. The size of an MST by contrast isn't known at creation
+// time -- it grows as the baseline compiler pushes stuff on its value
+// stack. That's why it has to have vector entry 0 being the highest address.
+//
+// * Although not directly relevant, StackMaps are also created by the via-Ion
+// compilation routes, by translation from the pre-existing "JS-era"
+// LSafePoints (CreateStackMapFromLSafepoint). So if we want to mash
+// StackMap around to suit baseline better, we also need to ensure it
+// doesn't break Ion somehow.
+
+class MachineStackTracker {
+ // Simulates the machine's stack, with one bool per word. The booleans are
+ // represented as `uint8_t`s so as to guarantee the element size is one
+ // byte. Index zero in this vector corresponds to the highest address in
+ // the machine's stack. The last entry corresponds to what SP currently
+ // points at. This all assumes a grow-down stack.
+ //
+ // numPtrs_ contains the number of "true" values in vec_, and is therefore
+ // redundant. But it serves as a constant-time way to detect the common
+ // case where vec_ holds no "true" values.
+ size_t numPtrs_;
+ Vector<uint8_t, 64, SystemAllocPolicy> vec_;
+
+ public:
+ MachineStackTracker() : numPtrs_(0) {}
+
+ ~MachineStackTracker() {
+#ifdef DEBUG
+ size_t n = 0;
+ for (uint8_t b : vec_) {
+ n += (b ? 1 : 0);
+ }
+ MOZ_ASSERT(n == numPtrs_);
+#endif
+ }
+
+ // Clone this MachineStackTracker, writing the result at |dst|.
+ [[nodiscard]] bool cloneTo(MachineStackTracker* dst);
+
+ // Notionally push |n| non-pointers on the stack.
+ [[nodiscard]] bool pushNonGCPointers(size_t n) {
+ return vec_.appendN(uint8_t(false), n);
+ }
+
+ // Mark the stack slot |offsetFromSP| up from the bottom as holding a
+ // pointer.
+ void setGCPointer(size_t offsetFromSP) {
+ // offsetFromSP == 0 denotes the most recently pushed item, == 1 the
+ // second most recently pushed item, etc.
+ MOZ_ASSERT(offsetFromSP < vec_.length());
+
+ size_t offsetFromTop = vec_.length() - 1 - offsetFromSP;
+ numPtrs_ = numPtrs_ + 1 - (vec_[offsetFromTop] ? 1 : 0);
+ vec_[offsetFromTop] = uint8_t(true);
+ }
+
+ // Query the pointerness of the slot |offsetFromSP| up from the bottom.
+ bool isGCPointer(size_t offsetFromSP) const {
+ MOZ_ASSERT(offsetFromSP < vec_.length());
+
+ size_t offsetFromTop = vec_.length() - 1 - offsetFromSP;
+ return bool(vec_[offsetFromTop]);
+ }
+
+ // Return the number of words tracked by this MachineStackTracker.
+ size_t length() const { return vec_.length(); }
+
+ // Return the number of pointer-typed words tracked by this
+ // MachineStackTracker.
+ size_t numPtrs() const {
+ MOZ_ASSERT(numPtrs_ <= length());
+ return numPtrs_;
+ }
+
+ // Discard all contents, but (per mozilla::Vector::clear semantics) don't
+ // free or reallocate any dynamic storage associated with |vec_|.
+ void clear() {
+ vec_.clear();
+ numPtrs_ = 0;
+ }
+
+ // An iterator that produces indices of reftyped slots, starting at the
+ // logical bottom of the (grow-down) stack. Indices have the same meaning
+ // as the arguments to `isGCPointer`. That is, if this iterator produces a
+ // value `i`, then it means that `isGCPointer(i) == true`; if the value `i`
+ // is never produced then `isGCPointer(i) == false`. The values are
+ // produced in ascending order.
+ //
+ // Because most slots are non-reftyped, some effort has been put into
+ // skipping over large groups of non-reftyped slots quickly.
+ class Iter {
+ // Both `bufU8_` and `bufU32_` are made to point to `vec_`'s array of
+ // `uint8_t`s, so we can scan (backwards) through it either in bytes or
+ // 32-bit words. Recall that the last element in `vec_` pertains to the
+ // lowest-addressed word in the machine's grow-down stack, and we want to
+ // iterate logically "up" this stack, so we need to iterate backwards
+ // through `vec_`.
+ //
+ // This dual-pointer scheme assumes that `vec_`'s content array is at
+ // least 32-bit aligned.
+ const uint8_t* bufU8_;
+ const uint32_t* bufU32_;
+ // The number of elements in `bufU8_`.
+ const size_t nElems_;
+ // The index in `bufU8_` where the next search should start.
+ size_t next_;
+
+ public:
+ explicit Iter(const MachineStackTracker& mst)
+ : bufU8_((uint8_t*)mst.vec_.begin()),
+ bufU32_((uint32_t*)mst.vec_.begin()),
+ nElems_(mst.vec_.length()),
+ next_(mst.vec_.length() - 1) {
+ MOZ_ASSERT(uintptr_t(bufU8_) == uintptr_t(bufU32_));
+ // Check minimum alignment constraint on the array.
+ MOZ_ASSERT(0 == (uintptr_t(bufU8_) & 3));
+ }
+
+ ~Iter() { MOZ_ASSERT(uintptr_t(bufU8_) == uintptr_t(bufU32_)); }
+
+ // It is important, for termination of the search loop in `next()`, that
+ // this has the value obtained by subtracting 1 from size_t(0).
+ static constexpr size_t FINISHED = ~size_t(0);
+ static_assert(FINISHED == size_t(0) - 1);
+
+ // Returns the next index `i` for which `isGCPointer(i) == true`.
+ size_t get() {
+ while (next_ != FINISHED) {
+ if (bufU8_[next_]) {
+ next_--;
+ return nElems_ - 1 - (next_ + 1);
+ }
+ // Invariant: next_ != FINISHED (so it's still a valid index)
+ // and: bufU8_[next_] == 0
+ // (so we need to move backwards by at least 1)
+ //
+ // BEGIN optimization -- this could be removed without affecting
+ // correctness.
+ if ((next_ & 7) == 0) {
+ // We're at the "bottom" of the current dual-4-element word. Check
+ // if we can jump backwards by 8. This saves a conditional branch
+ // and a few cycles by ORing two adjacent 32-bit words together,
+ // whilst not requiring 64-bit alignment of `bufU32_`.
+ while (next_ >= 8 &&
+ (bufU32_[(next_ - 4) >> 2] | bufU32_[(next_ - 8) >> 2]) == 0) {
+ next_ -= 8;
+ }
+ }
+ // END optimization
+ next_--;
+ }
+ return FINISHED;
+ }
+ };
+};
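+
+// A minimal usage sketch (illustrative only; the real user is the stack-map
+// machinery below, and the slot count and index here are arbitrary):
+//
+//   MachineStackTracker mst;
+//   if (!mst.pushNonGCPointers(4)) {
+//     return false;  // OOM
+//   }
+//   mst.setGCPointer(1);  // the second-most-recently pushed word
+//   MOZ_ASSERT(mst.isGCPointer(1) && mst.numPtrs() == 1);
+//   for (MachineStackTracker::Iter iter(mst);;) {
+//     size_t i = iter.get();
+//     if (i == MachineStackTracker::Iter::FINISHED) {
+//       break;
+//     }
+//     // Produces i == 1 here; with more GC slots, indices arrive in
+//     // ascending order.
+//   }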
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// StackMapGenerator, which carries all state needed to create stackmaps.
+
+enum class HasDebugFrameWithLiveRefs { No, Maybe };
+
+struct StackMapGenerator {
+ private:
+ // --- These are constant for the life of the function's compilation ---
+
+ // For generating stackmaps, we'll need to know the offsets of registers
+ // as saved by the trap exit stub.
+ const RegisterOffsets& trapExitLayout_;
+ const size_t trapExitLayoutNumWords_;
+
+ // Completed stackmaps are added here
+ StackMaps* stackMaps_;
+
+ // So as to be able to get current offset when creating stackmaps
+ const MacroAssembler& masm_;
+
+ public:
+ // --- These are constant once we've completed beginFunction() ---
+
+ // The number of words of arguments passed to this function in memory.
+ size_t numStackArgWords;
+
+ MachineStackTracker machineStackTracker; // tracks machine stack pointerness
+
+ // This holds masm.framePushed at entry to the function's body. It is a
+ // Maybe because createStackMap needs to know whether or not we're still
+ // in the prologue. It makes a Nothing-to-Some transition just once per
+ // function.
+ Maybe<uint32_t> framePushedAtEntryToBody;
+
+ // --- These can change at any point ---
+
+ // This holds masm.framePushed as it would be for a function call
+ // instruction, but excluding the stack area used to pass arguments in
+ // memory. That is, for an upcoming function call, this will hold
+ //
+ // masm.framePushed() at the call instruction -
+ // StackArgAreaSizeUnaligned(argumentTypes)
+ //
+ // This value denotes the lowest-addressed stack word covered by the current
+ // function's stackmap. Words below this point form the highest-addressed
+ // area of the callee's stackmap. Note that all alignment padding above the
+ // arguments-in-memory themselves belongs to the caller's stackmap, which
+ // is why this is defined in terms of StackArgAreaSizeUnaligned() rather than
+ // StackArgAreaSizeAligned().
+ //
+ // When not inside a function call setup/teardown sequence, it is Nothing.
+ // It can make Nothing-to/from-Some transitions arbitrarily as we progress
+ // through the function body.
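+ //
+ // Purely illustrative numbers: if masm.framePushed() at the call is 96 and
+ // StackArgAreaSizeUnaligned(argumentTypes) is 8, this holds Some(88).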
+ Maybe<uint32_t> framePushedExcludingOutboundCallArgs;
+
+ // The number of memory-resident, ref-typed entries on the containing
+ // BaseCompiler::stk_.
+ size_t memRefsOnStk;
+
+ // This is a copy of machineStackTracker that is used only within individual
+ // calls to createStackMap. It is here only to avoid possible heap allocation
+ // costs resulting from making it local to createStackMap().
+ MachineStackTracker augmentedMst;
+
+ StackMapGenerator(StackMaps* stackMaps, const RegisterOffsets& trapExitLayout,
+ const size_t trapExitLayoutNumWords,
+ const MacroAssembler& masm)
+ : trapExitLayout_(trapExitLayout),
+ trapExitLayoutNumWords_(trapExitLayoutNumWords),
+ stackMaps_(stackMaps),
+ masm_(masm),
+ numStackArgWords(0),
+ memRefsOnStk(0) {}
+
+ // At the beginning of a function, we may have live roots in registers (as
+ // arguments) at the point where we perform a stack overflow check. This
+ // method generates the "extra" stackmap entries to describe that, in the
+ // case that the check fails and we wind up calling into the wasm exit
+ // stub, as generated by GenerateTrapExit().
+ //
+ // The resulting map must correspond precisely with the stack layout
+ // created for the integer registers as saved by (code generated by)
+ // GenerateTrapExit(). To do that we use trapExitLayout_ and
+ // trapExitLayoutNumWords_, which together comprise a description of the
+ // layout and are created by GenerateTrapExitRegisterOffsets().
+ [[nodiscard]] bool generateStackmapEntriesForTrapExit(
+ const ArgTypeVector& args, ExitStubMapVector* extras);
+
+ // Creates a stackmap associated with the instruction denoted by
+ // |assemblerOffset|, incorporating pointers from the current operand
+ // stack |stk|, incorporating possible extra pointers in |extra| at the
+ // lower addressed end, and possibly with the associated frame having a
+ // DebugFrame that must be traced, as indicated by |debugFrameWithLiveRefs|.
+ [[nodiscard]] bool createStackMap(
+ const char* who, const ExitStubMapVector& extras,
+ uint32_t assemblerOffset,
+ HasDebugFrameWithLiveRefs debugFrameWithLiveRefs, const StkVector& stk);
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_frame_h
diff --git a/js/src/wasm/WasmBCMemory.cpp b/js/src/wasm/WasmBCMemory.cpp
new file mode 100644
index 0000000000..a8777a4217
--- /dev/null
+++ b/js/src/wasm/WasmBCMemory.cpp
@@ -0,0 +1,2799 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBCClass.h"
+#include "wasm/WasmBCDefs.h"
+#include "wasm/WasmBCRegDefs.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+#include "wasm/WasmBCClass-inl.h"
+#include "wasm/WasmBCCodegen-inl.h"
+#include "wasm/WasmBCRegDefs-inl.h"
+#include "wasm/WasmBCRegMgmt-inl.h"
+#include "wasm/WasmBCStkMgmt-inl.h"
+
+namespace js {
+namespace wasm {
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Heap access subroutines.
+
+// Bounds check elimination.
+//
+// We perform BCE on two kinds of address expressions: on constant heap pointers
+// that are known to be in the heap or will be handled by the out-of-bounds trap
+// handler; and on local variables that have been checked in dominating code
+// without being updated since.
+//
+// For an access through a constant heap pointer + an offset we can eliminate
+// the bounds check if the sum of the address and offset is below the sum of the
+// minimum memory length and the offset guard length.
+//
+// For an access through a local variable + an offset we can eliminate the
+// bounds check if the local variable has already been checked and has not been
+// updated since, and the offset is less than the guard limit.
+//
+// To track locals for which we can eliminate checks we use a bit vector
+// bceSafe_ that has a bit set for those locals whose bounds have been checked
+// and which have not subsequently been set. Initially this vector is zero.
+//
+// In straight-line code a bit is set when we perform a bounds check on an
+// access via the local and is reset when the variable is updated.
+//
+// In control flow, the bit vector is manipulated as follows. Each ControlItem
+// has a value bceSafeOnEntry, which is the value of bceSafe_ on entry to the
+// item, and a value bceSafeOnExit, which is initially ~0. On a branch (br,
+// brIf, brTable), we always AND the branch target's bceSafeOnExit with the
+// value of bceSafe_ at the branch point. On exiting an item by falling out of
+// it, provided we're not in dead code, we AND the current value of bceSafe_
+// into the item's bceSafeOnExit. Additional processing depends on the item
+// type:
+//
+// - After a block, set bceSafe_ to the block's bceSafeOnExit.
+//
+// - On loop entry, after pushing the ControlItem, set bceSafe_ to zero; the
+//   back edges would otherwise require us to iterate to a fixed point.
+//
+// - After a loop, the bceSafe_ is left unchanged, because only fallthrough
+// control flow will reach that point and the bceSafe_ value represents the
+// correct state of the fallthrough path.
+//
+// - Set bceSafe_ to the ControlItem's bceSafeOnEntry at both the 'then' branch
+// and the 'else' branch.
+//
+// - After an if-then-else, set bceSafe_ to the if-then-else's bceSafeOnExit.
+//
+// - After an if-then, set bceSafe_ to the if-then's bceSafeOnExit AND'ed with
+// the if-then's bceSafeOnEntry.
+//
+// Finally, when the debugger allows locals to be mutated we must disable BCE
+// for references via a local, by returning immediately from bceCheckLocal if
+// compilerEnv_.debugEnabled() is true.
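+//
+// As a concrete illustration (hypothetical local and offsets, not taken from
+// any particular module; assumes debugging is off and the offsets are below
+// the guard limit):
+//
+//   (i32.load offset=4 (local.get $p))  ;; $p not yet safe; its bceSafe_ bit is set
+//   (i32.load offset=8 (local.get $p))  ;; bit set: this access omits its bounds check
+//   (local.set $p ...)                  ;; the update clears the bit for $p
+//   (i32.load (local.get $p))           ;; $p must be re-checked before it is safe again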
+
+void BaseCompiler::bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check,
+ uint32_t local) {
+ if (local >= sizeof(BCESet) * 8) {
+ return;
+ }
+
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ if ((bceSafe_ & (BCESet(1) << local)) &&
+ access->offset64() < offsetGuardLimit) {
+ check->omitBoundsCheck = true;
+ }
+
+ // The local becomes safe even if the offset is beyond the guard limit.
+ bceSafe_ |= (BCESet(1) << local);
+}
+
+void BaseCompiler::bceLocalIsUpdated(uint32_t local) {
+ if (local >= sizeof(BCESet) * 8) {
+ return;
+ }
+
+ bceSafe_ &= ~(BCESet(1) << local);
+}
+
+// Alignment check elimination.
+//
+// Alignment checks for atomic operations can be omitted if the pointer is a
+// constant and the pointer + offset is aligned. Alignment checking that can't
+// be omitted can still be simplified by checking only the pointer if the offset
+// is aligned.
+//
+// (In addition, alignment checking of the pointer can be omitted if the pointer
+// has been checked in dominating code, but we don't do that yet.)
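+//
+// For instance (hypothetical numbers): an 8-byte atomic access with constant
+// pointer 16 and offset 8 touches address 24, which is 8-byte aligned, so the
+// alignment check is omitted entirely; with a non-constant pointer and an
+// 8-byte-aligned offset, only the pointer's low bits need to be tested,
+// because the offset cannot disturb the alignment.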
+
+template <>
+RegI32 BaseCompiler::popConstMemoryAccess<RegI32>(MemoryAccessDesc* access,
+ AccessCheck* check) {
+ int32_t addrTemp;
+ MOZ_ALWAYS_TRUE(popConst(&addrTemp));
+ uint32_t addr = addrTemp;
+
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ uint64_t ea = uint64_t(addr) + uint64_t(access->offset());
+ uint64_t limit = moduleEnv_.memory->initialLength32() + offsetGuardLimit;
+
+ check->omitBoundsCheck = ea < limit;
+ check->omitAlignmentCheck = (ea & (access->byteSize() - 1)) == 0;
+
+ // Fold the offset into the pointer if we can, as this is always
+ // beneficial.
+ if (ea <= UINT32_MAX) {
+ addr = uint32_t(ea);
+ access->clearOffset();
+ }
+
+ RegI32 r = needI32();
+ moveImm32(int32_t(addr), r);
+ return r;
+}
+
+#ifdef ENABLE_WASM_MEMORY64
+template <>
+RegI64 BaseCompiler::popConstMemoryAccess<RegI64>(MemoryAccessDesc* access,
+ AccessCheck* check) {
+ int64_t addrTemp;
+ MOZ_ALWAYS_TRUE(popConst(&addrTemp));
+ uint64_t addr = addrTemp;
+
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ uint64_t ea = addr + access->offset64();
+ bool overflow = ea < addr;
+ uint64_t limit = moduleEnv_.memory->initialLength64() + offsetGuardLimit;
+
+ if (!overflow) {
+ check->omitBoundsCheck = ea < limit;
+ check->omitAlignmentCheck = (ea & (access->byteSize() - 1)) == 0;
+
+ // Fold the offset into the pointer if we can, as this is always
+ // beneficial.
+ addr = uint64_t(ea);
+ access->clearOffset();
+ }
+
+ RegI64 r = needI64();
+ moveImm64(int64_t(addr), r);
+ return r;
+}
+#endif
+
+template <typename RegType>
+RegType BaseCompiler::popMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check) {
+ check->onlyPointerAlignment =
+ (access->offset64() & (access->byteSize() - 1)) == 0;
+
+ // If there's a constant it will have the correct type for RegType.
+ if (hasConst()) {
+ return popConstMemoryAccess<RegType>(access, check);
+ }
+
+ // If there's a local it will have the correct type for RegType.
+ uint32_t local;
+ if (peekLocal(&local)) {
+ bceCheckLocal(access, check, local);
+ }
+
+ return pop<RegType>();
+}
+
+#ifdef JS_64BIT
+static inline RegI64 RegPtrToRegIntptr(RegPtr r) {
+ return RegI64(Register64(Register(r)));
+}
+
+# ifndef WASM_HAS_HEAPREG
+static inline RegPtr RegIntptrToRegPtr(RegI64 r) {
+ return RegPtr(Register64(r).reg);
+}
+# endif
+#else
+static inline RegI32 RegPtrToRegIntptr(RegPtr r) { return RegI32(Register(r)); }
+
+# ifndef WASM_HAS_HEAPREG
+static inline RegPtr RegIntptrToRegPtr(RegI32 r) { return RegPtr(Register(r)); }
+# endif
+#endif
+
+#ifdef WASM_HAS_HEAPREG
+void BaseCompiler::pushHeapBase() {
+ RegPtr heapBase = need<RegPtr>();
+ move(RegPtr(HeapReg), heapBase);
+ push(RegPtrToRegIntptr(heapBase));
+}
+#else
+void BaseCompiler::pushHeapBase() {
+ RegPtr heapBase = need<RegPtr>();
+# ifdef RABALDR_PIN_INSTANCE
+ movePtr(RegPtr(InstanceReg), heapBase);
+# else
+ fr.loadInstancePtr(heapBase);
+# endif
+ masm.loadPtr(Address(heapBase, Instance::offsetOfMemoryBase()), heapBase);
+ push(RegPtrToRegIntptr(heapBase));
+}
+#endif
+
+void BaseCompiler::branchAddNoOverflow(uint64_t offset, RegI32 ptr, Label* ok) {
+ // The invariant holds because ptr is RegI32 - this is m32.
+ MOZ_ASSERT(offset <= UINT32_MAX);
+ masm.branchAdd32(Assembler::CarryClear, Imm32(uint32_t(offset)), ptr, ok);
+}
+
+#ifdef ENABLE_WASM_MEMORY64
+void BaseCompiler::branchAddNoOverflow(uint64_t offset, RegI64 ptr, Label* ok) {
+# if defined(JS_64BIT)
+ masm.branchAddPtr(Assembler::CarryClear, ImmWord(offset), Register64(ptr).reg,
+ ok);
+# else
+ masm.branchAdd64(Assembler::CarryClear, Imm64(offset), ptr, ok);
+# endif
+}
+#endif
+
+void BaseCompiler::branchTestLowZero(RegI32 ptr, Imm32 mask, Label* ok) {
+ masm.branchTest32(Assembler::Zero, ptr, mask, ok);
+}
+
+#ifdef ENABLE_WASM_MEMORY64
+void BaseCompiler::branchTestLowZero(RegI64 ptr, Imm32 mask, Label* ok) {
+# ifdef JS_64BIT
+ masm.branchTestPtr(Assembler::Zero, Register64(ptr).reg, mask, ok);
+# else
+ masm.branchTestPtr(Assembler::Zero, ptr.low, mask, ok);
+# endif
+}
+#endif
+
+void BaseCompiler::boundsCheck4GBOrLargerAccess(RegPtr instance, RegI32 ptr,
+ Label* ok) {
+#ifdef JS_64BIT
+ // Extend the value to 64 bits, check the 64-bit value against the 64-bit
+ // bound, then chop back to 32 bits. On most platform the extending and
+ // chopping are no-ops. It's important that the value we end up with has
+ // flowed through the Spectre mask.
+
+ // Note, ptr and ptr64 are the same register.
+ RegI64 ptr64 = fromI32(ptr);
+
+ // In principle there may be non-zero bits in the upper bits of the
+ // register; clear them.
+# ifdef RABALDR_ZERO_EXTENDS
+ masm.debugAssertCanonicalInt32(ptr);
+# else
+ masm.move32To64ZeroExtend(ptr, ptr64);
+# endif
+
+ boundsCheck4GBOrLargerAccess(instance, ptr64, ok);
+
+ // Restore the value to the canonical form for a 32-bit value in a
+ // 64-bit register and/or the appropriate form for further use in the
+ // indexing instruction.
+# ifdef RABALDR_ZERO_EXTENDS
+ // The canonical value is zero-extended; we already have that.
+# else
+ masm.move64To32(ptr64, ptr);
+# endif
+#else
+ // No support needed; we have at most a 2GB heap on 32-bit.
+ MOZ_CRASH("No 32-bit support");
+#endif
+}
+
+void BaseCompiler::boundsCheckBelow4GBAccess(RegPtr instance, RegI32 ptr,
+ Label* ok) {
+ // If the memory's max size is known to be smaller than 64K pages exactly,
+ // we can use a 32-bit check and avoid extension and wrapping.
+ masm.wasmBoundsCheck32(
+ Assembler::Below, ptr,
+ Address(instance, Instance::offsetOfBoundsCheckLimit()), ok);
+}
+
+void BaseCompiler::boundsCheck4GBOrLargerAccess(RegPtr instance, RegI64 ptr,
+ Label* ok) {
+ // Any Spectre mitigation will appear to update the ptr64 register.
+ masm.wasmBoundsCheck64(
+ Assembler::Below, ptr,
+ Address(instance, Instance::offsetOfBoundsCheckLimit()), ok);
+}
+
+void BaseCompiler::boundsCheckBelow4GBAccess(RegPtr instance, RegI64 ptr,
+ Label* ok) {
+ // The bounds check limit is valid to 64 bits, so there's no sense in doing
+ // anything complicated here. There may be optimization paths here in the
+ // future and they may differ on 32-bit and 64-bit.
+ boundsCheck4GBOrLargerAccess(instance, ptr, ok);
+}
+
+// Make sure the ptr could be used as an index register.
+static inline void ToValidIndex(MacroAssembler& masm, RegI32 ptr) {
+#if defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64) || \
+ defined(JS_CODEGEN_RISCV64)
+ // When ptr is used as an index, it will be added to a 64-bit register.
+ // So we should explicitly promote ptr to 64-bit. Since ptr now holds an
+ // unsigned 32-bit value, we zero-extend it to 64-bit here.
+ masm.move32To64ZeroExtend(ptr, Register64(ptr));
+#endif
+}
+
+#if defined(ENABLE_WASM_MEMORY64)
+static inline void ToValidIndex(MacroAssembler& masm, RegI64 ptr) {}
+#endif
+
+// RegIndexType is RegI32 for Memory32 and RegI64 for Memory64.
+template <typename RegIndexType>
+void BaseCompiler::prepareMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check, RegPtr instance,
+ RegIndexType ptr) {
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ // Fold offset if necessary for further computations.
+ if (access->offset64() >= offsetGuardLimit ||
+ access->offset64() > UINT32_MAX ||
+ (access->isAtomic() && !check->omitAlignmentCheck &&
+ !check->onlyPointerAlignment)) {
+ Label ok;
+ branchAddNoOverflow(access->offset64(), ptr, &ok);
+ masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&ok);
+ access->clearOffset();
+ check->onlyPointerAlignment = true;
+ }
+
+ // Alignment check if required.
+
+ if (access->isAtomic() && !check->omitAlignmentCheck) {
+ MOZ_ASSERT(check->onlyPointerAlignment);
+ // We only care about the low pointer bits here.
+ Label ok;
+ branchTestLowZero(ptr, Imm32(access->byteSize() - 1), &ok);
+ masm.wasmTrap(Trap::UnalignedAccess, bytecodeOffset());
+ masm.bind(&ok);
+ }
+
+ // Ensure no instance if we don't need it.
+
+ if (moduleEnv_.hugeMemoryEnabled()) {
+ // We have HeapReg and no bounds checking, and we need to load neither
+ // memoryBase nor boundsCheckLimit from the instance.
+ MOZ_ASSERT_IF(check->omitBoundsCheck, instance.isInvalid());
+ }
+#ifdef WASM_HAS_HEAPREG
+ // We have HeapReg and don't need to load the memoryBase from instance.
+ MOZ_ASSERT_IF(check->omitBoundsCheck, instance.isInvalid());
+#endif
+
+ // Bounds check if required.
+
+ if (!moduleEnv_.hugeMemoryEnabled() && !check->omitBoundsCheck) {
+ Label ok;
+#ifdef JS_64BIT
+ // The checking depends on how many bits are in the pointer and how many
+ // bits are in the bound.
+ static_assert(0x100000000 % PageSize == 0);
+ if (!moduleEnv_.memory->boundsCheckLimitIs32Bits() &&
+ MaxMemoryPages(moduleEnv_.memory->indexType()) >=
+ Pages(0x100000000 / PageSize)) {
+ boundsCheck4GBOrLargerAccess(instance, ptr, &ok);
+ } else {
+ boundsCheckBelow4GBAccess(instance, ptr, &ok);
+ }
+#else
+ boundsCheckBelow4GBAccess(instance, ptr, &ok);
+#endif
+ masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&ok);
+ }
+
+ ToValidIndex(masm, ptr);
+}
+
+template <typename RegIndexType>
+void BaseCompiler::computeEffectiveAddress(MemoryAccessDesc* access) {
+ if (access->offset()) {
+ Label ok;
+ RegIndexType ptr = pop<RegIndexType>();
+ branchAddNoOverflow(access->offset64(), ptr, &ok);
+ masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&ok);
+ access->clearOffset();
+ push(ptr);
+ }
+}
+
+bool BaseCompiler::needInstanceForAccess(const AccessCheck& check) {
+#ifndef WASM_HAS_HEAPREG
+ // Platform requires instance for memory base.
+ return true;
+#else
+ return !moduleEnv_.hugeMemoryEnabled() && !check.omitBoundsCheck;
+#endif
+}
+
+RegPtr BaseCompiler::maybeLoadInstanceForAccess(const AccessCheck& check) {
+ if (needInstanceForAccess(check)) {
+#ifdef RABALDR_PIN_INSTANCE
+ // NOTE, returning InstanceReg here depends for correctness on *ALL*
+ // clients not attempting to free this register and not pushing it on the
+ // value stack.
+ //
+ // We have assertions in place to guard against that, so the risk of the
+ // leaky abstraction is acceptable. performRegisterLeakCheck() will ensure
+ // that after every bytecode, the union of available registers from the
+ // regalloc and used registers from the stack equals the set of allocatable
+ // registers at startup. Thus if the instance is freed incorrectly it will
+ // end up in that union via the regalloc, and if it is pushed incorrectly it
+ // will end up in the union via the stack.
+ return RegPtr(InstanceReg);
+#else
+ RegPtr instance = need<RegPtr>();
+ fr.loadInstancePtr(instance);
+ return instance;
+#endif
+ }
+ return RegPtr::Invalid();
+}
+
+RegPtr BaseCompiler::maybeLoadInstanceForAccess(const AccessCheck& check,
+ RegPtr specific) {
+ if (needInstanceForAccess(check)) {
+#ifdef RABALDR_PIN_INSTANCE
+ movePtr(RegPtr(InstanceReg), specific);
+#else
+ fr.loadInstancePtr(specific);
+#endif
+ return specific;
+ }
+ return RegPtr::Invalid();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Load and store.
+
+void BaseCompiler::executeLoad(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI32 ptr, AnyReg dest,
+ RegI32 temp) {
+ // Emit the load. At this point, 64-bit offsets will have been resolved.
+#if defined(JS_CODEGEN_X64)
+ MOZ_ASSERT(temp.isInvalid());
+ Operand srcAddr(HeapReg, ptr, TimesOne, access->offset());
+
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, srcAddr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, srcAddr, dest.any());
+ }
+#elif defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(temp.isInvalid());
+ masm.addPtr(Address(instance, Instance::offsetOfMemoryBase()), ptr);
+ Operand srcAddr(ptr, access->offset());
+
+ if (dest.tag == AnyReg::I64) {
+ MOZ_ASSERT(dest.i64() == specific_.abiReturnRegI64);
+ masm.wasmLoadI64(*access, srcAddr, dest.i64());
+ } else {
+ // For 8 bit loads, this will generate movsbl or movzbl, so
+ // there's no constraint on what the output register may be.
+ masm.wasmLoad(*access, srcAddr, dest.any());
+ }
+#elif defined(JS_CODEGEN_MIPS64)
+ if (IsUnaligned(*access)) {
+ switch (dest.tag) {
+ case AnyReg::I64:
+ masm.wasmUnalignedLoadI64(*access, HeapReg, ptr, ptr, dest.i64(), temp);
+ break;
+ case AnyReg::F32:
+ masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f32(), temp);
+ break;
+ case AnyReg::F64:
+ masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f64(), temp);
+ break;
+ case AnyReg::I32:
+ masm.wasmUnalignedLoad(*access, HeapReg, ptr, ptr, dest.i32(), temp);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, HeapReg, ptr, ptr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, HeapReg, ptr, ptr, dest.any());
+ }
+ }
+#elif defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(temp.isInvalid());
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, HeapReg, ptr, ptr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, HeapReg, ptr, ptr, dest.any());
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(temp.isInvalid());
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, HeapReg, ptr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, HeapReg, ptr, dest.any());
+ }
+#elif defined(JS_CODEGEN_LOONG64)
+ MOZ_ASSERT(temp.isInvalid());
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, HeapReg, ptr, ptr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, HeapReg, ptr, ptr, dest.any());
+ }
+#elif defined(JS_CODEGEN_RISCV64)
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+#else
+ MOZ_CRASH("BaseCompiler platform hook: load");
+#endif
+}
+
+// ptr and dest may be the same iff dest is I32.
+// This may destroy ptr even if ptr and dest are not the same.
+void BaseCompiler::load(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI32 ptr, AnyReg dest, RegI32 temp) {
+ prepareMemoryAccess(access, check, instance, ptr);
+ executeLoad(access, check, instance, ptr, dest, temp);
+}
+
+#ifdef ENABLE_WASM_MEMORY64
+void BaseCompiler::load(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI64 ptr, AnyReg dest, RegI64 temp) {
+ prepareMemoryAccess(access, check, instance, ptr);
+
+# if !defined(JS_64BIT)
+ // On 32-bit systems we have a maximum 2GB heap and bounds checking has
+ // been applied to ensure that the 64-bit pointer is valid.
+ return executeLoad(access, check, instance, RegI32(ptr.low), dest,
+ maybeFromI64(temp));
+# elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
+ // On x64 and arm64 the 32-bit code simply assumes that the high bits of the
+ // 64-bit pointer register are zero and performs a 64-bit add. Thus the code
+ // generated is the same for the 64-bit and the 32-bit case.
+ return executeLoad(access, check, instance, RegI32(ptr.reg), dest,
+ maybeFromI64(temp));
+# elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+ // On mips64 and loongarch64, the 'prepareMemoryAccess' function will make
+ // sure that ptr holds a valid 64-bit index value. Thus the code generated in
+ // 'executeLoad' is the same for the 64-bit and the 32-bit case.
+ return executeLoad(access, check, instance, RegI32(ptr.reg), dest,
+ maybeFromI64(temp));
+# else
+ MOZ_CRASH("Missing platform hook");
+# endif
+}
+#endif
+
+void BaseCompiler::executeStore(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI32 ptr, AnyReg src,
+ RegI32 temp) {
+ // Emit the store. At this point, 64-bit offsets will have been resolved.
+#if defined(JS_CODEGEN_X64)
+ MOZ_ASSERT(temp.isInvalid());
+ Operand dstAddr(HeapReg, ptr, TimesOne, access->offset());
+
+ masm.wasmStore(*access, src.any(), dstAddr);
+#elif defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(temp.isInvalid());
+ masm.addPtr(Address(instance, Instance::offsetOfMemoryBase()), ptr);
+ Operand dstAddr(ptr, access->offset());
+
+ if (access->type() == Scalar::Int64) {
+ masm.wasmStoreI64(*access, src.i64(), dstAddr);
+ } else {
+ AnyRegister value;
+ ScratchI8 scratch(*this);
+ if (src.tag == AnyReg::I64) {
+ if (access->byteSize() == 1 && !ra.isSingleByteI32(src.i64().low)) {
+ masm.mov(src.i64().low, scratch);
+ value = AnyRegister(scratch);
+ } else {
+ value = AnyRegister(src.i64().low);
+ }
+ } else if (access->byteSize() == 1 && !ra.isSingleByteI32(src.i32())) {
+ masm.mov(src.i32(), scratch);
+ value = AnyRegister(scratch);
+ } else {
+ value = src.any();
+ }
+
+ masm.wasmStore(*access, value, dstAddr);
+ }
+#elif defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(temp.isInvalid());
+ if (access->type() == Scalar::Int64) {
+ masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
+ } else if (src.tag == AnyReg::I64) {
+ masm.wasmStore(*access, AnyRegister(src.i64().low), HeapReg, ptr, ptr);
+ } else {
+ masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
+ }
+#elif defined(JS_CODEGEN_MIPS64)
+ if (IsUnaligned(*access)) {
+ switch (src.tag) {
+ case AnyReg::I64:
+ masm.wasmUnalignedStoreI64(*access, src.i64(), HeapReg, ptr, ptr, temp);
+ break;
+ case AnyReg::F32:
+ masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr, temp);
+ break;
+ case AnyReg::F64:
+ masm.wasmUnalignedStoreFP(*access, src.f64(), HeapReg, ptr, ptr, temp);
+ break;
+ case AnyReg::I32:
+ masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr, temp);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ if (src.tag == AnyReg::I64) {
+ masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
+ } else {
+ masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
+ }
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(temp.isInvalid());
+ if (access->type() == Scalar::Int64) {
+ masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr);
+ } else {
+ masm.wasmStore(*access, src.any(), HeapReg, ptr);
+ }
+#elif defined(JS_CODEGEN_LOONG64)
+ MOZ_ASSERT(temp.isInvalid());
+ if (access->type() == Scalar::Int64) {
+ masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
+ } else {
+ masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
+ }
+#else
+ MOZ_CRASH("BaseCompiler platform hook: store");
+#endif
+}
+
+// ptr and src must not be the same register.
+// This may destroy ptr and src.
+void BaseCompiler::store(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI32 ptr, AnyReg src, RegI32 temp) {
+ prepareMemoryAccess(access, check, instance, ptr);
+ executeStore(access, check, instance, ptr, src, temp);
+}
+
+#ifdef ENABLE_WASM_MEMORY64
+void BaseCompiler::store(MemoryAccessDesc* access, AccessCheck* check,
+ RegPtr instance, RegI64 ptr, AnyReg src, RegI64 temp) {
+ prepareMemoryAccess(access, check, instance, ptr);
+ // See comments in load()
+# if !defined(JS_64BIT)
+ return executeStore(access, check, instance, RegI32(ptr.low), src,
+ maybeFromI64(temp));
+# elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+ return executeStore(access, check, instance, RegI32(ptr.reg), src,
+ maybeFromI64(temp));
+# else
+ MOZ_CRASH("Missing platform hook");
+# endif
+}
+#endif
+
+template <typename RegType>
+void BaseCompiler::doLoadCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType type) {
+ RegPtr instance;
+ RegType temp;
+#if defined(JS_CODEGEN_MIPS64)
+ temp = need<RegType>();
+#endif
+
+ switch (type.kind()) {
+ case ValType::I32: {
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ RegI32 rv = needI32();
+ instance = maybeLoadInstanceForAccess(check);
+ load(access, &check, instance, rp, AnyReg(rv), temp);
+ push(rv);
+ free(rp);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv;
+ RegType rp;
+#ifdef JS_CODEGEN_X86
+ rv = specific_.abiReturnRegI64;
+ needI64(rv);
+ rp = popMemoryAccess<RegType>(access, &check);
+#else
+ rp = popMemoryAccess<RegType>(access, &check);
+ rv = needI64();
+#endif
+ instance = maybeLoadInstanceForAccess(check);
+ load(access, &check, instance, rp, AnyReg(rv), temp);
+ push(rv);
+ free(rp);
+ break;
+ }
+ case ValType::F32: {
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ RegF32 rv = needF32();
+ instance = maybeLoadInstanceForAccess(check);
+ load(access, &check, instance, rp, AnyReg(rv), temp);
+ push(rv);
+ free(rp);
+ break;
+ }
+ case ValType::F64: {
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ RegF64 rv = needF64();
+ instance = maybeLoadInstanceForAccess(check);
+ load(access, &check, instance, rp, AnyReg(rv), temp);
+ push(rv);
+ free(rp);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ RegV128 rv = needV128();
+ instance = maybeLoadInstanceForAccess(check);
+ load(access, &check, instance, rp, AnyReg(rv), temp);
+ push(rv);
+ free(rp);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("load type");
+ break;
+ }
+
+#ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+#endif
+ maybeFree(temp);
+}
+
+void BaseCompiler::loadCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType type) {
+ if (isMem32()) {
+ doLoadCommon<RegI32>(access, check, type);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ doLoadCommon<RegI64>(access, check, type);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+}
+
+template <typename RegType>
+void BaseCompiler::doStoreCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType resultType) {
+ RegPtr instance;
+ RegType temp;
+#if defined(JS_CODEGEN_MIPS64)
+ temp = need<RegType>();
+#endif
+
+ switch (resultType.kind()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ instance = maybeLoadInstanceForAccess(check);
+ store(access, &check, instance, rp, AnyReg(rv), temp);
+ free(rp);
+ free(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ instance = maybeLoadInstanceForAccess(check);
+ store(access, &check, instance, rp, AnyReg(rv), temp);
+ free(rp);
+ free(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ instance = maybeLoadInstanceForAccess(check);
+ store(access, &check, instance, rp, AnyReg(rv), temp);
+ free(rp);
+ free(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ instance = maybeLoadInstanceForAccess(check);
+ store(access, &check, instance, rp, AnyReg(rv), temp);
+ free(rp);
+ free(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 rv = popV128();
+ RegType rp = popMemoryAccess<RegType>(access, &check);
+ instance = maybeLoadInstanceForAccess(check);
+ store(access, &check, instance, rp, AnyReg(rv), temp);
+ free(rp);
+ free(rv);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("store type");
+ break;
+ }
+
+#ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+#endif
+ maybeFree(temp);
+}
+
+void BaseCompiler::storeCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType type) {
+ if (isMem32()) {
+ doStoreCommon<RegI32>(access, check, type);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ doStoreCommon<RegI64>(access, check, type);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+}
+
+// Convert something that may contain a heap index into a Register that can be
+// used in an access.
+
+static inline Register ToRegister(RegI32 r) { return Register(r); }
+#ifdef ENABLE_WASM_MEMORY64
+# ifdef JS_PUNBOX64
+static inline Register ToRegister(RegI64 r) { return r.reg; }
+# else
+static inline Register ToRegister(RegI64 r) { return r.low; }
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Atomic operations.
+//
+// The atomic operations have very diverse per-platform needs for register
+// allocation and temps. To handle that, the implementations are structured as
+// a per-operation framework method that calls into platform-specific helpers
+// (usually called PopAndAllocate, Perform, and Deallocate) in a per-operation
+// namespace. This structure results in a little duplication and boilerplate
+// but is otherwise clean and flexible and keeps code and supporting definitions
+// entirely co-located.
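+//
+// Schematically (a simplified sketch; the real helpers below vary in their
+// exact names and signatures per operation and per platform):
+//
+//   namespace atomic_something {
+//     static void PopAndAllocate(BaseCompiler* bc, ...);  // pop operands, choose regs/temps
+//     template <typename T>
+//     static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+//                         T memaddr, ...);                // emit the masm operation
+//     static void Deallocate(BaseCompiler* bc, ...);      // release temps
+//   }  // namespace atomic_something
+//
+// with the shared framework method handling the memory-access setup and
+// pushing the result.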
+
+#ifdef WASM_HAS_HEAPREG
+
+// RegIndexType is RegI32 for Memory32 and RegI64 for Memory64.
+template <typename RegIndexType>
+BaseIndex BaseCompiler::prepareAtomicMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check,
+ RegPtr instance,
+ RegIndexType ptr) {
+ MOZ_ASSERT(needInstanceForAccess(*check) == instance.isValid());
+ prepareMemoryAccess(access, check, instance, ptr);
+ // At this point, 64-bit offsets will have been resolved.
+ return BaseIndex(HeapReg, ToRegister(ptr), TimesOne, access->offset());
+}
+
+#else
+
+// Some consumers depend on the returned Address not incorporating instance, as
+// instance may be the scratch register.
+//
+// RegIndexType is RegI32 for Memory32 and RegI64 for Memory64.
+template <typename RegIndexType>
+Address BaseCompiler::prepareAtomicMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check,
+ RegPtr instance,
+ RegIndexType ptr) {
+ MOZ_ASSERT(needInstanceForAccess(*check) == instance.isValid());
+ prepareMemoryAccess(access, check, instance, ptr);
+ masm.addPtr(Address(instance, Instance::offsetOfMemoryBase()),
+ ToRegister(ptr));
+ // At this point, 64-bit offsets will have been resolved.
+ return Address(ToRegister(ptr), access->offset());
+}
+
+#endif
+
+#ifndef WASM_HAS_HEAPREG
+# ifdef JS_CODEGEN_X86
+using ScratchAtomicNoHeapReg = ScratchEBX;
+# else
+# error "Unimplemented porting interface"
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Atomic load and store.
+
+namespace atomic_load64 {
+
+#ifdef JS_CODEGEN_ARM
+
+static void Allocate(BaseCompiler* bc, RegI64* rd, RegI64*) {
+ *rd = bc->needI64Pair();
+}
+
+static void Deallocate(BaseCompiler* bc, RegI64) {}
+
+#elif defined JS_CODEGEN_X86
+
+static void Allocate(BaseCompiler* bc, RegI64* rd, RegI64* temp) {
+ // The result is in edx:eax, and we need ecx:ebx as a temp. But ebx will also
+ // be used as a scratch, so don't manage that here.
+ bc->needI32(bc->specific_.ecx);
+ *temp = bc->specific_.ecx_ebx;
+ bc->needI64(bc->specific_.edx_eax);
+ *rd = bc->specific_.edx_eax;
+}
+
+static void Deallocate(BaseCompiler* bc, RegI64 temp) {
+ // See comment above.
+ MOZ_ASSERT(temp.high == js::jit::ecx);
+ bc->freeI32(bc->specific_.ecx);
+}
+
+#elif defined(__wasi__) || (defined(JS_CODEGEN_NONE) && !defined(JS_64BIT))
+
+static void Allocate(BaseCompiler*, RegI64*, RegI64*) {}
+static void Deallocate(BaseCompiler*, RegI64) {}
+
+#endif
+
+} // namespace atomic_load64
+
+#if !defined(JS_64BIT)
+template <typename RegIndexType>
+void BaseCompiler::atomicLoad64(MemoryAccessDesc* access) {
+ RegI64 rd, temp;
+ atomic_load64::Allocate(this, &rd, &temp);
+
+ AccessCheck check;
+ RegIndexType rp = popMemoryAccess<RegIndexType>(access, &check);
+
+# ifdef WASM_HAS_HEAPREG
+ RegPtr instance = maybeLoadInstanceForAccess(check);
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ masm.wasmAtomicLoad64(*access, memaddr, temp, rd);
+# ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+# endif
+# else
+ ScratchAtomicNoHeapReg scratch(*this);
+ RegPtr instance =
+ maybeLoadInstanceForAccess(check, RegIntptrToRegPtr(scratch));
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ masm.wasmAtomicLoad64(*access, memaddr, temp, rd);
+ MOZ_ASSERT(instance == scratch);
+# endif
+
+ free(rp);
+ atomic_load64::Deallocate(this, temp);
+ pushI64(rd);
+}
+#endif
+
+void BaseCompiler::atomicLoad(MemoryAccessDesc* access, ValType type) {
+ Scalar::Type viewType = access->type();
+ if (Scalar::byteSize(viewType) <= sizeof(void*)) {
+ loadCommon(access, AccessCheck(), type);
+ return;
+ }
+
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#if !defined(JS_64BIT)
+ if (isMem32()) {
+ atomicLoad64<RegI32>(access);
+ } else {
+# ifdef ENABLE_WASM_MEMORY64
+ atomicLoad64<RegI64>(access);
+# else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+# endif
+ }
+#else
+ MOZ_CRASH("Should not happen");
+#endif
+}
+
+void BaseCompiler::atomicStore(MemoryAccessDesc* access, ValType type) {
+ Scalar::Type viewType = access->type();
+
+ if (Scalar::byteSize(viewType) <= sizeof(void*)) {
+ storeCommon(access, AccessCheck(), type);
+ return;
+ }
+
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
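+  // On 32-bit platforms a 64-bit atomic store cannot be a plain store; it is
+  // implemented below as an atomic exchange whose result is discarded.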
+#if !defined(JS_64BIT)
+ if (isMem32()) {
+ atomicXchg64<RegI32>(access, WantResult(false));
+ } else {
+# ifdef ENABLE_WASM_MEMORY64
+ atomicXchg64<RegI64>(access, WantResult(false));
+# else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+# endif
+ }
+#else
+ MOZ_CRASH("Should not happen");
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Atomic RMW op= operations.
+
+void BaseCompiler::atomicRMW(MemoryAccessDesc* access, ValType type,
+ AtomicOp op) {
+ Scalar::Type viewType = access->type();
+ if (Scalar::byteSize(viewType) <= 4) {
+ if (isMem32()) {
+ atomicRMW32<RegI32>(access, type, op);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ atomicRMW32<RegI64>(access, type, op);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+ } else {
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+ if (isMem32()) {
+ atomicRMW64<RegI32>(access, type, op);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ atomicRMW64<RegI64>(access, type, op);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+ }
+}
+
+namespace atomic_rmw32 {
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+
+struct Temps {
+  // On x86 we use the ScratchI32 for the temp; otherwise we'd run out of
+ // registers for 64-bit operations.
+# if defined(JS_CODEGEN_X64)
+ RegI32 t0;
+# endif
+};
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, AtomicOp op, RegI32* rd,
+ RegI32* rv, Temps* temps) {
+ bc->needI32(bc->specific_.eax);
+ if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ // We use xadd, so source and destination are the same. Using
+ // eax here is overconstraining, but for byte operations on x86
+ // we do need something with a byte register.
+ if (type == ValType::I64) {
+ *rv = bc->popI64ToSpecificI32(bc->specific_.eax);
+ } else {
+ *rv = bc->popI32ToSpecific(bc->specific_.eax);
+ }
+ *rd = *rv;
+ } else {
+ // We use a cmpxchg loop. The output must be eax; the input
+ // must be in a separate register since it may be used several
+ // times.
+ if (type == ValType::I64) {
+ *rv = bc->popI64ToI32();
+ } else {
+ *rv = bc->popI32();
+ }
+ *rd = bc->specific_.eax;
+# ifdef JS_CODEGEN_X64
+ temps->t0 = bc->needI32();
+# endif
+ }
+}
+
+template <typename T>
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access, T srcAddr,
+ AtomicOp op, RegI32 rv, RegI32 rd, const Temps& temps) {
+# ifdef JS_CODEGEN_X64
+ RegI32 temp = temps.t0;
+# else
+ RegI32 temp;
+ ScratchI32 scratch(*bc);
+ if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ temp = scratch;
+ }
+# endif
+ bc->masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temp, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32 rv, const Temps& temps) {
+ if (rv != bc->specific_.eax) {
+ bc->freeI32(rv);
+ }
+# ifdef JS_CODEGEN_X64
+ bc->maybeFree(temps.t0);
+# endif
+}
+
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+
+struct Temps {
+ RegI32 t0;
+};
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, AtomicOp op, RegI32* rd,
+ RegI32* rv, Temps* temps) {
+ *rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
+ temps->t0 = bc->needI32();
+ *rd = bc->needI32();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, AtomicOp op, RegI32 rv, RegI32 rd,
+ const Temps& temps) {
+ bc->masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps.t0, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32 rv, const Temps& temps) {
+ bc->freeI32(rv);
+ bc->freeI32(temps.t0);
+}
+
+#elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+
+struct Temps {
+ RegI32 t0, t1, t2;
+};
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, AtomicOp op, RegI32* rd,
+ RegI32* rv, Temps* temps) {
+ *rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
+ if (Scalar::byteSize(viewType) < 4) {
+ temps->t0 = bc->needI32();
+ temps->t1 = bc->needI32();
+ temps->t2 = bc->needI32();
+ }
+ *rd = bc->needI32();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, AtomicOp op, RegI32 rv, RegI32 rd,
+ const Temps& temps) {
+ bc->masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps.t0, temps.t1,
+ temps.t2, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32 rv, const Temps& temps) {
+ bc->freeI32(rv);
+ bc->maybeFree(temps.t0);
+ bc->maybeFree(temps.t1);
+ bc->maybeFree(temps.t2);
+}
+
+#elif defined(JS_CODEGEN_RISCV64)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler*, ValType, Scalar::Type, AtomicOp,
+ RegI32*, RegI32*, Temps*) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex, AtomicOp,
+ RegI32, RegI32, const Temps&) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+static void Deallocate(BaseCompiler*, RegI32, const Temps&) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler*, ValType, Scalar::Type, AtomicOp,
+ RegI32*, RegI32*, Temps*) {}
+
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex, AtomicOp,
+ RegI32, RegI32, const Temps&) {}
+
+static void Deallocate(BaseCompiler*, RegI32, const Temps&) {}
+
+#endif
+
+} // namespace atomic_rmw32
+
+template <typename RegIndexType>
+void BaseCompiler::atomicRMW32(MemoryAccessDesc* access, ValType type,
+ AtomicOp op) {
+ Scalar::Type viewType = access->type();
+ RegI32 rd, rv;
+ atomic_rmw32::Temps temps;
+ atomic_rmw32::PopAndAllocate(this, type, viewType, op, &rd, &rv, &temps);
+
+ AccessCheck check;
+ RegIndexType rp = popMemoryAccess<RegIndexType>(access, &check);
+ RegPtr instance = maybeLoadInstanceForAccess(check);
+
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_rmw32::Perform(this, *access, memaddr, op, rv, rd, temps);
+
+#ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+#endif
+ atomic_rmw32::Deallocate(this, rv, temps);
+ free(rp);
+
+ if (type == ValType::I64) {
+ pushU32AsI64(rd);
+ } else {
+ pushI32(rd);
+ }
+}
+
+namespace atomic_rmw64 {
+
+#if defined(JS_CODEGEN_X64)
+
+static void PopAndAllocate(BaseCompiler* bc, AtomicOp op, RegI64* rd,
+ RegI64* rv, RegI64* temp) {
+ if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ // We use xaddq, so input and output must be the same register.
+ *rv = bc->popI64();
+ *rd = *rv;
+ } else {
+ // We use a cmpxchgq loop, so the output must be rax and we need a temp.
+ bc->needI64(bc->specific_.rax);
+ *rd = bc->specific_.rax;
+ *rv = bc->popI64();
+ *temp = bc->needI64();
+ }
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, AtomicOp op, RegI64 rv, RegI64 temp,
+ RegI64 rd) {
+ bc->masm.wasmAtomicFetchOp64(access, op, rv, srcAddr, temp, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, AtomicOp op, RegI64 rv, RegI64 temp) {
+ bc->maybeFree(temp);
+ if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ bc->freeI64(rv);
+ }
+}
+
+#elif defined(JS_CODEGEN_X86)
+
+// Register allocation is tricky; see the comments at atomic_xchg64 below.
+//
+// - Initially rv=ecx:edx and eax is reserved, rd=unallocated.
+// - Then rp is popped into esi+edi, the only registers still available.
+// - The Setup operation makes rd=edx:eax.
+// - Deallocation then frees only the ecx part of rv.
+//
+// The temp is unused here.
+
+static void PopAndAllocate(BaseCompiler* bc, AtomicOp op, RegI64* rd,
+ RegI64* rv, RegI64*) {
+ bc->needI32(bc->specific_.eax);
+ bc->needI32(bc->specific_.ecx);
+ bc->needI32(bc->specific_.edx);
+ *rv = RegI64(Register64(bc->specific_.ecx, bc->specific_.edx));
+ bc->popI64ToSpecific(*rv);
+}
+
+static void Setup(BaseCompiler* bc, RegI64* rd) { *rd = bc->specific_.edx_eax; }
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ Address srcAddr, AtomicOp op, RegI64 rv, RegI64, RegI64 rd,
+ const ScratchAtomicNoHeapReg& scratch) {
+ MOZ_ASSERT(rv.high == bc->specific_.ecx);
+ MOZ_ASSERT(Register(scratch) == js::jit::ebx);
+
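+  // rv lives in ecx:edx, but the cmpxchg8b loop needs ecx:ebx and edx:eax, so
+  // spill the operand to the machine stack and let the masm read it from
+  // memory during the loop.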
+ bc->fr.pushGPR(rv.high);
+ bc->fr.pushGPR(rv.low);
+ Address value(StackPointer, 0);
+
+ bc->masm.wasmAtomicFetchOp64(access, op, value, srcAddr,
+ bc->specific_.ecx_ebx, rd);
+
+ bc->fr.popBytes(8);
+}
+
+static void Deallocate(BaseCompiler* bc, AtomicOp, RegI64, RegI64) {
+ bc->freeI32(bc->specific_.ecx);
+}
+
+#elif defined(JS_CODEGEN_ARM)
+
+static void PopAndAllocate(BaseCompiler* bc, AtomicOp op, RegI64* rd,
+ RegI64* rv, RegI64* temp) {
+ // We use a ldrex/strexd loop so the temp and the output must be
+ // odd/even pairs.
+ *rv = bc->popI64();
+ *temp = bc->needI64Pair();
+ *rd = bc->needI64Pair();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, AtomicOp op, RegI64 rv, RegI64 temp,
+ RegI64 rd) {
+ bc->masm.wasmAtomicFetchOp64(access, op, rv, srcAddr, temp, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, AtomicOp op, RegI64 rv, RegI64 temp) {
+ bc->freeI64(rv);
+ bc->freeI64(temp);
+}
+
+#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64)
+
+static void PopAndAllocate(BaseCompiler* bc, AtomicOp op, RegI64* rd,
+ RegI64* rv, RegI64* temp) {
+ *rv = bc->popI64();
+ *temp = bc->needI64();
+ *rd = bc->needI64();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, AtomicOp op, RegI64 rv, RegI64 temp,
+ RegI64 rd) {
+ bc->masm.wasmAtomicFetchOp64(access, op, rv, srcAddr, temp, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, AtomicOp op, RegI64 rv, RegI64 temp) {
+ bc->freeI64(rv);
+ bc->freeI64(temp);
+}
+#elif defined(JS_CODEGEN_RISCV64)
+
+static void PopAndAllocate(BaseCompiler*, AtomicOp, RegI64*, RegI64*, RegI64*) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex,
+ AtomicOp op, RegI64, RegI64, RegI64) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+static void Deallocate(BaseCompiler*, AtomicOp, RegI64, RegI64) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+static void PopAndAllocate(BaseCompiler*, AtomicOp, RegI64*, RegI64*, RegI64*) {
+}
+
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex,
+ AtomicOp op, RegI64, RegI64, RegI64) {}
+
+static void Deallocate(BaseCompiler*, AtomicOp, RegI64, RegI64) {}
+
+#endif
+
+} // namespace atomic_rmw64
+
+template <typename RegIndexType>
+void BaseCompiler::atomicRMW64(MemoryAccessDesc* access, ValType type,
+ AtomicOp op) {
+ RegI64 rd, rv, temp;
+ atomic_rmw64::PopAndAllocate(this, op, &rd, &rv, &temp);
+
+ AccessCheck check;
+ RegIndexType rp = popMemoryAccess<RegIndexType>(access, &check);
+
+#if defined(WASM_HAS_HEAPREG)
+ RegPtr instance = maybeLoadInstanceForAccess(check);
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_rmw64::Perform(this, *access, memaddr, op, rv, temp, rd);
+# ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+# endif
+#else
+ ScratchAtomicNoHeapReg scratch(*this);
+ RegPtr instance =
+ maybeLoadInstanceForAccess(check, RegIntptrToRegPtr(scratch));
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_rmw64::Setup(this, &rd);
+ atomic_rmw64::Perform(this, *access, memaddr, op, rv, temp, rd, scratch);
+ MOZ_ASSERT(instance == scratch);
+#endif
+
+ free(rp);
+ atomic_rmw64::Deallocate(this, op, rv, temp);
+
+ pushI64(rd);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Atomic exchange (also used for atomic store in some cases).
+
+void BaseCompiler::atomicXchg(MemoryAccessDesc* access, ValType type) {
+ Scalar::Type viewType = access->type();
+ if (Scalar::byteSize(viewType) <= 4) {
+ if (isMem32()) {
+ atomicXchg32<RegI32>(access, type);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ atomicXchg32<RegI64>(access, type);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+ } else {
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+ if (isMem32()) {
+ atomicXchg64<RegI32>(access, WantResult(true));
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ atomicXchg64<RegI64>(access, WantResult(true));
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+ }
+}
+
+namespace atomic_xchg32 {
+
+#if defined(JS_CODEGEN_X64)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, RegI32* rd, RegI32* rv,
+ Temps*) {
+ // The xchg instruction reuses rv as rd.
+ *rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
+ *rd = *rv;
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI32 rv, RegI32 rd, const Temps&) {
+ bc->masm.wasmAtomicExchange(access, srcAddr, rv, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32, const Temps&) {}
+
+#elif defined(JS_CODEGEN_X86)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, RegI32* rd, RegI32* rv,
+ Temps*) {
+ // The xchg instruction reuses rv as rd.
+ *rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
+ *rd = *rv;
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ Address srcAddr, RegI32 rv, RegI32 rd, const Temps&) {
+ if (access.type() == Scalar::Uint8 && !bc->ra.isSingleByteI32(rd)) {
+ ScratchI8 scratch(*bc);
+ // The output register must have a byte persona.
+ bc->masm.wasmAtomicExchange(access, srcAddr, rv, scratch);
+ bc->masm.movl(scratch, rd);
+ } else {
+ bc->masm.wasmAtomicExchange(access, srcAddr, rv, rd);
+ }
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32, const Temps&) {}
+
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, RegI32* rd, RegI32* rv,
+ Temps*) {
+ *rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
+ *rd = bc->needI32();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI32 rv, RegI32 rd, const Temps&) {
+ bc->masm.wasmAtomicExchange(access, srcAddr, rv, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32 rv, const Temps&) {
+ bc->freeI32(rv);
+}
+
+#elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+
+struct Temps {
+ RegI32 t0, t1, t2;
+};
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, RegI32* rd, RegI32* rv,
+ Temps* temps) {
+ *rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
+ if (Scalar::byteSize(viewType) < 4) {
+ temps->t0 = bc->needI32();
+ temps->t1 = bc->needI32();
+ temps->t2 = bc->needI32();
+ }
+ *rd = bc->needI32();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI32 rv, RegI32 rd,
+ const Temps& temps) {
+ bc->masm.wasmAtomicExchange(access, srcAddr, rv, temps.t0, temps.t1, temps.t2,
+ rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32 rv, const Temps& temps) {
+ bc->freeI32(rv);
+ bc->maybeFree(temps.t0);
+ bc->maybeFree(temps.t1);
+ bc->maybeFree(temps.t2);
+}
+
+#elif defined(JS_CODEGEN_RISCV64)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler*, ValType, Scalar::Type, RegI32*,
+ RegI32*, Temps*) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex, RegI32,
+ RegI32, const Temps&) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+static void Deallocate(BaseCompiler*, RegI32, const Temps&) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler*, ValType, Scalar::Type, RegI32*,
+ RegI32*, Temps*) {}
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex, RegI32,
+ RegI32, const Temps&) {}
+static void Deallocate(BaseCompiler*, RegI32, const Temps&) {}
+
+#endif
+
+} // namespace atomic_xchg32
+
+template <typename RegIndexType>
+void BaseCompiler::atomicXchg32(MemoryAccessDesc* access, ValType type) {
+ Scalar::Type viewType = access->type();
+
+ RegI32 rd, rv;
+ atomic_xchg32::Temps temps;
+ atomic_xchg32::PopAndAllocate(this, type, viewType, &rd, &rv, &temps);
+
+ AccessCheck check;
+
+ RegIndexType rp = popMemoryAccess<RegIndexType>(access, &check);
+ RegPtr instance = maybeLoadInstanceForAccess(check);
+
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_xchg32::Perform(this, *access, memaddr, rv, rd, temps);
+
+#ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+#endif
+ free(rp);
+ atomic_xchg32::Deallocate(this, rv, temps);
+
+ if (type == ValType::I64) {
+ pushU32AsI64(rd);
+ } else {
+ pushI32(rd);
+ }
+}
+
+namespace atomic_xchg64 {
+
+#if defined(JS_CODEGEN_X64)
+
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rd, RegI64* rv) {
+ *rv = bc->popI64();
+ *rd = *rv;
+}
+
+static void Deallocate(BaseCompiler* bc, RegI64 rd, RegI64) {
+ bc->maybeFree(rd);
+}
+
+#elif defined(JS_CODEGEN_X86)
+
+// Register allocation is tricky in several ways.
+//
+// - For a 64-bit access on memory64 we need six registers for rd, rv, and rp,
+// but have only five (as the temp ebx is needed too), so we target all
+// registers explicitly to make sure there's space.
+//
+// - We'll be using cmpxchg8b, and when we do the operation, rv must be in
+// ecx:ebx, and rd must be edx:eax. We can't use ebx for rv initially because
+// we need ebx for a scratch also, so use a separate temp and move the value
+// to ebx just before the operation.
+//
+// In sum:
+//
+// - Initially rv=ecx:edx and eax is reserved, rd=unallocated.
+// - Then rp is popped into esi+edi, the only registers still available.
+// - The Setup operation makes rv=ecx:ebx and rd=edx:eax and moves edx->ebx.
+// - Deallocation then frees only the ecx part of rv.
+
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rd, RegI64* rv) {
+ bc->needI32(bc->specific_.ecx);
+ bc->needI32(bc->specific_.edx);
+ bc->needI32(bc->specific_.eax);
+ *rv = RegI64(Register64(bc->specific_.ecx, bc->specific_.edx));
+ bc->popI64ToSpecific(*rv);
+}
+
+static void Setup(BaseCompiler* bc, RegI64* rv, RegI64* rd,
+ const ScratchAtomicNoHeapReg& scratch) {
+ MOZ_ASSERT(rv->high == bc->specific_.ecx);
+ MOZ_ASSERT(Register(scratch) == js::jit::ebx);
+ bc->masm.move32(rv->low, scratch);
+ *rv = bc->specific_.ecx_ebx;
+ *rd = bc->specific_.edx_eax;
+}
+
+static void Deallocate(BaseCompiler* bc, RegI64 rd, RegI64 rv) {
+ MOZ_ASSERT(rd == bc->specific_.edx_eax || rd == RegI64::Invalid());
+ bc->maybeFree(rd);
+ bc->freeI32(bc->specific_.ecx);
+}
+
+#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64)
+
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rd, RegI64* rv) {
+ *rv = bc->popI64();
+ *rd = bc->needI64();
+}
+
+static void Deallocate(BaseCompiler* bc, RegI64 rd, RegI64 rv) {
+ bc->freeI64(rv);
+ bc->maybeFree(rd);
+}
+
+#elif defined(JS_CODEGEN_ARM)
+
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rd, RegI64* rv) {
+ // Both rv and rd must be odd/even pairs.
+ *rv = bc->popI64ToSpecific(bc->needI64Pair());
+ *rd = bc->needI64Pair();
+}
+
+static void Deallocate(BaseCompiler* bc, RegI64 rd, RegI64 rv) {
+ bc->freeI64(rv);
+ bc->maybeFree(rd);
+}
+
+#elif defined(JS_CODEGEN_RISCV64)
+
+static void PopAndAllocate(BaseCompiler*, RegI64*, RegI64*) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+static void Deallocate(BaseCompiler*, RegI64, RegI64) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+static void PopAndAllocate(BaseCompiler*, RegI64*, RegI64*) {}
+static void Deallocate(BaseCompiler*, RegI64, RegI64) {}
+
+#endif
+
+} // namespace atomic_xchg64
+
+template <typename RegIndexType>
+void BaseCompiler::atomicXchg64(MemoryAccessDesc* access,
+ WantResult wantResult) {
+ RegI64 rd, rv;
+ atomic_xchg64::PopAndAllocate(this, &rd, &rv);
+
+ AccessCheck check;
+ RegIndexType rp = popMemoryAccess<RegIndexType>(access, &check);
+
+#ifdef WASM_HAS_HEAPREG
+ RegPtr instance = maybeLoadInstanceForAccess(check);
+ auto memaddr =
+ prepareAtomicMemoryAccess<RegIndexType>(access, &check, instance, rp);
+ masm.wasmAtomicExchange64(*access, memaddr, rv, rd);
+# ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+# endif
+#else
+ ScratchAtomicNoHeapReg scratch(*this);
+ RegPtr instance =
+ maybeLoadInstanceForAccess(check, RegIntptrToRegPtr(scratch));
+ Address memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_xchg64::Setup(this, &rv, &rd, scratch);
+ masm.wasmAtomicExchange64(*access, memaddr, rv, rd);
+ MOZ_ASSERT(instance == scratch);
+#endif
+
+ free(rp);
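+  // If the result is wanted, ownership of rd moves to the value stack;
+  // invalidate rd so that Deallocate does not also free it.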
+ if (wantResult) {
+ pushI64(rd);
+ rd = RegI64::Invalid();
+ }
+ atomic_xchg64::Deallocate(this, rd, rv);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Atomic compare-exchange.
+
+void BaseCompiler::atomicCmpXchg(MemoryAccessDesc* access, ValType type) {
+ Scalar::Type viewType = access->type();
+ if (Scalar::byteSize(viewType) <= 4) {
+ if (isMem32()) {
+ atomicCmpXchg32<RegI32>(access, type);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ atomicCmpXchg32<RegI64>(access, type);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+ } else {
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+ if (isMem32()) {
+ atomicCmpXchg64<RegI32>(access, type);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ atomicCmpXchg64<RegI64>(access, type);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+ }
+}
+
+namespace atomic_cmpxchg32 {
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, RegI32* rexpect, RegI32* rnew,
+ RegI32* rd, Temps*) {
+ // For cmpxchg, the expected value and the result are both in eax.
+ bc->needI32(bc->specific_.eax);
+ if (type == ValType::I64) {
+ *rnew = bc->popI64ToI32();
+ *rexpect = bc->popI64ToSpecificI32(bc->specific_.eax);
+ } else {
+ *rnew = bc->popI32();
+ *rexpect = bc->popI32ToSpecific(bc->specific_.eax);
+ }
+ *rd = *rexpect;
+}
+
+template <typename T>
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access, T srcAddr,
+ RegI32 rexpect, RegI32 rnew, RegI32 rd, const Temps&) {
+# if defined(JS_CODEGEN_X86)
+ ScratchI8 scratch(*bc);
+ if (access.type() == Scalar::Uint8) {
+ MOZ_ASSERT(rd == bc->specific_.eax);
+ if (!bc->ra.isSingleByteI32(rnew)) {
+ // The replacement value must have a byte persona.
+ bc->masm.movl(rnew, scratch);
+ rnew = scratch;
+ }
+ }
+# endif
+ bc->masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32, RegI32 rnew, const Temps&) {
+ bc->freeI32(rnew);
+}
+
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, RegI32* rexpect, RegI32* rnew,
+ RegI32* rd, Temps*) {
+ if (type == ValType::I64) {
+ *rnew = bc->popI64ToI32();
+ *rexpect = bc->popI64ToI32();
+ } else {
+ *rnew = bc->popI32();
+ *rexpect = bc->popI32();
+ }
+ *rd = bc->needI32();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI32 rexpect, RegI32 rnew, RegI32 rd,
+ const Temps&) {
+ bc->masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32 rexpect, RegI32 rnew,
+ const Temps&) {
+ bc->freeI32(rnew);
+ bc->freeI32(rexpect);
+}
+
+#elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+
+struct Temps {
+ RegI32 t0, t1, t2;
+};
+
+static void PopAndAllocate(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, RegI32* rexpect, RegI32* rnew,
+ RegI32* rd, Temps* temps) {
+ if (type == ValType::I64) {
+ *rnew = bc->popI64ToI32();
+ *rexpect = bc->popI64ToI32();
+ } else {
+ *rnew = bc->popI32();
+ *rexpect = bc->popI32();
+ }
+ if (Scalar::byteSize(viewType) < 4) {
+ temps->t0 = bc->needI32();
+ temps->t1 = bc->needI32();
+ temps->t2 = bc->needI32();
+ }
+ *rd = bc->needI32();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI32 rexpect, RegI32 rnew, RegI32 rd,
+ const Temps& temps) {
+ bc->masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, temps.t0,
+ temps.t1, temps.t2, rd);
+}
+
+static void Deallocate(BaseCompiler* bc, RegI32 rexpect, RegI32 rnew,
+ const Temps& temps) {
+ bc->freeI32(rnew);
+ bc->freeI32(rexpect);
+ bc->maybeFree(temps.t0);
+ bc->maybeFree(temps.t1);
+ bc->maybeFree(temps.t2);
+}
+
+#elif defined(JS_CODEGEN_RISCV64)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler*, ValType, Scalar::Type, RegI32*,
+ RegI32*, RegI32*, Temps*) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex, RegI32,
+ RegI32, RegI32, const Temps& temps) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+static void Deallocate(BaseCompiler*, RegI32, RegI32, const Temps&) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+using Temps = Nothing;
+
+static void PopAndAllocate(BaseCompiler*, ValType, Scalar::Type, RegI32*,
+ RegI32*, RegI32*, Temps*) {}
+
+static void Perform(BaseCompiler*, const MemoryAccessDesc&, BaseIndex, RegI32,
+ RegI32, RegI32, const Temps& temps) {}
+
+static void Deallocate(BaseCompiler*, RegI32, RegI32, const Temps&) {}
+
+#endif
+
+} // namespace atomic_cmpxchg32
+
+template <typename RegIndexType>
+void BaseCompiler::atomicCmpXchg32(MemoryAccessDesc* access, ValType type) {
+ Scalar::Type viewType = access->type();
+ RegI32 rexpect, rnew, rd;
+ atomic_cmpxchg32::Temps temps;
+ atomic_cmpxchg32::PopAndAllocate(this, type, viewType, &rexpect, &rnew, &rd,
+ &temps);
+
+ AccessCheck check;
+ RegIndexType rp = popMemoryAccess<RegIndexType>(access, &check);
+ RegPtr instance = maybeLoadInstanceForAccess(check);
+
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_cmpxchg32::Perform(this, *access, memaddr, rexpect, rnew, rd, temps);
+
+#ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+#endif
+ free(rp);
+ atomic_cmpxchg32::Deallocate(this, rexpect, rnew, temps);
+
+ if (type == ValType::I64) {
+ pushU32AsI64(rd);
+ } else {
+ pushI32(rd);
+ }
+}
+
+namespace atomic_cmpxchg64 {
+
+// The templates are needed for x86 code generation, which needs complicated
+// register allocation for memory64.
+
+template <typename RegIndexType>
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd);
+
+template <typename RegIndexType>
+static void Deallocate(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew);
+
+#if defined(JS_CODEGEN_X64)
+
+template <typename RegIndexType>
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd) {
+ // For cmpxchg, the expected value and the result are both in rax.
+ bc->needI64(bc->specific_.rax);
+ *rnew = bc->popI64();
+ *rexpect = bc->popI64ToSpecific(bc->specific_.rax);
+ *rd = *rexpect;
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd) {
+ bc->masm.wasmCompareExchange64(access, srcAddr, rexpect, rnew, rd);
+}
+
+template <typename RegIndexType>
+static void Deallocate(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew) {
+ bc->freeI64(rnew);
+}
+
+#elif defined(JS_CODEGEN_X86)
+
+template <typename RegIndexType>
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ Address srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd,
+ ScratchAtomicNoHeapReg& scratch);
+
+// Memory32: For cmpxchg8b, the expected value and the result are both in
+// edx:eax, and the replacement value is in ecx:ebx. But we can't allocate ebx
+// initially because we need it later for a scratch, so instead we allocate a
+// temp to hold the low word of 'new'.
+
+template <>
+void PopAndAllocate<RegI32>(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd) {
+ bc->needI64(bc->specific_.edx_eax);
+ bc->needI32(bc->specific_.ecx);
+ RegI32 tmp = bc->needI32();
+ *rnew = bc->popI64ToSpecific(RegI64(Register64(bc->specific_.ecx, tmp)));
+ *rexpect = bc->popI64ToSpecific(bc->specific_.edx_eax);
+ *rd = *rexpect;
+}
+
+template <>
+void Perform<RegI32>(BaseCompiler* bc, const MemoryAccessDesc& access,
+ Address srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd,
+ ScratchAtomicNoHeapReg& scratch) {
+ MOZ_ASSERT(Register(scratch) == js::jit::ebx);
+ MOZ_ASSERT(rnew.high == bc->specific_.ecx);
+ bc->masm.move32(rnew.low, ebx);
+ bc->masm.wasmCompareExchange64(access, srcAddr, rexpect,
+ bc->specific_.ecx_ebx, rd);
+}
+
+template <>
+void Deallocate<RegI32>(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew) {
+ bc->freeI64(rnew);
+}
+
+// Memory64: Register allocation is particularly hairy here. With memory64, we
+// have up to seven live values: i64 expected-value, i64 new-value, i64 pointer,
+// and instance. The instance can use the scratch but there's no avoiding that
+// we'll run out of registers.
+//
+// Unlike for the rmw ops, we can't use edx as the rnew.low since it's used
+// for the rexpect.high. And we can't push anything onto the stack while we're
+// popping the memory address because the memory address may be on the stack.
+
+# ifdef ENABLE_WASM_MEMORY64
+template <>
+void PopAndAllocate<RegI64>(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd) {
+ // We reserve these (and ebx). The 64-bit pointer will end up in esi+edi.
+ bc->needI32(bc->specific_.eax);
+ bc->needI32(bc->specific_.ecx);
+ bc->needI32(bc->specific_.edx);
+
+ // Pop the 'new' value and stash it in the instance scratch area. Do not
+ // initialize *rnew to anything.
+ RegI64 tmp(Register64(bc->specific_.ecx, bc->specific_.edx));
+ bc->popI64ToSpecific(tmp);
+ {
+ ScratchPtr instanceScratch(*bc);
+ bc->stashI64(instanceScratch, tmp);
+ }
+
+ *rexpect = bc->popI64ToSpecific(bc->specific_.edx_eax);
+ *rd = *rexpect;
+}
+
+template <>
+void Perform<RegI64>(BaseCompiler* bc, const MemoryAccessDesc& access,
+ Address srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd,
+ ScratchAtomicNoHeapReg& scratch) {
+ MOZ_ASSERT(rnew.isInvalid());
+ rnew = bc->specific_.ecx_ebx;
+
+ bc->unstashI64(RegPtr(Register(bc->specific_.ecx)), rnew);
+ bc->masm.wasmCompareExchange64(access, srcAddr, rexpect, rnew, rd);
+}
+
+template <>
+void Deallocate<RegI64>(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew) {
+  // The result in edx:eax is pushed by the caller, and the pointer was freed
+  // separately there as well, so all that remains is to free ecx.
+ bc->free(bc->specific_.ecx);
+}
+# endif
+
+#elif defined(JS_CODEGEN_ARM)
+
+template <typename RegIndexType>
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd) {
+ // The replacement value and the result must both be odd/even pairs.
+ *rnew = bc->popI64Pair();
+ *rexpect = bc->popI64();
+ *rd = bc->needI64Pair();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd) {
+ bc->masm.wasmCompareExchange64(access, srcAddr, rexpect, rnew, rd);
+}
+
+template <typename RegIndexType>
+static void Deallocate(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew) {
+ bc->freeI64(rexpect);
+ bc->freeI64(rnew);
+}
+
+#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64)
+
+template <typename RegIndexType>
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd) {
+ *rnew = bc->popI64();
+ *rexpect = bc->popI64();
+ *rd = bc->needI64();
+}
+
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd) {
+ bc->masm.wasmCompareExchange64(access, srcAddr, rexpect, rnew, rd);
+}
+
+template <typename RegIndexType>
+static void Deallocate(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew) {
+ bc->freeI64(rexpect);
+ bc->freeI64(rnew);
+}
+
+#elif defined(JS_CODEGEN_RISCV64)
+
+template <typename RegIndexType>
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+template <typename RegIndexType>
+static void Deallocate(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew) {
+ MOZ_CRASH("UNIMPLEMENTED ON RISCV64");
+}
+
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+
+template <typename RegIndexType>
+static void PopAndAllocate(BaseCompiler* bc, RegI64* rexpect, RegI64* rnew,
+ RegI64* rd) {}
+static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
+ BaseIndex srcAddr, RegI64 rexpect, RegI64 rnew, RegI64 rd) {
+}
+template <typename RegIndexType>
+static void Deallocate(BaseCompiler* bc, RegI64 rexpect, RegI64 rnew) {}
+
+#endif
+
+} // namespace atomic_cmpxchg64
+
+template <typename RegIndexType>
+void BaseCompiler::atomicCmpXchg64(MemoryAccessDesc* access, ValType type) {
+ RegI64 rexpect, rnew, rd;
+ atomic_cmpxchg64::PopAndAllocate<RegIndexType>(this, &rexpect, &rnew, &rd);
+
+ AccessCheck check;
+ RegIndexType rp = popMemoryAccess<RegIndexType>(access, &check);
+
+#ifdef WASM_HAS_HEAPREG
+ RegPtr instance = maybeLoadInstanceForAccess(check);
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_cmpxchg64::Perform(this, *access, memaddr, rexpect, rnew, rd);
+# ifndef RABALDR_PIN_INSTANCE
+ maybeFree(instance);
+# endif
+#else
+ ScratchAtomicNoHeapReg scratch(*this);
+ RegPtr instance =
+ maybeLoadInstanceForAccess(check, RegIntptrToRegPtr(scratch));
+ Address memaddr = prepareAtomicMemoryAccess(access, &check, instance, rp);
+ atomic_cmpxchg64::Perform<RegIndexType>(this, *access, memaddr, rexpect, rnew,
+ rd, scratch);
+ MOZ_ASSERT(instance == scratch);
+#endif
+
+ free(rp);
+ atomic_cmpxchg64::Deallocate<RegIndexType>(this, rexpect, rnew);
+
+ pushI64(rd);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Synchronization.
+
+bool BaseCompiler::atomicWait(ValType type, MemoryAccessDesc* access) {
+ switch (type.kind()) {
+ case ValType::I32: {
+ RegI64 timeout = popI64();
+ RegI32 val = popI32();
+
+ if (isMem32()) {
+ computeEffectiveAddress<RegI32>(access);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ computeEffectiveAddress<RegI64>(access);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+
+ pushI32(val);
+ pushI64(timeout);
+
+ if (!emitInstanceCall(isMem32() ? SASigWaitI32M32 : SASigWaitI32M64)) {
+ return false;
+ }
+ break;
+ }
+ case ValType::I64: {
+ RegI64 timeout = popI64();
+ RegI64 val = popI64();
+
+ if (isMem32()) {
+ computeEffectiveAddress<RegI32>(access);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+# ifdef JS_CODEGEN_X86
+ {
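+        // On x86 there are not enough registers to hold the i64 value while
+        // the 64-bit effective address is computed, so stash it in the
+        // instance scratch area and reload it afterwards.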
+ ScratchPtr scratch(*this);
+ stashI64(scratch, val);
+ freeI64(val);
+ }
+# endif
+ computeEffectiveAddress<RegI64>(access);
+# ifdef JS_CODEGEN_X86
+ {
+ ScratchPtr scratch(*this);
+ val = needI64();
+ unstashI64(scratch, val);
+ }
+# endif
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+
+ pushI64(val);
+ pushI64(timeout);
+
+ if (!emitInstanceCall(isMem32() ? SASigWaitI64M32 : SASigWaitI64M64)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+
+ return true;
+}
+
+bool BaseCompiler::atomicWake(MemoryAccessDesc* access) {
+ RegI32 count = popI32();
+
+ if (isMem32()) {
+ computeEffectiveAddress<RegI32>(access);
+ } else {
+#ifdef ENABLE_WASM_MEMORY64
+ computeEffectiveAddress<RegI64>(access);
+#else
+ MOZ_CRASH("Memory64 not enabled / supported on this platform");
+#endif
+ }
+
+ pushI32(count);
+ return emitInstanceCall(isMem32() ? SASigWakeM32 : SASigWakeM64);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Bulk memory.
+
+void BaseCompiler::memCopyInlineM32() {
+ MOZ_ASSERT(MaxInlineMemoryCopyLength != 0);
+
+ int32_t signedLength;
+ MOZ_ALWAYS_TRUE(popConst(&signedLength));
+ uint32_t length = signedLength;
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);
+
+ RegI32 src = popI32();
+ RegI32 dest = popI32();
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef ENABLE_WASM_SIMD
+ size_t numCopies16 = 0;
+ if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
+ numCopies16 = remainder / sizeof(V128);
+ remainder %= sizeof(V128);
+ }
+#endif
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
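+  // For example, a length of 15 on a 64-bit build decomposes into one 8-byte,
+  // one 4-byte, one 2-byte, and one 1-byte copy.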
+
+ // Load all source bytes onto the value stack from low to high using the
+ // widest transfer width we can for the system. We will trap without writing
+ // anything if any source byte is out-of-bounds.
+ bool omitBoundsCheck = false;
+ size_t offset = 0;
+
+#ifdef ENABLE_WASM_SIMD
+ for (uint32_t i = 0; i < numCopies16; i++) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Simd128, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ loadCommon(&access, check, ValType::V128);
+
+ offset += sizeof(V128);
+ omitBoundsCheck = true;
+ }
+#endif
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ loadCommon(&access, check, ValType::I64);
+
+ offset += sizeof(uint64_t);
+ omitBoundsCheck = true;
+ }
+#endif
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ loadCommon(&access, check, ValType::I32);
+
+ offset += sizeof(uint32_t);
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies2) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ loadCommon(&access, check, ValType::I32);
+
+ offset += sizeof(uint16_t);
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies1) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ loadCommon(&access, check, ValType::I32);
+ }
+
+ // Store all source bytes from the value stack to the destination from
+ // high to low. We will trap without writing anything on the first store
+ // if any dest byte is out-of-bounds.
+ offset = length;
+ omitBoundsCheck = false;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ RegI32 value = popI32();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(value);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ storeCommon(&access, check, ValType::I32);
+
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ RegI32 value = popI32();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(value);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::I32);
+
+ omitBoundsCheck = true;
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ RegI32 value = popI32();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(value);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::I32);
+
+ omitBoundsCheck = true;
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ RegI64 value = popI64();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI64(value);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::I64);
+
+ omitBoundsCheck = true;
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ for (uint32_t i = 0; i < numCopies16; i++) {
+ offset -= sizeof(V128);
+
+ RegV128 value = popV128();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushV128(value);
+
+ MemoryAccessDesc access(Scalar::Simd128, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::V128);
+
+ omitBoundsCheck = true;
+ }
+#endif
+
+ freeI32(dest);
+ freeI32(src);
+}
+
+void BaseCompiler::memFillInlineM32() {
+ MOZ_ASSERT(MaxInlineMemoryFillLength != 0);
+
+ int32_t signedLength;
+ int32_t signedValue;
+ MOZ_ALWAYS_TRUE(popConst(&signedLength));
+ MOZ_ALWAYS_TRUE(popConst(&signedValue));
+ uint32_t length = uint32_t(signedLength);
+ uint32_t value = uint32_t(signedValue);
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
+
+ RegI32 dest = popI32();
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef ENABLE_WASM_SIMD
+ size_t numCopies16 = 0;
+ if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
+ numCopies16 = remainder / sizeof(V128);
+ remainder %= sizeof(V128);
+ }
+#endif
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
+
+ MOZ_ASSERT(numCopies2 <= 1 && numCopies1 <= 1);
+
+ // Generate splatted definitions for wider fills as needed
+#ifdef ENABLE_WASM_SIMD
+ V128 val16(value);
+#endif
+#ifdef JS_64BIT
+ uint64_t val8 = SplatByteToUInt<uint64_t>(value, 8);
+#endif
+ uint32_t val4 = SplatByteToUInt<uint32_t>(value, 4);
+ uint32_t val2 = SplatByteToUInt<uint32_t>(value, 2);
+ uint32_t val1 = value;
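+  // For example, a fill byte of 0xAB splats to 0xABAB for the 2-byte store
+  // and 0xABABABAB for the 4-byte store.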
+
+ // Store the fill value to the destination from high to low. We will trap
+ // without writing anything on the first store if any dest byte is
+ // out-of-bounds.
+ size_t offset = length;
+ bool omitBoundsCheck = false;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(val1);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ storeCommon(&access, check, ValType::I32);
+
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(val2);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::I32);
+
+ omitBoundsCheck = true;
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(val4);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::I32);
+
+ omitBoundsCheck = true;
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI64(val8);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::I64);
+
+ omitBoundsCheck = true;
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ for (uint32_t i = 0; i < numCopies16; i++) {
+ offset -= sizeof(V128);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushV128(val16);
+
+ MemoryAccessDesc access(Scalar::Simd128, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ storeCommon(&access, check, ValType::V128);
+
+ omitBoundsCheck = true;
+ }
+#endif
+
+ freeI32(dest);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// SIMD and Relaxed SIMD.
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::loadSplat(MemoryAccessDesc* access) {
+ // We can implement loadSplat mostly as load + splat because the push of the
+ // result onto the value stack in loadCommon normally will not generate any
+// code; it will leave the value in a register that we then consume.
+
+ // We use uint types when we can on the general assumption that unsigned loads
+ // might be smaller/faster on some platforms, because no sign extension needs
+ // to be done after the sub-register load.
+ RegV128 rd = needV128();
+ switch (access->type()) {
+ case Scalar::Uint8: {
+ loadCommon(access, AccessCheck(), ValType::I32);
+ RegI32 rs = popI32();
+ masm.splatX16(rs, rd);
+ free(rs);
+ break;
+ }
+ case Scalar::Uint16: {
+ loadCommon(access, AccessCheck(), ValType::I32);
+ RegI32 rs = popI32();
+ masm.splatX8(rs, rd);
+ free(rs);
+ break;
+ }
+ case Scalar::Uint32: {
+ loadCommon(access, AccessCheck(), ValType::I32);
+ RegI32 rs = popI32();
+ masm.splatX4(rs, rd);
+ free(rs);
+ break;
+ }
+ case Scalar::Int64: {
+ loadCommon(access, AccessCheck(), ValType::I64);
+ RegI64 rs = popI64();
+ masm.splatX2(rs, rd);
+ free(rs);
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+ pushV128(rd);
+}
+
+void BaseCompiler::loadZero(MemoryAccessDesc* access) {
+ access->setZeroExtendSimd128Load();
+ loadCommon(access, AccessCheck(), ValType::V128);
+}
+
+void BaseCompiler::loadExtend(MemoryAccessDesc* access, Scalar::Type viewType) {
+ loadCommon(access, AccessCheck(), ValType::I64);
+
+ RegI64 rs = popI64();
+ RegV128 rd = needV128();
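+  // Move the 64-bit value into the low half of the vector register, then
+  // sign- or zero-extend its low lanes according to the view type.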
+ masm.moveGPR64ToDouble(rs, rd);
+ switch (viewType) {
+ case Scalar::Int8:
+ masm.widenLowInt8x16(rd, rd);
+ break;
+ case Scalar::Uint8:
+ masm.unsignedWidenLowInt8x16(rd, rd);
+ break;
+ case Scalar::Int16:
+ masm.widenLowInt16x8(rd, rd);
+ break;
+ case Scalar::Uint16:
+ masm.unsignedWidenLowInt16x8(rd, rd);
+ break;
+ case Scalar::Int32:
+ masm.widenLowInt32x4(rd, rd);
+ break;
+ case Scalar::Uint32:
+ masm.unsignedWidenLowInt32x4(rd, rd);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ freeI64(rs);
+ pushV128(rd);
+}
+
+void BaseCompiler::loadLane(MemoryAccessDesc* access, uint32_t laneIndex) {
+ ValType type = access->type() == Scalar::Int64 ? ValType::I64 : ValType::I32;
+
+ RegV128 rsd = popV128();
+ loadCommon(access, AccessCheck(), type);
+
+ if (type == ValType::I32) {
+ RegI32 rs = popI32();
+ switch (access->type()) {
+ case Scalar::Uint8:
+ masm.replaceLaneInt8x16(laneIndex, rs, rsd);
+ break;
+ case Scalar::Uint16:
+ masm.replaceLaneInt16x8(laneIndex, rs, rsd);
+ break;
+ case Scalar::Int32:
+ masm.replaceLaneInt32x4(laneIndex, rs, rsd);
+ break;
+ default:
+ MOZ_CRASH("unsupported access type");
+ }
+ freeI32(rs);
+ } else {
+ MOZ_ASSERT(type == ValType::I64);
+ RegI64 rs = popI64();
+ masm.replaceLaneInt64x2(laneIndex, rs, rsd);
+ freeI64(rs);
+ }
+
+ pushV128(rsd);
+}
+
+void BaseCompiler::storeLane(MemoryAccessDesc* access, uint32_t laneIndex) {
+ ValType type = access->type() == Scalar::Int64 ? ValType::I64 : ValType::I32;
+
+ RegV128 rs = popV128();
+ if (type == ValType::I32) {
+ RegI32 tmp = needI32();
+ switch (access->type()) {
+ case Scalar::Uint8:
+ masm.extractLaneInt8x16(laneIndex, rs, tmp);
+ break;
+ case Scalar::Uint16:
+ masm.extractLaneInt16x8(laneIndex, rs, tmp);
+ break;
+ case Scalar::Int32:
+ masm.extractLaneInt32x4(laneIndex, rs, tmp);
+ break;
+ default:
+ MOZ_CRASH("unsupported laneSize");
+ }
+ pushI32(tmp);
+ } else {
+ MOZ_ASSERT(type == ValType::I64);
+ RegI64 tmp = needI64();
+ masm.extractLaneInt64x2(laneIndex, rs, tmp);
+ pushI64(tmp);
+ }
+ freeV128(rs);
+
+ storeCommon(access, AccessCheck(), type);
+}
+#endif // ENABLE_WASM_SIMD
+
+} // namespace wasm
+} // namespace js
diff --git a/js/src/wasm/WasmBCRegDefs-inl.h b/js/src/wasm/WasmBCRegDefs-inl.h
new file mode 100644
index 0000000000..1531152f03
--- /dev/null
+++ b/js/src/wasm/WasmBCRegDefs-inl.h
@@ -0,0 +1,180 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: inline BaseRegAlloc
+// methods for register allocation.
+
+#ifndef wasm_wasm_baseline_reg_defs_inl_h
+#define wasm_wasm_baseline_reg_defs_inl_h
+
+namespace js {
+namespace wasm {
+// TODO / OPTIMIZE (Bug 1316802): Do not sync everything on allocation
+// failure, only as much as we need.
+
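+// The need*() methods sync (spill) the value stack when no suitable register
+// is free, then allocate; the matching free*() methods return the register to
+// the allocator.
+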
+RegI32 BaseRegAlloc::needI32() {
+ if (!hasGPR()) {
+ bc->sync();
+ }
+ return RegI32(allocGPR());
+}
+
+void BaseRegAlloc::needI32(RegI32 specific) {
+ if (!isAvailableI32(specific)) {
+ bc->sync();
+ }
+ allocGPR(specific);
+}
+
+RegI64 BaseRegAlloc::needI64() {
+ if (!hasGPR64()) {
+ bc->sync();
+ }
+ return RegI64(allocInt64());
+}
+
+void BaseRegAlloc::needI64(RegI64 specific) {
+ if (!isAvailableI64(specific)) {
+ bc->sync();
+ }
+ allocInt64(specific);
+}
+
+RegRef BaseRegAlloc::needRef() {
+ if (!hasGPR()) {
+ bc->sync();
+ }
+ return RegRef(allocGPR());
+}
+
+void BaseRegAlloc::needRef(RegRef specific) {
+ if (!isAvailableRef(specific)) {
+ bc->sync();
+ }
+ allocGPR(specific);
+}
+
+RegPtr BaseRegAlloc::needPtr() {
+ if (!hasGPR()) {
+ bc->sync();
+ }
+ return RegPtr(allocGPR());
+}
+
+void BaseRegAlloc::needPtr(RegPtr specific) {
+ if (!isAvailablePtr(specific)) {
+ bc->sync();
+ }
+ allocGPR(specific);
+}
+
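+// Unlike the methods above, needTempPtr does not sync: if no GPR is free it
+// saves the caller-provided fallback register via the compiler and reports
+// that through *saved so that freeTempPtr can restore it later.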
+RegPtr BaseRegAlloc::needTempPtr(RegPtr fallback, bool* saved) {
+ if (hasGPR()) {
+ *saved = false;
+ return RegPtr(allocGPR());
+ }
+ *saved = true;
+ bc->saveTempPtr(fallback);
+ MOZ_ASSERT(isAvailablePtr(fallback));
+ allocGPR(fallback);
+ return RegPtr(fallback);
+}
+
+RegF32 BaseRegAlloc::needF32() {
+ if (!hasFPU<MIRType::Float32>()) {
+ bc->sync();
+ }
+ return RegF32(allocFPU<MIRType::Float32>());
+}
+
+void BaseRegAlloc::needF32(RegF32 specific) {
+ if (!isAvailableF32(specific)) {
+ bc->sync();
+ }
+ allocFPU(specific);
+}
+
+RegF64 BaseRegAlloc::needF64() {
+ if (!hasFPU<MIRType::Double>()) {
+ bc->sync();
+ }
+ return RegF64(allocFPU<MIRType::Double>());
+}
+
+void BaseRegAlloc::needF64(RegF64 specific) {
+ if (!isAvailableF64(specific)) {
+ bc->sync();
+ }
+ allocFPU(specific);
+}
+
+#ifdef ENABLE_WASM_SIMD
+RegV128 BaseRegAlloc::needV128() {
+ if (!hasFPU<MIRType::Simd128>()) {
+ bc->sync();
+ }
+ return RegV128(allocFPU<MIRType::Simd128>());
+}
+
+void BaseRegAlloc::needV128(RegV128 specific) {
+ if (!isAvailableV128(specific)) {
+ bc->sync();
+ }
+ allocFPU(specific);
+}
+#endif
+
+void BaseRegAlloc::freeI32(RegI32 r) { freeGPR(r); }
+
+void BaseRegAlloc::freeI64(RegI64 r) { freeInt64(r); }
+
+void BaseRegAlloc::freeRef(RegRef r) { freeGPR(r); }
+
+void BaseRegAlloc::freePtr(RegPtr r) { freeGPR(r); }
+
+void BaseRegAlloc::freeF64(RegF64 r) { freeFPU(r); }
+
+void BaseRegAlloc::freeF32(RegF32 r) { freeFPU(r); }
+
+#ifdef ENABLE_WASM_SIMD
+void BaseRegAlloc::freeV128(RegV128 r) { freeFPU(r); }
+#endif
+
+void BaseRegAlloc::freeTempPtr(RegPtr r, bool saved) {
+ freePtr(r);
+ if (saved) {
+ bc->restoreTempPtr(r);
+ MOZ_ASSERT(!isAvailablePtr(r));
+ }
+}
+
+#ifdef JS_CODEGEN_ARM
+RegI64 BaseRegAlloc::needI64Pair() {
+ if (!hasGPRPair()) {
+ bc->sync();
+ }
+ Register low, high;
+ allocGPRPair(&low, &high);
+ return RegI64(Register64(high, low));
+}
+#endif
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_reg_defs_inl_h
diff --git a/js/src/wasm/WasmBCRegDefs.h b/js/src/wasm/WasmBCRegDefs.h
new file mode 100644
index 0000000000..621ee7c6a9
--- /dev/null
+++ b/js/src/wasm/WasmBCRegDefs.h
@@ -0,0 +1,852 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: definitions of
+// registers and the register allocator.
+
+#ifndef wasm_wasm_baseline_regdefs_h
+#define wasm_wasm_baseline_regdefs_h
+
+#include "wasm/WasmBCDefs.h"
+
+namespace js {
+namespace wasm {
+
+struct BaseCompiler;
+
+using namespace js::jit;
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Scratch register configuration.
+
+#if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+# define RABALDR_SCRATCH_I32
+# define RABALDR_SCRATCH_F32
+# define RABALDR_SCRATCH_F64
+
+static constexpr Register RabaldrScratchI32 = Register::Invalid();
+static constexpr FloatRegister RabaldrScratchF32 = InvalidFloatReg;
+static constexpr FloatRegister RabaldrScratchF64 = InvalidFloatReg;
+#endif
+
+#ifdef JS_CODEGEN_ARM64
+# define RABALDR_SCRATCH_I32
+# define RABALDR_SCRATCH_F32
+# define RABALDR_SCRATCH_F64
+# define RABALDR_SCRATCH_V128
+# define RABALDR_SCRATCH_F32_ALIASES_F64
+
+static constexpr Register RabaldrScratchI32{Registers::x15};
+
+// Note, the float scratch regs cannot be registers that are used for parameter
+// passing in any ABI we use. Argregs tend to be low-numbered; register 30
+// should be safe.
+
+static constexpr FloatRegister RabaldrScratchF32{FloatRegisters::s30,
+ FloatRegisters::Single};
+static constexpr FloatRegister RabaldrScratchF64{FloatRegisters::d30,
+ FloatRegisters::Double};
+# ifdef ENABLE_WASM_SIMD
+static constexpr FloatRegister RabaldrScratchV128{FloatRegisters::d30,
+ FloatRegisters::Simd128};
+# endif
+
+static_assert(RabaldrScratchF32 != ScratchFloat32Reg_, "Too busy");
+static_assert(RabaldrScratchF64 != ScratchDoubleReg_, "Too busy");
+# ifdef ENABLE_WASM_SIMD
+static_assert(RabaldrScratchV128 != ScratchSimd128Reg, "Too busy");
+# endif
+#endif
+
+#ifdef JS_CODEGEN_X86
+// The selection of EBX here steps gingerly around: the need for EDX
+// to be allocatable for multiply/divide; ECX to be allocatable for
+// shift/rotate; EAX (= ReturnReg) to be allocatable as the result
+// register; EBX not being one of the WasmTableCall registers; and
+// needing a temp register for load/store that has a single-byte
+// persona.
+//
+// The compiler assumes that RabaldrScratchI32 has a single-byte
+// persona. Code for 8-byte atomic operations assumes that
+// RabaldrScratchI32 is in fact ebx.
+
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = ebx;
+#endif
+
+#ifdef JS_CODEGEN_ARM
+// We use our own scratch register, because the macro assembler uses
+// the regular scratch register(s) pretty liberally. We could
+// work around that in several cases but the mess does not seem
+// worth it yet. CallTempReg2 seems safe.
+
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = CallTempReg2;
+#endif
+
+#ifdef JS_CODEGEN_MIPS64
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = CallTempReg2;
+#endif
+
+#ifdef JS_CODEGEN_LOONG64
+// We use our own scratch register, because the macro assembler uses
+// the regular scratch register(s) pretty liberally. We could
+// work around that in several cases but the mess does not seem
+// worth it yet. CallTempReg2 seems safe.
+
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = CallTempReg2;
+#endif
+
+#ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+# if !defined(RABALDR_SCRATCH_F32) || !defined(RABALDR_SCRATCH_F64)
+# error "Bad configuration"
+# endif
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// ...
+
+template <MIRType t>
+struct RegTypeOf {
+#ifdef ENABLE_WASM_SIMD
+ static_assert(t == MIRType::Float32 || t == MIRType::Double ||
+ t == MIRType::Simd128,
+ "Float mask type");
+#else
+ static_assert(t == MIRType::Float32 || t == MIRType::Double,
+ "Float mask type");
+#endif
+};
+
+template <>
+struct RegTypeOf<MIRType::Float32> {
+ static constexpr RegTypeName value = RegTypeName::Float32;
+};
+template <>
+struct RegTypeOf<MIRType::Double> {
+ static constexpr RegTypeName value = RegTypeName::Float64;
+};
+#ifdef ENABLE_WASM_SIMD
+template <>
+struct RegTypeOf<MIRType::Simd128> {
+ static constexpr RegTypeName value = RegTypeName::Vector128;
+};
+#endif
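+
+// Illustrative sketch: RegTypeOf is the compile-time bridge from a MIRType to
+// the RegTypeName used when querying the allocatable FPU set, e.g.
+//
+//   availFPU.hasAny<RegTypeOf<MIRType::Double>::value>();    // any F64 free?
+//   availFPU.takeAny<RegTypeOf<MIRType::Float32>::value>();  // take an F32
+//
+// which is exactly what hasFPU<t>() and allocFPU<t>() below do.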
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Strongly typed register wrappers.
+
+// The strongly typed register wrappers are especially useful to distinguish
+// float registers from double registers, but they also clearly distinguish
+// 32-bit registers from 64-bit register pairs on 32-bit systems.
+
+struct RegI32 : public Register {
+ RegI32() : Register(Register::Invalid()) {}
+ explicit RegI32(Register reg) : Register(reg) {
+ MOZ_ASSERT(reg != Invalid());
+ }
+ bool isInvalid() const { return *this == Invalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegI32 Invalid() { return RegI32(); }
+};
+
+struct RegI64 : public Register64 {
+ RegI64() : Register64(Register64::Invalid()) {}
+ explicit RegI64(Register64 reg) : Register64(reg) {
+ MOZ_ASSERT(reg != Invalid());
+ }
+ bool isInvalid() const { return *this == Invalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegI64 Invalid() { return RegI64(); }
+};
+
+// RegRef is for GC pointers; for non-GC pointers, use RegPtr.
+struct RegRef : public Register {
+ RegRef() : Register(Register::Invalid()) {}
+ explicit RegRef(Register reg) : Register(reg) {
+ MOZ_ASSERT(reg != Invalid());
+ }
+ bool isInvalid() const { return *this == Invalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegRef Invalid() { return RegRef(); }
+};
+
+// RegPtr is for non-GC pointers; for GC pointers, use RegRef.
+struct RegPtr : public Register {
+ RegPtr() : Register(Register::Invalid()) {}
+ explicit RegPtr(Register reg) : Register(reg) {
+ MOZ_ASSERT(reg != Invalid());
+ }
+ bool isInvalid() const { return *this == Invalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegPtr Invalid() { return RegPtr(); }
+};
+
+struct RegF32 : public FloatRegister {
+ RegF32() : FloatRegister() {}
+ explicit RegF32(FloatRegister reg) : FloatRegister(reg) {
+ MOZ_ASSERT(isSingle());
+ }
+ bool isValid() const { return !isInvalid(); }
+ static RegF32 Invalid() { return RegF32(); }
+};
+
+struct RegF64 : public FloatRegister {
+ RegF64() : FloatRegister() {}
+ explicit RegF64(FloatRegister reg) : FloatRegister(reg) {
+ MOZ_ASSERT(isDouble());
+ }
+ bool isValid() const { return !isInvalid(); }
+ static RegF64 Invalid() { return RegF64(); }
+};
+
+#ifdef ENABLE_WASM_SIMD
+struct RegV128 : public FloatRegister {
+ RegV128() : FloatRegister() {}
+ explicit RegV128(FloatRegister reg) : FloatRegister(reg) {
+ MOZ_ASSERT(isSimd128());
+ }
+ bool isValid() const { return !isInvalid(); }
+ static RegV128 Invalid() { return RegV128(); }
+};
+#endif
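+
+// Minimal sketch of what the wrappers buy us: construction is explicit, and
+// the float/double wrappers assert the width of the underlying register, so
+// register kinds cannot be mixed silently, e.g.
+//
+//   RegF64 d(someDoubleReg);   // OK, asserts isDouble()
+//   RegF32 s(d);               // compiles, but MOZ_ASSERTs isSingle() (DEBUG)
+//   RegI32 i = d;              // does not compile: unrelated types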
+
+struct AnyReg {
+ union {
+ RegI32 i32_;
+ RegI64 i64_;
+ RegRef ref_;
+ RegF32 f32_;
+ RegF64 f64_;
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128_;
+#endif
+ };
+
+ enum {
+ I32,
+ I64,
+ REF,
+ F32,
+ F64,
+#ifdef ENABLE_WASM_SIMD
+ V128
+#endif
+ } tag;
+
+ explicit AnyReg(RegI32 r) {
+ tag = I32;
+ i32_ = r;
+ }
+ explicit AnyReg(RegI64 r) {
+ tag = I64;
+ i64_ = r;
+ }
+ explicit AnyReg(RegF32 r) {
+ tag = F32;
+ f32_ = r;
+ }
+ explicit AnyReg(RegF64 r) {
+ tag = F64;
+ f64_ = r;
+ }
+#ifdef ENABLE_WASM_SIMD
+ explicit AnyReg(RegV128 r) {
+ tag = V128;
+ v128_ = r;
+ }
+#endif
+ explicit AnyReg(RegRef r) {
+ tag = REF;
+ ref_ = r;
+ }
+
+ RegI32 i32() const {
+ MOZ_ASSERT(tag == I32);
+ return i32_;
+ }
+ RegI64 i64() const {
+ MOZ_ASSERT(tag == I64);
+ return i64_;
+ }
+ RegF32 f32() const {
+ MOZ_ASSERT(tag == F32);
+ return f32_;
+ }
+ RegF64 f64() const {
+ MOZ_ASSERT(tag == F64);
+ return f64_;
+ }
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128() const {
+ MOZ_ASSERT(tag == V128);
+ return v128_;
+ }
+#endif
+ RegRef ref() const {
+ MOZ_ASSERT(tag == REF);
+ return ref_;
+ }
+
+ AnyRegister any() const {
+ switch (tag) {
+ case F32:
+ return AnyRegister(f32_);
+ case F64:
+ return AnyRegister(f64_);
+#ifdef ENABLE_WASM_SIMD
+ case V128:
+ return AnyRegister(v128_);
+#endif
+ case I32:
+ return AnyRegister(i32_);
+ case I64:
+#ifdef JS_PUNBOX64
+ return AnyRegister(i64_.reg);
+#else
+ // The compiler is written so that this is never needed: any() is
+ // called on arbitrary registers for asm.js but asm.js does not have
+ // 64-bit ints. For wasm, any() is called on arbitrary registers
+ // only on 64-bit platforms.
+ MOZ_CRASH("AnyReg::any() on 32-bit platform");
+#endif
+ case REF:
+ MOZ_CRASH("AnyReg::any() not implemented for ref types");
+ default:
+ MOZ_CRASH();
+ }
+ // Work around GCC 5 analysis/warning bug.
+ MOZ_CRASH("AnyReg::any(): impossible case");
+ }
+};
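+
+// Illustrative sketch of the AnyReg protocol: wrap a typed register, dispatch
+// on `tag`, and unwrap with the matching accessor (which asserts the tag), as
+// freeAny() and pushAny() in the compiler do:
+//
+//   AnyReg r(RegF64(someDoubleReg));
+//   if (r.tag == AnyReg::F64) {
+//     RegF64 d = r.f64();
+//   }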
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Platform-specific registers.
+//
+// All platforms must define struct SpecificRegs. All 32-bit platforms must
+// have an abiReturnRegI64 member in that struct.
+
+#if defined(JS_CODEGEN_X64)
+struct SpecificRegs {
+ RegI32 eax, ecx, edx, edi, esi;
+ RegI64 rax, rcx, rdx;
+
+ SpecificRegs()
+ : eax(RegI32(js::jit::eax)),
+ ecx(RegI32(js::jit::ecx)),
+ edx(RegI32(js::jit::edx)),
+ edi(RegI32(js::jit::edi)),
+ esi(RegI32(js::jit::esi)),
+ rax(RegI64(Register64(js::jit::rax))),
+ rcx(RegI64(Register64(js::jit::rcx))),
+ rdx(RegI64(Register64(js::jit::rdx))) {}
+};
+#elif defined(JS_CODEGEN_X86)
+struct SpecificRegs {
+ RegI32 eax, ecx, edx, edi, esi;
+ RegI64 ecx_ebx, edx_eax, abiReturnRegI64;
+
+ SpecificRegs()
+ : eax(RegI32(js::jit::eax)),
+ ecx(RegI32(js::jit::ecx)),
+ edx(RegI32(js::jit::edx)),
+ edi(RegI32(js::jit::edi)),
+ esi(RegI32(js::jit::esi)),
+ ecx_ebx(RegI64(Register64(js::jit::ecx, js::jit::ebx))),
+ edx_eax(RegI64(Register64(js::jit::edx, js::jit::eax))),
+ abiReturnRegI64(edx_eax) {}
+};
+#elif defined(JS_CODEGEN_ARM)
+struct SpecificRegs {
+ RegI64 abiReturnRegI64;
+
+ SpecificRegs() : abiReturnRegI64(ReturnReg64) {}
+};
+#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64)
+struct SpecificRegs {
+ // Required by gcc.
+ SpecificRegs() {}
+};
+#else
+struct SpecificRegs {
+# ifndef JS_64BIT
+ RegI64 abiReturnRegI64;
+# endif
+
+ SpecificRegs() { MOZ_CRASH("BaseCompiler porting interface: SpecificRegs"); }
+};
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Register allocator.
+
+class BaseRegAlloc {
+ // Notes on float register allocation.
+ //
+ // The general rule in SpiderMonkey is that float registers can alias double
+ // registers, but there are predicates to handle exceptions to that rule:
+ // hasUnaliasedDouble() and hasMultiAlias(). The way aliasing actually
+ // works is platform dependent and exposed through the aliased(n, &r)
+ // predicate, etc.
+ //
+ // - hasUnaliasedDouble(): on ARM VFPv3-D32 there are double registers that
+ // cannot be treated as float.
+ // - hasMultiAlias(): on ARM and MIPS a double register aliases two float
+ // registers.
+ //
+ // On some platforms (x86, x64, ARM64) but not all (ARM)
+ // ScratchFloat32Register is the same as ScratchDoubleRegister.
+ //
+ // It's a basic invariant of the AllocatableRegisterSet that it deals
+ // properly with aliasing of registers: if s0 or s1 are allocated then d0 is
+ // not allocatable; if s0 and s1 are freed individually then d0 becomes
+ // allocatable.
+
+ BaseCompiler* bc;
+ AllocatableGeneralRegisterSet availGPR;
+ AllocatableFloatRegisterSet availFPU;
+#ifdef DEBUG
+ // The registers available after removing ScratchReg, HeapReg, etc.
+ AllocatableGeneralRegisterSet allGPR;
+ AllocatableFloatRegisterSet allFPU;
+ uint32_t scratchTaken;
+#endif
+#ifdef JS_CODEGEN_X86
+ AllocatableGeneralRegisterSet singleByteRegs;
+#endif
+
+ bool hasGPR() { return !availGPR.empty(); }
+
+ bool hasGPR64() {
+#ifdef JS_PUNBOX64
+ return !availGPR.empty();
+#else
+ if (availGPR.empty()) {
+ return false;
+ }
+ Register r = allocGPR();
+ bool available = !availGPR.empty();
+ freeGPR(r);
+ return available;
+#endif
+ }
+
+ template <MIRType t>
+ bool hasFPU() {
+ return availFPU.hasAny<RegTypeOf<t>::value>();
+ }
+
+ bool isAvailableGPR(Register r) { return availGPR.has(r); }
+
+ bool isAvailableFPU(FloatRegister r) { return availFPU.has(r); }
+
+ void allocGPR(Register r) {
+ MOZ_ASSERT(isAvailableGPR(r));
+ availGPR.take(r);
+ }
+
+ Register allocGPR() {
+ MOZ_ASSERT(hasGPR());
+ return availGPR.takeAny();
+ }
+
+ void allocInt64(Register64 r) {
+#ifdef JS_PUNBOX64
+ allocGPR(r.reg);
+#else
+ allocGPR(r.low);
+ allocGPR(r.high);
+#endif
+ }
+
+ Register64 allocInt64() {
+ MOZ_ASSERT(hasGPR64());
+#ifdef JS_PUNBOX64
+ return Register64(availGPR.takeAny());
+#else
+ Register high = availGPR.takeAny();
+ Register low = availGPR.takeAny();
+ return Register64(high, low);
+#endif
+ }
+
+#ifdef JS_CODEGEN_ARM
+ // r12 is normally the ScratchRegister and r13 is always the stack pointer,
+ // so the highest possible pair has r10 as the even-numbered register.
+
+ static constexpr uint32_t PAIR_LIMIT = 10;
+
+ bool hasGPRPair() {
+ for (uint32_t i = 0; i <= PAIR_LIMIT; i += 2) {
+ if (isAvailableGPR(Register::FromCode(i)) &&
+ isAvailableGPR(Register::FromCode(i + 1))) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void allocGPRPair(Register* low, Register* high) {
+ MOZ_ASSERT(hasGPRPair());
+ for (uint32_t i = 0; i <= PAIR_LIMIT; i += 2) {
+ if (isAvailableGPR(Register::FromCode(i)) &&
+ isAvailableGPR(Register::FromCode(i + 1))) {
+ *low = Register::FromCode(i);
+ *high = Register::FromCode(i + 1);
+ allocGPR(*low);
+ allocGPR(*high);
+ return;
+ }
+ }
+ MOZ_CRASH("No pair");
+ }
+#endif
+
+ void allocFPU(FloatRegister r) {
+ MOZ_ASSERT(isAvailableFPU(r));
+ availFPU.take(r);
+ }
+
+ template <MIRType t>
+ FloatRegister allocFPU() {
+ return availFPU.takeAny<RegTypeOf<t>::value>();
+ }
+
+ void freeGPR(Register r) { availGPR.add(r); }
+
+ void freeInt64(Register64 r) {
+#ifdef JS_PUNBOX64
+ freeGPR(r.reg);
+#else
+ freeGPR(r.low);
+ freeGPR(r.high);
+#endif
+ }
+
+ void freeFPU(FloatRegister r) { availFPU.add(r); }
+
+ public:
+ explicit BaseRegAlloc()
+ : bc(nullptr),
+ availGPR(GeneralRegisterSet::All()),
+ availFPU(FloatRegisterSet::All())
+#ifdef DEBUG
+ ,
+ scratchTaken(0)
+#endif
+#ifdef JS_CODEGEN_X86
+ ,
+ singleByteRegs(GeneralRegisterSet(Registers::SingleByteRegs))
+#endif
+ {
+ RegisterAllocator::takeWasmRegisters(availGPR);
+
+#ifdef RABALDR_PIN_INSTANCE
+ // If the InstanceReg is pinned then it is never available for
+ // allocation.
+ availGPR.take(InstanceReg);
+#endif
+
+ // Allocate any private scratch registers.
+#if defined(RABALDR_SCRATCH_I32)
+ if (RabaldrScratchI32 != RegI32::Invalid()) {
+ availGPR.take(RabaldrScratchI32);
+ }
+#endif
+
+#ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+ static_assert(RabaldrScratchF32 != InvalidFloatReg, "Float reg definition");
+ static_assert(RabaldrScratchF64 != InvalidFloatReg, "Float reg definition");
+#endif
+
+#if defined(RABALDR_SCRATCH_F32) && !defined(RABALDR_SCRATCH_F32_ALIASES_F64)
+ if (RabaldrScratchF32 != RegF32::Invalid()) {
+ availFPU.take(RabaldrScratchF32);
+ }
+#endif
+
+#if defined(RABALDR_SCRATCH_F64)
+# ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+ MOZ_ASSERT(availFPU.has(RabaldrScratchF32));
+# endif
+ if (RabaldrScratchF64 != RegF64::Invalid()) {
+ availFPU.take(RabaldrScratchF64);
+ }
+# ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+ MOZ_ASSERT(!availFPU.has(RabaldrScratchF32));
+# endif
+#endif
+
+#ifdef DEBUG
+ allGPR = availGPR;
+ allFPU = availFPU;
+#endif
+ }
+
+ void init(BaseCompiler* bc) { this->bc = bc; }
+
+ enum class ScratchKind { I32 = 1, F32 = 2, F64 = 4, V128 = 8 };
+
+#ifdef DEBUG
+ bool isScratchRegisterTaken(ScratchKind s) const {
+ return (scratchTaken & uint32_t(s)) != 0;
+ }
+
+ void setScratchRegisterTaken(ScratchKind s, bool state) {
+ if (state) {
+ scratchTaken |= uint32_t(s);
+ } else {
+ scratchTaken &= ~uint32_t(s);
+ }
+ }
+#endif
+
+#ifdef JS_CODEGEN_X86
+ bool isSingleByteI32(Register r) { return singleByteRegs.has(r); }
+#endif
+
+ bool isAvailableI32(RegI32 r) { return isAvailableGPR(r); }
+
+ bool isAvailableI64(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return isAvailableGPR(r.reg);
+#else
+ return isAvailableGPR(r.low) && isAvailableGPR(r.high);
+#endif
+ }
+
+ bool isAvailableRef(RegRef r) { return isAvailableGPR(r); }
+
+ bool isAvailablePtr(RegPtr r) { return isAvailableGPR(r); }
+
+ bool isAvailableF32(RegF32 r) { return isAvailableFPU(r); }
+
+ bool isAvailableF64(RegF64 r) { return isAvailableFPU(r); }
+
+#ifdef ENABLE_WASM_SIMD
+ bool isAvailableV128(RegV128 r) { return isAvailableFPU(r); }
+#endif
+
+ [[nodiscard]] inline RegI32 needI32();
+ inline void needI32(RegI32 specific);
+
+ [[nodiscard]] inline RegI64 needI64();
+ inline void needI64(RegI64 specific);
+
+ [[nodiscard]] inline RegRef needRef();
+ inline void needRef(RegRef specific);
+
+ [[nodiscard]] inline RegPtr needPtr();
+ inline void needPtr(RegPtr specific);
+
+ [[nodiscard]] inline RegF32 needF32();
+ inline void needF32(RegF32 specific);
+
+ [[nodiscard]] inline RegF64 needF64();
+ inline void needF64(RegF64 specific);
+
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] inline RegV128 needV128();
+ inline void needV128(RegV128 specific);
+#endif
+
+ inline void freeI32(RegI32 r);
+ inline void freeI64(RegI64 r);
+ inline void freeRef(RegRef r);
+ inline void freePtr(RegPtr r);
+ inline void freeF64(RegF64 r);
+ inline void freeF32(RegF32 r);
+#ifdef ENABLE_WASM_SIMD
+ inline void freeV128(RegV128 r);
+#endif
+
+ // Use when you need a register for a short time but explicitly want to avoid
+ // a full sync().
+ [[nodiscard]] inline RegPtr needTempPtr(RegPtr fallback, bool* saved);
+ inline void freeTempPtr(RegPtr r, bool saved);
+
+#ifdef JS_CODEGEN_ARM
+ [[nodiscard]] inline RegI64 needI64Pair();
+#endif
+
+#ifdef DEBUG
+ friend class LeakCheck;
+
+ class MOZ_RAII LeakCheck {
+ private:
+ const BaseRegAlloc& ra;
+ AllocatableGeneralRegisterSet knownGPR_;
+ AllocatableFloatRegisterSet knownFPU_;
+
+ public:
+ explicit LeakCheck(const BaseRegAlloc& ra) : ra(ra) {
+ knownGPR_ = ra.availGPR;
+ knownFPU_ = ra.availFPU;
+ }
+
+ ~LeakCheck() {
+ MOZ_ASSERT(knownGPR_.bits() == ra.allGPR.bits());
+ MOZ_ASSERT(knownFPU_.bits() == ra.allFPU.bits());
+ }
+
+ void addKnownI32(RegI32 r) { knownGPR_.add(r); }
+
+ void addKnownI64(RegI64 r) {
+# ifdef JS_PUNBOX64
+ knownGPR_.add(r.reg);
+# else
+ knownGPR_.add(r.high);
+ knownGPR_.add(r.low);
+# endif
+ }
+
+ void addKnownF32(RegF32 r) { knownFPU_.add(r); }
+
+ void addKnownF64(RegF64 r) { knownFPU_.add(r); }
+
+# ifdef ENABLE_WASM_SIMD
+ void addKnownV128(RegV128 r) { knownFPU_.add(r); }
+# endif
+
+ void addKnownRef(RegRef r) { knownGPR_.add(r); }
+ };
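+
+  // Illustrative sketch (DEBUG only): a leak check brackets a region and is
+  // told about every register that is legitimately still live; its destructor
+  // then asserts that live + available covers every allocatable register:
+  //
+  //   BaseRegAlloc::LeakCheck check(ra);
+  //   check.addKnownI32(liveI32);   // registers still held, e.g. by the stack
+  //   ...                           // ~LeakCheck asserts nothing leaked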
+#endif
+};
+
+// Scratch register abstractions.
+//
+// We define our own scratch registers when the platform doesn't provide what we
+// need. A notable use case is that we will need a private scratch register
+// when the platform masm uses its scratch register very frequently (eg, ARM).
+
+class BaseScratchRegister {
+#ifdef DEBUG
+ BaseRegAlloc& ra;
+ BaseRegAlloc::ScratchKind kind_;
+
+ public:
+ explicit BaseScratchRegister(BaseRegAlloc& ra, BaseRegAlloc::ScratchKind kind)
+ : ra(ra), kind_(kind) {
+ MOZ_ASSERT(!ra.isScratchRegisterTaken(kind_));
+ ra.setScratchRegisterTaken(kind_, true);
+ }
+ ~BaseScratchRegister() {
+ MOZ_ASSERT(ra.isScratchRegisterTaken(kind_));
+ ra.setScratchRegisterTaken(kind_, false);
+ }
+#else
+ public:
+ explicit BaseScratchRegister(BaseRegAlloc& ra,
+ BaseRegAlloc::ScratchKind kind) {}
+#endif
+};
+
+#ifdef ENABLE_WASM_SIMD
+# ifdef RABALDR_SCRATCH_V128
+class ScratchV128 : public BaseScratchRegister {
+ public:
+ explicit ScratchV128(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::V128) {}
+ operator RegV128() const { return RegV128(RabaldrScratchV128); }
+};
+# else
+class ScratchV128 : public ScratchSimd128Scope {
+ public:
+ explicit ScratchV128(MacroAssembler& m) : ScratchSimd128Scope(m) {}
+ operator RegV128() const { return RegV128(FloatRegister(*this)); }
+};
+# endif
+#endif
+
+#ifdef RABALDR_SCRATCH_F64
+class ScratchF64 : public BaseScratchRegister {
+ public:
+ explicit ScratchF64(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::F64) {}
+ operator RegF64() const { return RegF64(RabaldrScratchF64); }
+};
+#else
+class ScratchF64 : public ScratchDoubleScope {
+ public:
+ explicit ScratchF64(MacroAssembler& m) : ScratchDoubleScope(m) {}
+ operator RegF64() const { return RegF64(FloatRegister(*this)); }
+};
+#endif
+
+#ifdef RABALDR_SCRATCH_F32
+class ScratchF32 : public BaseScratchRegister {
+ public:
+ explicit ScratchF32(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::F32) {}
+ operator RegF32() const { return RegF32(RabaldrScratchF32); }
+};
+#else
+class ScratchF32 : public ScratchFloat32Scope {
+ public:
+ explicit ScratchF32(MacroAssembler& m) : ScratchFloat32Scope(m) {}
+ operator RegF32() const { return RegF32(FloatRegister(*this)); }
+};
+#endif
+
+#ifdef RABALDR_SCRATCH_I32
+template <class RegType>
+class ScratchGPR : public BaseScratchRegister {
+ public:
+ explicit ScratchGPR(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::I32) {}
+ operator RegType() const { return RegType(RabaldrScratchI32); }
+};
+#else
+template <class RegType>
+class ScratchGPR : public ScratchRegisterScope {
+ public:
+ explicit ScratchGPR(MacroAssembler& m) : ScratchRegisterScope(m) {}
+ operator RegType() const { return RegType(Register(*this)); }
+};
+#endif
+
+using ScratchI32 = ScratchGPR<RegI32>;
+using ScratchPtr = ScratchGPR<RegPtr>;
+using ScratchRef = ScratchGPR<RegRef>;
+
+#if defined(JS_CODEGEN_X86)
+// ScratchEBX is a mnemonic device: For some atomic ops we really need EBX,
+// no other register will do. And we would normally have to allocate that
+// register using ScratchI32 since normally the scratch register is EBX.
+// But the whole point of ScratchI32 is to hide that relationship. By using
+// the ScratchEBX alias, we document that at that point we require the
+// scratch register to be EBX.
+using ScratchEBX = ScratchI32;
+
+// ScratchI8 is a mnemonic device: For some ops we need a register with a
+// byte subregister.
+using ScratchI8 = ScratchI32;
+#endif
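+
+// Illustrative sketch of the scratch protocol: a scratch is an RAII scope,
+// constructed from the compiler object and released at the end of the scope;
+// when a private (RABALDR_SCRATCH_*) scratch is in use, BaseScratchRegister
+// additionally asserts in DEBUG builds that the scratch is not already taken.
+// The value-stack sync code, for example, does:
+//
+//   {
+//     ScratchI32 scratch(*this);
+//     loadLocalI32(v, scratch);
+//     fr.pushGPR(scratch);
+//   }   // scratch released here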
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_regdefs_h
diff --git a/js/src/wasm/WasmBCRegMgmt-inl.h b/js/src/wasm/WasmBCRegMgmt-inl.h
new file mode 100644
index 0000000000..d448b7405b
--- /dev/null
+++ b/js/src/wasm/WasmBCRegMgmt-inl.h
@@ -0,0 +1,486 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: inline methods in the
+// compiler for register management.
+
+#ifndef wasm_wasm_baseline_reg_mgmt_inl_h
+#define wasm_wasm_baseline_reg_mgmt_inl_h
+
+namespace js {
+namespace wasm {
+
+bool BaseCompiler::isAvailableI32(RegI32 r) { return ra.isAvailableI32(r); }
+bool BaseCompiler::isAvailableI64(RegI64 r) { return ra.isAvailableI64(r); }
+bool BaseCompiler::isAvailableRef(RegRef r) { return ra.isAvailableRef(r); }
+bool BaseCompiler::isAvailablePtr(RegPtr r) { return ra.isAvailablePtr(r); }
+bool BaseCompiler::isAvailableF32(RegF32 r) { return ra.isAvailableF32(r); }
+bool BaseCompiler::isAvailableF64(RegF64 r) { return ra.isAvailableF64(r); }
+#ifdef ENABLE_WASM_SIMD
+bool BaseCompiler::isAvailableV128(RegV128 r) { return ra.isAvailableV128(r); }
+#endif
+
+[[nodiscard]] RegI32 BaseCompiler::needI32() { return ra.needI32(); }
+[[nodiscard]] RegI64 BaseCompiler::needI64() { return ra.needI64(); }
+[[nodiscard]] RegRef BaseCompiler::needRef() { return ra.needRef(); }
+[[nodiscard]] RegPtr BaseCompiler::needPtr() { return ra.needPtr(); }
+[[nodiscard]] RegF32 BaseCompiler::needF32() { return ra.needF32(); }
+[[nodiscard]] RegF64 BaseCompiler::needF64() { return ra.needF64(); }
+#ifdef ENABLE_WASM_SIMD
+[[nodiscard]] RegV128 BaseCompiler::needV128() { return ra.needV128(); }
+#endif
+
+void BaseCompiler::needI32(RegI32 specific) { ra.needI32(specific); }
+void BaseCompiler::needI64(RegI64 specific) { ra.needI64(specific); }
+void BaseCompiler::needRef(RegRef specific) { ra.needRef(specific); }
+void BaseCompiler::needPtr(RegPtr specific) { ra.needPtr(specific); }
+void BaseCompiler::needF32(RegF32 specific) { ra.needF32(specific); }
+void BaseCompiler::needF64(RegF64 specific) { ra.needF64(specific); }
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::needV128(RegV128 specific) { ra.needV128(specific); }
+#endif
+
+#if defined(JS_CODEGEN_ARM)
+[[nodiscard]] RegI64 BaseCompiler::needI64Pair() { return ra.needI64Pair(); }
+#endif
+
+void BaseCompiler::freeI32(RegI32 r) { ra.freeI32(r); }
+void BaseCompiler::freeI64(RegI64 r) { ra.freeI64(r); }
+void BaseCompiler::freeRef(RegRef r) { ra.freeRef(r); }
+void BaseCompiler::freePtr(RegPtr r) { ra.freePtr(r); }
+void BaseCompiler::freeF32(RegF32 r) { ra.freeF32(r); }
+void BaseCompiler::freeF64(RegF64 r) { ra.freeF64(r); }
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::freeV128(RegV128 r) { ra.freeV128(r); }
+#endif
+
+void BaseCompiler::freeAny(AnyReg r) {
+ switch (r.tag) {
+ case AnyReg::I32:
+ freeI32(r.i32());
+ break;
+ case AnyReg::I64:
+ freeI64(r.i64());
+ break;
+ case AnyReg::REF:
+ freeRef(r.ref());
+ break;
+ case AnyReg::F32:
+ freeF32(r.f32());
+ break;
+ case AnyReg::F64:
+ freeF64(r.f64());
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case AnyReg::V128:
+ freeV128(r.v128());
+ break;
+#endif
+ default:
+ MOZ_CRASH();
+ }
+}
+
+template <>
+inline void BaseCompiler::free<RegI32>(RegI32 r) {
+ freeI32(r);
+}
+
+template <>
+inline void BaseCompiler::free<RegI64>(RegI64 r) {
+ freeI64(r);
+}
+
+template <>
+inline void BaseCompiler::free<RegRef>(RegRef r) {
+ freeRef(r);
+}
+
+template <>
+inline void BaseCompiler::free<RegPtr>(RegPtr r) {
+ freePtr(r);
+}
+
+template <>
+inline void BaseCompiler::free<RegF32>(RegF32 r) {
+ freeF32(r);
+}
+
+template <>
+inline void BaseCompiler::free<RegF64>(RegF64 r) {
+ freeF64(r);
+}
+
+#ifdef ENABLE_WASM_SIMD
+template <>
+inline void BaseCompiler::free<RegV128>(RegV128 r) {
+ freeV128(r);
+}
+#endif
+
+template <>
+inline void BaseCompiler::free<AnyReg>(AnyReg r) {
+ freeAny(r);
+}
+
+void BaseCompiler::freeI64Except(RegI64 r, RegI32 except) {
+#ifdef JS_PUNBOX64
+ MOZ_ASSERT(r.reg == except);
+#else
+ MOZ_ASSERT(r.high == except || r.low == except);
+ freeI64(r);
+ needI32(except);
+#endif
+}
+
+void BaseCompiler::maybeFree(RegI32 r) {
+ if (r.isValid()) {
+ freeI32(r);
+ }
+}
+
+void BaseCompiler::maybeFree(RegI64 r) {
+ if (r.isValid()) {
+ freeI64(r);
+ }
+}
+
+void BaseCompiler::maybeFree(RegF32 r) {
+ if (r.isValid()) {
+ freeF32(r);
+ }
+}
+
+void BaseCompiler::maybeFree(RegF64 r) {
+ if (r.isValid()) {
+ freeF64(r);
+ }
+}
+
+void BaseCompiler::maybeFree(RegRef r) {
+ if (r.isValid()) {
+ freeRef(r);
+ }
+}
+
+void BaseCompiler::maybeFree(RegPtr r) {
+ if (r.isValid()) {
+ freePtr(r);
+ }
+}
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::maybeFree(RegV128 r) {
+ if (r.isValid()) {
+ freeV128(r);
+ }
+}
+#endif
+
+void BaseCompiler::needI32NoSync(RegI32 r) {
+ MOZ_ASSERT(isAvailableI32(r));
+ needI32(r);
+}
+
+// TODO / OPTIMIZE: need2xI32() can be optimized along with needI32()
+// to avoid sync(). (Bug 1316802)
+
+void BaseCompiler::need2xI32(RegI32 r0, RegI32 r1) {
+ needI32(r0);
+ needI32(r1);
+}
+
+void BaseCompiler::need2xI64(RegI64 r0, RegI64 r1) {
+ needI64(r0);
+ needI64(r1);
+}
+
+RegI32 BaseCompiler::fromI64(RegI64 r) { return RegI32(lowPart(r)); }
+
+RegI32 BaseCompiler::maybeFromI64(RegI64 r) {
+ if (!r.isValid()) {
+ return RegI32::Invalid();
+ }
+ return fromI64(r);
+}
+
+#ifdef JS_PUNBOX64
+RegI64 BaseCompiler::fromI32(RegI32 r) { return RegI64(Register64(r)); }
+#endif
+
+RegI64 BaseCompiler::widenI32(RegI32 r) {
+ MOZ_ASSERT(!isAvailableI32(r));
+#ifdef JS_PUNBOX64
+ return fromI32(r);
+#else
+ RegI32 high = needI32();
+ return RegI64(Register64(high, r));
+#endif
+}
+
+RegI32 BaseCompiler::narrowI64(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return RegI32(r.reg);
+#else
+ freeI32(RegI32(r.high));
+ return RegI32(r.low);
+#endif
+}
+
+RegI32 BaseCompiler::narrowRef(RegRef r) { return RegI32(r); }
+
+RegI32 BaseCompiler::lowPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return RegI32(r.reg);
+#else
+ return RegI32(r.low);
+#endif
+}
+
+RegI32 BaseCompiler::maybeHighPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return RegI32::Invalid();
+#else
+ return RegI32(r.high);
+#endif
+}
+
+void BaseCompiler::maybeClearHighPart(RegI64 r) {
+#if !defined(JS_PUNBOX64)
+ moveImm32(0, RegI32(r.high));
+#endif
+}
+
+// TODO: We want these to be inlined for sure; do we need an `inline` somewhere?
+
+template <>
+inline RegI32 BaseCompiler::need<RegI32>() {
+ return needI32();
+}
+template <>
+inline RegI64 BaseCompiler::need<RegI64>() {
+ return needI64();
+}
+template <>
+inline RegF32 BaseCompiler::need<RegF32>() {
+ return needF32();
+}
+template <>
+inline RegF64 BaseCompiler::need<RegF64>() {
+ return needF64();
+}
+
+template <>
+inline RegI32 BaseCompiler::pop<RegI32>() {
+ return popI32();
+}
+template <>
+inline RegI64 BaseCompiler::pop<RegI64>() {
+ return popI64();
+}
+template <>
+inline RegF32 BaseCompiler::pop<RegF32>() {
+ return popF32();
+}
+template <>
+inline RegF64 BaseCompiler::pop<RegF64>() {
+ return popF64();
+}
+
+#ifdef ENABLE_WASM_SIMD
+template <>
+inline RegV128 BaseCompiler::need<RegV128>() {
+ return needV128();
+}
+template <>
+inline RegV128 BaseCompiler::pop<RegV128>() {
+ return popV128();
+}
+#endif
+
+// RegPtr values can't be pushed, hence can't be popped.
+template <>
+inline RegPtr BaseCompiler::need<RegPtr>() {
+ return needPtr();
+}
+
+void BaseCompiler::needResultRegisters(ResultType type, ResultRegKind which) {
+ if (type.empty()) {
+ return;
+ }
+
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ // Register results are visited first; when we see a stack result we're
+ // done.
+ if (!result.inRegister()) {
+ return;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ needI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ needI64(RegI64(result.gpr64()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ if (which == ResultRegKind::All) {
+ needV128(RegV128(result.fpr()));
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F32:
+ if (which == ResultRegKind::All) {
+ needF32(RegF32(result.fpr()));
+ }
+ break;
+ case ValType::F64:
+ if (which == ResultRegKind::All) {
+ needF64(RegF64(result.fpr()));
+ }
+ break;
+ case ValType::Ref:
+ needRef(RegRef(result.gpr()));
+ break;
+ }
+ }
+}
+
+#ifdef JS_64BIT
+void BaseCompiler::widenInt32ResultRegisters(ResultType type) {
+ if (type.empty()) {
+ return;
+ }
+
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ if (result.inRegister() && result.type().kind() == ValType::I32) {
+ masm.widenInt32(result.gpr());
+ }
+ }
+}
+#endif
+
+void BaseCompiler::freeResultRegisters(ResultType type, ResultRegKind which) {
+ if (type.empty()) {
+ return;
+ }
+
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ // Register results are visited first; when we see a stack result we're
+ // done.
+ if (!result.inRegister()) {
+ return;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ freeI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ freeI64(RegI64(result.gpr64()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ if (which == ResultRegKind::All) {
+ freeV128(RegV128(result.fpr()));
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F32:
+ if (which == ResultRegKind::All) {
+ freeF32(RegF32(result.fpr()));
+ }
+ break;
+ case ValType::F64:
+ if (which == ResultRegKind::All) {
+ freeF64(RegF64(result.fpr()));
+ }
+ break;
+ case ValType::Ref:
+ freeRef(RegRef(result.gpr()));
+ break;
+ }
+ }
+}
+
+void BaseCompiler::needIntegerResultRegisters(ResultType type) {
+ needResultRegisters(type, ResultRegKind::OnlyGPRs);
+}
+
+void BaseCompiler::freeIntegerResultRegisters(ResultType type) {
+ freeResultRegisters(type, ResultRegKind::OnlyGPRs);
+}
+
+void BaseCompiler::needResultRegisters(ResultType type) {
+ needResultRegisters(type, ResultRegKind::All);
+}
+
+void BaseCompiler::freeResultRegisters(ResultType type) {
+ freeResultRegisters(type, ResultRegKind::All);
+}
+
+void BaseCompiler::captureResultRegisters(ResultType type) {
+ assertResultRegistersAvailable(type);
+ needResultRegisters(type);
+}
+
+void BaseCompiler::captureCallResultRegisters(ResultType type) {
+ captureResultRegisters(type);
+#ifdef JS_64BIT
+ widenInt32ResultRegisters(type);
+#endif
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Control stack. Some of these are very hot.
+
+void BaseCompiler::initControl(Control& item, ResultType params) {
+ // Make sure the constructor was run properly
+ MOZ_ASSERT(!item.stackHeight.isValid() && item.stackSize == UINT32_MAX);
+
+ uint32_t paramCount = deadCode_ ? 0 : params.length();
+ uint32_t stackParamSize = stackConsumed(paramCount);
+ item.stackHeight = fr.stackResultsBase(stackParamSize);
+ item.stackSize = stk_.length() - paramCount;
+ item.deadOnArrival = deadCode_;
+ item.bceSafeOnEntry = bceSafe_;
+}
+
+Control& BaseCompiler::controlItem() { return iter_.controlItem(); }
+
+Control& BaseCompiler::controlItem(uint32_t relativeDepth) {
+ return iter_.controlItem(relativeDepth);
+}
+
+Control& BaseCompiler::controlOutermost() { return iter_.controlOutermost(); }
+
+LabelKind BaseCompiler::controlKind(uint32_t relativeDepth) {
+ return iter_.controlKind(relativeDepth);
+}
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_reg_mgmt_inl_h
diff --git a/js/src/wasm/WasmBCStk.h b/js/src/wasm/WasmBCStk.h
new file mode 100644
index 0000000000..330e8abd06
--- /dev/null
+++ b/js/src/wasm/WasmBCStk.h
@@ -0,0 +1,345 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: Wasm value stack.
+
+#ifndef wasm_wasm_baseline_stk_h
+#define wasm_wasm_baseline_stk_h
+
+#include "wasm/WasmBCDefs.h"
+#include "wasm/WasmBCRegDefs.h"
+
+namespace js {
+namespace wasm {
+
+// Value stack: stack elements
+
+struct Stk {
+ private:
+ Stk() : kind_(Unknown), i64val_(0) {}
+
+ public:
+ enum Kind {
+ // The Mem opcodes are all clustered at the beginning to
+ // allow for a quick test within sync().
+ MemI32, // 32-bit integer stack value ("offs")
+ MemI64, // 64-bit integer stack value ("offs")
+ MemF32, // 32-bit floating stack value ("offs")
+ MemF64, // 64-bit floating stack value ("offs")
+#ifdef ENABLE_WASM_SIMD
+ MemV128, // 128-bit vector stack value ("offs")
+#endif
+ MemRef, // reftype (pointer wide) stack value ("offs")
+
+ // The Local opcodes follow the Mem opcodes for a similar
+ // quick test within hasLocal().
+ LocalI32, // Local int32 var ("slot")
+ LocalI64, // Local int64 var ("slot")
+ LocalF32, // Local float32 var ("slot")
+ LocalF64, // Local double var ("slot")
+#ifdef ENABLE_WASM_SIMD
+ LocalV128, // Local v128 var ("slot")
+#endif
+ LocalRef, // Local reftype (pointer wide) var ("slot")
+
+ RegisterI32, // 32-bit integer register ("i32reg")
+ RegisterI64, // 64-bit integer register ("i64reg")
+ RegisterF32, // 32-bit floating register ("f32reg")
+ RegisterF64, // 64-bit floating register ("f64reg")
+#ifdef ENABLE_WASM_SIMD
+ RegisterV128, // 128-bit vector register ("v128reg")
+#endif
+ RegisterRef, // reftype (pointer wide) register ("refReg")
+
+ ConstI32, // 32-bit integer constant ("i32val")
+ ConstI64, // 64-bit integer constant ("i64val")
+ ConstF32, // 32-bit floating constant ("f32val")
+ ConstF64, // 64-bit floating constant ("f64val")
+#ifdef ENABLE_WASM_SIMD
+ ConstV128, // 128-bit vector constant ("v128val")
+#endif
+ ConstRef, // reftype (pointer wide) constant ("refval")
+
+ Unknown,
+ };
+
+ Kind kind_;
+
+ static const Kind MemLast = MemRef;
+ static const Kind LocalLast = LocalRef;
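+
+  // Illustrative sketch of the two "quick tests" these sentinels enable, which
+  // rely on the Kind ordering above (all Mem* first, then all Local*):
+  //
+  //   bool isMemKind   = k <= MemLast;                    // any Mem* kind
+  //   bool isLocalKind = k > MemLast && k <= LocalLast;   // any Local* kind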
+
+ union {
+ RegI32 i32reg_;
+ RegI64 i64reg_;
+ RegRef refReg_;
+ RegF32 f32reg_;
+ RegF64 f64reg_;
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128reg_;
+#endif
+ int32_t i32val_;
+ int64_t i64val_;
+ intptr_t refval_;
+ float f32val_;
+ double f64val_;
+#ifdef ENABLE_WASM_SIMD
+ V128 v128val_;
+#endif
+ uint32_t slot_;
+ uint32_t offs_;
+ };
+
+ explicit Stk(RegI32 r) : kind_(RegisterI32), i32reg_(r) {}
+ explicit Stk(RegI64 r) : kind_(RegisterI64), i64reg_(r) {}
+ explicit Stk(RegRef r) : kind_(RegisterRef), refReg_(r) {}
+ explicit Stk(RegF32 r) : kind_(RegisterF32), f32reg_(r) {}
+ explicit Stk(RegF64 r) : kind_(RegisterF64), f64reg_(r) {}
+#ifdef ENABLE_WASM_SIMD
+ explicit Stk(RegV128 r) : kind_(RegisterV128), v128reg_(r) {}
+#endif
+ explicit Stk(int32_t v) : kind_(ConstI32), i32val_(v) {}
+ explicit Stk(uint32_t v) : kind_(ConstI32), i32val_(int32_t(v)) {}
+ explicit Stk(int64_t v) : kind_(ConstI64), i64val_(v) {}
+ explicit Stk(float v) : kind_(ConstF32), f32val_(v) {}
+ explicit Stk(double v) : kind_(ConstF64), f64val_(v) {}
+#ifdef ENABLE_WASM_SIMD
+ explicit Stk(V128 v) : kind_(ConstV128), v128val_(v) {}
+#endif
+ explicit Stk(Kind k, uint32_t v) : kind_(k), slot_(v) {
+ MOZ_ASSERT(k > MemLast && k <= LocalLast);
+ }
+ static Stk StkRef(intptr_t v) {
+ Stk s;
+ s.kind_ = ConstRef;
+ s.refval_ = v;
+ return s;
+ }
+ static Stk StackResult(ValType type, uint32_t offs) {
+ Kind k;
+ switch (type.kind()) {
+ case ValType::I32:
+ k = Stk::MemI32;
+ break;
+ case ValType::I64:
+ k = Stk::MemI64;
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ k = Stk::MemV128;
+ break;
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+ case ValType::F32:
+ k = Stk::MemF32;
+ break;
+ case ValType::F64:
+ k = Stk::MemF64;
+ break;
+ case ValType::Ref:
+ k = Stk::MemRef;
+ break;
+ }
+ Stk s;
+ s.setOffs(k, offs);
+ return s;
+ }
+
+ void setOffs(Kind k, uint32_t v) {
+ MOZ_ASSERT(k <= MemLast);
+ kind_ = k;
+ offs_ = v;
+ }
+
+ Kind kind() const { return kind_; }
+ bool isMem() const { return kind_ <= MemLast; }
+
+ RegI32 i32reg() const {
+ MOZ_ASSERT(kind_ == RegisterI32);
+ return i32reg_;
+ }
+ RegI64 i64reg() const {
+ MOZ_ASSERT(kind_ == RegisterI64);
+ return i64reg_;
+ }
+ RegRef refReg() const {
+ MOZ_ASSERT(kind_ == RegisterRef);
+ return refReg_;
+ }
+ RegF32 f32reg() const {
+ MOZ_ASSERT(kind_ == RegisterF32);
+ return f32reg_;
+ }
+ RegF64 f64reg() const {
+ MOZ_ASSERT(kind_ == RegisterF64);
+ return f64reg_;
+ }
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128reg() const {
+ MOZ_ASSERT(kind_ == RegisterV128);
+ return v128reg_;
+ }
+#endif
+ int32_t i32val() const {
+ MOZ_ASSERT(kind_ == ConstI32);
+ return i32val_;
+ }
+ int64_t i64val() const {
+ MOZ_ASSERT(kind_ == ConstI64);
+ return i64val_;
+ }
+ intptr_t refval() const {
+ MOZ_ASSERT(kind_ == ConstRef);
+ return refval_;
+ }
+
+ // For these two, use an out-param instead of simply returning, to
+ // use the normal stack and not the x87 FP stack (which has effect on
+ // NaNs with the signaling bit set).
+
+ void f32val(float* out) const {
+ MOZ_ASSERT(kind_ == ConstF32);
+ *out = f32val_;
+ }
+ void f64val(double* out) const {
+ MOZ_ASSERT(kind_ == ConstF64);
+ *out = f64val_;
+ }
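+
+  // Illustrative call pattern (this is what the compiler's loadConstF64 /
+  // loadConstF32 helpers do):
+  //
+  //   double d;
+  //   src.f64val(&d);                    // copied via memory, not via x87
+  //   masm.loadConstantDouble(d, dest);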
+
+#ifdef ENABLE_WASM_SIMD
+ // For SIMD, do the same as for floats since we're using float registers to
+ // hold vectors; this is just conservative.
+ void v128val(V128* out) const {
+ MOZ_ASSERT(kind_ == ConstV128);
+ *out = v128val_;
+ }
+#endif
+
+ uint32_t slot() const {
+ MOZ_ASSERT(kind_ > MemLast && kind_ <= LocalLast);
+ return slot_;
+ }
+ uint32_t offs() const {
+ MOZ_ASSERT(isMem());
+ return offs_;
+ }
+
+#ifdef DEBUG
+ // Print a stack element (Stk) to stderr. Skip the trailing \n. Printing
+ // of the actual contents of each stack element (see case ConstI32) can be
+  // filled in on demand -- even printing just each element's `kind_` field can
+  // be very useful.
+ void showStackElem() const {
+ switch (kind_) {
+ case MemI32:
+ fprintf(stderr, "MemI32()");
+ break;
+ case MemI64:
+ fprintf(stderr, "MemI64()");
+ break;
+ case MemF32:
+ fprintf(stderr, "MemF32()");
+ break;
+ case MemF64:
+ fprintf(stderr, "MemF64()");
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case MemV128:
+ fprintf(stderr, "MemV128()");
+ break;
+# endif
+ case MemRef:
+ fprintf(stderr, "MemRef()");
+ break;
+ case LocalI32:
+ fprintf(stderr, "LocalI32()");
+ break;
+ case LocalI64:
+ fprintf(stderr, "LocalI64()");
+ break;
+ case LocalF32:
+ fprintf(stderr, "LocalF32()");
+ break;
+ case LocalF64:
+ fprintf(stderr, "LocalF64()");
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case LocalV128:
+ fprintf(stderr, "LocalV128()");
+ break;
+# endif
+ case LocalRef:
+ fprintf(stderr, "LocalRef()");
+ break;
+ case RegisterI32:
+ fprintf(stderr, "RegisterI32()");
+ break;
+ case RegisterI64:
+ fprintf(stderr, "RegisterI64()");
+ break;
+ case RegisterF32:
+ fprintf(stderr, "RegisterF32()");
+ break;
+ case RegisterF64:
+ fprintf(stderr, "RegisterF64()");
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case RegisterV128:
+ fprintf(stderr, "RegisterV128()");
+ break;
+# endif
+ case RegisterRef:
+ fprintf(stderr, "RegisterRef()");
+ break;
+ case ConstI32:
+ fprintf(stderr, "ConstI32(%d)", (int)i32val_);
+ break;
+ case ConstI64:
+ fprintf(stderr, "ConstI64()");
+ break;
+ case ConstF32:
+ fprintf(stderr, "ConstF32()");
+ break;
+ case ConstF64:
+ fprintf(stderr, "ConstF64()");
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case ConstV128:
+ fprintf(stderr, "ConstV128()");
+ break;
+# endif
+ case ConstRef:
+ fprintf(stderr, "ConstRef()");
+ break;
+ case Unknown:
+ fprintf(stderr, "Unknown()");
+ break;
+ default:
+ fprintf(stderr, "!! Stk::showStackElem !!");
+ break;
+ }
+ }
+#endif
+};
+
+using StkVector = Vector<Stk, 0, SystemAllocPolicy>;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_stk_h
diff --git a/js/src/wasm/WasmBCStkMgmt-inl.h b/js/src/wasm/WasmBCStkMgmt-inl.h
new file mode 100644
index 0000000000..6ce55cab3a
--- /dev/null
+++ b/js/src/wasm/WasmBCStkMgmt-inl.h
@@ -0,0 +1,1320 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an INTERNAL header for Wasm baseline compiler: inline methods in the
+// compiler for Stk values and value stack management.
+
+#ifndef wasm_wasm_baseline_stk_mgmt_inl_h
+#define wasm_wasm_baseline_stk_mgmt_inl_h
+
+namespace js {
+namespace wasm {
+
+#ifdef DEBUG
+size_t BaseCompiler::countMemRefsOnStk() {
+ size_t nRefs = 0;
+ for (Stk& v : stk_) {
+ if (v.kind() == Stk::MemRef) {
+ nRefs++;
+ }
+ }
+ return nRefs;
+}
+#endif
+
+template <typename T>
+void BaseCompiler::push(T item) {
+ // None of the single-arg Stk constructors create a Stk::MemRef, so
+ // there's no need to increment stackMapGenerator_.memRefsOnStk here.
+ stk_.infallibleEmplaceBack(Stk(item));
+}
+
+void BaseCompiler::pushConstRef(intptr_t v) {
+ stk_.infallibleEmplaceBack(Stk::StkRef(v));
+}
+
+void BaseCompiler::loadConstI32(const Stk& src, RegI32 dest) {
+ moveImm32(src.i32val(), dest);
+}
+
+void BaseCompiler::loadMemI32(const Stk& src, RegI32 dest) {
+ fr.loadStackI32(src.offs(), dest);
+}
+
+void BaseCompiler::loadLocalI32(const Stk& src, RegI32 dest) {
+ fr.loadLocalI32(localFromSlot(src.slot(), MIRType::Int32), dest);
+}
+
+void BaseCompiler::loadRegisterI32(const Stk& src, RegI32 dest) {
+ moveI32(src.i32reg(), dest);
+}
+
+void BaseCompiler::loadConstI64(const Stk& src, RegI64 dest) {
+ moveImm64(src.i64val(), dest);
+}
+
+void BaseCompiler::loadMemI64(const Stk& src, RegI64 dest) {
+ fr.loadStackI64(src.offs(), dest);
+}
+
+void BaseCompiler::loadLocalI64(const Stk& src, RegI64 dest) {
+ fr.loadLocalI64(localFromSlot(src.slot(), MIRType::Int64), dest);
+}
+
+void BaseCompiler::loadRegisterI64(const Stk& src, RegI64 dest) {
+ moveI64(src.i64reg(), dest);
+}
+
+void BaseCompiler::loadConstRef(const Stk& src, RegRef dest) {
+ moveImmRef(src.refval(), dest);
+}
+
+void BaseCompiler::loadMemRef(const Stk& src, RegRef dest) {
+ fr.loadStackRef(src.offs(), dest);
+}
+
+void BaseCompiler::loadLocalRef(const Stk& src, RegRef dest) {
+ fr.loadLocalRef(localFromSlot(src.slot(), MIRType::RefOrNull), dest);
+}
+
+void BaseCompiler::loadRegisterRef(const Stk& src, RegRef dest) {
+ moveRef(src.refReg(), dest);
+}
+
+void BaseCompiler::loadConstF64(const Stk& src, RegF64 dest) {
+ double d;
+ src.f64val(&d);
+ masm.loadConstantDouble(d, dest);
+}
+
+void BaseCompiler::loadMemF64(const Stk& src, RegF64 dest) {
+ fr.loadStackF64(src.offs(), dest);
+}
+
+void BaseCompiler::loadLocalF64(const Stk& src, RegF64 dest) {
+ fr.loadLocalF64(localFromSlot(src.slot(), MIRType::Double), dest);
+}
+
+void BaseCompiler::loadRegisterF64(const Stk& src, RegF64 dest) {
+ moveF64(src.f64reg(), dest);
+}
+
+void BaseCompiler::loadConstF32(const Stk& src, RegF32 dest) {
+ float f;
+ src.f32val(&f);
+ masm.loadConstantFloat32(f, dest);
+}
+
+void BaseCompiler::loadMemF32(const Stk& src, RegF32 dest) {
+ fr.loadStackF32(src.offs(), dest);
+}
+
+void BaseCompiler::loadLocalF32(const Stk& src, RegF32 dest) {
+ fr.loadLocalF32(localFromSlot(src.slot(), MIRType::Float32), dest);
+}
+
+void BaseCompiler::loadRegisterF32(const Stk& src, RegF32 dest) {
+ moveF32(src.f32reg(), dest);
+}
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::loadConstV128(const Stk& src, RegV128 dest) {
+ V128 f;
+ src.v128val(&f);
+ masm.loadConstantSimd128(SimdConstant::CreateX16((int8_t*)f.bytes), dest);
+}
+
+void BaseCompiler::loadMemV128(const Stk& src, RegV128 dest) {
+ fr.loadStackV128(src.offs(), dest);
+}
+
+void BaseCompiler::loadLocalV128(const Stk& src, RegV128 dest) {
+ fr.loadLocalV128(localFromSlot(src.slot(), MIRType::Simd128), dest);
+}
+
+void BaseCompiler::loadRegisterV128(const Stk& src, RegV128 dest) {
+ moveV128(src.v128reg(), dest);
+}
+#endif
+
+void BaseCompiler::loadI32(const Stk& src, RegI32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI32:
+ loadConstI32(src, dest);
+ break;
+ case Stk::MemI32:
+ loadMemI32(src, dest);
+ break;
+ case Stk::LocalI32:
+ loadLocalI32(src, dest);
+ break;
+ case Stk::RegisterI32:
+ loadRegisterI32(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I32 on stack");
+ }
+}
+
+void BaseCompiler::loadI64(const Stk& src, RegI64 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ loadConstI64(src, dest);
+ break;
+ case Stk::MemI64:
+ loadMemI64(src, dest);
+ break;
+ case Stk::LocalI64:
+ loadLocalI64(src, dest);
+ break;
+ case Stk::RegisterI64:
+ loadRegisterI64(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I64 on stack");
+ }
+}
+
+#if !defined(JS_PUNBOX64)
+void BaseCompiler::loadI64Low(const Stk& src, RegI32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ moveImm32(int32_t(src.i64val()), dest);
+ break;
+ case Stk::MemI64:
+ fr.loadStackI64Low(src.offs(), dest);
+ break;
+ case Stk::LocalI64:
+ fr.loadLocalI64Low(localFromSlot(src.slot(), MIRType::Int64), dest);
+ break;
+ case Stk::RegisterI64:
+ moveI32(RegI32(src.i64reg().low), dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I64 on stack");
+ }
+}
+
+void BaseCompiler::loadI64High(const Stk& src, RegI32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ moveImm32(int32_t(src.i64val() >> 32), dest);
+ break;
+ case Stk::MemI64:
+ fr.loadStackI64High(src.offs(), dest);
+ break;
+ case Stk::LocalI64:
+ fr.loadLocalI64High(localFromSlot(src.slot(), MIRType::Int64), dest);
+ break;
+ case Stk::RegisterI64:
+ moveI32(RegI32(src.i64reg().high), dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I64 on stack");
+ }
+}
+#endif
+
+void BaseCompiler::loadF64(const Stk& src, RegF64 dest) {
+ switch (src.kind()) {
+ case Stk::ConstF64:
+ loadConstF64(src, dest);
+ break;
+ case Stk::MemF64:
+ loadMemF64(src, dest);
+ break;
+ case Stk::LocalF64:
+ loadLocalF64(src, dest);
+ break;
+ case Stk::RegisterF64:
+ loadRegisterF64(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected F64 on stack");
+ }
+}
+
+void BaseCompiler::loadF32(const Stk& src, RegF32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstF32:
+ loadConstF32(src, dest);
+ break;
+ case Stk::MemF32:
+ loadMemF32(src, dest);
+ break;
+ case Stk::LocalF32:
+ loadLocalF32(src, dest);
+ break;
+ case Stk::RegisterF32:
+ loadRegisterF32(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected F32 on stack");
+ }
+}
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::loadV128(const Stk& src, RegV128 dest) {
+ switch (src.kind()) {
+ case Stk::ConstV128:
+ loadConstV128(src, dest);
+ break;
+ case Stk::MemV128:
+ loadMemV128(src, dest);
+ break;
+ case Stk::LocalV128:
+ loadLocalV128(src, dest);
+ break;
+ case Stk::RegisterV128:
+ loadRegisterV128(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected V128 on stack");
+ }
+}
+#endif
+
+void BaseCompiler::loadRef(const Stk& src, RegRef dest) {
+ switch (src.kind()) {
+ case Stk::ConstRef:
+ loadConstRef(src, dest);
+ break;
+ case Stk::MemRef:
+ loadMemRef(src, dest);
+ break;
+ case Stk::LocalRef:
+ loadLocalRef(src, dest);
+ break;
+ case Stk::RegisterRef:
+ loadRegisterRef(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected ref on stack");
+ }
+}
+
+void BaseCompiler::peekRefAt(uint32_t depth, RegRef dest) {
+ MOZ_ASSERT(depth < stk_.length());
+ Stk& src = peek(stk_.length() - depth - 1);
+ loadRef(src, dest);
+}
+
+// Flush all local and register value stack elements to memory.
+//
+// TODO / OPTIMIZE: As this is fairly expensive and causes worse
+// code to be emitted subsequently, it is useful to avoid calling
+// it. (Bug 1316802)
+//
+// Some optimization has been done already. Remaining
+// opportunities:
+//
+// - It would be interesting to see if we can specialize it
+// before calls with particularly simple signatures, or where
+// we can do parallel assignment of register arguments, or
+// similar. See notes in emitCall().
+//
+// - Operations that need specific registers: multiply, quotient,
+// remainder, will tend to sync because the registers we need
+// will tend to be allocated. We may be able to avoid that by
+// prioritizing registers differently (takeLast instead of
+// takeFirst) but we may also be able to allocate an unused
+// register on demand to free up one we need, thus avoiding the
+// sync. That type of fix would go into needI32().
+
+void BaseCompiler::sync() {
+ size_t start = 0;
+ size_t lim = stk_.length();
+
+ for (size_t i = lim; i > 0; i--) {
+ // Memory opcodes are first in the enum, single check against MemLast is
+ // fine.
+ if (stk_[i - 1].kind() <= Stk::MemLast) {
+ start = i;
+ break;
+ }
+ }
+
+ for (size_t i = start; i < lim; i++) {
+ Stk& v = stk_[i];
+ switch (v.kind()) {
+ case Stk::LocalI32: {
+ ScratchI32 scratch(*this);
+ loadLocalI32(v, scratch);
+ uint32_t offs = fr.pushGPR(scratch);
+ v.setOffs(Stk::MemI32, offs);
+ break;
+ }
+ case Stk::RegisterI32: {
+ uint32_t offs = fr.pushGPR(v.i32reg());
+ freeI32(v.i32reg());
+ v.setOffs(Stk::MemI32, offs);
+ break;
+ }
+ case Stk::LocalI64: {
+ ScratchI32 scratch(*this);
+#ifdef JS_PUNBOX64
+ loadI64(v, fromI32(scratch));
+ uint32_t offs = fr.pushGPR(scratch);
+#else
+ fr.loadLocalI64High(localFromSlot(v.slot(), MIRType::Int64), scratch);
+ fr.pushGPR(scratch);
+ fr.loadLocalI64Low(localFromSlot(v.slot(), MIRType::Int64), scratch);
+ uint32_t offs = fr.pushGPR(scratch);
+#endif
+ v.setOffs(Stk::MemI64, offs);
+ break;
+ }
+ case Stk::RegisterI64: {
+#ifdef JS_PUNBOX64
+ uint32_t offs = fr.pushGPR(v.i64reg().reg);
+ freeI64(v.i64reg());
+#else
+ fr.pushGPR(v.i64reg().high);
+ uint32_t offs = fr.pushGPR(v.i64reg().low);
+ freeI64(v.i64reg());
+#endif
+ v.setOffs(Stk::MemI64, offs);
+ break;
+ }
+ case Stk::LocalF64: {
+ ScratchF64 scratch(*this);
+ loadF64(v, scratch);
+ uint32_t offs = fr.pushDouble(scratch);
+ v.setOffs(Stk::MemF64, offs);
+ break;
+ }
+ case Stk::RegisterF64: {
+ uint32_t offs = fr.pushDouble(v.f64reg());
+ freeF64(v.f64reg());
+ v.setOffs(Stk::MemF64, offs);
+ break;
+ }
+ case Stk::LocalF32: {
+ ScratchF32 scratch(*this);
+ loadF32(v, scratch);
+ uint32_t offs = fr.pushFloat32(scratch);
+ v.setOffs(Stk::MemF32, offs);
+ break;
+ }
+ case Stk::RegisterF32: {
+ uint32_t offs = fr.pushFloat32(v.f32reg());
+ freeF32(v.f32reg());
+ v.setOffs(Stk::MemF32, offs);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case Stk::LocalV128: {
+ ScratchV128 scratch(*this);
+ loadV128(v, scratch);
+ uint32_t offs = fr.pushV128(scratch);
+ v.setOffs(Stk::MemV128, offs);
+ break;
+ }
+ case Stk::RegisterV128: {
+ uint32_t offs = fr.pushV128(v.v128reg());
+ freeV128(v.v128reg());
+ v.setOffs(Stk::MemV128, offs);
+ break;
+ }
+#endif
+ case Stk::LocalRef: {
+ ScratchRef scratch(*this);
+ loadLocalRef(v, scratch);
+ uint32_t offs = fr.pushGPR(scratch);
+ v.setOffs(Stk::MemRef, offs);
+ stackMapGenerator_.memRefsOnStk++;
+ break;
+ }
+ case Stk::RegisterRef: {
+ uint32_t offs = fr.pushGPR(v.refReg());
+ freeRef(v.refReg());
+ v.setOffs(Stk::MemRef, offs);
+ stackMapGenerator_.memRefsOnStk++;
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+}
+
+// This is an optimization used to avoid calling sync() for
+// setLocal(): if the local does not exist unresolved on the stack
+// then we can skip the sync.
+
+bool BaseCompiler::hasLocal(uint32_t slot) {
+ for (size_t i = stk_.length(); i > 0; i--) {
+ // Memory opcodes are first in the enum, single check against MemLast is
+ // fine.
+ Stk::Kind kind = stk_[i - 1].kind();
+ if (kind <= Stk::MemLast) {
+ return false;
+ }
+
+ // Local opcodes follow memory opcodes in the enum, single check against
+ // LocalLast is sufficient.
+ if (kind <= Stk::LocalLast && stk_[i - 1].slot() == slot) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void BaseCompiler::syncLocal(uint32_t slot) {
+ if (hasLocal(slot)) {
+ sync(); // TODO / OPTIMIZE: Improve this? (Bug 1316817)
+ }
+}
+
+// Push the register r onto the stack.
+
+void BaseCompiler::pushAny(AnyReg r) {
+ switch (r.tag) {
+ case AnyReg::I32: {
+ pushI32(r.i32());
+ break;
+ }
+ case AnyReg::I64: {
+ pushI64(r.i64());
+ break;
+ }
+ case AnyReg::F32: {
+ pushF32(r.f32());
+ break;
+ }
+ case AnyReg::F64: {
+ pushF64(r.f64());
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case AnyReg::V128: {
+ pushV128(r.v128());
+ break;
+ }
+#endif
+ case AnyReg::REF: {
+ pushRef(r.ref());
+ break;
+ }
+ }
+}
+
+void BaseCompiler::pushI32(RegI32 r) {
+ MOZ_ASSERT(!isAvailableI32(r));
+ push(Stk(r));
+}
+
+void BaseCompiler::pushI64(RegI64 r) {
+ MOZ_ASSERT(!isAvailableI64(r));
+ push(Stk(r));
+}
+
+void BaseCompiler::pushRef(RegRef r) {
+ MOZ_ASSERT(!isAvailableRef(r));
+ push(Stk(r));
+}
+
+void BaseCompiler::pushPtr(RegPtr r) {
+ MOZ_ASSERT(!isAvailablePtr(r));
+#ifdef JS_64BIT
+ pushI64(RegI64(Register64(r)));
+#else
+ pushI32(RegI32(r));
+#endif
+}
+
+void BaseCompiler::pushF64(RegF64 r) {
+ MOZ_ASSERT(!isAvailableF64(r));
+ push(Stk(r));
+}
+
+void BaseCompiler::pushF32(RegF32 r) {
+ MOZ_ASSERT(!isAvailableF32(r));
+ push(Stk(r));
+}
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::pushV128(RegV128 r) {
+ MOZ_ASSERT(!isAvailableV128(r));
+ push(Stk(r));
+}
+#endif
+
+// Push the value onto the stack. PushI32 can also take uint32_t, and PushI64
+// can take uint64_t; the semantics are the same. Appropriate sign extension
+// for a 32-bit value on a 64-bit architecture happens when the value is
+// popped; see the definition of moveImm32 below.
+
+void BaseCompiler::pushI32(int32_t v) { push(Stk(v)); }
+
+void BaseCompiler::pushI64(int64_t v) { push(Stk(v)); }
+
+void BaseCompiler::pushRef(intptr_t v) { pushConstRef(v); }
+
+void BaseCompiler::pushPtr(intptr_t v) {
+#ifdef JS_64BIT
+ pushI64(v);
+#else
+ pushI32(v);
+#endif
+}
+
+void BaseCompiler::pushF64(double v) { push(Stk(v)); }
+
+void BaseCompiler::pushF32(float v) { push(Stk(v)); }
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::pushV128(V128 v) { push(Stk(v)); }
+#endif
+
+// Push the local slot onto the stack. The slot will not be read
+// here; it will be read when it is consumed, or when a side
+// effect to the slot forces its value to be saved.
+
+void BaseCompiler::pushLocalI32(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalI32, slot));
+}
+
+void BaseCompiler::pushLocalI64(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalI64, slot));
+}
+
+void BaseCompiler::pushLocalRef(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalRef, slot));
+}
+
+void BaseCompiler::pushLocalF64(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalF64, slot));
+}
+
+void BaseCompiler::pushLocalF32(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalF32, slot));
+}
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::pushLocalV128(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalV128, slot));
+}
+#endif
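+
+// Minimal sketch of why the lazy read above is safe: a write to a local slot
+// goes through syncLocal(slot) first (see hasLocal() above), so any
+// still-unresolved Local* stack entries referring to that slot are flushed to
+// memory before the slot changes:
+//
+//   syncLocal(slot);   // spill pending Local*(slot) entries, if any
+//   ... then store the new value into the local's frame slot ...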
+
+void BaseCompiler::pushU32AsI64(RegI32 rs) {
+ RegI64 rd = widenI32(rs);
+ masm.move32To64ZeroExtend(rs, rd);
+ pushI64(rd);
+}
+
+AnyReg BaseCompiler::popAny(AnyReg specific) {
+ switch (stk_.back().kind()) {
+ case Stk::MemI32:
+ case Stk::LocalI32:
+ case Stk::RegisterI32:
+ case Stk::ConstI32:
+ return AnyReg(popI32(specific.i32()));
+
+ case Stk::MemI64:
+ case Stk::LocalI64:
+ case Stk::RegisterI64:
+ case Stk::ConstI64:
+ return AnyReg(popI64(specific.i64()));
+
+ case Stk::MemF32:
+ case Stk::LocalF32:
+ case Stk::RegisterF32:
+ case Stk::ConstF32:
+ return AnyReg(popF32(specific.f32()));
+
+ case Stk::MemF64:
+ case Stk::LocalF64:
+ case Stk::RegisterF64:
+ case Stk::ConstF64:
+ return AnyReg(popF64(specific.f64()));
+
+#ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ case Stk::LocalV128:
+ case Stk::RegisterV128:
+ case Stk::ConstV128:
+ return AnyReg(popV128(specific.v128()));
+#endif
+
+ case Stk::MemRef:
+ case Stk::LocalRef:
+ case Stk::RegisterRef:
+ case Stk::ConstRef:
+ return AnyReg(popRef(specific.ref()));
+
+ case Stk::Unknown:
+ MOZ_CRASH();
+
+ default:
+ MOZ_CRASH();
+ }
+}
+
+AnyReg BaseCompiler::popAny() {
+ switch (stk_.back().kind()) {
+ case Stk::MemI32:
+ case Stk::LocalI32:
+ case Stk::RegisterI32:
+ case Stk::ConstI32:
+ return AnyReg(popI32());
+
+ case Stk::MemI64:
+ case Stk::LocalI64:
+ case Stk::RegisterI64:
+ case Stk::ConstI64:
+ return AnyReg(popI64());
+
+ case Stk::MemF32:
+ case Stk::LocalF32:
+ case Stk::RegisterF32:
+ case Stk::ConstF32:
+ return AnyReg(popF32());
+
+ case Stk::MemF64:
+ case Stk::LocalF64:
+ case Stk::RegisterF64:
+ case Stk::ConstF64:
+ return AnyReg(popF64());
+
+#ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ case Stk::LocalV128:
+ case Stk::RegisterV128:
+ case Stk::ConstV128:
+ return AnyReg(popV128());
+#endif
+
+ case Stk::MemRef:
+ case Stk::LocalRef:
+ case Stk::RegisterRef:
+ case Stk::ConstRef:
+ return AnyReg(popRef());
+
+ case Stk::Unknown:
+ MOZ_CRASH();
+
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// Call only from other popI32() variants.
+// v must be the stack top. May pop the CPU stack.
+
+void BaseCompiler::popI32(const Stk& v, RegI32 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstI32:
+ loadConstI32(v, dest);
+ break;
+ case Stk::LocalI32:
+ loadLocalI32(v, dest);
+ break;
+ case Stk::MemI32:
+ fr.popGPR(dest);
+ break;
+ case Stk::RegisterI32:
+ loadRegisterI32(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected int on stack");
+ }
+}
+
+RegI32 BaseCompiler::popI32() {
+ Stk& v = stk_.back();
+ RegI32 r;
+ if (v.kind() == Stk::RegisterI32) {
+ r = v.i32reg();
+ } else {
+ popI32(v, (r = needI32()));
+ }
+ stk_.popBack();
+ return r;
+}
+
+RegI32 BaseCompiler::popI32(RegI32 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterI32 && v.i32reg() == specific)) {
+ needI32(specific);
+ popI32(v, specific);
+ if (v.kind() == Stk::RegisterI32) {
+ freeI32(v.i32reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+}
+
+#ifdef ENABLE_WASM_SIMD
+// Call only from other popV128() variants.
+// v must be the stack top. May pop the CPU stack.
+
+void BaseCompiler::popV128(const Stk& v, RegV128 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstV128:
+ loadConstV128(v, dest);
+ break;
+ case Stk::LocalV128:
+ loadLocalV128(v, dest);
+ break;
+ case Stk::MemV128:
+ fr.popV128(dest);
+ break;
+ case Stk::RegisterV128:
+ loadRegisterV128(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected int on stack");
+ }
+}
+
+RegV128 BaseCompiler::popV128() {
+ Stk& v = stk_.back();
+ RegV128 r;
+ if (v.kind() == Stk::RegisterV128) {
+ r = v.v128reg();
+ } else {
+ popV128(v, (r = needV128()));
+ }
+ stk_.popBack();
+ return r;
+}
+
+RegV128 BaseCompiler::popV128(RegV128 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterV128 && v.v128reg() == specific)) {
+ needV128(specific);
+ popV128(v, specific);
+ if (v.kind() == Stk::RegisterV128) {
+ freeV128(v.v128reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+}
+#endif
+
+// Call only from other popI64() variants.
+// v must be the stack top. May pop the CPU stack.
+
+void BaseCompiler::popI64(const Stk& v, RegI64 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstI64:
+ loadConstI64(v, dest);
+ break;
+ case Stk::LocalI64:
+ loadLocalI64(v, dest);
+ break;
+ case Stk::MemI64:
+#ifdef JS_PUNBOX64
+ fr.popGPR(dest.reg);
+#else
+ fr.popGPR(dest.low);
+ fr.popGPR(dest.high);
+#endif
+ break;
+ case Stk::RegisterI64:
+ loadRegisterI64(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected long on stack");
+ }
+}
+
+RegI64 BaseCompiler::popI64() {
+ Stk& v = stk_.back();
+ RegI64 r;
+ if (v.kind() == Stk::RegisterI64) {
+ r = v.i64reg();
+ } else {
+ popI64(v, (r = needI64()));
+ }
+ stk_.popBack();
+ return r;
+}
+
+// Note that on 32-bit systems the stack top can occupy one half of
+// "specific". We could optimize that case, but for simplicity, if the
+// register does not match exactly, we just force the stack top to memory
+// and then read it back in.
+
+RegI64 BaseCompiler::popI64(RegI64 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterI64 && v.i64reg() == specific)) {
+ needI64(specific);
+ popI64(v, specific);
+ if (v.kind() == Stk::RegisterI64) {
+ freeI64(v.i64reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+}
+
+// Call only from other popRef() variants.
+// v must be the stack top. May pop the CPU stack.
+
+void BaseCompiler::popRef(const Stk& v, RegRef dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstRef:
+ loadConstRef(v, dest);
+ break;
+ case Stk::LocalRef:
+ loadLocalRef(v, dest);
+ break;
+ case Stk::MemRef:
+ fr.popGPR(dest);
+ break;
+ case Stk::RegisterRef:
+ loadRegisterRef(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected ref on stack");
+ }
+}
+
+RegRef BaseCompiler::popRef(RegRef specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterRef && v.refReg() == specific)) {
+ needRef(specific);
+ popRef(v, specific);
+ if (v.kind() == Stk::RegisterRef) {
+ freeRef(v.refReg());
+ }
+ }
+
+ stk_.popBack();
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk--;
+ }
+ return specific;
+}
+
+RegRef BaseCompiler::popRef() {
+ Stk& v = stk_.back();
+ RegRef r;
+ if (v.kind() == Stk::RegisterRef) {
+ r = v.refReg();
+ } else {
+ popRef(v, (r = needRef()));
+ }
+ stk_.popBack();
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk--;
+ }
+ return r;
+}
+
+// Call only from other popPtr() variants.
+// v must be the stack top. May pop the CPU stack.
+
+void BaseCompiler::popPtr(const Stk& v, RegPtr dest) {
+#ifdef JS_64BIT
+ popI64(v, RegI64(Register64(dest)));
+#else
+ popI32(v, RegI32(dest));
+#endif
+}
+
+RegPtr BaseCompiler::popPtr(RegPtr specific) {
+#ifdef JS_64BIT
+ return RegPtr(popI64(RegI64(Register64(specific))).reg);
+#else
+ return RegPtr(popI32(RegI32(specific)));
+#endif
+}
+
+RegPtr BaseCompiler::popPtr() {
+#ifdef JS_64BIT
+ return RegPtr(popI64().reg);
+#else
+ return RegPtr(popI32());
+#endif
+}
+
+// Call only from other popF64() variants.
+// v must be the stack top. May pop the CPU stack.
+
+void BaseCompiler::popF64(const Stk& v, RegF64 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstF64:
+ loadConstF64(v, dest);
+ break;
+ case Stk::LocalF64:
+ loadLocalF64(v, dest);
+ break;
+ case Stk::MemF64:
+ fr.popDouble(dest);
+ break;
+ case Stk::RegisterF64:
+ loadRegisterF64(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected double on stack");
+ }
+}
+
+RegF64 BaseCompiler::popF64() {
+ Stk& v = stk_.back();
+ RegF64 r;
+ if (v.kind() == Stk::RegisterF64) {
+ r = v.f64reg();
+ } else {
+ popF64(v, (r = needF64()));
+ }
+ stk_.popBack();
+ return r;
+}
+
+RegF64 BaseCompiler::popF64(RegF64 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterF64 && v.f64reg() == specific)) {
+ needF64(specific);
+ popF64(v, specific);
+ if (v.kind() == Stk::RegisterF64) {
+ freeF64(v.f64reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+}
+
+// Call only from other popF32() variants.
+// v must be the stack top. May pop the CPU stack.
+
+void BaseCompiler::popF32(const Stk& v, RegF32 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstF32:
+ loadConstF32(v, dest);
+ break;
+ case Stk::LocalF32:
+ loadLocalF32(v, dest);
+ break;
+ case Stk::MemF32:
+ fr.popFloat32(dest);
+ break;
+ case Stk::RegisterF32:
+ loadRegisterF32(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected float on stack");
+ }
+}
+
+RegF32 BaseCompiler::popF32() {
+ Stk& v = stk_.back();
+ RegF32 r;
+ if (v.kind() == Stk::RegisterF32) {
+ r = v.f32reg();
+ } else {
+ popF32(v, (r = needF32()));
+ }
+ stk_.popBack();
+ return r;
+}
+
+RegF32 BaseCompiler::popF32(RegF32 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterF32 && v.f32reg() == specific)) {
+ needF32(specific);
+ popF32(v, specific);
+ if (v.kind() == Stk::RegisterF32) {
+ freeF32(v.f32reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+}
+
+bool BaseCompiler::hasConst() const {
+ const Stk& v = stk_.back();
+ switch (v.kind()) {
+ case Stk::ConstI32:
+ case Stk::ConstI64:
+ case Stk::ConstF32:
+ case Stk::ConstF64:
+#ifdef ENABLE_WASM_SIMD
+ case Stk::ConstV128:
+#endif
+ case Stk::ConstRef:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool BaseCompiler::popConst(int32_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c = v.i32val();
+ stk_.popBack();
+ return true;
+}
+
+bool BaseCompiler::popConst(int64_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI64) {
+ return false;
+ }
+ *c = v.i64val();
+ stk_.popBack();
+ return true;
+}
+
+bool BaseCompiler::peekConst(int32_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c = v.i32val();
+ return true;
+}
+
+bool BaseCompiler::peekConst(int64_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI64) {
+ return false;
+ }
+ *c = v.i64val();
+ return true;
+}
+
+bool BaseCompiler::peek2xConst(int32_t* c0, int32_t* c1) {
+ MOZ_ASSERT(stk_.length() >= 2);
+ const Stk& v0 = *(stk_.end() - 1);
+ const Stk& v1 = *(stk_.end() - 2);
+ if (v0.kind() != Stk::ConstI32 || v1.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c0 = v0.i32val();
+ *c1 = v1.i32val();
+ return true;
+}
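+
+// Illustrative sketch (assumed usage, not the actual emitter): peek2xConst
+// lets a binary operator fold two constant operands without touching any
+// registers, e.g. for i32.add:
+//
+//   int32_t c0, c1;
+//   if (peek2xConst(&c0, &c1)) {
+//     popValueStackBy(2);
+//     pushI32(int32_t(uint32_t(c1) + uint32_t(c0)));  // c1 is lhs, c0 is rhs
+//   }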
+
+bool BaseCompiler::popConstPositivePowerOfTwo(int32_t* c, uint_fast8_t* power,
+ int32_t cutoff) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c = v.i32val();
+ if (*c <= cutoff || !IsPowerOfTwo(static_cast<uint32_t>(*c))) {
+ return false;
+ }
+ *power = FloorLog2(*c);
+ stk_.popBack();
+ return true;
+}
+
+bool BaseCompiler::popConstPositivePowerOfTwo(int64_t* c, uint_fast8_t* power,
+ int64_t cutoff) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI64) {
+ return false;
+ }
+ *c = v.i64val();
+ if (*c <= cutoff || !IsPowerOfTwo(static_cast<uint64_t>(*c))) {
+ return false;
+ }
+ *power = FloorLog2(*c);
+ stk_.popBack();
+ return true;
+}
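+
+// Illustrative sketch (assumed usage; the unsigned-divide context and the masm
+// helper are assumptions, not the actual emitter): a division by a constant
+// power of two can be strength-reduced to a shift:
+//
+//   int32_t c;
+//   uint_fast8_t power;
+//   if (popConstPositivePowerOfTwo(&c, &power, 0)) {
+//     RegI32 r = popI32();
+//     masm.rshift32(Imm32(power), r);  // i32.div_u by 2^power
+//     pushI32(r);
+//   }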
+
+void BaseCompiler::pop2xI32(RegI32* r0, RegI32* r1) {
+ *r1 = popI32();
+ *r0 = popI32();
+}
+
+void BaseCompiler::pop2xI64(RegI64* r0, RegI64* r1) {
+ *r1 = popI64();
+ *r0 = popI64();
+}
+
+void BaseCompiler::pop2xF32(RegF32* r0, RegF32* r1) {
+ *r1 = popF32();
+ *r0 = popF32();
+}
+
+void BaseCompiler::pop2xF64(RegF64* r0, RegF64* r1) {
+ *r1 = popF64();
+ *r0 = popF64();
+}
+
+#ifdef ENABLE_WASM_SIMD
+void BaseCompiler::pop2xV128(RegV128* r0, RegV128* r1) {
+ *r1 = popV128();
+ *r0 = popV128();
+}
+#endif
+
+void BaseCompiler::pop2xRef(RegRef* r0, RegRef* r1) {
+ *r1 = popRef();
+ *r0 = popRef();
+}
+
+// Pop to a specific register
+RegI32 BaseCompiler::popI32ToSpecific(RegI32 specific) {
+ freeI32(specific);
+ return popI32(specific);
+}
+
+RegI64 BaseCompiler::popI64ToSpecific(RegI64 specific) {
+ freeI64(specific);
+ return popI64(specific);
+}
+
+#ifdef JS_CODEGEN_ARM
+// Pop an I64 as a valid register pair.
+RegI64 BaseCompiler::popI64Pair() {
+ RegI64 r = needI64Pair();
+ popI64ToSpecific(r);
+ return r;
+}
+#endif
+
+// Pop an I64 but narrow it and return the narrowed part.
+RegI32 BaseCompiler::popI64ToI32() {
+ RegI64 r = popI64();
+ return narrowI64(r);
+}
+
+RegI32 BaseCompiler::popI64ToSpecificI32(RegI32 specific) {
+ RegI64 rd = widenI32(specific);
+ popI64ToSpecific(rd);
+ return narrowI64(rd);
+}
+
+bool BaseCompiler::peekLocal(uint32_t* local) {
+ Stk& v = stk_.back();
+ // See hasLocal() for documentation of this logic.
+ if (v.kind() <= Stk::MemLast || v.kind() > Stk::LocalLast) {
+ return false;
+ }
+ *local = v.slot();
+ return true;
+}
+
+size_t BaseCompiler::stackConsumed(size_t numval) {
+ size_t size = 0;
+ MOZ_ASSERT(numval <= stk_.length());
+ for (uint32_t i = stk_.length() - 1; numval > 0; numval--, i--) {
+ Stk& v = stk_[i];
+ switch (v.kind()) {
+ case Stk::MemRef:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI32:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI64:
+ size += BaseStackFrame::StackSizeOfInt64;
+ break;
+ case Stk::MemF64:
+ size += BaseStackFrame::StackSizeOfDouble;
+ break;
+ case Stk::MemF32:
+ size += BaseStackFrame::StackSizeOfFloat;
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ size += BaseStackFrame::StackSizeOfV128;
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ return size;
+}
+
+void BaseCompiler::popValueStackTo(uint32_t stackSize) {
+ for (uint32_t i = stk_.length(); i > stackSize; i--) {
+ Stk& v = stk_[i - 1];
+ switch (v.kind()) {
+ case Stk::RegisterI32:
+ freeI32(v.i32reg());
+ break;
+ case Stk::RegisterI64:
+ freeI64(v.i64reg());
+ break;
+ case Stk::RegisterF64:
+ freeF64(v.f64reg());
+ break;
+ case Stk::RegisterF32:
+ freeF32(v.f32reg());
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case Stk::RegisterV128:
+ freeV128(v.v128reg());
+ break;
+#endif
+ case Stk::RegisterRef:
+ freeRef(v.refReg());
+ break;
+ case Stk::MemRef:
+ stackMapGenerator_.memRefsOnStk--;
+ break;
+ default:
+ break;
+ }
+ }
+ stk_.shrinkTo(stackSize);
+}
+
+void BaseCompiler::popValueStackBy(uint32_t items) {
+ popValueStackTo(stk_.length() - items);
+}
+
+void BaseCompiler::dropValue() {
+ if (peek(0).isMem()) {
+ fr.popBytes(stackConsumed(1));
+ }
+ popValueStackBy(1);
+}
+
+// Peek at the stack, for calls.
+
+Stk& BaseCompiler::peek(uint32_t relativeDepth) {
+ return stk_[stk_.length() - 1 - relativeDepth];
+}
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_wasm_baseline_stk_mgmt_inl_h
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
new file mode 100644
index 0000000000..b277ca22c6
--- /dev/null
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -0,0 +1,11009 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * [SMDOC] WebAssembly baseline compiler (RabaldrMonkey)
+ *
+ * For now, see WasmBCClass.h for general comments about the compiler's
+ * structure.
+ *
+ * ----------------
+ *
+ * General assumptions for 32-bit vs 64-bit code:
+ *
+ * - A 32-bit register can be extended in-place to a 64-bit register on 64-bit
+ * systems.
+ *
+ * - Code that knows that Register64 has a '.reg' member on 64-bit systems and
+ * '.high' and '.low' members on 32-bit systems, or knows the implications
+ * thereof, is #ifdef JS_PUNBOX64. All other code is #if(n)?def JS_64BIT.
+ *
+ * Coding standards are a little fluid:
+ *
+ * - In "small" code generating functions (eg emitMultiplyF64, emitQuotientI32,
+ * and surrounding functions; most functions fall into this class) where the
+ * meaning is obvious:
+ *
+ * Old school:
+ * - if there is a single source + destination register, it is called 'r'
+ * - if there is one source and a different destination, they are called 'rs'
+ * and 'rd'
+ * - if there is one source + destination register and another source register
+ * they are called 'r' and 'rs'
+ * - if there are two source registers and a destination register they are
+ * called 'rs0', 'rs1', and 'rd'.
+ *
+ * The new thing:
+ * - what is called 'r' in the old-school naming scheme is increasingly called
+ * 'rsd' in source+dest cases.
+ *
+ * - Generic temp registers are named /temp[0-9]?/ not /tmp[0-9]?/.
+ *
+ * - Registers can be named non-generically for their function ('rp' for the
+ * 'pointer' register and 'rv' for the 'value' register are typical) and those
+ * names may or may not have an 'r' prefix.
+ *
+ * - "Larger" code generating functions make their own rules.
+ */
+
+#include "wasm/WasmBaselineCompile.h"
+
+#include "wasm/WasmBCClass.h"
+#include "wasm/WasmBCDefs.h"
+#include "wasm/WasmBCFrame.h"
+#include "wasm/WasmBCRegDefs.h"
+#include "wasm/WasmBCStk.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "wasm/WasmBCClass-inl.h"
+#include "wasm/WasmBCCodegen-inl.h"
+#include "wasm/WasmBCRegDefs-inl.h"
+#include "wasm/WasmBCRegMgmt-inl.h"
+#include "wasm/WasmBCStkMgmt-inl.h"
+
+namespace js {
+namespace wasm {
+
+using namespace js::jit;
+
+////////////////////////////////////////////////////////////
+//
+// Out of line code management.
+
+// The baseline compiler will use OOL code more sparingly than Ion since
+// baseline code is not performance-critical, and frills like code density and
+// branch-prediction friendliness matter less.
+class OutOfLineCode : public TempObject {
+ private:
+ NonAssertingLabel entry_;
+ NonAssertingLabel rejoin_;
+ StackHeight stackHeight_;
+
+ public:
+ OutOfLineCode() : stackHeight_(StackHeight::Invalid()) {}
+
+ Label* entry() { return &entry_; }
+ Label* rejoin() { return &rejoin_; }
+
+ void setStackHeight(StackHeight stackHeight) {
+ MOZ_ASSERT(!stackHeight_.isValid());
+ stackHeight_ = stackHeight;
+ }
+
+ void bind(BaseStackFrame* fr, MacroAssembler* masm) {
+ MOZ_ASSERT(stackHeight_.isValid());
+ masm->bind(&entry_);
+ fr->setStackHeight(stackHeight_);
+ }
+
+ // The generate() method must be careful about register use because it will be
+ // invoked when there is a register assignment in the BaseCompiler that does
+ // not correspond to the available registers when the generated OOL code is
+ // executed. The register allocator *must not* be called.
+ //
+ // The best strategy is for the creator of the OOL object to allocate all
+ // temps that the OOL code will need.
+ //
+ // Input, output, and temp registers are embedded in the OOL object and are
+ // known to the code generator.
+ //
+ // Scratch registers are available to use in OOL code.
+ //
+ // All other registers must be explicitly saved and restored by the OOL code
+ // before being used.
+
+ virtual void generate(MacroAssembler* masm) = 0;
+};
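+
+// A minimal sketch of a concrete subclass (illustrative only; the real OOL
+// classes used by this compiler appear further down and carry their input,
+// output, and temp registers explicitly, as described above):
+//
+//   class OutOfLineTrapSketch : public OutOfLineCode {
+//     RegI32 input_;  // allocated by the creator, never by generate()
+//
+//    public:
+//     explicit OutOfLineTrapSketch(RegI32 input) : input_(input) {}
+//
+//     void generate(MacroAssembler* masm) override {
+//       // ... recovery code using only input_ and scratch registers ...
+//       masm->jump(rejoin());
+//     }
+//   };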
+
+OutOfLineCode* BaseCompiler::addOutOfLineCode(OutOfLineCode* ool) {
+ if (!ool || !outOfLine_.append(ool)) {
+ return nullptr;
+ }
+ ool->setStackHeight(fr.stackHeight());
+ return ool;
+}
+
+bool BaseCompiler::generateOutOfLineCode() {
+ for (auto* ool : outOfLine_) {
+ if (!ool->entry()->used()) {
+ continue;
+ }
+ ool->bind(&fr, &masm);
+ ool->generate(&masm);
+ }
+
+ return !masm.oom();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Sundry code generation.
+
+bool BaseCompiler::addInterruptCheck() {
+#ifdef RABALDR_PIN_INSTANCE
+ Register tmp(InstanceReg);
+#else
+ ScratchI32 tmp(*this);
+ fr.loadInstancePtr(tmp);
+#endif
+ Label ok;
+ masm.branch32(Assembler::Equal,
+ Address(tmp, wasm::Instance::offsetOfInterrupt()), Imm32(0),
+ &ok);
+ masm.wasmTrap(wasm::Trap::CheckInterrupt, bytecodeOffset());
+ masm.bind(&ok);
+ return createStackMap("addInterruptCheck");
+}
+
+void BaseCompiler::checkDivideByZero(RegI32 rhs) {
+ Label nonZero;
+ masm.branchTest32(Assembler::NonZero, rhs, rhs, &nonZero);
+ trap(Trap::IntegerDivideByZero);
+ masm.bind(&nonZero);
+}
+
+void BaseCompiler::checkDivideByZero(RegI64 r) {
+ Label nonZero;
+ ScratchI32 scratch(*this);
+ masm.branchTest64(Assembler::NonZero, r, r, scratch, &nonZero);
+ trap(Trap::IntegerDivideByZero);
+ masm.bind(&nonZero);
+}
+
+void BaseCompiler::checkDivideSignedOverflow(RegI32 rhs, RegI32 srcDest,
+ Label* done, bool zeroOnOverflow) {
+ Label notMin;
+ masm.branch32(Assembler::NotEqual, srcDest, Imm32(INT32_MIN), &notMin);
+ if (zeroOnOverflow) {
+ masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notMin);
+ moveImm32(0, srcDest);
+ masm.jump(done);
+ } else {
+ masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notMin);
+ trap(Trap::IntegerOverflow);
+ }
+ masm.bind(&notMin);
+}
+
+void BaseCompiler::checkDivideSignedOverflow(RegI64 rhs, RegI64 srcDest,
+ Label* done, bool zeroOnOverflow) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, srcDest, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+ if (zeroOnOverflow) {
+ masm.xor64(srcDest, srcDest);
+ masm.jump(done);
+ } else {
+ trap(Trap::IntegerOverflow);
+ }
+ masm.bind(&notmin);
+}
+
+void BaseCompiler::jumpTable(const LabelVector& labels, Label* theTable) {
+ // Flush constant pools to ensure that the table is never interrupted by
+ // constant pool entries.
+ masm.flush();
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+  // Prevent nop sequences from appearing in the jump table.
+ AutoForbidNops afn(&masm);
+#endif
+ masm.bind(theTable);
+
+ for (const auto& label : labels) {
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(label.offset());
+ masm.addCodeLabel(cl);
+ }
+}
+
+void BaseCompiler::tableSwitch(Label* theTable, RegI32 switchValue,
+ Label* dispatchCode) {
+ masm.bind(dispatchCode);
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ ScratchI32 scratch(*this);
+ CodeLabel tableCl;
+
+ masm.mov(&tableCl, scratch);
+
+ tableCl.target()->bind(theTable->offset());
+ masm.addCodeLabel(tableCl);
+
+ masm.jmp(Operand(scratch, switchValue, ScalePointer));
+#elif defined(JS_CODEGEN_ARM)
+ // Flush constant pools: offset must reflect the distance from the MOV
+ // to the start of the table; as the address of the MOV is given by the
+ // label, nothing must come between the bind() and the ma_mov().
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 5);
+
+ ScratchI32 scratch(*this);
+
+ // Compute the offset from the ma_mov instruction to the jump table.
+ Label here;
+ masm.bind(&here);
+ uint32_t offset = here.offset() - theTable->offset();
+
+ // Read PC+8
+ masm.ma_mov(pc, scratch);
+
+ // ARM scratch register is required by ma_sub.
+ ScratchRegisterScope arm_scratch(*this);
+
+ // Compute the absolute table base pointer into `scratch`, offset by 8
+ // to account for the fact that ma_mov read PC+8.
+ masm.ma_sub(Imm32(offset + 8), scratch, arm_scratch);
+
+ // Jump indirect via table element.
+ masm.ma_ldr(DTRAddr(scratch, DtrRegImmShift(switchValue, LSL, 2)), pc, Offset,
+ Assembler::Always);
+#elif defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64) || \
+ defined(JS_CODEGEN_RISCV64)
+ ScratchI32 scratch(*this);
+ CodeLabel tableCl;
+
+ masm.ma_li(scratch, &tableCl);
+
+ tableCl.target()->bind(theTable->offset());
+ masm.addCodeLabel(tableCl);
+
+ masm.branchToComputedAddress(BaseIndex(scratch, switchValue, ScalePointer));
+#elif defined(JS_CODEGEN_ARM64)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 4);
+
+ ScratchI32 scratch(*this);
+
+ ARMRegister s(scratch, 64);
+ ARMRegister v(switchValue, 64);
+ masm.Adr(s, theTable);
+ masm.Add(s, s, Operand(v, vixl::LSL, 3));
+ masm.Ldr(s, MemOperand(s, 0));
+ masm.Br(s);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: tableSwitch");
+#endif
+}
+
+#ifdef JS_CODEGEN_X86
+void BaseCompiler::stashI64(RegPtr regForInstance, RegI64 r) {
+ MOZ_ASSERT(Instance::sizeOfBaselineScratch() >= 8);
+ MOZ_ASSERT(regForInstance != r.low && regForInstance != r.high);
+# ifdef RABALDR_PIN_INSTANCE
+# error "Pinned instance not expected"
+# endif
+ fr.loadInstancePtr(regForInstance);
+ masm.store32(r.low,
+ Address(regForInstance, Instance::offsetOfBaselineScratch()));
+ masm.store32(
+ r.high, Address(regForInstance, Instance::offsetOfBaselineScratch() + 4));
+}
+
+void BaseCompiler::unstashI64(RegPtr regForInstance, RegI64 r) {
+ MOZ_ASSERT(Instance::sizeOfBaselineScratch() >= 8);
+# ifdef RABALDR_PIN_INSTANCE
+# error "Pinned instance not expected"
+# endif
+ fr.loadInstancePtr(regForInstance);
+ if (regForInstance == r.low) {
+ masm.load32(
+ Address(regForInstance, Instance::offsetOfBaselineScratch() + 4),
+ r.high);
+ masm.load32(Address(regForInstance, Instance::offsetOfBaselineScratch()),
+ r.low);
+ } else {
+ masm.load32(Address(regForInstance, Instance::offsetOfBaselineScratch()),
+ r.low);
+ masm.load32(
+ Address(regForInstance, Instance::offsetOfBaselineScratch() + 4),
+ r.high);
+ }
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Function entry and exit
+
+bool BaseCompiler::beginFunction() {
+ AutoCreatedBy acb(masm, "(wasm)BaseCompiler::beginFunction");
+
+ JitSpew(JitSpew_Codegen, "# ========================================");
+ JitSpew(JitSpew_Codegen, "# Emitting wasm baseline code");
+ JitSpew(JitSpew_Codegen,
+ "# beginFunction: start of function prologue for index %d",
+ (int)func_.index);
+
+ // Make a start on the stackmap for this function. Inspect the args so
+ // as to determine which of them are both in-memory and pointer-typed, and
+ // add entries to machineStackTracker as appropriate.
+
+ ArgTypeVector args(funcType());
+ size_t inboundStackArgBytes = StackArgAreaSizeUnaligned(args);
+ MOZ_ASSERT(inboundStackArgBytes % sizeof(void*) == 0);
+ stackMapGenerator_.numStackArgWords = inboundStackArgBytes / sizeof(void*);
+
+ MOZ_ASSERT(stackMapGenerator_.machineStackTracker.length() == 0);
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ stackMapGenerator_.numStackArgWords)) {
+ return false;
+ }
+
+ // Identify GC-managed pointers passed on the stack.
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ ABIArg argLoc = *i;
+ if (argLoc.kind() == ABIArg::Stack &&
+ args[i.index()] == MIRType::RefOrNull) {
+ uint32_t offset = argLoc.offsetFromArgBase();
+ MOZ_ASSERT(offset < inboundStackArgBytes);
+ MOZ_ASSERT(offset % sizeof(void*) == 0);
+ stackMapGenerator_.machineStackTracker.setGCPointer(offset /
+ sizeof(void*));
+ }
+ }
+
+ GenerateFunctionPrologue(
+ masm, CallIndirectId::forFunc(moduleEnv_, func_.index),
+ compilerEnv_.mode() == CompileMode::Tier1 ? Some(func_.index) : Nothing(),
+ &offsets_);
+
+ // GenerateFunctionPrologue pushes exactly one wasm::Frame's worth of
+ // stuff, and none of the values are GC pointers. Hence:
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ sizeof(Frame) / sizeof(void*))) {
+ return false;
+ }
+
+ // Initialize DebugFrame fields before the stack overflow trap so that
+ // we have the invariant that all observable Frames in a debugEnabled
+ // Module have valid DebugFrames.
+ if (compilerEnv_.debugEnabled()) {
+#ifdef JS_CODEGEN_ARM64
+ static_assert(DebugFrame::offsetOfFrame() % WasmStackAlignment == 0,
+ "aligned");
+#endif
+ masm.reserveStack(DebugFrame::offsetOfFrame());
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ DebugFrame::offsetOfFrame() / sizeof(void*))) {
+ return false;
+ }
+
+ masm.store32(Imm32(func_.index), Address(masm.getStackPointer(),
+ DebugFrame::offsetOfFuncIndex()));
+ masm.store32(Imm32(0),
+ Address(masm.getStackPointer(), DebugFrame::offsetOfFlags()));
+
+ // No need to initialize cachedReturnJSValue_ or any ref-typed spilled
+ // register results, as they are traced if and only if a corresponding
+ // flag (hasCachedReturnJSValue or hasSpilledRefRegisterResult) is set.
+ }
+
+ // Generate a stack-overflow check and its associated stackmap.
+
+ fr.checkStack(ABINonArgReg0, BytecodeOffset(func_.lineOrBytecode));
+
+ ExitStubMapVector extras;
+ if (!stackMapGenerator_.generateStackmapEntriesForTrapExit(args, &extras)) {
+ return false;
+ }
+ if (!createStackMap("stack check", extras, masm.currentOffset(),
+ HasDebugFrameWithLiveRefs::No)) {
+ return false;
+ }
+
+ size_t reservedBytes = fr.fixedAllocSize() - masm.framePushed();
+ MOZ_ASSERT(0 == (reservedBytes % sizeof(void*)));
+
+ masm.reserveStack(reservedBytes);
+ fr.onFixedStackAllocated();
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ reservedBytes / sizeof(void*))) {
+ return false;
+ }
+
+ // Locals are stack allocated. Mark ref-typed ones in the stackmap
+ // accordingly.
+ for (const Local& l : localInfo_) {
+ // Locals that are stack arguments were already added to the stackmap
+ // before pushing the frame.
+ if (l.type == MIRType::RefOrNull && !l.isStackArgument()) {
+ uint32_t offs = fr.localOffsetFromSp(l);
+ MOZ_ASSERT(0 == (offs % sizeof(void*)));
+ stackMapGenerator_.machineStackTracker.setGCPointer(offs / sizeof(void*));
+ }
+ }
+
+ // Copy arguments from registers to stack.
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ if (args.isSyntheticStackResultPointerArg(i.index())) {
+ // If there are stack results and the pointer to stack results
+ // was passed in a register, store it to the stack.
+ if (i->argInRegister()) {
+ fr.storeIncomingStackResultAreaPtr(RegPtr(i->gpr()));
+ }
+ // If we're in a debug frame, copy the stack result pointer arg
+ // to a well-known place.
+ if (compilerEnv_.debugEnabled()) {
+ Register target = ABINonArgReturnReg0;
+ fr.loadIncomingStackResultAreaPtr(RegPtr(target));
+ size_t debugFrameOffset =
+ masm.framePushed() - DebugFrame::offsetOfFrame();
+ size_t debugStackResultsPointerOffset =
+ debugFrameOffset + DebugFrame::offsetOfStackResultsPointer();
+ masm.storePtr(target, Address(masm.getStackPointer(),
+ debugStackResultsPointerOffset));
+ }
+ continue;
+ }
+ if (!i->argInRegister()) {
+ continue;
+ }
+ Local& l = localInfo_[args.naturalIndex(i.index())];
+ switch (i.mirType()) {
+ case MIRType::Int32:
+ fr.storeLocalI32(RegI32(i->gpr()), l);
+ break;
+ case MIRType::Int64:
+ fr.storeLocalI64(RegI64(i->gpr64()), l);
+ break;
+ case MIRType::RefOrNull: {
+ DebugOnly<uint32_t> offs = fr.localOffsetFromSp(l);
+ MOZ_ASSERT(0 == (offs % sizeof(void*)));
+ fr.storeLocalRef(RegRef(i->gpr()), l);
+ // We should have just visited this local in the preceding loop.
+ MOZ_ASSERT(stackMapGenerator_.machineStackTracker.isGCPointer(
+ offs / sizeof(void*)));
+ break;
+ }
+ case MIRType::Double:
+ fr.storeLocalF64(RegF64(i->fpu()), l);
+ break;
+ case MIRType::Float32:
+ fr.storeLocalF32(RegF32(i->fpu()), l);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ fr.storeLocalV128(RegV128(i->fpu()), l);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Function argument type");
+ }
+ }
+
+ fr.zeroLocals(&ra);
+ fr.storeInstancePtr(InstanceReg);
+
+ if (compilerEnv_.debugEnabled()) {
+ insertBreakablePoint(CallSiteDesc::EnterFrame);
+ if (!createStackMap("debug: enter-frame breakpoint")) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen,
+ "# beginFunction: enter body with masm.framePushed = %u",
+ masm.framePushed());
+ MOZ_ASSERT(stackMapGenerator_.framePushedAtEntryToBody.isNothing());
+ stackMapGenerator_.framePushedAtEntryToBody.emplace(masm.framePushed());
+
+ return true;
+}
+
+bool BaseCompiler::endFunction() {
+ AutoCreatedBy acb(masm, "(wasm)BaseCompiler::endFunction");
+
+ JitSpew(JitSpew_Codegen, "# endFunction: start of function epilogue");
+
+ // Always branch to returnLabel_.
+ masm.breakpoint();
+
+ // Patch the add in the prologue so that it checks against the correct
+ // frame size. Flush the constant pool in case it needs to be patched.
+ masm.flush();
+
+ // Precondition for patching.
+ if (masm.oom()) {
+ return false;
+ }
+
+ fr.patchCheckStack();
+
+ masm.bind(&returnLabel_);
+
+ ResultType resultType(ResultType::Vector(funcType().results()));
+
+ popStackReturnValues(resultType);
+
+ if (compilerEnv_.debugEnabled()) {
+ // Store and reload the return value from DebugFrame::return so that
+ // it can be clobbered, and/or modified by the debug trap.
+ saveRegisterReturnValues(resultType);
+ insertBreakablePoint(CallSiteDesc::Breakpoint);
+ if (!createStackMap("debug: return-point breakpoint",
+ HasDebugFrameWithLiveRefs::Maybe)) {
+ return false;
+ }
+ insertBreakablePoint(CallSiteDesc::LeaveFrame);
+ if (!createStackMap("debug: leave-frame breakpoint",
+ HasDebugFrameWithLiveRefs::Maybe)) {
+ return false;
+ }
+ restoreRegisterReturnValues(resultType);
+ }
+
+#ifndef RABALDR_PIN_INSTANCE
+  // To satisfy the instance extent invariant we need to reload InstanceReg
+  // because baseline code can clobber it.
+ fr.loadInstancePtr(InstanceReg);
+#endif
+ GenerateFunctionEpilogue(masm, fr.fixedAllocSize(), &offsets_);
+
+#if defined(JS_ION_PERF)
+ // FIXME - profiling code missing. No bug for this.
+
+ // Note the end of the inline code and start of the OOL code.
+ // gen->perfSpewer().noteEndInlineCode(masm);
+#endif
+
+ JitSpew(JitSpew_Codegen, "# endFunction: end of function epilogue");
+ JitSpew(JitSpew_Codegen, "# endFunction: start of OOL code");
+ if (!generateOutOfLineCode()) {
+ return false;
+ }
+ JitSpew(JitSpew_Codegen, "# endFunction: end of OOL code");
+
+ if (compilerEnv_.debugEnabled()) {
+ JitSpew(JitSpew_Codegen, "# endFunction: start of debug trap stub");
+ insertBreakpointStub();
+ JitSpew(JitSpew_Codegen, "# endFunction: end of debug trap stub");
+ }
+
+ offsets_.end = masm.currentOffset();
+
+ if (!fr.checkStackHeight()) {
+ return decoder_.fail(decoder_.beginOffset(), "stack frame is too large");
+ }
+
+ JitSpew(JitSpew_Codegen, "# endFunction: end of OOL code for index %d",
+ (int)func_.index);
+ return !masm.oom();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Debugger API.
+
+void BaseCompiler::insertBreakablePoint(CallSiteDesc::Kind kind) {
+#ifndef RABALDR_PIN_INSTANCE
+ fr.loadInstancePtr(InstanceReg);
+#endif
+
+ // The breakpoint code must call the breakpoint handler installed on the
+ // instance if it is not null. There is one breakable point before
+ // every bytecode, and one at the beginning and at the end of the function.
+ //
+ // There are many constraints:
+ //
+ // - Code should be read-only; we do not want to patch
+ // - The breakpoint code should be as dense as possible, given the volume of
+ // breakable points
+ // - The handler-is-null case should be as fast as we can make it
+ //
+ // The scratch register is available here.
+ //
+ // An unconditional callout would be densest but is too slow. The best
+ // balance results from an inline test for null with a conditional call. The
+ // best code sequence is platform-dependent.
+ //
+ // The conditional call goes to a stub attached to the function that performs
+ // further filtering before calling the breakpoint handler.
+#if defined(JS_CODEGEN_X64)
+ // REX 83 MODRM OFFS IB
+ static_assert(Instance::offsetOfDebugTrapHandler() < 128);
+ masm.cmpq(Imm32(0), Operand(Address(InstanceReg,
+ Instance::offsetOfDebugTrapHandler())));
+
+ // 74 OFFS
+ Label L;
+ L.bind(masm.currentOffset() + 7);
+ masm.j(Assembler::Zero, &L);
+
+ // E8 OFFS OFFS OFFS OFFS
+ masm.call(&debugTrapStub_);
+ masm.append(CallSiteDesc(iter_.lastOpcodeOffset(), kind),
+ CodeOffset(masm.currentOffset()));
+
+ // Branch destination
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() == uint32_t(L.offset()));
+#elif defined(JS_CODEGEN_X86)
+ // 83 MODRM OFFS IB
+ static_assert(Instance::offsetOfDebugTrapHandler() < 128);
+ masm.cmpl(Imm32(0), Operand(Address(InstanceReg,
+ Instance::offsetOfDebugTrapHandler())));
+
+ // 74 OFFS
+ Label L;
+ L.bind(masm.currentOffset() + 7);
+ masm.j(Assembler::Zero, &L);
+
+ // E8 OFFS OFFS OFFS OFFS
+ masm.call(&debugTrapStub_);
+ masm.append(CallSiteDesc(iter_.lastOpcodeOffset(), kind),
+ CodeOffset(masm.currentOffset()));
+
+ // Branch destination
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() == uint32_t(L.offset()));
+#elif defined(JS_CODEGEN_ARM64)
+ ScratchPtr scratch(*this);
+ ARMRegister tmp(scratch, 64);
+ Label L;
+ masm.Ldr(tmp, MemOperand(Address(InstanceReg,
+ Instance::offsetOfDebugTrapHandler())));
+ masm.Cbz(tmp, &L);
+ masm.Bl(&debugTrapStub_);
+ masm.append(CallSiteDesc(iter_.lastOpcodeOffset(), kind),
+ CodeOffset(masm.currentOffset()));
+ masm.bind(&L);
+#elif defined(JS_CODEGEN_ARM)
+ ScratchPtr scratch(*this);
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfDebugTrapHandler()),
+ scratch);
+ masm.ma_orr(scratch, scratch, SetCC);
+ masm.ma_bl(&debugTrapStub_, Assembler::NonZero);
+ masm.append(CallSiteDesc(iter_.lastOpcodeOffset(), kind),
+ CodeOffset(masm.currentOffset()));
+#elif defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_RISCV64)
+ ScratchPtr scratch(*this);
+ Label L;
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfDebugTrapHandler()),
+ scratch);
+ masm.branchPtr(Assembler::Equal, scratch, ImmWord(0), &L);
+ masm.call(&debugTrapStub_);
+ masm.append(CallSiteDesc(iter_.lastOpcodeOffset(), kind),
+ CodeOffset(masm.currentOffset()));
+ masm.bind(&L);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: insertBreakablePoint");
+#endif
+}
+
+void BaseCompiler::insertBreakpointStub() {
+ // The debug trap stub performs out-of-line filtering before jumping to the
+ // debug trap handler if necessary. The trap handler returns directly to
+ // the breakable point.
+ //
+ // NOTE, the link register is live here on platforms that have LR.
+ //
+ // The scratch register is available here (as it was at the call site).
+ //
+ // It's useful for the debug trap stub to be compact, as every function gets
+ // one.
+
+ Label L;
+ masm.bind(&debugTrapStub_);
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ {
+ ScratchPtr scratch(*this);
+
+ // Get the per-instance table of filtering bits.
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfDebugFilter()),
+ scratch);
+
+ // Check the filter bit. There is one bit per function in the module.
+ // Table elements are 32-bit because the masm makes that convenient.
+ masm.branchTest32(Assembler::NonZero, Address(scratch, func_.index / 32),
+ Imm32(1 << (func_.index % 32)), &L);
+
+ // Fast path: return to the execution.
+ masm.ret();
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ {
+ ScratchPtr scratch(*this);
+
+ // Logic as above, except abiret to jump to the LR directly
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfDebugFilter()),
+ scratch);
+ masm.branchTest32(Assembler::NonZero, Address(scratch, func_.index / 32),
+ Imm32(1 << (func_.index % 32)), &L);
+ masm.abiret();
+ }
+#elif defined(JS_CODEGEN_ARM)
+ {
+ // We must be careful not to use the SecondScratchRegister, which usually
+ // is LR, as LR is live here. This means avoiding masm abstractions such
+ // as branchTest32.
+
+ static_assert(ScratchRegister != lr);
+ static_assert(Instance::offsetOfDebugFilter() < 0x1000);
+
+ ScratchRegisterScope tmp1(masm);
+ ScratchI32 tmp2(*this);
+ masm.ma_ldr(
+ DTRAddr(InstanceReg, DtrOffImm(Instance::offsetOfDebugFilter())), tmp1);
+ masm.ma_mov(Imm32(func_.index / 32), tmp2);
+ masm.ma_ldr(DTRAddr(tmp1, DtrRegImmShift(tmp2, LSL, 0)), tmp2);
+ masm.ma_tst(tmp2, Imm32(1 << func_.index % 32), tmp1, Assembler::Always);
+ masm.ma_bx(lr, Assembler::Zero);
+ }
+#elif defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_RISCV64)
+ {
+ ScratchPtr scratch(*this);
+
+ // Logic same as ARM64.
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfDebugFilter()),
+ scratch);
+ masm.branchTest32(Assembler::NonZero, Address(scratch, func_.index / 32),
+ Imm32(1 << (func_.index % 32)), &L);
+ masm.abiret();
+ }
+#else
+ MOZ_CRASH("BaseCompiler platform hook: endFunction");
+#endif
+
+ // Jump to the debug trap handler.
+ masm.bind(&L);
+ masm.jump(Address(InstanceReg, Instance::offsetOfDebugTrapHandler()));
+}
+
+void BaseCompiler::saveRegisterReturnValues(const ResultType& resultType) {
+ MOZ_ASSERT(compilerEnv_.debugEnabled());
+ size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+ size_t registerResultIdx = 0;
+ for (ABIResultIter i(resultType); !i.done(); i.next()) {
+ const ABIResult result = i.cur();
+ if (!result.inRegister()) {
+#ifdef DEBUG
+ for (i.next(); !i.done(); i.next()) {
+ MOZ_ASSERT(!i.cur().inRegister());
+ }
+#endif
+ break;
+ }
+
+ size_t resultOffset = DebugFrame::offsetOfRegisterResult(registerResultIdx);
+ Address dest(masm.getStackPointer(), debugFrameOffset + resultOffset);
+ switch (result.type().kind()) {
+ case ValType::I32:
+ masm.store32(RegI32(result.gpr()), dest);
+ break;
+ case ValType::I64:
+ masm.store64(RegI64(result.gpr64()), dest);
+ break;
+ case ValType::F64:
+ masm.storeDouble(RegF64(result.fpr()), dest);
+ break;
+ case ValType::F32:
+ masm.storeFloat32(RegF32(result.fpr()), dest);
+ break;
+ case ValType::Ref: {
+ uint32_t flag =
+ DebugFrame::hasSpilledRegisterRefResultBitMask(registerResultIdx);
+ // Tell Instance::traceFrame that we have a pointer to trace.
+ masm.or32(Imm32(flag),
+ Address(masm.getStackPointer(),
+ debugFrameOffset + DebugFrame::offsetOfFlags()));
+ masm.storePtr(RegRef(result.gpr()), dest);
+ break;
+ }
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ masm.storeUnalignedSimd128(RegV128(result.fpr()), dest);
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ registerResultIdx++;
+ }
+}
+
+void BaseCompiler::restoreRegisterReturnValues(const ResultType& resultType) {
+ MOZ_ASSERT(compilerEnv_.debugEnabled());
+ size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+ size_t registerResultIdx = 0;
+ for (ABIResultIter i(resultType); !i.done(); i.next()) {
+ const ABIResult result = i.cur();
+ if (!result.inRegister()) {
+#ifdef DEBUG
+ for (i.next(); !i.done(); i.next()) {
+ MOZ_ASSERT(!i.cur().inRegister());
+ }
+#endif
+ break;
+ }
+ size_t resultOffset =
+ DebugFrame::offsetOfRegisterResult(registerResultIdx++);
+ Address src(masm.getStackPointer(), debugFrameOffset + resultOffset);
+ switch (result.type().kind()) {
+ case ValType::I32:
+ masm.load32(src, RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ masm.load64(src, RegI64(result.gpr64()));
+ break;
+ case ValType::F64:
+ masm.loadDouble(src, RegF64(result.fpr()));
+ break;
+ case ValType::F32:
+ masm.loadFloat32(src, RegF32(result.fpr()));
+ break;
+ case ValType::Ref:
+ masm.loadPtr(src, RegRef(result.gpr()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ masm.loadUnalignedSimd128(src, RegV128(result.fpr()));
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Results and block parameters
+
+void BaseCompiler::popStackReturnValues(const ResultType& resultType) {
+ uint32_t bytes = ABIResultIter::MeasureStackBytes(resultType);
+ if (bytes == 0) {
+ return;
+ }
+ Register target = ABINonArgReturnReg0;
+ Register temp = ABINonArgReturnReg1;
+ fr.loadIncomingStackResultAreaPtr(RegPtr(target));
+ fr.popStackResultsToMemory(target, bytes, temp);
+}
+
+// TODO / OPTIMIZE (Bug 1316818): At the moment we use the Wasm
+// inter-procedure ABI for block returns, which allocates ReturnReg as the
+// single block result register. It is possible other choices would lead to
+// better register allocation, as ReturnReg is often first in the register set
+// and will be heavily wanted by the register allocator that uses takeFirst().
+//
+// Obvious options:
+// - pick a register at the back of the register set
+// - pick a random register per block (different blocks have
+// different join regs)
+
+void BaseCompiler::popRegisterResults(ABIResultIter& iter) {
+ // Pop register results. Note that in the single-value case, popping to a
+ // register may cause a sync(); for multi-value we sync'd already.
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (!result.inRegister()) {
+ // TODO / OPTIMIZE: We sync here to avoid solving the general parallel
+ // move problem in popStackResults. However we could avoid syncing the
+ // values that are going to registers anyway, if they are already in
+ // registers.
+ sync();
+ break;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ popI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ popI64(RegI64(result.gpr64()));
+ break;
+ case ValType::F32:
+ popF32(RegF32(result.fpr()));
+ break;
+ case ValType::F64:
+ popF64(RegF64(result.fpr()));
+ break;
+ case ValType::Ref:
+ popRef(RegRef(result.gpr()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ popV128(RegV128(result.fpr()));
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ }
+}
+
+void BaseCompiler::popStackResults(ABIResultIter& iter, StackHeight stackBase) {
+ MOZ_ASSERT(!iter.done());
+
+ // The iterator should be advanced beyond register results, and register
+ // results should be popped already from the value stack.
+ uint32_t alreadyPopped = iter.index();
+
+ // At this point, only stack arguments are remaining. Iterate through them
+ // to measure how much stack space they will take up.
+ for (; !iter.done(); iter.next()) {
+ MOZ_ASSERT(iter.cur().onStack());
+ }
+
+ // Calculate the space needed to store stack results, in bytes.
+ uint32_t stackResultBytes = iter.stackBytesConsumedSoFar();
+ MOZ_ASSERT(stackResultBytes);
+
+ // Compute the stack height including the stack results. Note that it's
+ // possible that this call expands the stack, for example if some of the
+ // results are supplied by constants and so are not already on the machine
+ // stack.
+ uint32_t endHeight = fr.prepareStackResultArea(stackBase, stackResultBytes);
+
+ // Find a free GPR to use when shuffling stack values. If none is
+ // available, push ReturnReg and restore it after we're done.
+ bool saved = false;
+ RegPtr temp = ra.needTempPtr(RegPtr(ReturnReg), &saved);
+
+ // The sequence of Stk values is in the same order on the machine stack as
+ // the result locations, but there is a complication: constant values are
+ // not actually pushed on the machine stack. (At this point registers and
+ // locals have been spilled already.) So, moving the Stk values into place
+ // isn't simply a shuffle-down or shuffle-up operation. There is a part of
+ // the Stk sequence that shuffles toward the FP, a part that's already in
+ // place, and a part that shuffles toward the SP. After shuffling, we have
+ // to materialize the constants.
+
+ // Shuffle mem values toward the frame pointer, copying deepest values
+ // first. Stop when we run out of results, get to a register result, or
+ // find a Stk value that is closer to the FP than the result.
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ if (!result.onStack()) {
+ break;
+ }
+ MOZ_ASSERT(result.stackOffset() < stackResultBytes);
+ uint32_t destHeight = endHeight - result.stackOffset();
+ uint32_t stkBase = stk_.length() - (iter.count() - alreadyPopped);
+ Stk& v = stk_[stkBase + iter.index()];
+ if (v.isMem()) {
+ uint32_t srcHeight = v.offs();
+ if (srcHeight <= destHeight) {
+ break;
+ }
+ fr.shuffleStackResultsTowardFP(srcHeight, destHeight, result.size(),
+ temp);
+ }
+ }
+
+ // Reset iterator and skip register results.
+ for (iter.reset(); !iter.done(); iter.next()) {
+ if (iter.cur().onStack()) {
+ break;
+ }
+ }
+
+ // Revisit top stack values, shuffling mem values toward the stack pointer,
+ // copying shallowest values first.
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ MOZ_ASSERT(result.onStack());
+ MOZ_ASSERT(result.stackOffset() < stackResultBytes);
+ uint32_t destHeight = endHeight - result.stackOffset();
+ Stk& v = stk_[stk_.length() - (iter.index() - alreadyPopped) - 1];
+ if (v.isMem()) {
+ uint32_t srcHeight = v.offs();
+ if (srcHeight >= destHeight) {
+ break;
+ }
+ fr.shuffleStackResultsTowardSP(srcHeight, destHeight, result.size(),
+ temp);
+ }
+ }
+
+ // Reset iterator and skip register results, which are already popped off
+ // the value stack.
+ for (iter.reset(); !iter.done(); iter.next()) {
+ if (iter.cur().onStack()) {
+ break;
+ }
+ }
+
+ // Materialize constants and pop the remaining items from the value stack.
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ uint32_t resultHeight = endHeight - result.stackOffset();
+ Stk& v = stk_.back();
+ switch (v.kind()) {
+ case Stk::ConstI32:
+#if defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64) || \
+ defined(JS_CODEGEN_RISCV64)
+ fr.storeImmediatePtrToStack(v.i32val_, resultHeight, temp);
+#else
+ fr.storeImmediatePtrToStack(uint32_t(v.i32val_), resultHeight, temp);
+#endif
+ break;
+ case Stk::ConstF32:
+ fr.storeImmediateF32ToStack(v.f32val_, resultHeight, temp);
+ break;
+ case Stk::ConstI64:
+ fr.storeImmediateI64ToStack(v.i64val_, resultHeight, temp);
+ break;
+ case Stk::ConstF64:
+ fr.storeImmediateF64ToStack(v.f64val_, resultHeight, temp);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case Stk::ConstV128:
+ fr.storeImmediateV128ToStack(v.v128val_, resultHeight, temp);
+ break;
+#endif
+ case Stk::ConstRef:
+ fr.storeImmediatePtrToStack(v.refval_, resultHeight, temp);
+ break;
+ case Stk::MemRef:
+ // Update bookkeeping as we pop the Stk entry.
+ stackMapGenerator_.memRefsOnStk--;
+ break;
+ default:
+ MOZ_ASSERT(v.isMem());
+ break;
+ }
+ stk_.popBack();
+ }
+
+ ra.freeTempPtr(temp, saved);
+
+ // This will pop the stack if needed.
+ fr.finishStackResultArea(stackBase, stackResultBytes);
+}
+
+void BaseCompiler::popBlockResults(ResultType type, StackHeight stackBase,
+ ContinuationKind kind) {
+ if (!type.empty()) {
+ ABIResultIter iter(type);
+ popRegisterResults(iter);
+ if (!iter.done()) {
+ popStackResults(iter, stackBase);
+ // Because popStackResults might clobber the stack, it leaves the stack
+ // pointer already in the right place for the continuation, whether the
+ // continuation is a jump or fallthrough.
+ return;
+ }
+ }
+ // We get here if there are no stack results. For a fallthrough, the stack
+ // is already at the right height. For a jump, we may need to pop the stack
+ // pointer if the continuation's stack height is lower than the current
+ // stack height.
+ if (kind == ContinuationKind::Jump) {
+ fr.popStackBeforeBranch(stackBase, type);
+ }
+}
+
+// This function is similar to popBlockResults, but additionally handles the
+// implicit exception pointer that is pushed to the value stack on entry to
+// a catch handler by dropping it appropriately.
+void BaseCompiler::popCatchResults(ResultType type, StackHeight stackBase) {
+ if (!type.empty()) {
+ ABIResultIter iter(type);
+ popRegisterResults(iter);
+ if (!iter.done()) {
+ popStackResults(iter, stackBase);
+ // Since popStackResults clobbers the stack, we only need to free the
+ // exception off of the value stack.
+ popValueStackBy(1);
+ } else {
+ // If there are no stack results, we have to adjust the stack by
+ // dropping the exception reference that's now on the stack.
+ dropValue();
+ }
+ } else {
+ dropValue();
+ }
+ fr.popStackBeforeBranch(stackBase, type);
+}
+
+Stk BaseCompiler::captureStackResult(const ABIResult& result,
+ StackHeight resultsBase,
+ uint32_t stackResultBytes) {
+ MOZ_ASSERT(result.onStack());
+ uint32_t offs = fr.locateStackResult(result, resultsBase, stackResultBytes);
+ return Stk::StackResult(result.type(), offs);
+}
+
+// TODO: It may be fruitful to inline the fast path here, as it will be common.
+
+bool BaseCompiler::pushResults(ResultType type, StackHeight resultsBase) {
+ if (type.empty()) {
+ return true;
+ }
+
+ if (type.length() > 1) {
+ // Reserve extra space on the stack for all the values we'll push.
+ // Multi-value push is not accounted for by the pre-sizing of the stack in
+ // the decoding loop.
+ //
+ // Also make sure we leave headroom for other pushes that will occur after
+ // pushing results, just to be safe.
+ if (!stk_.reserve(stk_.length() + type.length() + MaxPushesPerOpcode)) {
+ return false;
+ }
+ }
+
+ // We need to push the results in reverse order, so first iterate through
+ // all results to determine the locations of stack result types.
+ ABIResultIter iter(type);
+ while (!iter.done()) {
+ iter.next();
+ }
+ uint32_t stackResultBytes = iter.stackBytesConsumedSoFar();
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ if (!result.onStack()) {
+ break;
+ }
+ Stk v = captureStackResult(result, resultsBase, stackResultBytes);
+ push(v);
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk++;
+ }
+ }
+
+ for (; !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ MOZ_ASSERT(result.inRegister());
+ switch (result.type().kind()) {
+ case ValType::I32:
+ pushI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ pushI64(RegI64(result.gpr64()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ pushV128(RegV128(result.fpr()));
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F32:
+ pushF32(RegF32(result.fpr()));
+ break;
+ case ValType::F64:
+ pushF64(RegF64(result.fpr()));
+ break;
+ case ValType::Ref:
+ pushRef(RegRef(result.gpr()));
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::pushBlockResults(ResultType type) {
+ return pushResults(type, controlItem().stackHeight);
+}
+
+// A combination of popBlockResults + pushBlockResults, used when entering a
+// block with a control-flow join (loops) or split (if) to shuffle the
+// fallthrough block parameters into the locations expected by the
+// continuation.
+bool BaseCompiler::topBlockParams(ResultType type) {
+ // This function should only be called when entering a block with a
+ // control-flow join at the entry, where there are no live temporaries in
+ // the current block.
+ StackHeight base = controlItem().stackHeight;
+ MOZ_ASSERT(fr.stackResultsBase(stackConsumed(type.length())) == base);
+ popBlockResults(type, base, ContinuationKind::Fallthrough);
+ return pushBlockResults(type);
+}
+
+// A combination of popBlockResults + pushBlockResults, used before branches
+// where we don't know the target (br_if / br_table). If and when the branch
+// is taken, the stack results will be shuffled down into place. For br_if
+// that has fallthrough, the parameters for the untaken branch flow through to
+// the continuation.
+bool BaseCompiler::topBranchParams(ResultType type, StackHeight* height) {
+ if (type.empty()) {
+ *height = fr.stackHeight();
+ return true;
+ }
+ // There may be temporary values that need spilling; delay computation of
+ // the stack results base until after the popRegisterResults(), which spills
+ // if needed.
+ ABIResultIter iter(type);
+ popRegisterResults(iter);
+ StackHeight base = fr.stackResultsBase(stackConsumed(iter.remaining()));
+ if (!iter.done()) {
+ popStackResults(iter, base);
+ }
+ if (!pushResults(type, base)) {
+ return false;
+ }
+ *height = base;
+ return true;
+}
+
+// Conditional branches with fallthrough are preceded by a topBranchParams, so
+// we know that there are no stack results that need to be materialized. In
+// that case, we can just shuffle the whole block down before popping the
+// stack.
+void BaseCompiler::shuffleStackResultsBeforeBranch(StackHeight srcHeight,
+ StackHeight destHeight,
+ ResultType type) {
+ uint32_t stackResultBytes = 0;
+
+ if (ABIResultIter::HasStackResults(type)) {
+ MOZ_ASSERT(stk_.length() >= type.length());
+ ABIResultIter iter(type);
+ for (; !iter.done(); iter.next()) {
+#ifdef DEBUG
+ const ABIResult& result = iter.cur();
+ const Stk& v = stk_[stk_.length() - iter.index() - 1];
+ MOZ_ASSERT(v.isMem() == result.onStack());
+#endif
+ }
+
+ stackResultBytes = iter.stackBytesConsumedSoFar();
+ MOZ_ASSERT(stackResultBytes > 0);
+
+ if (srcHeight != destHeight) {
+ // Find a free GPR to use when shuffling stack values. If none
+ // is available, push ReturnReg and restore it after we're done.
+ bool saved = false;
+ RegPtr temp = ra.needTempPtr(RegPtr(ReturnReg), &saved);
+ fr.shuffleStackResultsTowardFP(srcHeight, destHeight, stackResultBytes,
+ temp);
+ ra.freeTempPtr(temp, saved);
+ }
+ }
+
+ fr.popStackBeforeBranch(destHeight, stackResultBytes);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Function calls.
+
+void BaseCompiler::beginCall(
+ FunctionCall& call, UseABI useABI,
+ RestoreRegisterStateAndRealm restoreRegisterStateAndRealm) {
+ MOZ_ASSERT_IF(
+ useABI == UseABI::Builtin,
+ restoreRegisterStateAndRealm == RestoreRegisterStateAndRealm::False);
+
+ call.restoreRegisterStateAndRealm =
+ restoreRegisterStateAndRealm == RestoreRegisterStateAndRealm::True;
+ call.usesSystemAbi = useABI == UseABI::System;
+
+ if (call.usesSystemAbi) {
+ // Call-outs need to use the appropriate system ABI.
+#if defined(JS_CODEGEN_ARM)
+ call.hardFP = UseHardFpABI();
+ call.abi.setUseHardFp(call.hardFP);
+#endif
+ } else {
+#if defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(call.hardFP, "All private ABIs pass FP arguments in registers");
+#endif
+ }
+
+ // Use masm.framePushed() because the value we want here does not depend on
+ // the height of the frame's stack area but on the actual size of the
+ // allocated frame.
+ call.frameAlignAdjustment = ComputeByteAlignment(
+ masm.framePushed() + sizeof(Frame), JitStackAlignment);
+}
+
+void BaseCompiler::endCall(FunctionCall& call, size_t stackSpace) {
+ size_t adjustment = call.stackArgAreaSize + call.frameAlignAdjustment;
+ fr.freeArgAreaAndPopBytes(adjustment, stackSpace);
+
+ MOZ_ASSERT(stackMapGenerator_.framePushedExcludingOutboundCallArgs.isSome());
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.reset();
+
+ if (call.restoreRegisterStateAndRealm) {
+ // The instance register has been clobbered, so always reload it.
+ fr.loadInstancePtr(InstanceReg);
+ masm.loadWasmPinnedRegsFromInstance();
+ masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
+ } else if (call.usesSystemAbi) {
+ // On x86 there are no pinned registers, so don't waste time
+ // reloading the instance.
+#ifndef JS_CODEGEN_X86
+ // The instance register has been clobbered, so always reload it.
+ fr.loadInstancePtr(InstanceReg);
+ masm.loadWasmPinnedRegsFromInstance();
+#endif
+ }
+}
+
+void BaseCompiler::startCallArgs(size_t stackArgAreaSizeUnaligned,
+ FunctionCall* call) {
+ size_t stackArgAreaSizeAligned =
+ AlignStackArgAreaSize(stackArgAreaSizeUnaligned);
+ MOZ_ASSERT(stackArgAreaSizeUnaligned <= stackArgAreaSizeAligned);
+
+ // Record the masm.framePushed() value at this point, before we push args
+ // for the call, but including the alignment space placed above the args.
+ // This defines the lower limit of the stackmap that will be created for
+ // this call.
+ MOZ_ASSERT(
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.isNothing());
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.emplace(
+ // However much we've pushed so far
+ masm.framePushed() +
+ // Extra space we'll push to get the frame aligned
+ call->frameAlignAdjustment +
+ // Extra space we'll push to get the outbound arg area aligned
+ (stackArgAreaSizeAligned - stackArgAreaSizeUnaligned));
+
+ call->stackArgAreaSize = stackArgAreaSizeAligned;
+
+ size_t adjustment = call->stackArgAreaSize + call->frameAlignAdjustment;
+ fr.allocArgArea(adjustment);
+}
+
+ABIArg BaseCompiler::reservePointerArgument(FunctionCall* call) {
+ return call->abi.next(MIRType::Pointer);
+}
+
+// TODO / OPTIMIZE (Bug 1316821): Note passArg is used only in one place.
+// (Or it was, until Luke wandered through, but that can be fixed again.)
+// I'm not saying we should manually inline it, but we could hoist the
+// dispatch into the caller and have type-specific implementations of
+// passArg: passArgI32(), etc. Then those might be inlined, at least in PGO
+// builds.
+//
+// The bulk of the work here (60%) is in the next() call, though.
+//
+// Notably, since next() is so expensive, StackArgAreaSizeUnaligned()
+// becomes expensive too.
+//
+// Somehow there could be a trick here where the sequence of argument types
+// (read from the input stream) leads to a cached entry for
+// StackArgAreaSizeUnaligned() and for how to pass arguments...
+//
+// But at least we could reduce the cost of StackArgAreaSizeUnaligned() by
+// first reading the argument types into a (reusable) vector, then we have
+// the outgoing size at low cost, and then we can pass args based on the
+// info we read.
+
+void BaseCompiler::passArg(ValType type, const Stk& arg, FunctionCall* call) {
+ switch (type.kind()) {
+ case ValType::I32: {
+ ABIArg argLoc = call->abi.next(MIRType::Int32);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchI32 scratch(*this);
+ loadI32(arg, scratch);
+ masm.store32(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ } else {
+ loadI32(arg, RegI32(argLoc.gpr()));
+ }
+ break;
+ }
+ case ValType::I64: {
+ ABIArg argLoc = call->abi.next(MIRType::Int64);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchI32 scratch(*this);
+#ifdef JS_PUNBOX64
+ loadI64(arg, fromI32(scratch));
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+#else
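+ // On 32-bit targets the i64 value is stored to the stack as two 32-bit
+ // halves.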
+ loadI64Low(arg, scratch);
+ masm.store32(scratch, LowWord(Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase())));
+ loadI64High(arg, scratch);
+ masm.store32(scratch, HighWord(Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase())));
+#endif
+ } else {
+ loadI64(arg, RegI64(argLoc.gpr64()));
+ }
+ break;
+ }
+ case ValType::V128: {
+#ifdef ENABLE_WASM_SIMD
+ ABIArg argLoc = call->abi.next(MIRType::Simd128);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchV128 scratch(*this);
+ loadV128(arg, scratch);
+ masm.storeUnalignedSimd128(
+ (RegV128)scratch,
+ Address(masm.getStackPointer(), argLoc.offsetFromArgBase()));
+ break;
+ }
+ case ABIArg::GPR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+ case ABIArg::FPU: {
+ loadV128(arg, RegV128(argLoc.fpu()));
+ break;
+ }
+# if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+# endif
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ case ValType::F64: {
+ ABIArg argLoc = call->abi.next(MIRType::Double);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchF64 scratch(*this);
+ loadF64(arg, scratch);
+ masm.storeDouble(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ break;
+ }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+# if defined(JS_CODEGEN_ARM)
+ ScratchF64 scratch(*this);
+ loadF64(arg, scratch);
+ masm.ma_vxfer(scratch, argLoc.evenGpr(), argLoc.oddGpr());
+ break;
+# else
+ MOZ_CRASH("BaseCompiler platform hook: passArg F64 pair");
+# endif
+ }
+#endif
+ case ABIArg::FPU: {
+ loadF64(arg, RegF64(argLoc.fpu()));
+ break;
+ }
+ case ABIArg::GPR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ break;
+ }
+ case ValType::F32: {
+ ABIArg argLoc = call->abi.next(MIRType::Float32);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchF32 scratch(*this);
+ loadF32(arg, scratch);
+ masm.storeFloat32(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ break;
+ }
+ case ABIArg::GPR: {
+ ScratchF32 scratch(*this);
+ loadF32(arg, scratch);
+ masm.moveFloat32ToGPR(scratch, argLoc.gpr());
+ break;
+ }
+ case ABIArg::FPU: {
+ loadF32(arg, RegF32(argLoc.fpu()));
+ break;
+ }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+#endif
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ break;
+ }
+ case ValType::Ref: {
+ ABIArg argLoc = call->abi.next(MIRType::RefOrNull);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchRef scratch(*this);
+ loadRef(arg, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ } else {
+ loadRef(arg, RegRef(argLoc.gpr()));
+ }
+ break;
+ }
+ }
+}
+
+CodeOffset BaseCompiler::callDefinition(uint32_t funcIndex,
+ const FunctionCall& call) {
+ CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Func);
+ return masm.call(desc, funcIndex);
+}
+
+CodeOffset BaseCompiler::callSymbolic(SymbolicAddress callee,
+ const FunctionCall& call) {
+ CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Symbolic);
+ return masm.call(desc, callee);
+}
+
+// Precondition: sync()
+
+class OutOfLineAbortingTrap : public OutOfLineCode {
+ Trap trap_;
+ BytecodeOffset off_;
+
+ public:
+ OutOfLineAbortingTrap(Trap trap, BytecodeOffset off)
+ : trap_(trap), off_(off) {}
+
+ virtual void generate(MacroAssembler* masm) override {
+ masm->wasmTrap(trap_, off_);
+ MOZ_ASSERT(!rejoin()->bound());
+ }
+};
+
+bool BaseCompiler::callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
+ const Stk& indexVal, const FunctionCall& call,
+ CodeOffset* fastCallOffset,
+ CodeOffset* slowCallOffset) {
+ CallIndirectId callIndirectId =
+ CallIndirectId::forFuncType(moduleEnv_, funcTypeIndex);
+ MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
+
+ const TableDesc& table = moduleEnv_.tables[tableIndex];
+
+ loadI32(indexVal, RegI32(WasmTableCallIndexReg));
+
+ CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Indirect);
+ CalleeDesc callee =
+ CalleeDesc::wasmTable(moduleEnv_, table, tableIndex, callIndirectId);
+ OutOfLineCode* oob = addOutOfLineCode(
+ new (alloc_) OutOfLineAbortingTrap(Trap::OutOfBounds, bytecodeOffset()));
+ if (!oob) {
+ return false;
+ }
+ Label* nullCheckFailed = nullptr;
+#ifndef WASM_HAS_HEAPREG
+ OutOfLineCode* nullref = addOutOfLineCode(new (alloc_) OutOfLineAbortingTrap(
+ Trap::IndirectCallToNull, bytecodeOffset()));
+ if (!nullref) {
+ return false;
+ }
+ nullCheckFailed = nullref->entry();
+#endif
+ masm.wasmCallIndirect(desc, callee, oob->entry(), nullCheckFailed,
+ mozilla::Nothing(), fastCallOffset, slowCallOffset);
+ return true;
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+void BaseCompiler::callRef(const Stk& calleeRef, const FunctionCall& call,
+ CodeOffset* fastCallOffset,
+ CodeOffset* slowCallOffset) {
+ CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::FuncRef);
+ CalleeDesc callee = CalleeDesc::wasmFuncRef();
+
+ loadRef(calleeRef, RegRef(WasmCallRefReg));
+ masm.wasmCallRef(desc, callee, fastCallOffset, slowCallOffset);
+}
+#endif
+
+// Precondition: sync()
+
+CodeOffset BaseCompiler::callImport(unsigned instanceDataOffset,
+ const FunctionCall& call) {
+ CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Import);
+ CalleeDesc callee = CalleeDesc::import(instanceDataOffset);
+ return masm.wasmCallImport(desc, callee);
+}
+
+CodeOffset BaseCompiler::builtinCall(SymbolicAddress builtin,
+ const FunctionCall& call) {
+ return callSymbolic(builtin, call);
+}
+
+CodeOffset BaseCompiler::builtinInstanceMethodCall(
+ const SymbolicAddressSignature& builtin, const ABIArg& instanceArg,
+ const FunctionCall& call) {
+#ifndef RABALDR_PIN_INSTANCE
+ // Builtin method calls assume the instance register has been set.
+ fr.loadInstancePtr(InstanceReg);
+#endif
+ CallSiteDesc desc(bytecodeOffset(), CallSiteDesc::Symbolic);
+ return masm.wasmCallBuiltinInstanceMethod(desc, instanceArg, builtin.identity,
+ builtin.failureMode);
+}
+
+bool BaseCompiler::pushCallResults(const FunctionCall& call, ResultType type,
+ const StackResultsLoc& loc) {
+#if defined(JS_CODEGEN_ARM)
+ // pushResults currently bypasses special-case code in captureReturnedFxx()
+ // that converts GPR results to FPR results for systemABI+softFP. If we ever
+ // start using that combination for calls, we will need more code. This
+ // assert is stronger than we need (we only care about results in return
+ // registers), but that's OK.
+ MOZ_ASSERT(!call.usesSystemAbi || call.hardFP);
+#endif
+ return pushResults(type, fr.stackResultsBase(loc.bytes()));
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Exception handling
+
+// Abstracted helper for throwing, used for throw, rethrow, and rethrowing
+// at the end of a series of catch blocks (if none matched the exception).
+bool BaseCompiler::throwFrom(RegRef exn) {
+ pushRef(exn);
+
+ // ThrowException invokes a trap, and the rest is dead code.
+ return emitInstanceCall(SASigThrowException);
+}
+
+void BaseCompiler::loadTag(RegPtr instance, uint32_t tagIndex, RegRef tagDst) {
+ size_t offset =
+ Instance::offsetInData(moduleEnv_.offsetOfTagInstanceData(tagIndex));
+ masm.loadPtr(Address(instance, offset), tagDst);
+}
+
+void BaseCompiler::consumePendingException(RegRef* exnDst, RegRef* tagDst) {
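+ // Load the pending exception and its tag from the instance, then clear both
+ // instance slots (with the required GC barriers) so the exception is no
+ // longer considered pending.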
+ RegPtr pendingAddr = RegPtr(PreBarrierReg);
+ needPtr(pendingAddr);
+ masm.computeEffectiveAddress(
+ Address(InstanceReg, Instance::offsetOfPendingException()), pendingAddr);
+ *exnDst = needRef();
+ masm.loadPtr(Address(pendingAddr, 0), *exnDst);
+ emitBarrieredClear(pendingAddr);
+
+ *tagDst = needRef();
+ masm.computeEffectiveAddress(
+ Address(InstanceReg, Instance::offsetOfPendingExceptionTag()),
+ pendingAddr);
+ masm.loadPtr(Address(pendingAddr, 0), *tagDst);
+ emitBarrieredClear(pendingAddr);
+ freePtr(pendingAddr);
+}
+
+bool BaseCompiler::startTryNote(size_t* tryNoteIndex) {
+ // Check the previous try note to ensure that we don't share an edge with
+ // it that could lead to ambiguity. Insert a nop, if required.
+ TryNoteVector& tryNotes = masm.tryNotes();
+ if (tryNotes.length() > 0) {
+ const TryNote& previous = tryNotes.back();
+ uint32_t currentOffset = masm.currentOffset();
+ if (previous.tryBodyBegin() == currentOffset ||
+ previous.tryBodyEnd() == currentOffset) {
+ masm.nop();
+ }
+ }
+
+ // Mark the beginning of the try note
+ wasm::TryNote tryNote = wasm::TryNote();
+ tryNote.setTryBodyBegin(masm.currentOffset());
+ return masm.append(tryNote, tryNoteIndex);
+}
+
+void BaseCompiler::finishTryNote(size_t tryNoteIndex) {
+ TryNoteVector& tryNotes = masm.tryNotes();
+ TryNote& tryNote = tryNotes[tryNoteIndex];
+
+ // Disallow zero-length try notes by inserting a no-op
+ if (tryNote.tryBodyBegin() == masm.currentOffset()) {
+ masm.nop();
+ }
+
+ // Check the previous try note to ensure that we don't share an edge with
+ // it that could lead to ambiguity. Insert a nop, if required.
+ if (tryNotes.length() > 0) {
+ const TryNote& previous = tryNotes.back();
+ uint32_t currentOffset = masm.currentOffset();
+ if (previous.tryBodyEnd() == currentOffset) {
+ masm.nop();
+ }
+ }
+
+ // Don't set the end of the try note if we've OOM'ed, as the above nops may
+ // not have been placed. This is okay, as this compilation will be thrown
+ // away.
+ if (masm.oom()) {
+ return;
+ }
+
+ // Mark the end of the try note
+ tryNote.setTryBodyEnd(masm.currentOffset());
+}
+
+////////////////////////////////////////////////////////////
+//
+// Platform-specific popping and register targeting.
+
+// The simple popping methods pop values into targeted registers; the caller
+// can free registers using standard functions. These are always called
+// popXForY where X says something about types and Y something about the
+// operation being targeted.
+
+RegI32 BaseCompiler::needRotate64Temp() {
+#if defined(JS_CODEGEN_X86)
+ return needI32();
+#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ return RegI32::Invalid();
+#else
+ MOZ_CRASH("BaseCompiler platform hook: needRotate64Temp");
+#endif
+}
+
+void BaseCompiler::popAndAllocateForDivAndRemI32(RegI32* r0, RegI32* r1,
+ RegI32* reserved) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r0 must be eax, and edx will be clobbered.
+ need2xI32(specific_.eax, specific_.edx);
+ *r1 = popI32();
+ *r0 = popI32ToSpecific(specific_.eax);
+ *reserved = specific_.edx;
+#else
+ pop2xI32(r0, r1);
+#endif
+}
+
+static void QuotientI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd,
+ RegI32 reserved, IsUnsigned isUnsigned) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ masm.quotient32(rs, rsd, reserved, isUnsigned);
+#else
+ masm.quotient32(rs, rsd, isUnsigned);
+#endif
+}
+
+static void RemainderI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd,
+ RegI32 reserved, IsUnsigned isUnsigned) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ masm.remainder32(rs, rsd, reserved, isUnsigned);
+#else
+ masm.remainder32(rs, rsd, isUnsigned);
+#endif
+}
+
+void BaseCompiler::popAndAllocateForMulI64(RegI64* r0, RegI64* r1,
+ RegI32* temp) {
+#if defined(JS_CODEGEN_X64)
+ pop2xI64(r0, r1);
+#elif defined(JS_CODEGEN_X86)
+ // lhsDest must be edx:eax and rhs must not be that.
+ needI64(specific_.edx_eax);
+ *r1 = popI64();
+ *r0 = popI64ToSpecific(specific_.edx_eax);
+ *temp = needI32();
+#elif defined(JS_CODEGEN_MIPS64)
+ pop2xI64(r0, r1);
+#elif defined(JS_CODEGEN_ARM)
+ pop2xI64(r0, r1);
+ *temp = needI32();
+#elif defined(JS_CODEGEN_ARM64)
+ pop2xI64(r0, r1);
+#elif defined(JS_CODEGEN_LOONG64)
+ pop2xI64(r0, r1);
+#else
+ MOZ_CRASH("BaseCompiler porting interface: popAndAllocateForMulI64");
+#endif
+}
+
+#ifndef RABALDR_INT_DIV_I64_CALLOUT
+
+void BaseCompiler::popAndAllocateForDivAndRemI64(RegI64* r0, RegI64* r1,
+ RegI64* reserved,
+ IsRemainder isRemainder) {
+# if defined(JS_CODEGEN_X64)
+ // r0 must be rax, and rdx will be clobbered.
+ need2xI64(specific_.rax, specific_.rdx);
+ *r1 = popI64();
+ *r0 = popI64ToSpecific(specific_.rax);
+ *reserved = specific_.rdx;
+# elif defined(JS_CODEGEN_ARM64)
+ pop2xI64(r0, r1);
+ if (isRemainder) {
+ *reserved = needI64();
+ }
+# else
+ pop2xI64(r0, r1);
+# endif
+}
+
+static void QuotientI64(MacroAssembler& masm, RegI64 rhs, RegI64 srcDest,
+ RegI64 reserved, IsUnsigned isUnsigned) {
+# if defined(JS_CODEGEN_X64)
+ // The caller must set up the following situation.
+ MOZ_ASSERT(srcDest.reg == rax);
+ MOZ_ASSERT(reserved.reg == rdx);
+ if (isUnsigned) {
+ masm.xorq(rdx, rdx);
+ masm.udivq(rhs.reg);
+ } else {
+ masm.cqo();
+ masm.idivq(rhs.reg);
+ }
+# elif defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(reserved.isInvalid());
+ if (isUnsigned) {
+ masm.as_ddivu(srcDest.reg, rhs.reg);
+ } else {
+ masm.as_ddiv(srcDest.reg, rhs.reg);
+ }
+ masm.as_mflo(srcDest.reg);
+# elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(reserved.isInvalid());
+ ARMRegister sd(srcDest.reg, 64);
+ ARMRegister r(rhs.reg, 64);
+ if (isUnsigned) {
+ masm.Udiv(sd, sd, r);
+ } else {
+ masm.Sdiv(sd, sd, r);
+ }
+# elif defined(JS_CODEGEN_LOONG64)
+ if (isUnsigned) {
+ masm.as_div_du(srcDest.reg, srcDest.reg, rhs.reg);
+ } else {
+ masm.as_div_d(srcDest.reg, srcDest.reg, rhs.reg);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: quotientI64");
+# endif
+}
+
+static void RemainderI64(MacroAssembler& masm, RegI64 rhs, RegI64 srcDest,
+ RegI64 reserved, IsUnsigned isUnsigned) {
+# if defined(JS_CODEGEN_X64)
+ // The caller must set up the following situation.
+ MOZ_ASSERT(srcDest.reg == rax);
+ MOZ_ASSERT(reserved.reg == rdx);
+
+ if (isUnsigned) {
+ masm.xorq(rdx, rdx);
+ masm.udivq(rhs.reg);
+ } else {
+ masm.cqo();
+ masm.idivq(rhs.reg);
+ }
+ masm.movq(rdx, rax);
+# elif defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(reserved.isInvalid());
+ if (isUnsigned) {
+ masm.as_ddivu(srcDest.reg, rhs.reg);
+ } else {
+ masm.as_ddiv(srcDest.reg, rhs.reg);
+ }
+ masm.as_mfhi(srcDest.reg);
+# elif defined(JS_CODEGEN_ARM64)
+ ARMRegister sd(srcDest.reg, 64);
+ ARMRegister r(rhs.reg, 64);
+ ARMRegister t(reserved.reg, 64);
+ if (isUnsigned) {
+ masm.Udiv(t, sd, r);
+ } else {
+ masm.Sdiv(t, sd, r);
+ }
+ masm.Mul(t, t, r);
+ masm.Sub(sd, sd, t);
+# elif defined(JS_CODEGEN_LOONG64)
+ if (isUnsigned) {
+ masm.as_mod_du(srcDest.reg, srcDest.reg, rhs.reg);
+ } else {
+ masm.as_mod_d(srcDest.reg, srcDest.reg, rhs.reg);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: remainderI64");
+# endif
+}
+
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+RegI32 BaseCompiler::popI32RhsForShift() {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r1 must be ecx for a variable shift, unless BMI2 is available.
+ if (!Assembler::HasBMI2()) {
+ return popI32(specific_.ecx);
+ }
+#endif
+ RegI32 r = popI32();
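+ // Wasm defines shift counts modulo 32; the explicit mask on ARM keeps those
+ // semantics regardless of how the hardware treats counts of 32 or more.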
+#if defined(JS_CODEGEN_ARM)
+ masm.and32(Imm32(31), r);
+#endif
+ return r;
+}
+
+RegI32 BaseCompiler::popI32RhsForShiftI64() {
+#if defined(JS_CODEGEN_X86)
+ // A limitation in the x86 masm requires ecx here
+ return popI32(specific_.ecx);
+#elif defined(JS_CODEGEN_X64)
+ if (!Assembler::HasBMI2()) {
+ return popI32(specific_.ecx);
+ }
+ return popI32();
+#else
+ return popI32();
+#endif
+}
+
+RegI64 BaseCompiler::popI64RhsForShift() {
+#if defined(JS_CODEGEN_X86)
+ // r1 must be ecx for a variable shift.
+ needI32(specific_.ecx);
+ return popI64ToSpecific(widenI32(specific_.ecx));
+#else
+# if defined(JS_CODEGEN_X64)
+ // r1 must be rcx for a variable shift, unless BMI2 is available.
+ if (!Assembler::HasBMI2()) {
+ needI64(specific_.rcx);
+ return popI64ToSpecific(specific_.rcx);
+ }
+# endif
+ // No masking is necessary on 64-bit platforms, and on arm32 the masm
+ // implementation masks.
+ return popI64();
+#endif
+}
+
+RegI32 BaseCompiler::popI32RhsForRotate() {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r1 must be ecx for a variable rotate.
+ return popI32(specific_.ecx);
+#else
+ return popI32();
+#endif
+}
+
+RegI64 BaseCompiler::popI64RhsForRotate() {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r1 must be ecx for a variable rotate.
+ needI32(specific_.ecx);
+ return popI64ToSpecific(widenI32(specific_.ecx));
+#else
+ return popI64();
+#endif
+}
+
+void BaseCompiler::popI32ForSignExtendI64(RegI64* r0) {
+#if defined(JS_CODEGEN_X86)
+ // r0 must be edx:eax for cdq
+ need2xI32(specific_.edx, specific_.eax);
+ *r0 = specific_.edx_eax;
+ popI32ToSpecific(specific_.eax);
+#else
+ *r0 = widenI32(popI32());
+#endif
+}
+
+void BaseCompiler::popI64ForSignExtendI64(RegI64* r0) {
+#if defined(JS_CODEGEN_X86)
+ // r0 must be edx:eax for cdq
+ need2xI32(specific_.edx, specific_.eax);
+ // Low on top, high underneath
+ *r0 = popI64ToSpecific(specific_.edx_eax);
+#else
+ *r0 = popI64();
+#endif
+}
+
+class OutOfLineTruncateCheckF32OrF64ToI32 : public OutOfLineCode {
+ AnyReg src;
+ RegI32 dest;
+ TruncFlags flags;
+ BytecodeOffset off;
+
+ public:
+ OutOfLineTruncateCheckF32OrF64ToI32(AnyReg src, RegI32 dest, TruncFlags flags,
+ BytecodeOffset off)
+ : src(src), dest(dest), flags(flags), off(off) {}
+
+ virtual void generate(MacroAssembler* masm) override {
+ if (src.tag == AnyReg::F32) {
+ masm->oolWasmTruncateCheckF32ToI32(src.f32(), dest, flags, off, rejoin());
+ } else if (src.tag == AnyReg::F64) {
+ masm->oolWasmTruncateCheckF64ToI32(src.f64(), dest, flags, off, rejoin());
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ }
+};
+
+bool BaseCompiler::truncateF32ToI32(RegF32 src, RegI32 dest, TruncFlags flags) {
+ BytecodeOffset off = bytecodeOffset();
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI32(
+ AnyReg(src), dest, flags, off));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateFloat32ToUInt32(src, dest, isSaturating, ool->entry());
+ } else {
+ masm.wasmTruncateFloat32ToInt32(src, dest, isSaturating, ool->entry());
+ }
+ masm.bind(ool->rejoin());
+ return true;
+}
+
+bool BaseCompiler::truncateF64ToI32(RegF64 src, RegI32 dest, TruncFlags flags) {
+ BytecodeOffset off = bytecodeOffset();
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI32(
+ AnyReg(src), dest, flags, off));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateDoubleToUInt32(src, dest, isSaturating, ool->entry());
+ } else {
+ masm.wasmTruncateDoubleToInt32(src, dest, isSaturating, ool->entry());
+ }
+ masm.bind(ool->rejoin());
+ return true;
+}
+
+class OutOfLineTruncateCheckF32OrF64ToI64 : public OutOfLineCode {
+ AnyReg src;
+ RegI64 dest;
+ TruncFlags flags;
+ BytecodeOffset off;
+
+ public:
+ OutOfLineTruncateCheckF32OrF64ToI64(AnyReg src, RegI64 dest, TruncFlags flags,
+ BytecodeOffset off)
+ : src(src), dest(dest), flags(flags), off(off) {}
+
+ virtual void generate(MacroAssembler* masm) override {
+ if (src.tag == AnyReg::F32) {
+ masm->oolWasmTruncateCheckF32ToI64(src.f32(), dest, flags, off, rejoin());
+ } else if (src.tag == AnyReg::F64) {
+ masm->oolWasmTruncateCheckF64ToI64(src.f64(), dest, flags, off, rejoin());
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ }
+};
+
+#ifndef RABALDR_FLOAT_TO_I64_CALLOUT
+
+RegF64 BaseCompiler::needTempForFloatingToI64(TruncFlags flags) {
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (flags & TRUNC_UNSIGNED) {
+ return needF64();
+ }
+# endif
+ return RegF64::Invalid();
+}
+
+bool BaseCompiler::truncateF32ToI64(RegF32 src, RegI64 dest, TruncFlags flags,
+ RegF64 temp) {
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(
+ AnyReg(src), dest, flags, bytecodeOffset()));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateFloat32ToUInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ }
+ return true;
+}
+
+bool BaseCompiler::truncateF64ToI64(RegF64 src, RegI64 dest, TruncFlags flags,
+ RegF64 temp) {
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(
+ AnyReg(src), dest, flags, bytecodeOffset()));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateDoubleToUInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ } else {
+ masm.wasmTruncateDoubleToInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ }
+ return true;
+}
+
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+
+RegI32 BaseCompiler::needConvertI64ToFloatTemp(ValType to, bool isUnsigned) {
+ bool needs = false;
+ if (to == ValType::F64) {
+ needs = isUnsigned && masm.convertUInt64ToDoubleNeedsTemp();
+ } else {
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ needs = true;
+# endif
+ }
+ return needs ? needI32() : RegI32::Invalid();
+}
+
+void BaseCompiler::convertI64ToF32(RegI64 src, bool isUnsigned, RegF32 dest,
+ RegI32 temp) {
+ if (isUnsigned) {
+ masm.convertUInt64ToFloat32(src, dest, temp);
+ } else {
+ masm.convertInt64ToFloat32(src, dest);
+ }
+}
+
+void BaseCompiler::convertI64ToF64(RegI64 src, bool isUnsigned, RegF64 dest,
+ RegI32 temp) {
+ if (isUnsigned) {
+ masm.convertUInt64ToDouble(src, dest, temp);
+ } else {
+ masm.convertInt64ToDouble(src, dest);
+ }
+}
+
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+//////////////////////////////////////////////////////////////////////
+//
+// Global variable access.
+
+Address BaseCompiler::addressOfGlobalVar(const GlobalDesc& global, RegPtr tmp) {
+ uint32_t globalToInstanceOffset = Instance::offsetInData(global.offset());
+#ifdef RABALDR_PIN_INSTANCE
+ movePtr(RegPtr(InstanceReg), tmp);
+#else
+ fr.loadInstancePtr(tmp);
+#endif
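+ // Indirect globals hold a pointer to their cell in the instance data; load
+ // that pointer and address the cell directly. Direct globals live inline in
+ // the instance data.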
+ if (global.isIndirect()) {
+ masm.loadPtr(Address(tmp, globalToInstanceOffset), tmp);
+ return Address(tmp, 0);
+ }
+ return Address(tmp, globalToInstanceOffset);
+}
+
+//////////////////////////////////////////////////////////////////////
+//
+// Table access.
+
+Address BaseCompiler::addressOfTableField(uint32_t tableIndex,
+ uint32_t fieldOffset,
+ RegPtr instance) {
+ uint32_t tableToInstanceOffset = wasm::Instance::offsetInData(
+ moduleEnv_.offsetOfTableInstanceData(tableIndex) + fieldOffset);
+ return Address(instance, tableToInstanceOffset);
+}
+
+void BaseCompiler::loadTableLength(uint32_t tableIndex, RegPtr instance,
+ RegI32 length) {
+ masm.load32(addressOfTableField(
+ tableIndex, offsetof(TableInstanceData, length), instance),
+ length);
+}
+
+void BaseCompiler::loadTableElements(uint32_t tableIndex, RegPtr instance,
+ RegPtr elements) {
+ masm.loadPtr(addressOfTableField(
+ tableIndex, offsetof(TableInstanceData, elements), instance),
+ elements);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Basic emitters for simple operators.
+
+static void AddI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.add32(rs, rsd);
+}
+
+static void AddImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.add32(Imm32(c), rsd);
+}
+
+static void SubI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.sub32(rs, rsd);
+}
+
+static void SubImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.sub32(Imm32(c), rsd);
+}
+
+static void MulI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.mul32(rs, rsd);
+}
+
+static void OrI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.or32(rs, rsd);
+}
+
+static void OrImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.or32(Imm32(c), rsd);
+}
+
+static void AndI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.and32(rs, rsd);
+}
+
+static void AndImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.and32(Imm32(c), rsd);
+}
+
+static void XorI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.xor32(rs, rsd);
+}
+
+static void XorImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.xor32(Imm32(c), rsd);
+}
+
+static void ClzI32(MacroAssembler& masm, RegI32 rsd) {
+ masm.clz32(rsd, rsd, IsKnownNotZero(false));
+}
+
+static void CtzI32(MacroAssembler& masm, RegI32 rsd) {
+ masm.ctz32(rsd, rsd, IsKnownNotZero(false));
+}
+
+// Currently common to PopcntI32 and PopcntI64
+static RegI32 PopcntTemp(BaseCompiler& bc) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ return AssemblerX86Shared::HasPOPCNT() ? RegI32::Invalid() : bc.needI32();
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+ return bc.needI32();
+#else
+ MOZ_CRASH("BaseCompiler platform hook: PopcntTemp");
+#endif
+}
+
+static void PopcntI32(BaseCompiler& bc, RegI32 rsd, RegI32 temp) {
+ bc.masm.popcnt32(rsd, rsd, temp);
+}
+
+static void ShlI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.lshift32(rs, rsd);
+}
+
+static void ShlImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.lshift32(Imm32(c & 31), rsd);
+}
+
+static void ShrI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.rshift32Arithmetic(rs, rsd);
+}
+
+static void ShrImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.rshift32Arithmetic(Imm32(c & 31), rsd);
+}
+
+static void ShrUI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.rshift32(rs, rsd);
+}
+
+static void ShrUImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.rshift32(Imm32(c & 31), rsd);
+}
+
+static void RotlI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.rotateLeft(rs, rsd, rsd);
+}
+
+static void RotlImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.rotateLeft(Imm32(c & 31), rsd, rsd);
+}
+
+static void RotrI32(MacroAssembler& masm, RegI32 rs, RegI32 rsd) {
+ masm.rotateRight(rs, rsd, rsd);
+}
+
+static void RotrImmI32(MacroAssembler& masm, int32_t c, RegI32 rsd) {
+ masm.rotateRight(Imm32(c & 31), rsd, rsd);
+}
+
+static void EqzI32(MacroAssembler& masm, RegI32 rsd) {
+ masm.cmp32Set(Assembler::Equal, rsd, Imm32(0), rsd);
+}
+
+static void WrapI64ToI32(MacroAssembler& masm, RegI64 rs, RegI32 rd) {
+ masm.move64To32(rs, rd);
+}
+
+static void AddI64(MacroAssembler& masm, RegI64 rs, RegI64 rsd) {
+ masm.add64(rs, rsd);
+}
+
+static void AddImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.add64(Imm64(c), rsd);
+}
+
+static void SubI64(MacroAssembler& masm, RegI64 rs, RegI64 rsd) {
+ masm.sub64(rs, rsd);
+}
+
+static void SubImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.sub64(Imm64(c), rsd);
+}
+
+static void OrI64(MacroAssembler& masm, RegI64 rs, RegI64 rsd) {
+ masm.or64(rs, rsd);
+}
+
+static void OrImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.or64(Imm64(c), rsd);
+}
+
+static void AndI64(MacroAssembler& masm, RegI64 rs, RegI64 rsd) {
+ masm.and64(rs, rsd);
+}
+
+static void AndImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.and64(Imm64(c), rsd);
+}
+
+static void XorI64(MacroAssembler& masm, RegI64 rs, RegI64 rsd) {
+ masm.xor64(rs, rsd);
+}
+
+static void XorImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.xor64(Imm64(c), rsd);
+}
+
+static void ClzI64(BaseCompiler& bc, RegI64 rsd) {
+ bc.masm.clz64(rsd, bc.lowPart(rsd));
+ bc.maybeClearHighPart(rsd);
+}
+
+static void CtzI64(BaseCompiler& bc, RegI64 rsd) {
+ bc.masm.ctz64(rsd, bc.lowPart(rsd));
+ bc.maybeClearHighPart(rsd);
+}
+
+static void PopcntI64(BaseCompiler& bc, RegI64 rsd, RegI32 temp) {
+ bc.masm.popcnt64(rsd, rsd, temp);
+}
+
+static void ShlI64(BaseCompiler& bc, RegI64 rs, RegI64 rsd) {
+ bc.masm.lshift64(bc.lowPart(rs), rsd);
+}
+
+static void ShlImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.lshift64(Imm32(c & 63), rsd);
+}
+
+static void ShrI64(BaseCompiler& bc, RegI64 rs, RegI64 rsd) {
+ bc.masm.rshift64Arithmetic(bc.lowPart(rs), rsd);
+}
+
+static void ShrImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.rshift64Arithmetic(Imm32(c & 63), rsd);
+}
+
+static void ShrUI64(BaseCompiler& bc, RegI64 rs, RegI64 rsd) {
+ bc.masm.rshift64(bc.lowPart(rs), rsd);
+}
+
+static void ShrUImmI64(MacroAssembler& masm, int64_t c, RegI64 rsd) {
+ masm.rshift64(Imm32(c & 63), rsd);
+}
+
+static void EqzI64(MacroAssembler& masm, RegI64 rs, RegI32 rd) {
+#ifdef JS_PUNBOX64
+ masm.cmpPtrSet(Assembler::Equal, rs.reg, ImmWord(0), rd);
+#else
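+ // On 32-bit targets, OR the two halves together; the value is zero iff the
+ // combined bits are zero.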
+ MOZ_ASSERT(rs.low == rd);
+ masm.or32(rs.high, rs.low);
+ masm.cmp32Set(Assembler::Equal, rs.low, Imm32(0), rd);
+#endif
+}
+
+static void AddF64(MacroAssembler& masm, RegF64 rs, RegF64 rsd) {
+ masm.addDouble(rs, rsd);
+}
+
+static void SubF64(MacroAssembler& masm, RegF64 rs, RegF64 rsd) {
+ masm.subDouble(rs, rsd);
+}
+
+static void MulF64(MacroAssembler& masm, RegF64 rs, RegF64 rsd) {
+ masm.mulDouble(rs, rsd);
+}
+
+static void DivF64(MacroAssembler& masm, RegF64 rs, RegF64 rsd) {
+ masm.divDouble(rs, rsd);
+}
+
+static void MinF64(BaseCompiler& bc, RegF64 rs, RegF64 rsd) {
+ // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in MinF32.
+#ifdef RABALDR_SCRATCH_F64
+ ScratchF64 zero(bc.ra);
+#else
+ ScratchF64 zero(bc.masm);
+#endif
+ bc.masm.loadConstantDouble(0, zero);
+ bc.masm.subDouble(zero, rsd);
+ bc.masm.subDouble(zero, rs);
+ bc.masm.minDouble(rs, rsd, HandleNaNSpecially(true));
+}
+
+static void MaxF64(BaseCompiler& bc, RegF64 rs, RegF64 rsd) {
+ // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in MinF32.
+#ifdef RABALDR_SCRATCH_F64
+ ScratchF64 zero(bc.ra);
+#else
+ ScratchF64 zero(bc.masm);
+#endif
+ bc.masm.loadConstantDouble(0, zero);
+ bc.masm.subDouble(zero, rsd);
+ bc.masm.subDouble(zero, rs);
+ bc.masm.maxDouble(rs, rsd, HandleNaNSpecially(true));
+}
+
+static void CopysignF64(MacroAssembler& masm, RegF64 rs, RegF64 rsd,
+ RegI64 temp0, RegI64 temp1) {
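+ // Combine the magnitude bits of rsd (masked with INT64_MAX) with the sign
+ // bit of rs (masked with INT64_MIN) in integer registers, then move the
+ // result back to the FPR.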
+ masm.moveDoubleToGPR64(rsd, temp0);
+ masm.moveDoubleToGPR64(rs, temp1);
+ masm.and64(Imm64(INT64_MAX), temp0);
+ masm.and64(Imm64(INT64_MIN), temp1);
+ masm.or64(temp1, temp0);
+ masm.moveGPR64ToDouble(temp0, rsd);
+}
+
+static void AbsF64(MacroAssembler& masm, RegF64 rsd) {
+ masm.absDouble(rsd, rsd);
+}
+
+static void NegateF64(MacroAssembler& masm, RegF64 rsd) {
+ masm.negateDouble(rsd);
+}
+
+static void SqrtF64(MacroAssembler& masm, RegF64 rsd) {
+ masm.sqrtDouble(rsd, rsd);
+}
+
+static void AddF32(MacroAssembler& masm, RegF32 rs, RegF32 rsd) {
+ masm.addFloat32(rs, rsd);
+}
+
+static void SubF32(MacroAssembler& masm, RegF32 rs, RegF32 rsd) {
+ masm.subFloat32(rs, rsd);
+}
+
+static void MulF32(MacroAssembler& masm, RegF32 rs, RegF32 rsd) {
+ masm.mulFloat32(rs, rsd);
+}
+
+static void DivF32(MacroAssembler& masm, RegF32 rs, RegF32 rsd) {
+ masm.divFloat32(rs, rsd);
+}
+
+static void MinF32(BaseCompiler& bc, RegF32 rs, RegF32 rsd) {
+ // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): Don't do this if one of the operands
+ // is known to be a constant.
+#ifdef RABALDR_SCRATCH_F32
+ ScratchF32 zero(bc.ra);
+#else
+ ScratchF32 zero(bc.masm);
+#endif
+ bc.masm.loadConstantFloat32(0.f, zero);
+ bc.masm.subFloat32(zero, rsd);
+ bc.masm.subFloat32(zero, rs);
+ bc.masm.minFloat32(rs, rsd, HandleNaNSpecially(true));
+}
+
+static void MaxF32(BaseCompiler& bc, RegF32 rs, RegF32 rsd) {
+ // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in MinF32.
+#ifdef RABALDR_SCRATCH_F32
+ ScratchF32 zero(bc.ra);
+#else
+ ScratchF32 zero(bc.masm);
+#endif
+ bc.masm.loadConstantFloat32(0.f, zero);
+ bc.masm.subFloat32(zero, rsd);
+ bc.masm.subFloat32(zero, rs);
+ bc.masm.maxFloat32(rs, rsd, HandleNaNSpecially(true));
+}
+
+static void CopysignF32(MacroAssembler& masm, RegF32 rs, RegF32 rsd,
+ RegI32 temp0, RegI32 temp1) {
+ masm.moveFloat32ToGPR(rsd, temp0);
+ masm.moveFloat32ToGPR(rs, temp1);
+ masm.and32(Imm32(INT32_MAX), temp0);
+ masm.and32(Imm32(INT32_MIN), temp1);
+ masm.or32(temp1, temp0);
+ masm.moveGPRToFloat32(temp0, rsd);
+}
+
+static void AbsF32(MacroAssembler& masm, RegF32 rsd) {
+ masm.absFloat32(rsd, rsd);
+}
+
+static void NegateF32(MacroAssembler& masm, RegF32 rsd) {
+ masm.negateFloat(rsd);
+}
+
+static void SqrtF32(MacroAssembler& masm, RegF32 rsd) {
+ masm.sqrtFloat32(rsd, rsd);
+}
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+static void ConvertI64ToF32(MacroAssembler& masm, RegI64 rs, RegF32 rd) {
+ masm.convertInt64ToFloat32(rs, rd);
+}
+
+static void ConvertI64ToF64(MacroAssembler& masm, RegI64 rs, RegF64 rd) {
+ masm.convertInt64ToDouble(rs, rd);
+}
+#endif
+
+static void ReinterpretF32AsI32(MacroAssembler& masm, RegF32 rs, RegI32 rd) {
+ masm.moveFloat32ToGPR(rs, rd);
+}
+
+static void ReinterpretF64AsI64(MacroAssembler& masm, RegF64 rs, RegI64 rd) {
+ masm.moveDoubleToGPR64(rs, rd);
+}
+
+static void ConvertF64ToF32(MacroAssembler& masm, RegF64 rs, RegF32 rd) {
+ masm.convertDoubleToFloat32(rs, rd);
+}
+
+static void ConvertI32ToF32(MacroAssembler& masm, RegI32 rs, RegF32 rd) {
+ masm.convertInt32ToFloat32(rs, rd);
+}
+
+static void ConvertU32ToF32(MacroAssembler& masm, RegI32 rs, RegF32 rd) {
+ masm.convertUInt32ToFloat32(rs, rd);
+}
+
+static void ConvertF32ToF64(MacroAssembler& masm, RegF32 rs, RegF64 rd) {
+ masm.convertFloat32ToDouble(rs, rd);
+}
+
+static void ConvertI32ToF64(MacroAssembler& masm, RegI32 rs, RegF64 rd) {
+ masm.convertInt32ToDouble(rs, rd);
+}
+
+static void ConvertU32ToF64(MacroAssembler& masm, RegI32 rs, RegF64 rd) {
+ masm.convertUInt32ToDouble(rs, rd);
+}
+
+static void ReinterpretI32AsF32(MacroAssembler& masm, RegI32 rs, RegF32 rd) {
+ masm.moveGPRToFloat32(rs, rd);
+}
+
+static void ReinterpretI64AsF64(MacroAssembler& masm, RegI64 rs, RegF64 rd) {
+ masm.moveGPR64ToDouble(rs, rd);
+}
+
+static void ExtendI32_8(BaseCompiler& bc, RegI32 rsd) {
+#ifdef JS_CODEGEN_X86
+ if (!bc.ra.isSingleByteI32(rsd)) {
+ ScratchI8 scratch(bc.ra);
+ bc.masm.move32(rsd, scratch);
+ bc.masm.move8SignExtend(scratch, rsd);
+ return;
+ }
+#endif
+ bc.masm.move8SignExtend(rsd, rsd);
+}
+
+static void ExtendI32_16(MacroAssembler& masm, RegI32 rsd) {
+ masm.move16SignExtend(rsd, rsd);
+}
+
+void BaseCompiler::emitMultiplyI64() {
+ RegI64 r, rs;
+ RegI32 temp;
+ popAndAllocateForMulI64(&r, &rs, &temp);
+ masm.mul64(rs, r, temp);
+ maybeFree(temp);
+ freeI64(rs);
+ pushI64(r);
+}
+
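+// Shared emitter for integer division and remainder. It guards against
+// division by zero and, for signed operations, against the INT_MIN / -1
+// overflow case before performing the platform-specific operation; checks
+// that a known-constant divisor cannot trigger are skipped.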
+template <typename RegType, typename IntType>
+void BaseCompiler::quotientOrRemainder(
+ RegType rs, RegType rsd, RegType reserved, IsUnsigned isUnsigned,
+ ZeroOnOverflow zeroOnOverflow, bool isConst, IntType c,
+ void (*operate)(MacroAssembler& masm, RegType rs, RegType rsd,
+ RegType reserved, IsUnsigned isUnsigned)) {
+ Label done;
+ if (!isConst || c == 0) {
+ checkDivideByZero(rs);
+ }
+ if (!isUnsigned && (!isConst || c == -1)) {
+ checkDivideSignedOverflow(rs, rsd, &done, zeroOnOverflow);
+ }
+ operate(masm, rs, rsd, reserved, isUnsigned);
+ masm.bind(&done);
+}
+
+void BaseCompiler::emitQuotientI32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 0)) {
+ if (power != 0) {
+ RegI32 r = popI32();
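+ // Signed division by 2^power: bias a negative dividend by (2^power - 1) so
+ // that the arithmetic right shift rounds toward zero, as wasm requires.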
+ Label positive;
+ masm.branchTest32(Assembler::NotSigned, r, r, &positive);
+ masm.add32(Imm32(c - 1), r);
+ masm.bind(&positive);
+
+ masm.rshift32Arithmetic(Imm32(power & 31), r);
+ pushI32(r);
+ }
+ } else {
+ bool isConst = peekConst(&c);
+ RegI32 r, rs, reserved;
+ popAndAllocateForDivAndRemI32(&r, &rs, &reserved);
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(false),
+ ZeroOnOverflow(false), isConst, c, QuotientI32);
+ maybeFree(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitQuotientU32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 0)) {
+ if (power != 0) {
+ RegI32 r = popI32();
+ masm.rshift32(Imm32(power & 31), r);
+ pushI32(r);
+ }
+ } else {
+ bool isConst = peekConst(&c);
+ RegI32 r, rs, reserved;
+ popAndAllocateForDivAndRemI32(&r, &rs, &reserved);
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(true),
+ ZeroOnOverflow(false), isConst, c, QuotientI32);
+ maybeFree(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitRemainderI32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 1)) {
+ RegI32 r = popI32();
+ RegI32 temp = needI32();
+ moveI32(r, temp);
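+ // Round temp toward zero to a multiple of 2^power (mirroring the signed
+ // quotient computation); the remainder is then r - temp.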
+
+ Label positive;
+ masm.branchTest32(Assembler::NotSigned, temp, temp, &positive);
+ masm.add32(Imm32(c - 1), temp);
+ masm.bind(&positive);
+
+ masm.rshift32Arithmetic(Imm32(power & 31), temp);
+ masm.lshift32(Imm32(power & 31), temp);
+ masm.sub32(temp, r);
+ freeI32(temp);
+
+ pushI32(r);
+ } else {
+ bool isConst = peekConst(&c);
+ RegI32 r, rs, reserved;
+ popAndAllocateForDivAndRemI32(&r, &rs, &reserved);
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(false),
+ ZeroOnOverflow(true), isConst, c, RemainderI32);
+ maybeFree(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitRemainderU32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 1)) {
+ RegI32 r = popI32();
+ masm.and32(Imm32(c - 1), r);
+ pushI32(r);
+ } else {
+ bool isConst = peekConst(&c);
+ RegI32 r, rs, reserved;
+ popAndAllocateForDivAndRemI32(&r, &rs, &reserved);
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(true), ZeroOnOverflow(true),
+ isConst, c, RemainderI32);
+ maybeFree(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+#ifndef RABALDR_INT_DIV_I64_CALLOUT
+void BaseCompiler::emitQuotientI64() {
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 0)) {
+ if (power != 0) {
+ RegI64 r = popI64();
+ Label positive;
+ masm.branchTest64(Assembler::NotSigned, r, r, RegI32::Invalid(),
+ &positive);
+ masm.add64(Imm64(c - 1), r);
+ masm.bind(&positive);
+
+ masm.rshift64Arithmetic(Imm32(power & 63), r);
+ pushI64(r);
+ }
+ } else {
+ bool isConst = peekConst(&c);
+ RegI64 r, rs, reserved;
+ popAndAllocateForDivAndRemI64(&r, &rs, &reserved, IsRemainder(false));
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(false),
+ ZeroOnOverflow(false), isConst, c, QuotientI64);
+ maybeFree(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitQuotientU64() {
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 0)) {
+ if (power != 0) {
+ RegI64 r = popI64();
+ masm.rshift64(Imm32(power & 63), r);
+ pushI64(r);
+ }
+ } else {
+ bool isConst = peekConst(&c);
+ RegI64 r, rs, reserved;
+ popAndAllocateForDivAndRemI64(&r, &rs, &reserved, IsRemainder(false));
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(true),
+ ZeroOnOverflow(false), isConst, c, QuotientI64);
+ maybeFree(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitRemainderI64() {
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 1)) {
+ RegI64 r = popI64();
+ RegI64 temp = needI64();
+ moveI64(r, temp);
+
+ Label positive;
+ masm.branchTest64(Assembler::NotSigned, temp, temp, RegI32::Invalid(),
+ &positive);
+ masm.add64(Imm64(c - 1), temp);
+ masm.bind(&positive);
+
+ masm.rshift64Arithmetic(Imm32(power & 63), temp);
+ masm.lshift64(Imm32(power & 63), temp);
+ masm.sub64(temp, r);
+ freeI64(temp);
+
+ pushI64(r);
+ } else {
+ bool isConst = peekConst(&c);
+ RegI64 r, rs, reserved;
+ popAndAllocateForDivAndRemI64(&r, &rs, &reserved, IsRemainder(true));
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(false),
+ ZeroOnOverflow(true), isConst, c, RemainderI64);
+ maybeFree(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitRemainderU64() {
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwo(&c, &power, 1)) {
+ RegI64 r = popI64();
+ masm.and64(Imm64(c - 1), r);
+ pushI64(r);
+ } else {
+ bool isConst = peekConst(&c);
+ RegI64 r, rs, reserved;
+ popAndAllocateForDivAndRemI64(&r, &rs, &reserved, IsRemainder(true));
+ quotientOrRemainder(rs, r, reserved, IsUnsigned(true), ZeroOnOverflow(true),
+ isConst, c, RemainderI64);
+ maybeFree(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+void BaseCompiler::emitRotrI64() {
+ int64_t c;
+ if (popConst(&c)) {
+ RegI64 r = popI64();
+ RegI32 temp = needRotate64Temp();
+ masm.rotateRight64(Imm32(c & 63), r, r, temp);
+ maybeFree(temp);
+ pushI64(r);
+ } else {
+ RegI64 rs = popI64RhsForRotate();
+ RegI64 r = popI64();
+ masm.rotateRight64(lowPart(rs), r, r, maybeHighPart(rs));
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitRotlI64() {
+ int64_t c;
+ if (popConst(&c)) {
+ RegI64 r = popI64();
+ RegI32 temp = needRotate64Temp();
+ masm.rotateLeft64(Imm32(c & 63), r, r, temp);
+ maybeFree(temp);
+ pushI64(r);
+ } else {
+ RegI64 rs = popI64RhsForRotate();
+ RegI64 r = popI64();
+ masm.rotateLeft64(lowPart(rs), r, r, maybeHighPart(rs));
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitEqzI32() {
+ if (sniffConditionalControlEqz(ValType::I32)) {
+ return;
+ }
+ emitUnop(EqzI32);
+}
+
+void BaseCompiler::emitEqzI64() {
+ if (sniffConditionalControlEqz(ValType::I64)) {
+ return;
+ }
+ emitUnop(EqzI64);
+}
+
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF32ToI32() {
+ RegF32 rs = popF32();
+ RegI32 rd = needI32();
+ if (!truncateF32ToI32(rs, rd, flags)) {
+ return false;
+ }
+ freeF32(rs);
+ pushI32(rd);
+ return true;
+}
+
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF64ToI32() {
+ RegF64 rs = popF64();
+ RegI32 rd = needI32();
+ if (!truncateF64ToI32(rs, rd, flags)) {
+ return false;
+ }
+ freeF64(rs);
+ pushI32(rd);
+ return true;
+}
+
+#ifndef RABALDR_FLOAT_TO_I64_CALLOUT
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF32ToI64() {
+ RegF32 rs = popF32();
+ RegI64 rd = needI64();
+ RegF64 temp = needTempForFloatingToI64(flags);
+ if (!truncateF32ToI64(rs, rd, flags, temp)) {
+ return false;
+ }
+ maybeFree(temp);
+ freeF32(rs);
+ pushI64(rd);
+ return true;
+}
+
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF64ToI64() {
+ RegF64 rs = popF64();
+ RegI64 rd = needI64();
+ RegF64 temp = needTempForFloatingToI64(flags);
+ if (!truncateF64ToI64(rs, rd, flags, temp)) {
+ return false;
+ }
+ maybeFree(temp);
+ freeF64(rs);
+ pushI64(rd);
+ return true;
+}
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+void BaseCompiler::emitExtendI64_8() {
+ RegI64 r;
+ popI64ForSignExtendI64(&r);
+ masm.move8To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendI64_16() {
+ RegI64 r;
+ popI64ForSignExtendI64(&r);
+ masm.move16To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendI64_32() {
+ RegI64 r;
+ popI64ForSignExtendI64(&r);
+ masm.move32To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendI32ToI64() {
+ RegI64 r;
+ popI32ForSignExtendI64(&r);
+ masm.move32To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendU32ToI64() {
+ RegI32 rs = popI32();
+ RegI64 rd = widenI32(rs);
+ masm.move32To64ZeroExtend(rs, rd);
+ pushI64(rd);
+}
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+void BaseCompiler::emitConvertU64ToF32() {
+ RegI64 rs = popI64();
+ RegF32 rd = needF32();
+ RegI32 temp = needConvertI64ToFloatTemp(ValType::F32, IsUnsigned(true));
+ convertI64ToF32(rs, IsUnsigned(true), rd, temp);
+ maybeFree(temp);
+ freeI64(rs);
+ pushF32(rd);
+}
+
+void BaseCompiler::emitConvertU64ToF64() {
+ RegI64 rs = popI64();
+ RegF64 rd = needF64();
+ RegI32 temp = needConvertI64ToFloatTemp(ValType::F64, IsUnsigned(true));
+ convertI64ToF64(rs, IsUnsigned(true), rd, temp);
+ maybeFree(temp);
+ freeI64(rs);
+ pushF64(rd);
+}
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+////////////////////////////////////////////////////////////
+//
+// Machinery for optimized conditional branches.
+//
+// To disable this optimization it is enough to always return false from
+// sniffConditionalControl{Cmp,Eqz}.
+
+struct BranchState {
+ union {
+ struct {
+ RegI32 lhs;
+ RegI32 rhs;
+ int32_t imm;
+ bool rhsImm;
+ } i32;
+ struct {
+ RegI64 lhs;
+ RegI64 rhs;
+ int64_t imm;
+ bool rhsImm;
+ } i64;
+ struct {
+ RegF32 lhs;
+ RegF32 rhs;
+ } f32;
+ struct {
+ RegF64 lhs;
+ RegF64 rhs;
+ } f64;
+ };
+
+ Label* const label; // The target of the branch, never NULL
+ const StackHeight stackHeight; // The stack base above which to place
+ // stack-spilled block results, if
+ // hasBlockResults().
+ const bool invertBranch; // If true, invert the sense of the branch
+ const ResultType resultType; // The result propagated along the edges
+
+ explicit BranchState(Label* label)
+ : label(label),
+ stackHeight(StackHeight::Invalid()),
+ invertBranch(false),
+ resultType(ResultType::Empty()) {}
+
+ BranchState(Label* label, bool invertBranch)
+ : label(label),
+ stackHeight(StackHeight::Invalid()),
+ invertBranch(invertBranch),
+ resultType(ResultType::Empty()) {}
+
+ BranchState(Label* label, StackHeight stackHeight, bool invertBranch,
+ ResultType resultType)
+ : label(label),
+ stackHeight(stackHeight),
+ invertBranch(invertBranch),
+ resultType(resultType) {}
+
+ bool hasBlockResults() const { return stackHeight.isValid(); }
+};
+
+void BaseCompiler::setLatentCompare(Assembler::Condition compareOp,
+ ValType operandType) {
+ latentOp_ = LatentOp::Compare;
+ latentType_ = operandType;
+ latentIntCmp_ = compareOp;
+}
+
+void BaseCompiler::setLatentCompare(Assembler::DoubleCondition compareOp,
+ ValType operandType) {
+ latentOp_ = LatentOp::Compare;
+ latentType_ = operandType;
+ latentDoubleCmp_ = compareOp;
+}
+
+void BaseCompiler::setLatentEqz(ValType operandType) {
+ latentOp_ = LatentOp::Eqz;
+ latentType_ = operandType;
+}
+
+bool BaseCompiler::hasLatentOp() const { return latentOp_ != LatentOp::None; }
+
+void BaseCompiler::resetLatentOp() { latentOp_ = LatentOp::None; }
+
+// Emit a conditional branch that optionally and optimally cleans up the CPU
+// stack before we branch.
+//
+// Cond is either Assembler::Condition or Assembler::DoubleCondition.
+//
+// Lhs is RegI32, RegI64, RegF32, RegF64, or RegRef.
+//
+// Rhs is either the same as Lhs, or an immediate expression compatible with
+// Lhs "when applicable".
+
+template <typename Cond, typename Lhs, typename Rhs>
+bool BaseCompiler::jumpConditionalWithResults(BranchState* b, Cond cond,
+ Lhs lhs, Rhs rhs) {
+ if (b->hasBlockResults()) {
+ StackHeight resultsBase(0);
+ if (!topBranchParams(b->resultType, &resultsBase)) {
+ return false;
+ }
+ if (b->stackHeight != resultsBase) {
+ Label notTaken;
+ branchTo(b->invertBranch ? cond : Assembler::InvertCondition(cond), lhs,
+ rhs, &notTaken);
+
+ // Shuffle stack args.
+ shuffleStackResultsBeforeBranch(resultsBase, b->stackHeight,
+ b->resultType);
+ masm.jump(b->label);
+ masm.bind(&notTaken);
+ return true;
+ }
+ }
+
+ branchTo(b->invertBranch ? Assembler::InvertCondition(cond) : cond, lhs, rhs,
+ b->label);
+ return true;
+}
+
+#ifdef ENABLE_WASM_GC
+bool BaseCompiler::jumpConditionalWithResults(BranchState* b, RegRef object,
+ RefType sourceType,
+ RefType destType,
+ bool onSuccess) {
+ if (b->hasBlockResults()) {
+ StackHeight resultsBase(0);
+ if (!topBranchParams(b->resultType, &resultsBase)) {
+ return false;
+ }
+ if (b->stackHeight != resultsBase) {
+ Label notTaken;
+ // Temporarily take the result registers so that branchGcHeapType doesn't
+ // use them.
+ needIntegerResultRegisters(b->resultType);
+ branchGcRefType(object, sourceType, destType, &notTaken,
+ /*onSuccess=*/b->invertBranch ? !onSuccess : onSuccess);
+ freeIntegerResultRegisters(b->resultType);
+
+ // Shuffle stack args.
+ shuffleStackResultsBeforeBranch(resultsBase, b->stackHeight,
+ b->resultType);
+ masm.jump(b->label);
+ masm.bind(&notTaken);
+ return true;
+ }
+ }
+
+ branchGcRefType(object, sourceType, destType, b->label,
+ /*onSuccess=*/b->invertBranch ? !onSuccess : onSuccess);
+ return true;
+}
+#endif
+
+// sniffConditionalControl{Cmp,Eqz} may modify the latentWhatever_ state in
+// the BaseCompiler so that a subsequent conditional branch can be compiled
+// optimally. emitBranchSetup() and emitBranchPerform() will consume that
+// state. If the latter methods are not called because deadCode_ is true,
+// then the compiler MUST instead call resetLatentOp() to reset the state.
+
+template <typename Cond>
+bool BaseCompiler::sniffConditionalControlCmp(Cond compareOp,
+ ValType operandType) {
+ MOZ_ASSERT(latentOp_ == LatentOp::None,
+ "Latent comparison state not properly reset");
+
+#ifdef JS_CODEGEN_X86
+ // On x86, latent i64 binary comparisons use too many registers: the
+ // reserved join register and the lhs and rhs operands require six, but we
+ // only have five.
+ if (operandType == ValType::I64) {
+ return false;
+ }
+#endif
+
+ // No optimization for pointer compares yet.
+ if (operandType.isRefRepr()) {
+ return false;
+ }
+
+ OpBytes op{};
+ iter_.peekOp(&op);
+ switch (op.b0) {
+ case uint16_t(Op::BrIf):
+ case uint16_t(Op::If):
+ case uint16_t(Op::SelectNumeric):
+ case uint16_t(Op::SelectTyped):
+ setLatentCompare(compareOp, operandType);
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool BaseCompiler::sniffConditionalControlEqz(ValType operandType) {
+ MOZ_ASSERT(latentOp_ == LatentOp::None,
+ "Latent comparison state not properly reset");
+
+ OpBytes op{};
+ iter_.peekOp(&op);
+ switch (op.b0) {
+ case uint16_t(Op::BrIf):
+ case uint16_t(Op::SelectNumeric):
+ case uint16_t(Op::SelectTyped):
+ case uint16_t(Op::If):
+ setLatentEqz(operandType);
+ return true;
+ default:
+ return false;
+ }
+}
+
+void BaseCompiler::emitBranchSetup(BranchState* b) {
+ // Avoid allocating operands to latentOp_ to result registers.
+ if (b->hasBlockResults()) {
+ needResultRegisters(b->resultType);
+ }
+
+ // Set up fields so that emitBranchPerform() need not switch on latentOp_.
+ switch (latentOp_) {
+ case LatentOp::None: {
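+ // No latent compare or eqz was recorded: synthesize a "value != 0" test on
+ // the popped i32 condition.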
+ latentIntCmp_ = Assembler::NotEqual;
+ latentType_ = ValType::I32;
+ b->i32.lhs = popI32();
+ b->i32.rhsImm = true;
+ b->i32.imm = 0;
+ break;
+ }
+ case LatentOp::Compare: {
+ switch (latentType_.kind()) {
+ case ValType::I32: {
+ if (popConst(&b->i32.imm)) {
+ b->i32.lhs = popI32();
+ b->i32.rhsImm = true;
+ } else {
+ pop2xI32(&b->i32.lhs, &b->i32.rhs);
+ b->i32.rhsImm = false;
+ }
+ break;
+ }
+ case ValType::I64: {
+ pop2xI64(&b->i64.lhs, &b->i64.rhs);
+ b->i64.rhsImm = false;
+ break;
+ }
+ case ValType::F32: {
+ pop2xF32(&b->f32.lhs, &b->f32.rhs);
+ break;
+ }
+ case ValType::F64: {
+ pop2xF64(&b->f64.lhs, &b->f64.rhs);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected type for LatentOp::Compare");
+ }
+ }
+ break;
+ }
+ case LatentOp::Eqz: {
+ switch (latentType_.kind()) {
+ case ValType::I32: {
+ latentIntCmp_ = Assembler::Equal;
+ b->i32.lhs = popI32();
+ b->i32.rhsImm = true;
+ b->i32.imm = 0;
+ break;
+ }
+ case ValType::I64: {
+ latentIntCmp_ = Assembler::Equal;
+ b->i64.lhs = popI64();
+ b->i64.rhsImm = true;
+ b->i64.imm = 0;
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected type for LatentOp::Eqz");
+ }
+ }
+ break;
+ }
+ }
+
+ if (b->hasBlockResults()) {
+ freeResultRegisters(b->resultType);
+ }
+}
+
+bool BaseCompiler::emitBranchPerform(BranchState* b) {
+ switch (latentType_.kind()) {
+ case ValType::I32: {
+ if (b->i32.rhsImm) {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i32.lhs,
+ Imm32(b->i32.imm))) {
+ return false;
+ }
+ } else {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i32.lhs,
+ b->i32.rhs)) {
+ return false;
+ }
+ freeI32(b->i32.rhs);
+ }
+ freeI32(b->i32.lhs);
+ break;
+ }
+ case ValType::I64: {
+ if (b->i64.rhsImm) {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i64.lhs,
+ Imm64(b->i64.imm))) {
+ return false;
+ }
+ } else {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i64.lhs,
+ b->i64.rhs)) {
+ return false;
+ }
+ freeI64(b->i64.rhs);
+ }
+ freeI64(b->i64.lhs);
+ break;
+ }
+ case ValType::F32: {
+ if (!jumpConditionalWithResults(b, latentDoubleCmp_, b->f32.lhs,
+ b->f32.rhs)) {
+ return false;
+ }
+ freeF32(b->f32.lhs);
+ freeF32(b->f32.rhs);
+ break;
+ }
+ case ValType::F64: {
+ if (!jumpConditionalWithResults(b, latentDoubleCmp_, b->f64.lhs,
+ b->f64.rhs)) {
+ return false;
+ }
+ freeF64(b->f64.lhs);
+ freeF64(b->f64.rhs);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected type for LatentOp::Compare");
+ }
+ }
+ resetLatentOp();
+ return true;
+}
+
+// For blocks and loops and ifs:
+//
+// - Sync the value stack before going into the block in order to simplify exit
+// from the block: all exits from the block can assume that there are no
+// live registers except the one carrying the exit value.
+// - The block can accumulate a number of dead values on the stacks, so when
+// branching out of the block or falling out at the end be sure to
+// pop the appropriate stacks back to where they were on entry, while
+// preserving the exit value.
+// - A continue branch in a loop is much like an exit branch, but the branch
+// value must not be preserved.
+// - The exit value is always in a designated join register (type dependent).
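+//
+// As a rough illustration of this protocol (the generated code differs in
+// detail), in
+//
+//   block (result i32)
+//     i32.const 1
+//     local.get 0
+//     br_if 0
+//     drop
+//     i32.const 2
+//   end
+//
+// both the br_if exit and the fallthrough at "end" must leave their i32 in
+// the designated join register, and the branch must pop any dead values
+// accumulated on the stacks since the block was entered.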
+
+bool BaseCompiler::emitBlock() {
+ ResultType params;
+ if (!iter_.readBlock(&params)) {
+ return false;
+ }
+
+ if (!deadCode_) {
+ sync(); // Simplifies branching out from block
+ }
+
+ initControl(controlItem(), params);
+
+ return true;
+}
+
+bool BaseCompiler::endBlock(ResultType type) {
+ Control& block = controlItem();
+
+ if (deadCode_) {
+ // Block does not fall through; reset stack.
+ fr.resetStackHeight(block.stackHeight, type);
+ popValueStackTo(block.stackSize);
+ } else {
+    // If the block label is used, we have a control join, so we need to
+    // shuffle fallthrough values into place. Otherwise there is no control
+    // join and we can leave the value stack alone.
+ MOZ_ASSERT(stk_.length() == block.stackSize + type.length());
+ if (block.label.used()) {
+ popBlockResults(type, block.stackHeight, ContinuationKind::Fallthrough);
+ }
+ block.bceSafeOnExit &= bceSafe_;
+ }
+
+ // Bind after cleanup: branches out will have popped the stack.
+ if (block.label.used()) {
+ masm.bind(&block.label);
+ if (deadCode_) {
+ captureResultRegisters(type);
+ deadCode_ = false;
+ }
+ if (!pushBlockResults(type)) {
+ return false;
+ }
+ }
+
+ bceSafe_ = block.bceSafeOnExit;
+
+ return true;
+}
+
+bool BaseCompiler::emitLoop() {
+ ResultType params;
+ if (!iter_.readLoop(&params)) {
+ return false;
+ }
+
+ if (!deadCode_) {
+ sync(); // Simplifies branching out from block
+ }
+
+ initControl(controlItem(), params);
+ bceSafe_ = 0;
+
+ if (!deadCode_) {
+ // Loop entry is a control join, so shuffle the entry parameters into the
+ // well-known locations.
+ if (!topBlockParams(params)) {
+ return false;
+ }
+ masm.nopAlign(CodeAlignment);
+ masm.bind(&controlItem(0).label);
+ // The interrupt check barfs if there are live registers.
+ sync();
+ if (!addInterruptCheck()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// The bodies of the "then" and "else" arms can be arbitrary sequences
+// of expressions; they push control and increment the nesting and can
+// even be targeted by jumps. A branch to the "if" block branches to
+// the exit of the if, i.e., it's like "break". Consider:
+//
+// (func (result i32)
+// (if (i32.const 1)
+// (begin (br 1) (unreachable))
+// (begin (unreachable)))
+// (i32.const 1))
+//
+// The branch causes neither of the unreachable expressions to be
+// evaluated.
+
+bool BaseCompiler::emitIf() {
+ ResultType params;
+ Nothing unused_cond;
+ if (!iter_.readIf(&params, &unused_cond)) {
+ return false;
+ }
+
+ BranchState b(&controlItem().otherLabel, InvertBranch(true));
+ if (!deadCode_) {
+ needResultRegisters(params);
+ emitBranchSetup(&b);
+ freeResultRegisters(params);
+ sync();
+ } else {
+ resetLatentOp();
+ }
+
+ initControl(controlItem(), params);
+
+ if (!deadCode_) {
+ // Because params can flow immediately to results in the case of an empty
+ // "then" or "else" block, and the result of an if/then is a join in
+ // general, we shuffle params eagerly to the result allocations.
+ if (!topBlockParams(params)) {
+ return false;
+ }
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::endIfThen(ResultType type) {
+ Control& ifThen = controlItem();
+
+ // The parameters to the "if" logically flow to both the "then" and "else"
+ // blocks, but the "else" block is empty. Since we know that the "if"
+ // type-checks, that means that the "else" parameters are the "else" results,
+ // and that the "if"'s result type is the same as its parameter type.
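+  //
+  // For example (a sketch using block parameters):
+  //
+  //   i32.const 7
+  //   local.get 0
+  //   if (param i32) (result i32)
+  //     i32.eqz
+  //   end
+  //
+  // The missing "else" arm passes its i32 parameter through as the result,
+  // which only type-checks because the parameter and result types agree.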
+
+ if (deadCode_) {
+ // "then" arm does not fall through; reset stack.
+ fr.resetStackHeight(ifThen.stackHeight, type);
+ popValueStackTo(ifThen.stackSize);
+ if (!ifThen.deadOnArrival) {
+ captureResultRegisters(type);
+ }
+ } else {
+ MOZ_ASSERT(stk_.length() == ifThen.stackSize + type.length());
+ // Assume we have a control join, so place results in block result
+ // allocations.
+ popBlockResults(type, ifThen.stackHeight, ContinuationKind::Fallthrough);
+ MOZ_ASSERT(!ifThen.deadOnArrival);
+ }
+
+ if (ifThen.otherLabel.used()) {
+ masm.bind(&ifThen.otherLabel);
+ }
+
+ if (ifThen.label.used()) {
+ masm.bind(&ifThen.label);
+ }
+
+ if (!deadCode_) {
+ ifThen.bceSafeOnExit &= bceSafe_;
+ }
+
+ deadCode_ = ifThen.deadOnArrival;
+ if (!deadCode_) {
+ if (!pushBlockResults(type)) {
+ return false;
+ }
+ }
+
+ bceSafe_ = ifThen.bceSafeOnExit & ifThen.bceSafeOnEntry;
+
+ return true;
+}
+
+bool BaseCompiler::emitElse() {
+ ResultType params, results;
+ BaseNothingVector unused_thenValues{};
+
+ if (!iter_.readElse(&params, &results, &unused_thenValues)) {
+ return false;
+ }
+
+ Control& ifThenElse = controlItem(0);
+
+ // See comment in endIfThenElse, below.
+
+ // Exit the "then" branch.
+
+ ifThenElse.deadThenBranch = deadCode_;
+
+ if (deadCode_) {
+ fr.resetStackHeight(ifThenElse.stackHeight, results);
+ popValueStackTo(ifThenElse.stackSize);
+ } else {
+ MOZ_ASSERT(stk_.length() == ifThenElse.stackSize + results.length());
+ popBlockResults(results, ifThenElse.stackHeight, ContinuationKind::Jump);
+ freeResultRegisters(results);
+ MOZ_ASSERT(!ifThenElse.deadOnArrival);
+ }
+
+ if (!deadCode_) {
+ masm.jump(&ifThenElse.label);
+ }
+
+ if (ifThenElse.otherLabel.used()) {
+ masm.bind(&ifThenElse.otherLabel);
+ }
+
+ // Reset to the "else" branch.
+
+ if (!deadCode_) {
+ ifThenElse.bceSafeOnExit &= bceSafe_;
+ }
+
+ deadCode_ = ifThenElse.deadOnArrival;
+ bceSafe_ = ifThenElse.bceSafeOnEntry;
+
+ fr.resetStackHeight(ifThenElse.stackHeight, params);
+
+ if (!deadCode_) {
+ captureResultRegisters(params);
+ if (!pushBlockResults(params)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::endIfThenElse(ResultType type) {
+ Control& ifThenElse = controlItem();
+
+  // The expression type is not a reliable guide to what we'll find
+  // on the stack; we could have (if E (i32.const 1) (unreachable)),
+  // in which case the "else" arm is AnyType but the type of the
+  // full expression is I32. So restore whatever's there, not what
+  // we want to find there. The "then" arm has the same constraint.
+
+ if (deadCode_) {
+ // "then" arm does not fall through; reset stack.
+ fr.resetStackHeight(ifThenElse.stackHeight, type);
+ popValueStackTo(ifThenElse.stackSize);
+ } else {
+ MOZ_ASSERT(stk_.length() == ifThenElse.stackSize + type.length());
+ // Assume we have a control join, so place results in block result
+ // allocations.
+ popBlockResults(type, ifThenElse.stackHeight,
+ ContinuationKind::Fallthrough);
+ ifThenElse.bceSafeOnExit &= bceSafe_;
+ MOZ_ASSERT(!ifThenElse.deadOnArrival);
+ }
+
+ if (ifThenElse.label.used()) {
+ masm.bind(&ifThenElse.label);
+ }
+
+ bool joinLive =
+ !ifThenElse.deadOnArrival &&
+ (!ifThenElse.deadThenBranch || !deadCode_ || ifThenElse.label.bound());
+
+ if (joinLive) {
+ // No values were provided by the "then" path, but capture the values
+ // provided by the "else" path.
+ if (deadCode_) {
+ captureResultRegisters(type);
+ }
+ deadCode_ = false;
+ }
+
+ bceSafe_ = ifThenElse.bceSafeOnExit;
+
+ if (!deadCode_) {
+ if (!pushBlockResults(type)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitEnd() {
+ LabelKind kind;
+ ResultType type;
+ BaseNothingVector unused_values{};
+ if (!iter_.readEnd(&kind, &type, &unused_values, &unused_values)) {
+ return false;
+ }
+
+  // Every label case is responsible for popping the control item at the
+  // appropriate time for that label case.
+ switch (kind) {
+ case LabelKind::Body:
+ if (!endBlock(type)) {
+ return false;
+ }
+ doReturn(ContinuationKind::Fallthrough);
+ // This is emitted here after `doReturn` to avoid being executed in the
+ // normal return path of a function, and instead only when a `delegate`
+ // jumps to it.
+ if (!emitBodyDelegateThrowPad()) {
+ return false;
+ }
+ iter_.popEnd();
+ MOZ_ASSERT(iter_.controlStackEmpty());
+ return iter_.endFunction(iter_.end());
+ case LabelKind::Block:
+ if (!endBlock(type)) {
+ return false;
+ }
+ iter_.popEnd();
+ break;
+ case LabelKind::Loop:
+ // The end of a loop isn't a branch target, so we can just leave its
+ // results on the expression stack to be consumed by the outer block.
+ iter_.popEnd();
+ break;
+ case LabelKind::Then:
+ if (!endIfThen(type)) {
+ return false;
+ }
+ iter_.popEnd();
+ break;
+ case LabelKind::Else:
+ if (!endIfThenElse(type)) {
+ return false;
+ }
+ iter_.popEnd();
+ break;
+ case LabelKind::Try:
+ case LabelKind::Catch:
+ case LabelKind::CatchAll:
+ if (!endTryCatch(type)) {
+ return false;
+ }
+ iter_.popEnd();
+ break;
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitBr() {
+ uint32_t relativeDepth;
+ ResultType type;
+ BaseNothingVector unused_values{};
+ if (!iter_.readBr(&relativeDepth, &type, &unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ Control& target = controlItem(relativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ // Save any values in the designated join registers, as if the target block
+ // returned normally.
+
+ popBlockResults(type, target.stackHeight, ContinuationKind::Jump);
+ masm.jump(&target.label);
+
+ // The registers holding the join values are free for the remainder of this
+ // block.
+
+ freeResultRegisters(type);
+
+ deadCode_ = true;
+
+ return true;
+}
+
+bool BaseCompiler::emitBrIf() {
+ uint32_t relativeDepth;
+ ResultType type;
+ BaseNothingVector unused_values{};
+ Nothing unused_condition;
+ if (!iter_.readBrIf(&relativeDepth, &type, &unused_values,
+ &unused_condition)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ resetLatentOp();
+ return true;
+ }
+
+ Control& target = controlItem(relativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ BranchState b(&target.label, target.stackHeight, InvertBranch(false), type);
+ emitBranchSetup(&b);
+ return emitBranchPerform(&b);
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+bool BaseCompiler::emitBrOnNull() {
+ MOZ_ASSERT(!hasLatentOp());
+
+ uint32_t relativeDepth;
+ ResultType type;
+ BaseNothingVector unused_values{};
+ Nothing unused_condition;
+ if (!iter_.readBrOnNull(&relativeDepth, &type, &unused_values,
+ &unused_condition)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ Control& target = controlItem(relativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ BranchState b(&target.label, target.stackHeight, InvertBranch(false), type);
+ if (b.hasBlockResults()) {
+ needResultRegisters(b.resultType);
+ }
+ RegRef rp = popRef();
+ if (b.hasBlockResults()) {
+ freeResultRegisters(b.resultType);
+ }
+ if (!jumpConditionalWithResults(&b, Assembler::Equal, rp,
+ ImmWord(NULLREF_VALUE))) {
+ return false;
+ }
+ pushRef(rp);
+
+ return true;
+}
+
+bool BaseCompiler::emitBrOnNonNull() {
+ MOZ_ASSERT(!hasLatentOp());
+
+ uint32_t relativeDepth;
+ ResultType type;
+ BaseNothingVector unused_values{};
+ Nothing unused_condition;
+ if (!iter_.readBrOnNonNull(&relativeDepth, &type, &unused_values,
+ &unused_condition)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ Control& target = controlItem(relativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ BranchState b(&target.label, target.stackHeight, InvertBranch(false), type);
+ MOZ_ASSERT(b.hasBlockResults(), "br_on_non_null has block results");
+
+ // Don't allocate the result register used in the branch
+ needIntegerResultRegisters(b.resultType);
+
+ // Get the ref from the top of the stack
+ RegRef condition = popRef();
+
+  // Create a copy of the ref for passing to the on_non_null label;
+  // the original ref is used in the condition.
+ RegRef rp = needRef();
+ moveRef(condition, rp);
+ pushRef(rp);
+
+ freeIntegerResultRegisters(b.resultType);
+
+ if (!jumpConditionalWithResults(&b, Assembler::NotEqual, condition,
+ ImmWord(NULLREF_VALUE))) {
+ return false;
+ }
+
+ freeRef(condition);
+
+ // Dropping null reference.
+ dropValue();
+
+ return true;
+}
+#endif
+
+bool BaseCompiler::emitBrTable() {
+ Uint32Vector depths;
+ uint32_t defaultDepth;
+ ResultType branchParams;
+ BaseNothingVector unused_values{};
+ Nothing unused_index;
+ // N.B., `branchParams' gets set to the type of the default branch target. In
+ // the presence of subtyping, it could be that the different branch targets
+ // have different types. Here we rely on the assumption that the value
+ // representations (e.g. Stk value types) of all branch target types are the
+ // same, in the baseline compiler. Notably, this means that all Ref types
+ // should be represented the same.
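+  //
+  // For instance (a hypothetical case), one target's label type might be
+  // (ref null $f) while the default's is its supertype funcref; both are
+  // held as a single ref slot on the value stack, so shuffling values by
+  // the default target's type is safe.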
+ if (!iter_.readBrTable(&depths, &defaultDepth, &branchParams, &unused_values,
+ &unused_index)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Don't use param registers for rc
+ needIntegerResultRegisters(branchParams);
+
+ // Table switch value always on top.
+ RegI32 rc = popI32();
+
+ freeIntegerResultRegisters(branchParams);
+
+ StackHeight resultsBase(0);
+ if (!topBranchParams(branchParams, &resultsBase)) {
+ return false;
+ }
+
+ Label dispatchCode;
+ masm.branch32(Assembler::Below, rc, Imm32(depths.length()), &dispatchCode);
+
+ // This is the out-of-range stub. rc is dead here but we don't need it.
+
+ shuffleStackResultsBeforeBranch(
+ resultsBase, controlItem(defaultDepth).stackHeight, branchParams);
+ controlItem(defaultDepth).bceSafeOnExit &= bceSafe_;
+ masm.jump(&controlItem(defaultDepth).label);
+
+ // Emit stubs. rc is dead in all of these but we don't need it.
+ //
+ // The labels in the vector are in the TempAllocator and will
+ // be freed by and by.
+ //
+ // TODO / OPTIMIZE (Bug 1316804): Branch directly to the case code if we
+ // can, don't emit an intermediate stub.
+
+ LabelVector stubs;
+ if (!stubs.reserve(depths.length())) {
+ return false;
+ }
+
+ for (uint32_t depth : depths) {
+ stubs.infallibleEmplaceBack(NonAssertingLabel());
+ masm.bind(&stubs.back());
+ shuffleStackResultsBeforeBranch(resultsBase, controlItem(depth).stackHeight,
+ branchParams);
+ controlItem(depth).bceSafeOnExit &= bceSafe_;
+ masm.jump(&controlItem(depth).label);
+ }
+
+ // Emit table.
+
+ Label theTable;
+ jumpTable(stubs, &theTable);
+
+ // Emit indirect jump. rc is live here.
+
+ tableSwitch(&theTable, rc, &dispatchCode);
+
+ deadCode_ = true;
+
+ // Clean up.
+
+ freeI32(rc);
+ popValueStackBy(branchParams.length());
+
+ return true;
+}
+
+bool BaseCompiler::emitTry() {
+ ResultType params;
+ if (!iter_.readTry(&params)) {
+ return false;
+ }
+
+ if (!deadCode_) {
+ // Simplifies jumping out, but it is also necessary so that control
+ // can re-enter the catch handler without restoring registers.
+ sync();
+ }
+
+ initControl(controlItem(), params);
+
+ if (!deadCode_) {
+ // Be conservative for BCE due to complex control flow in try blocks.
+ controlItem().bceSafeOnExit = 0;
+ if (!startTryNote(&controlItem().tryNoteIndex)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void BaseCompiler::emitCatchSetup(LabelKind kind, Control& tryCatch,
+ const ResultType& resultType) {
+ // Catch ends the try or last catch, so we finish this like endIfThen.
+ if (deadCode_) {
+ fr.resetStackHeight(tryCatch.stackHeight, resultType);
+ popValueStackTo(tryCatch.stackSize);
+ } else {
+ // If the previous block is a catch, we need to handle the extra exception
+ // reference on the stack (for rethrow) and thus the stack size is 1 more.
+ MOZ_ASSERT(stk_.length() == tryCatch.stackSize + resultType.length() +
+ (kind == LabelKind::Try ? 0 : 1));
+ // Try jumps to the end of the try-catch block unless a throw is done.
+ if (kind == LabelKind::Try) {
+ popBlockResults(resultType, tryCatch.stackHeight, ContinuationKind::Jump);
+ } else {
+ popCatchResults(resultType, tryCatch.stackHeight);
+ }
+ MOZ_ASSERT(stk_.length() == tryCatch.stackSize);
+ freeResultRegisters(resultType);
+ MOZ_ASSERT(!tryCatch.deadOnArrival);
+ }
+
+ // Reset to this "catch" branch.
+ deadCode_ = tryCatch.deadOnArrival;
+
+ // We use the empty result type here because catch does *not* take the
+ // try-catch block parameters.
+ fr.resetStackHeight(tryCatch.stackHeight, ResultType::Empty());
+
+ if (deadCode_) {
+ return;
+ }
+
+ bceSafe_ = 0;
+
+ // The end of the previous try/catch jumps to the join point.
+ masm.jump(&tryCatch.label);
+
+ // Note end of try block for finding the catch block target. This needs
+ // to happen after the stack is reset to the correct height.
+ if (kind == LabelKind::Try) {
+ finishTryNote(controlItem().tryNoteIndex);
+ }
+}
+
+bool BaseCompiler::emitCatch() {
+ LabelKind kind;
+ uint32_t tagIndex;
+ ResultType paramType, resultType;
+ BaseNothingVector unused_tryValues{};
+
+ if (!iter_.readCatch(&kind, &tagIndex, &paramType, &resultType,
+ &unused_tryValues)) {
+ return false;
+ }
+
+ Control& tryCatch = controlItem();
+
+ emitCatchSetup(kind, tryCatch, resultType);
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Construct info used for the exception landing pad.
+ CatchInfo catchInfo(tagIndex);
+ if (!tryCatch.catchInfos.emplaceBack(catchInfo)) {
+ return false;
+ }
+
+ masm.bind(&tryCatch.catchInfos.back().label);
+
+ // Extract the arguments in the exception package and push them.
+ const SharedTagType& tagType = moduleEnv_.tags[tagIndex].type;
+ const ValTypeVector& params = tagType->argTypes_;
+ const TagOffsetVector& offsets = tagType->argOffsets_;
+
+ // The landing pad uses the block return protocol to communicate the
+ // exception object pointer to the catch block.
+ ResultType exnResult = ResultType::Single(RefType::extern_());
+ captureResultRegisters(exnResult);
+ if (!pushBlockResults(exnResult)) {
+ return false;
+ }
+ RegRef exn = popRef();
+ RegPtr data = needPtr();
+
+ masm.loadPtr(Address(exn, (int32_t)WasmExceptionObject::offsetOfData()),
+ data);
+
+  // This method can increase stk_.length() by an unbounded amount, so we need
+  // to perform an allocation here to accommodate the variable number of
+  // values. There is enough headroom for the fixed number of values. The
+  // general case is handled in emitBody.
+ if (!stk_.reserve(stk_.length() + params.length() + 1)) {
+ return false;
+ }
+
+ // This reference is pushed onto the stack because a potential rethrow
+ // may need to access it. It is always popped at the end of the block.
+ pushRef(exn);
+
+ for (uint32_t i = 0; i < params.length(); i++) {
+ int32_t offset = offsets[i];
+ switch (params[i].kind()) {
+ case ValType::I32: {
+ RegI32 reg = needI32();
+ masm.load32(Address(data, offset), reg);
+ pushI32(reg);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 reg = needI64();
+ masm.load64(Address(data, offset), reg);
+ pushI64(reg);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 reg = needF32();
+ masm.loadFloat32(Address(data, offset), reg);
+ pushF32(reg);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 reg = needF64();
+ masm.loadDouble(Address(data, offset), reg);
+ pushF64(reg);
+ break;
+ }
+ case ValType::V128: {
+#ifdef ENABLE_WASM_SIMD
+ RegV128 reg = needV128();
+ masm.loadUnalignedSimd128(Address(data, offset), reg);
+ pushV128(reg);
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ case ValType::Ref: {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, this may need
+ // to handle other kinds of values.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ RegRef reg = needRef();
+ masm.loadPtr(Address(data, offset), reg);
+ pushRef(reg);
+ break;
+ }
+ }
+ }
+ freePtr(data);
+
+ return true;
+}
+
+bool BaseCompiler::emitCatchAll() {
+ LabelKind kind;
+ ResultType paramType, resultType;
+ BaseNothingVector unused_tryValues{};
+
+ if (!iter_.readCatchAll(&kind, &paramType, &resultType, &unused_tryValues)) {
+ return false;
+ }
+
+ Control& tryCatch = controlItem();
+
+ emitCatchSetup(kind, tryCatch, resultType);
+
+ if (deadCode_) {
+ return true;
+ }
+
+ CatchInfo catchInfo(CatchAllIndex);
+ if (!tryCatch.catchInfos.emplaceBack(catchInfo)) {
+ return false;
+ }
+
+ masm.bind(&tryCatch.catchInfos.back().label);
+
+ // The landing pad uses the block return protocol to communicate the
+ // exception object pointer to the catch block.
+ ResultType exnResult = ResultType::Single(RefType::extern_());
+ captureResultRegisters(exnResult);
+ // This reference is pushed onto the stack because a potential rethrow
+ // may need to access it. It is always popped at the end of the block.
+ return pushBlockResults(exnResult);
+}
+
+bool BaseCompiler::emitBodyDelegateThrowPad() {
+ Control& block = controlItem();
+
+ // Only emit a landing pad if a `delegate` has generated a jump to here.
+ if (block.otherLabel.used()) {
+ StackHeight savedHeight = fr.stackHeight();
+ fr.setStackHeight(block.stackHeight);
+ masm.bind(&block.otherLabel);
+
+ // A try-delegate jumps immediately to its delegated try block, so we are
+    // responsible for unpacking the exception and rethrowing it.
+ RegRef exn;
+ RegRef tag;
+ consumePendingException(&exn, &tag);
+ freeRef(tag);
+ if (!throwFrom(exn)) {
+ return false;
+ }
+ fr.setStackHeight(savedHeight);
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitDelegate() {
+ uint32_t relativeDepth;
+ ResultType resultType;
+ BaseNothingVector unused_tryValues{};
+
+ if (!iter_.readDelegate(&relativeDepth, &resultType, &unused_tryValues)) {
+ return false;
+ }
+
+ Control& tryDelegate = controlItem();
+
+ // End the try branch like a plain catch block without exception ref handling.
+ if (deadCode_) {
+ fr.resetStackHeight(tryDelegate.stackHeight, resultType);
+ popValueStackTo(tryDelegate.stackSize);
+ } else {
+ MOZ_ASSERT(stk_.length() == tryDelegate.stackSize + resultType.length());
+ popBlockResults(resultType, tryDelegate.stackHeight,
+ ContinuationKind::Jump);
+ freeResultRegisters(resultType);
+ masm.jump(&tryDelegate.label);
+ MOZ_ASSERT(!tryDelegate.deadOnArrival);
+ }
+
+ deadCode_ = tryDelegate.deadOnArrival;
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Create an exception landing pad that immediately branches to the landing
+ // pad of the delegated try block.
+ masm.bind(&tryDelegate.otherLabel);
+
+ StackHeight savedHeight = fr.stackHeight();
+ fr.setStackHeight(tryDelegate.stackHeight);
+
+ // Mark the end of the try body. This may insert a nop.
+ finishTryNote(controlItem().tryNoteIndex);
+
+ // The landing pad begins at this point
+ TryNoteVector& tryNotes = masm.tryNotes();
+ TryNote& tryNote = tryNotes[controlItem().tryNoteIndex];
+ tryNote.setLandingPad(masm.currentOffset(), masm.framePushed());
+
+ // Store the Instance that was left in InstanceReg by the exception
+ // handling mechanism, that is this frame's Instance but with the exception
+ // filled in Instance::pendingException.
+ fr.storeInstancePtr(InstanceReg);
+
+ // If the target block is a non-try block, skip over it and find the next
+ // try block or the very last block (to re-throw out of the function).
+ Control& lastBlock = controlOutermost();
+ while (controlKind(relativeDepth) != LabelKind::Try &&
+ &controlItem(relativeDepth) != &lastBlock) {
+ relativeDepth++;
+ }
+ Control& target = controlItem(relativeDepth);
+
+ popBlockResults(ResultType::Empty(), target.stackHeight,
+ ContinuationKind::Jump);
+ masm.jump(&target.otherLabel);
+
+ fr.setStackHeight(savedHeight);
+
+ // Where the try branch jumps to, if it's not dead.
+ if (tryDelegate.label.used()) {
+ masm.bind(&tryDelegate.label);
+ }
+
+ captureResultRegisters(resultType);
+ bceSafe_ = tryDelegate.bceSafeOnExit;
+
+ return pushBlockResults(resultType);
+}
+
+bool BaseCompiler::endTryCatch(ResultType type) {
+ Control& tryCatch = controlItem();
+ LabelKind tryKind = controlKind(0);
+
+ if (deadCode_) {
+ fr.resetStackHeight(tryCatch.stackHeight, type);
+ popValueStackTo(tryCatch.stackSize);
+ } else {
+ // If the previous block is a catch, we must handle the extra exception
+ // reference on the stack (for rethrow) and thus the stack size is 1 more.
+ MOZ_ASSERT(stk_.length() == tryCatch.stackSize + type.length() +
+ (tryKind == LabelKind::Try ? 0 : 1));
+ // Assume we have a control join, so place results in block result
+ // allocations and also handle the implicit exception reference if needed.
+ if (tryKind == LabelKind::Try) {
+ popBlockResults(type, tryCatch.stackHeight, ContinuationKind::Jump);
+ } else {
+ popCatchResults(type, tryCatch.stackHeight);
+ }
+ MOZ_ASSERT(stk_.length() == tryCatch.stackSize);
+ // Since we will emit a landing pad after this and jump over it to get to
+ // the control join, we free these here and re-capture at the join.
+ freeResultRegisters(type);
+ masm.jump(&tryCatch.label);
+ MOZ_ASSERT(!tryCatch.bceSafeOnExit);
+ MOZ_ASSERT(!tryCatch.deadOnArrival);
+ }
+
+ deadCode_ = tryCatch.deadOnArrival;
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Create landing pad for all catch handlers in this block.
+ // When used for a catchless try block, this will generate a landing pad
+ // with no handlers and only the fall-back rethrow.
+ masm.bind(&tryCatch.otherLabel);
+
+ // The stack height also needs to be set not for a block result, but for the
+ // entry to the exception handlers. This is reset again below for the join.
+ StackHeight prePadHeight = fr.stackHeight();
+ fr.setStackHeight(tryCatch.stackHeight);
+
+ // If we are in a catchless try block, then there were no catch blocks to
+ // mark the end of the try note, so we need to end it here.
+ if (tryKind == LabelKind::Try) {
+ // Mark the end of the try body. This may insert a nop.
+ finishTryNote(controlItem().tryNoteIndex);
+ }
+
+ // The landing pad begins at this point
+ TryNoteVector& tryNotes = masm.tryNotes();
+ TryNote& tryNote = tryNotes[controlItem().tryNoteIndex];
+ tryNote.setLandingPad(masm.currentOffset(), masm.framePushed());
+
+ // Store the Instance that was left in InstanceReg by the exception
+ // handling mechanism, that is this frame's Instance but with the exception
+ // filled in Instance::pendingException.
+ fr.storeInstancePtr(InstanceReg);
+
+  // Load the exception pointer from the Instance and make sure that it is
+  // saved before the following call clears it.
+ RegRef exn;
+ RegRef tag;
+ consumePendingException(&exn, &tag);
+
+ // Get a register to hold the tags for each catch
+ RegRef catchTag = needRef();
+
+ // Ensure that the exception is assigned to the block return register
+ // before branching to a handler.
+ pushRef(exn);
+ ResultType exnResult = ResultType::Single(RefType::extern_());
+ popBlockResults(exnResult, tryCatch.stackHeight, ContinuationKind::Jump);
+ freeResultRegisters(exnResult);
+
+ bool hasCatchAll = false;
+ for (CatchInfo& info : tryCatch.catchInfos) {
+ if (info.tagIndex != CatchAllIndex) {
+ MOZ_ASSERT(!hasCatchAll);
+ loadTag(RegPtr(InstanceReg), info.tagIndex, catchTag);
+ masm.branchPtr(Assembler::Equal, tag, catchTag, &info.label);
+ } else {
+ masm.jump(&info.label);
+ hasCatchAll = true;
+ }
+ }
+ freeRef(catchTag);
+ freeRef(tag);
+
+ // If none of the tag checks succeed and there is no catch_all,
+ // then we rethrow the exception.
+ if (!hasCatchAll) {
+ captureResultRegisters(exnResult);
+ if (!pushBlockResults(exnResult) || !throwFrom(popRef())) {
+ return false;
+ }
+ }
+
+ // Reset stack height for join.
+ fr.setStackHeight(prePadHeight);
+
+ // Create join point.
+ if (tryCatch.label.used()) {
+ masm.bind(&tryCatch.label);
+ }
+
+ captureResultRegisters(type);
+ deadCode_ = tryCatch.deadOnArrival;
+ bceSafe_ = tryCatch.bceSafeOnExit;
+
+ return pushBlockResults(type);
+}
+
+bool BaseCompiler::emitThrow() {
+ uint32_t tagIndex;
+ BaseNothingVector unused_argValues{};
+
+ if (!iter_.readThrow(&tagIndex, &unused_argValues)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const TagDesc& tagDesc = moduleEnv_.tags[tagIndex];
+ const ResultType& params = tagDesc.type->resultType();
+ const TagOffsetVector& offsets = tagDesc.type->argOffsets_;
+
+ // Load the tag object
+#ifdef RABALDR_PIN_INSTANCE
+ RegPtr instance(InstanceReg);
+#else
+ RegPtr instance = needPtr();
+ fr.loadInstancePtr(instance);
+#endif
+ RegRef tag = needRef();
+ loadTag(instance, tagIndex, tag);
+#ifndef RABALDR_PIN_INSTANCE
+ freePtr(instance);
+#endif
+
+ // Create the new exception object that we will throw.
+ pushRef(tag);
+ if (!emitInstanceCall(SASigExceptionNew)) {
+ return false;
+ }
+
+ // Get registers for exn and data, excluding the prebarrier register
+ needPtr(RegPtr(PreBarrierReg));
+ RegRef exn = popRef();
+ RegPtr data = needPtr();
+ freePtr(RegPtr(PreBarrierReg));
+
+ masm.loadPtr(Address(exn, WasmExceptionObject::offsetOfData()), data);
+
+ for (int32_t i = params.length() - 1; i >= 0; i--) {
+ uint32_t offset = offsets[i];
+ switch (params[i].kind()) {
+ case ValType::I32: {
+ RegI32 reg = popI32();
+ masm.store32(reg, Address(data, offset));
+ freeI32(reg);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 reg = popI64();
+ masm.store64(reg, Address(data, offset));
+ freeI64(reg);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 reg = popF32();
+ masm.storeFloat32(reg, Address(data, offset));
+ freeF32(reg);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 reg = popF64();
+ masm.storeDouble(reg, Address(data, offset));
+ freeF64(reg);
+ break;
+ }
+ case ValType::V128: {
+#ifdef ENABLE_WASM_SIMD
+ RegV128 reg = popV128();
+ masm.storeUnalignedSimd128(reg, Address(data, offset));
+ freeV128(reg);
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ case ValType::Ref: {
+ RegPtr valueAddr(PreBarrierReg);
+ needPtr(valueAddr);
+ masm.computeEffectiveAddress(Address(data, offset), valueAddr);
+ RegRef rv = popRef();
+ pushPtr(data);
+ // emitBarrieredStore preserves exn, rv
+ if (!emitBarrieredStore(Some(exn), valueAddr, rv,
+ PreBarrierKind::Normal,
+ PostBarrierKind::Imprecise)) {
+ return false;
+ }
+ popPtr(data);
+ freeRef(rv);
+ break;
+ }
+ }
+ }
+ freePtr(data);
+
+ deadCode_ = true;
+
+ return throwFrom(exn);
+}
+
+bool BaseCompiler::emitRethrow() {
+ uint32_t relativeDepth;
+ if (!iter_.readRethrow(&relativeDepth)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ Control& tryCatch = controlItem(relativeDepth);
+ RegRef exn = needRef();
+ peekRefAt(tryCatch.stackSize, exn);
+
+ deadCode_ = true;
+
+ return throwFrom(exn);
+}
+
+bool BaseCompiler::emitDrop() {
+ if (!iter_.readDrop()) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ dropValue();
+ return true;
+}
+
+void BaseCompiler::doReturn(ContinuationKind kind) {
+ if (deadCode_) {
+ return;
+ }
+
+ StackHeight height = controlOutermost().stackHeight;
+ ResultType type = ResultType::Vector(funcType().results());
+ popBlockResults(type, height, kind);
+ masm.jump(&returnLabel_);
+ freeResultRegisters(type);
+}
+
+bool BaseCompiler::emitReturn() {
+ BaseNothingVector unused_values{};
+ if (!iter_.readReturn(&unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ doReturn(ContinuationKind::Jump);
+ deadCode_ = true;
+
+ return true;
+}
+
+bool BaseCompiler::emitCallArgs(const ValTypeVector& argTypes,
+ const StackResultsLoc& results,
+ FunctionCall* baselineCall,
+ CalleeOnStack calleeOnStack) {
+ MOZ_ASSERT(!deadCode_);
+
+ ArgTypeVector args(argTypes, results.stackResults());
+ uint32_t naturalArgCount = argTypes.length();
+ uint32_t abiArgCount = args.lengthWithStackResults();
+ startCallArgs(StackArgAreaSizeUnaligned(args), baselineCall);
+
+ // Args are deeper on the stack than the stack result area, if any.
+ size_t argsDepth = results.count();
+ // They're deeper than the callee too, for callIndirect.
+ if (calleeOnStack == CalleeOnStack::True) {
+ argsDepth++;
+ }
+
+ for (size_t i = 0; i < abiArgCount; ++i) {
+ if (args.isNaturalArg(i)) {
+ size_t naturalIndex = args.naturalIndex(i);
+ size_t stackIndex = naturalArgCount - 1 - naturalIndex + argsDepth;
+ passArg(argTypes[naturalIndex], peek(stackIndex), baselineCall);
+ } else {
+ // The synthetic stack result area pointer.
+ ABIArg argLoc = baselineCall->abi.next(MIRType::Pointer);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchPtr scratch(*this);
+ fr.computeOutgoingStackResultAreaPtr(results, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ } else {
+ fr.computeOutgoingStackResultAreaPtr(results, RegPtr(argLoc.gpr()));
+ }
+ }
+ }
+
+#ifndef RABALDR_PIN_INSTANCE
+ fr.loadInstancePtr(InstanceReg);
+#endif
+ return true;
+}
+
+void BaseCompiler::pushReturnValueOfCall(const FunctionCall& call,
+ MIRType type) {
+ switch (type) {
+ case MIRType::Int32: {
+ RegI32 rv = captureReturnedI32();
+ pushI32(rv);
+ break;
+ }
+ case MIRType::Int64: {
+ RegI64 rv = captureReturnedI64();
+ pushI64(rv);
+ break;
+ }
+ case MIRType::Float32: {
+ RegF32 rv = captureReturnedF32(call);
+ pushF32(rv);
+ break;
+ }
+ case MIRType::Double: {
+ RegF64 rv = captureReturnedF64(call);
+ pushF64(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128: {
+ RegV128 rv = captureReturnedV128(call);
+ pushV128(rv);
+ break;
+ }
+#endif
+ case MIRType::RefOrNull: {
+ RegRef rv = captureReturnedRef();
+ pushRef(rv);
+ break;
+ }
+ default:
+ // In particular, passing |type| as MIRType::Void or MIRType::Pointer to
+ // this function is an error.
+ MOZ_CRASH("Function return type");
+ }
+}
+
+bool BaseCompiler::pushStackResultsForCall(const ResultType& type, RegPtr temp,
+ StackResultsLoc* loc) {
+ if (!ABIResultIter::HasStackResults(type)) {
+ return true;
+ }
+
+  // This method can increase stk_.length() by an unbounded amount, so we need
+  // to perform an allocation here to accommodate the variable number of
+  // values. There is enough headroom for any fixed number of values. The
+  // general case is handled in emitBody.
+ if (!stk_.reserve(stk_.length() + type.length())) {
+ return false;
+ }
+
+ // Measure stack results.
+ ABIResultIter i(type);
+ size_t count = 0;
+ for (; !i.done(); i.next()) {
+ if (i.cur().onStack()) {
+ count++;
+ }
+ }
+ uint32_t bytes = i.stackBytesConsumedSoFar();
+
+ // Reserve space for the stack results.
+ StackHeight resultsBase = fr.stackHeight();
+ uint32_t height = fr.prepareStackResultArea(resultsBase, bytes);
+
+ // Push Stk values onto the value stack, and zero out Ref values.
+ for (i.switchToPrev(); !i.done(); i.prev()) {
+ const ABIResult& result = i.cur();
+ if (result.onStack()) {
+ Stk v = captureStackResult(result, resultsBase, bytes);
+ push(v);
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk++;
+ fr.storeImmediatePtrToStack(intptr_t(0), v.offs(), temp);
+ }
+ }
+ }
+
+ *loc = StackResultsLoc(bytes, count, height);
+
+ return true;
+}
+
+// After a call, some results may be written to the stack result locations that
+// are pushed on the machine stack after any stack args. If there are stack
+// args and stack results, these results need to be shuffled down, as the args
+// are "consumed" by the call.
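+//
+// Schematically (a sketch; stack heights grow away from the frame pointer):
+//
+//   before the call:  [FP ...] [arg values: stackArgBytes] [stack results]
+//   after the call:   [FP ...] [stack results]
+//
+// That is, the result area slides stackArgBytes toward the FP, into the space
+// vacated by the consumed argument values.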
+void BaseCompiler::popStackResultsAfterCall(const StackResultsLoc& results,
+ uint32_t stackArgBytes) {
+ if (results.bytes() != 0) {
+ popValueStackBy(results.count());
+ if (stackArgBytes != 0) {
+ uint32_t srcHeight = results.height();
+ MOZ_ASSERT(srcHeight >= stackArgBytes + results.bytes());
+ uint32_t destHeight = srcHeight - stackArgBytes;
+
+ fr.shuffleStackResultsTowardFP(srcHeight, destHeight, results.bytes(),
+ ABINonArgReturnVolatileReg);
+ }
+ }
+}
+
+// For now, always sync() at the beginning of the call to easily save live
+// values.
+//
+// TODO / OPTIMIZE (Bug 1316806): We may be able to avoid a full sync(), since
+// all we want is to save live registers that won't be saved by the callee or
+// that we need for outgoing args - we don't need to sync the locals. We can
+// just push the necessary registers, it'll be like a lightweight sync.
+//
+// Even some of the pushing may be unnecessary if the registers will be consumed
+// by the call, because then what we want is parallel assignment to the argument
+// registers or onto the stack for outgoing arguments. A sync() is just
+// simpler.
+
+bool BaseCompiler::emitCall() {
+ uint32_t funcIndex;
+ BaseNothingVector args_{};
+ if (!iter_.readCall(&funcIndex, &args_)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ sync();
+
+ const FuncType& funcType = *moduleEnv_.funcs[funcIndex].type;
+ bool import = moduleEnv_.funcIsImport(funcIndex);
+
+ uint32_t numArgs = funcType.args().length();
+ size_t stackArgBytes = stackConsumed(numArgs);
+
+ ResultType resultType(ResultType::Vector(funcType.results()));
+ StackResultsLoc results;
+ if (!pushStackResultsForCall(resultType, RegPtr(ABINonArgReg0), &results)) {
+ return false;
+ }
+
+ FunctionCall baselineCall{};
+ beginCall(baselineCall, UseABI::Wasm,
+ import ? RestoreRegisterStateAndRealm::True
+ : RestoreRegisterStateAndRealm::False);
+
+ if (!emitCallArgs(funcType.args(), results, &baselineCall,
+ CalleeOnStack::False)) {
+ return false;
+ }
+
+ CodeOffset raOffset;
+ if (import) {
+ raOffset = callImport(moduleEnv_.offsetOfFuncImportInstanceData(funcIndex),
+ baselineCall);
+ } else {
+ raOffset = callDefinition(funcIndex, baselineCall);
+ }
+
+ if (!createStackMap("emitCall", raOffset)) {
+ return false;
+ }
+
+ popStackResultsAfterCall(results, stackArgBytes);
+
+ endCall(baselineCall, stackArgBytes);
+
+ popValueStackBy(numArgs);
+
+ captureCallResultRegisters(resultType);
+ return pushCallResults(baselineCall, resultType, results);
+}
+
+bool BaseCompiler::emitCallIndirect() {
+ uint32_t funcTypeIndex;
+ uint32_t tableIndex;
+ Nothing callee_;
+ BaseNothingVector args_{};
+ if (!iter_.readCallIndirect(&funcTypeIndex, &tableIndex, &callee_, &args_)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ sync();
+
+ const FuncType& funcType = (*moduleEnv_.types)[funcTypeIndex].funcType();
+
+ // Stack: ... arg1 .. argn callee
+
+ uint32_t numArgs = funcType.args().length() + 1;
+ size_t stackArgBytes = stackConsumed(numArgs);
+
+ ResultType resultType(ResultType::Vector(funcType.results()));
+ StackResultsLoc results;
+ if (!pushStackResultsForCall(resultType, RegPtr(ABINonArgReg0), &results)) {
+ return false;
+ }
+
+ FunctionCall baselineCall{};
+  // State and realm are restored as needed by callIndirect (really by
+ // MacroAssembler::wasmCallIndirect).
+ beginCall(baselineCall, UseABI::Wasm, RestoreRegisterStateAndRealm::False);
+
+ if (!emitCallArgs(funcType.args(), results, &baselineCall,
+ CalleeOnStack::True)) {
+ return false;
+ }
+
+ const Stk& callee = peek(results.count());
+ CodeOffset fastCallOffset;
+ CodeOffset slowCallOffset;
+ if (!callIndirect(funcTypeIndex, tableIndex, callee, baselineCall,
+ &fastCallOffset, &slowCallOffset)) {
+ return false;
+ }
+ if (!createStackMap("emitCallIndirect", fastCallOffset)) {
+ return false;
+ }
+ if (!createStackMap("emitCallIndirect", slowCallOffset)) {
+ return false;
+ }
+
+ popStackResultsAfterCall(results, stackArgBytes);
+
+ endCall(baselineCall, stackArgBytes);
+
+ popValueStackBy(numArgs);
+
+ captureCallResultRegisters(resultType);
+ return pushCallResults(baselineCall, resultType, results);
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+bool BaseCompiler::emitCallRef() {
+ const FuncType* funcType;
+ Nothing unused_callee;
+ BaseNothingVector unused_args{};
+ if (!iter_.readCallRef(&funcType, &unused_callee, &unused_args)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ sync();
+
+ // Stack: ... arg1 .. argn callee
+
+ uint32_t numArgs = funcType->args().length() + 1;
+ size_t stackArgBytes = stackConsumed(numArgs);
+
+ ResultType resultType(ResultType::Vector(funcType->results()));
+ StackResultsLoc results;
+ if (!pushStackResultsForCall(resultType, RegPtr(ABINonArgReg0), &results)) {
+ return false;
+ }
+
+ FunctionCall baselineCall{};
+  // State and realm are restored as needed by callRef (really by
+ // MacroAssembler::wasmCallRef).
+ beginCall(baselineCall, UseABI::Wasm, RestoreRegisterStateAndRealm::False);
+
+ if (!emitCallArgs(funcType->args(), results, &baselineCall,
+ CalleeOnStack::True)) {
+ return false;
+ }
+
+ const Stk& callee = peek(results.count());
+ CodeOffset fastCallOffset;
+ CodeOffset slowCallOffset;
+ callRef(callee, baselineCall, &fastCallOffset, &slowCallOffset);
+ if (!createStackMap("emitCallRef", fastCallOffset)) {
+ return false;
+ }
+ if (!createStackMap("emitCallRef", slowCallOffset)) {
+ return false;
+ }
+
+ popStackResultsAfterCall(results, stackArgBytes);
+
+ endCall(baselineCall, stackArgBytes);
+
+ popValueStackBy(numArgs);
+
+ captureCallResultRegisters(resultType);
+ return pushCallResults(baselineCall, resultType, results);
+}
+#endif
+
+void BaseCompiler::emitRound(RoundingMode roundingMode, ValType operandType) {
+ if (operandType == ValType::F32) {
+ RegF32 f0 = popF32();
+ roundF32(roundingMode, f0);
+ pushF32(f0);
+ } else if (operandType == ValType::F64) {
+ RegF64 f0 = popF64();
+ roundF64(roundingMode, f0);
+ pushF64(f0);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+bool BaseCompiler::emitUnaryMathBuiltinCall(SymbolicAddress callee,
+ ValType operandType) {
+ Nothing operand_;
+ if (!iter_.readUnary(operandType, &operand_)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RoundingMode roundingMode;
+ if (IsRoundingFunction(callee, &roundingMode) &&
+ supportsRoundInstruction(roundingMode)) {
+ emitRound(roundingMode, operandType);
+ return true;
+ }
+
+ sync();
+
+ ValTypeVector& signature = operandType == ValType::F32 ? SigF_ : SigD_;
+ ValType retType = operandType;
+ uint32_t numArgs = signature.length();
+ size_t stackSpace = stackConsumed(numArgs);
+ StackResultsLoc noStackResults;
+
+ FunctionCall baselineCall{};
+ beginCall(baselineCall, UseABI::Builtin, RestoreRegisterStateAndRealm::False);
+
+ if (!emitCallArgs(signature, noStackResults, &baselineCall,
+ CalleeOnStack::False)) {
+ return false;
+ }
+
+ CodeOffset raOffset = builtinCall(callee, baselineCall);
+ if (!createStackMap("emitUnaryMathBuiltin[..]", raOffset)) {
+ return false;
+ }
+
+ endCall(baselineCall, stackSpace);
+
+ popValueStackBy(numArgs);
+
+ pushReturnValueOfCall(baselineCall, retType.toMIRType());
+
+ return true;
+}
+
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+bool BaseCompiler::emitDivOrModI64BuiltinCall(SymbolicAddress callee,
+ ValType operandType) {
+ MOZ_ASSERT(operandType == ValType::I64);
+ MOZ_ASSERT(!deadCode_);
+
+ sync();
+
+ needI64(specific_.abiReturnRegI64);
+
+ RegI64 rhs = popI64();
+ RegI64 srcDest = popI64ToSpecific(specific_.abiReturnRegI64);
+
+ Label done;
+
+ checkDivideByZero(rhs);
+
+ if (callee == SymbolicAddress::DivI64) {
+ checkDivideSignedOverflow(rhs, srcDest, &done, ZeroOnOverflow(false));
+ } else if (callee == SymbolicAddress::ModI64) {
+ checkDivideSignedOverflow(rhs, srcDest, &done, ZeroOnOverflow(true));
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(srcDest.high);
+ masm.passABIArg(srcDest.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+ CodeOffset raOffset = masm.callWithABI(
+ bytecodeOffset(), callee, mozilla::Some(fr.getInstancePtrOffset()));
+ if (!createStackMap("emitDivOrModI64Bui[..]", raOffset)) {
+ return false;
+ }
+
+ masm.bind(&done);
+
+ freeI64(rhs);
+ pushI64(srcDest);
+ return true;
+}
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+bool BaseCompiler::emitConvertInt64ToFloatingCallout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType) {
+ sync();
+
+ RegI64 input = popI64();
+
+ FunctionCall call{};
+
+ masm.setupWasmABICall();
+# ifdef JS_PUNBOX64
+ MOZ_CRASH("BaseCompiler platform hook: emitConvertInt64ToFloatingCallout");
+# else
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+# endif
+ CodeOffset raOffset = masm.callWithABI(
+ bytecodeOffset(), callee, mozilla::Some(fr.getInstancePtrOffset()),
+ resultType == ValType::F32 ? MoveOp::FLOAT32 : MoveOp::DOUBLE);
+ if (!createStackMap("emitConvertInt64To[..]", raOffset)) {
+ return false;
+ }
+
+ freeI64(input);
+
+ if (resultType == ValType::F32) {
+ pushF32(captureReturnedF32(call));
+ } else {
+ pushF64(captureReturnedF64(call));
+ }
+
+ return true;
+}
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+// `Callee` always takes a double, so a float32 input must be converted.
+bool BaseCompiler::emitConvertFloatingToInt64Callout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType) {
+ RegF64 doubleInput;
+ if (operandType == ValType::F32) {
+ doubleInput = needF64();
+ RegF32 input = popF32();
+ masm.convertFloat32ToDouble(input, doubleInput);
+ freeF32(input);
+ } else {
+ doubleInput = popF64();
+ }
+
+ // We may need the value after the call for the ool check.
+ RegF64 otherReg = needF64();
+ moveF64(doubleInput, otherReg);
+ pushF64(otherReg);
+
+ sync();
+
+ FunctionCall call{};
+
+ masm.setupWasmABICall();
+ masm.passABIArg(doubleInput, MoveOp::DOUBLE);
+ CodeOffset raOffset = masm.callWithABI(
+ bytecodeOffset(), callee, mozilla::Some(fr.getInstancePtrOffset()));
+ if (!createStackMap("emitConvertFloatin[..]", raOffset)) {
+ return false;
+ }
+
+ freeF64(doubleInput);
+
+ RegI64 rv = captureReturnedI64();
+
+ RegF64 inputVal = popF64();
+
+ TruncFlags flags = 0;
+ if (callee == SymbolicAddress::TruncateDoubleToUint64) {
+ flags |= TRUNC_UNSIGNED;
+ }
+ if (callee == SymbolicAddress::SaturatingTruncateDoubleToInt64 ||
+ callee == SymbolicAddress::SaturatingTruncateDoubleToUint64) {
+ flags |= TRUNC_SATURATING;
+ }
+
+ // If we're saturating, the callout will always produce the final result
+  // value. Otherwise, the callout will return 0x8000000000000000
+ // and we need to produce traps.
+ OutOfLineCode* ool = nullptr;
+ if (!(flags & TRUNC_SATURATING)) {
+    // The OOL check just succeeds or fails; it does not generate a value.
+ ool = addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(
+ AnyReg(inputVal), rv, flags, bytecodeOffset()));
+ if (!ool) {
+ return false;
+ }
+
+ masm.branch64(Assembler::Equal, rv, Imm64(0x8000000000000000),
+ ool->entry());
+ masm.bind(ool->rejoin());
+ }
+
+ pushI64(rv);
+ freeF64(inputVal);
+
+ return true;
+}
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+bool BaseCompiler::emitGetLocal() {
+ uint32_t slot;
+ if (!iter_.readGetLocal(locals_, &slot)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+  // Local loads are pushed unresolved, i.e., they may be deferred
+ // until needed, until they may be affected by a store, or until a
+ // sync. This is intended to reduce register pressure.
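+  //
+  // For instance, two consecutive local.get operations on an i32 local are
+  // pushed as two "local" entries on the value stack and are resolved into
+  // registers only when something consumes them; a later local.set of the
+  // same slot forces the pending loads to be resolved first (see syncLocal).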
+
+ switch (locals_[slot].kind()) {
+ case ValType::I32:
+ pushLocalI32(slot);
+ break;
+ case ValType::I64:
+ pushLocalI64(slot);
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ pushLocalV128(slot);
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F64:
+ pushLocalF64(slot);
+ break;
+ case ValType::F32:
+ pushLocalF32(slot);
+ break;
+ case ValType::Ref:
+ pushLocalRef(slot);
+ break;
+ }
+
+ return true;
+}
+
+template <bool isSetLocal>
+bool BaseCompiler::emitSetOrTeeLocal(uint32_t slot) {
+ if (deadCode_) {
+ return true;
+ }
+
+ bceLocalIsUpdated(slot);
+ switch (locals_[slot].kind()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ syncLocal(slot);
+ fr.storeLocalI32(rv, localFromSlot(slot, MIRType::Int32));
+ if (isSetLocal) {
+ freeI32(rv);
+ } else {
+ pushI32(rv);
+ }
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ syncLocal(slot);
+ fr.storeLocalI64(rv, localFromSlot(slot, MIRType::Int64));
+ if (isSetLocal) {
+ freeI64(rv);
+ } else {
+ pushI64(rv);
+ }
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ syncLocal(slot);
+ fr.storeLocalF64(rv, localFromSlot(slot, MIRType::Double));
+ if (isSetLocal) {
+ freeF64(rv);
+ } else {
+ pushF64(rv);
+ }
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ syncLocal(slot);
+ fr.storeLocalF32(rv, localFromSlot(slot, MIRType::Float32));
+ if (isSetLocal) {
+ freeF32(rv);
+ } else {
+ pushF32(rv);
+ }
+ break;
+ }
+ case ValType::V128: {
+#ifdef ENABLE_WASM_SIMD
+ RegV128 rv = popV128();
+ syncLocal(slot);
+ fr.storeLocalV128(rv, localFromSlot(slot, MIRType::Simd128));
+ if (isSetLocal) {
+ freeV128(rv);
+ } else {
+ pushV128(rv);
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ case ValType::Ref: {
+ RegRef rv = popRef();
+ syncLocal(slot);
+ fr.storeLocalRef(rv, localFromSlot(slot, MIRType::RefOrNull));
+ if (isSetLocal) {
+ freeRef(rv);
+ } else {
+ pushRef(rv);
+ }
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitSetLocal() {
+ uint32_t slot;
+ Nothing unused_value;
+ if (!iter_.readSetLocal(locals_, &slot, &unused_value)) {
+ return false;
+ }
+ return emitSetOrTeeLocal<true>(slot);
+}
+
+bool BaseCompiler::emitTeeLocal() {
+ uint32_t slot;
+ Nothing unused_value;
+ if (!iter_.readTeeLocal(locals_, &slot, &unused_value)) {
+ return false;
+ }
+ return emitSetOrTeeLocal<false>(slot);
+}
+
+bool BaseCompiler::emitGetGlobal() {
+ uint32_t id;
+ if (!iter_.readGetGlobal(&id)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const GlobalDesc& global = moduleEnv_.globals[id];
+
+ if (global.isConstant()) {
+ LitVal value = global.constantValue();
+ switch (value.type().kind()) {
+ case ValType::I32:
+ pushI32(value.i32());
+ break;
+ case ValType::I64:
+ pushI64(value.i64());
+ break;
+ case ValType::F32:
+ pushF32(value.f32());
+ break;
+ case ValType::F64:
+ pushF64(value.f64());
+ break;
+ case ValType::Ref:
+ pushRef(intptr_t(value.ref().forCompiledCode()));
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+ pushV128(value.v128());
+ break;
+#endif
+ default:
+ MOZ_CRASH("Global constant type");
+ }
+ return true;
+ }
+
+ switch (global.type().kind()) {
+ case ValType::I32: {
+ RegI32 rv = needI32();
+ ScratchPtr tmp(*this);
+ masm.load32(addressOfGlobalVar(global, tmp), rv);
+ pushI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = needI64();
+ ScratchPtr tmp(*this);
+ masm.load64(addressOfGlobalVar(global, tmp), rv);
+ pushI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = needF32();
+ ScratchPtr tmp(*this);
+ masm.loadFloat32(addressOfGlobalVar(global, tmp), rv);
+ pushF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = needF64();
+ ScratchPtr tmp(*this);
+ masm.loadDouble(addressOfGlobalVar(global, tmp), rv);
+ pushF64(rv);
+ break;
+ }
+ case ValType::Ref: {
+ RegRef rv = needRef();
+ ScratchPtr tmp(*this);
+ masm.loadPtr(addressOfGlobalVar(global, tmp), rv);
+ pushRef(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 rv = needV128();
+ ScratchPtr tmp(*this);
+ masm.loadUnalignedSimd128(addressOfGlobalVar(global, tmp), rv);
+ pushV128(rv);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ return true;
+}
+
+bool BaseCompiler::emitSetGlobal() {
+ uint32_t id;
+ Nothing unused_value;
+ if (!iter_.readSetGlobal(&id, &unused_value)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const GlobalDesc& global = moduleEnv_.globals[id];
+
+ switch (global.type().kind()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ ScratchPtr tmp(*this);
+ masm.store32(rv, addressOfGlobalVar(global, tmp));
+ freeI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ ScratchPtr tmp(*this);
+ masm.store64(rv, addressOfGlobalVar(global, tmp));
+ freeI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ ScratchPtr tmp(*this);
+ masm.storeFloat32(rv, addressOfGlobalVar(global, tmp));
+ freeF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ ScratchPtr tmp(*this);
+ masm.storeDouble(rv, addressOfGlobalVar(global, tmp));
+ freeF64(rv);
+ break;
+ }
+ case ValType::Ref: {
+ RegPtr valueAddr(PreBarrierReg);
+ needPtr(valueAddr);
+ {
+ ScratchPtr tmp(*this);
+ masm.computeEffectiveAddress(addressOfGlobalVar(global, tmp),
+ valueAddr);
+ }
+ RegRef rv = popRef();
+ // emitBarrieredStore preserves rv
+ if (!emitBarrieredStore(Nothing(), valueAddr, rv, PreBarrierKind::Normal,
+ PostBarrierKind::Imprecise)) {
+ return false;
+ }
+ freeRef(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 rv = popV128();
+ ScratchPtr tmp(*this);
+ masm.storeUnalignedSimd128(rv, addressOfGlobalVar(global, tmp));
+ freeV128(rv);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ return true;
+}
+
+bool BaseCompiler::emitLoad(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoad(type, Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ loadCommon(&access, AccessCheck(), type);
+ return true;
+}
+
+bool BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr,
+ &unused_value)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ storeCommon(&access, AccessCheck(), resultType);
+ return true;
+}
+
+bool BaseCompiler::emitSelect(bool typed) {
+ StackType type;
+ Nothing unused_trueValue;
+ Nothing unused_falseValue;
+ Nothing unused_condition;
+ if (!iter_.readSelect(typed, &type, &unused_trueValue, &unused_falseValue,
+ &unused_condition)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ resetLatentOp();
+ return true;
+ }
+
+ // I32 condition on top, then false, then true.
+
+ Label done;
+ BranchState b(&done);
+ emitBranchSetup(&b);
+
+ switch (type.valType().kind()) {
+ case ValType::I32: {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveI32(rs, r);
+ masm.bind(&done);
+ freeI32(rs);
+ pushI32(r);
+ break;
+ }
+ case ValType::I64: {
+#ifdef JS_CODEGEN_X86
+ // There may be as many as four Int64 values in registers at a time: two
+ // for the latent branch operands, and two for the true/false values we
+ // normally pop before executing the branch. On x86 this is one value
+ // too many, so we need to generate more complicated code here, and for
+ // simplicity's sake we do so even if the branch operands are not Int64.
+ // However, the resulting control flow diamond is complicated since the
+ // arms of the diamond will have to stay synchronized with respect to
+ // their evaluation stack and regalloc state. To simplify further, we
+ // use a double branch and a temporary boolean value for now.
+ RegI32 temp = needI32();
+ moveImm32(0, temp);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveImm32(1, temp);
+ masm.bind(&done);
+
+ Label trueValue;
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ masm.branch32(Assembler::Equal, temp, Imm32(0), &trueValue);
+ moveI64(rs, r);
+ masm.bind(&trueValue);
+ freeI32(temp);
+ freeI64(rs);
+ pushI64(r);
+#else
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveI64(rs, r);
+ masm.bind(&done);
+ freeI64(rs);
+ pushI64(r);
+#endif
+ break;
+ }
+ case ValType::F32: {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveF32(rs, r);
+ masm.bind(&done);
+ freeF32(rs);
+ pushF32(r);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveF64(rs, r);
+ masm.bind(&done);
+ freeF64(rs);
+ pushF64(r);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 r, rs;
+ pop2xV128(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveV128(rs, r);
+ masm.bind(&done);
+ freeV128(rs);
+ pushV128(r);
+ break;
+ }
+#endif
+ case ValType::Ref: {
+ RegRef r, rs;
+ pop2xRef(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveRef(rs, r);
+ masm.bind(&done);
+ freeRef(rs);
+ pushRef(r);
+ break;
+ }
+ default: {
+ MOZ_CRASH("select type");
+ }
+ }
+
+ return true;
+}
+
+void BaseCompiler::emitCompareI32(Assembler::Condition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::I32);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ int32_t c;
+ if (popConst(&c)) {
+ RegI32 r = popI32();
+ masm.cmp32Set(compareOp, r, Imm32(c), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ masm.cmp32Set(compareOp, r, rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitCompareI64(Assembler::Condition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::I64);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ RegI64 rs0, rs1;
+ pop2xI64(&rs0, &rs1);
+ RegI32 rd(fromI64(rs0));
+ cmp64Set(compareOp, rs0, rs1, rd);
+ freeI64(rs1);
+ freeI64Except(rs0, rd);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitCompareF32(Assembler::DoubleCondition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::F32);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ Label across;
+ RegF32 rs0, rs1;
+ pop2xF32(&rs0, &rs1);
+ RegI32 rd = needI32();
+ moveImm32(1, rd);
+ masm.branchFloat(compareOp, rs0, rs1, &across);
+ moveImm32(0, rd);
+ masm.bind(&across);
+ freeF32(rs0);
+ freeF32(rs1);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitCompareF64(Assembler::DoubleCondition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::F64);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ Label across;
+ RegF64 rs0, rs1;
+ pop2xF64(&rs0, &rs1);
+ RegI32 rd = needI32();
+ moveImm32(1, rd);
+ masm.branchDouble(compareOp, rs0, rs1, &across);
+ moveImm32(0, rd);
+ masm.bind(&across);
+ freeF64(rs0);
+ freeF64(rs1);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitCompareRef(Assembler::Condition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(!sniffConditionalControlCmp(compareOp, compareType));
+
+ RegRef rs1, rs2;
+ pop2xRef(&rs1, &rs2);
+ RegI32 rd = needI32();
+ masm.cmpPtrSet(compareOp, rs1, rs2, rd);
+ freeRef(rs1);
+ freeRef(rs2);
+ pushI32(rd);
+}
+
+bool BaseCompiler::emitInstanceCall(const SymbolicAddressSignature& builtin) {
+ // See declaration (WasmBCClass.h) for info on the relationship between the
+ // compiler's value stack and the argument order for the to-be-called
+ // function.
+ const MIRType* argTypes = builtin.argTypes;
+ MOZ_ASSERT(argTypes[0] == MIRType::Pointer);
+
+ sync();
+
+ uint32_t numNonInstanceArgs = builtin.numArgs - 1 /* instance */;
+ size_t stackSpace = stackConsumed(numNonInstanceArgs);
+
+ FunctionCall baselineCall{};
+ beginCall(baselineCall, UseABI::System, RestoreRegisterStateAndRealm::True);
+
+ ABIArg instanceArg = reservePointerArgument(&baselineCall);
+
+ startCallArgs(StackArgAreaSizeUnaligned(builtin), &baselineCall);
+ for (uint32_t i = 1; i < builtin.numArgs; i++) {
+ ValType t;
+ switch (argTypes[i]) {
+ case MIRType::Int32:
+ t = ValType::I32;
+ break;
+ case MIRType::Int64:
+ t = ValType::I64;
+ break;
+ case MIRType::Float32:
+ t = ValType::F32;
+ break;
+ case MIRType::RefOrNull:
+ t = RefType::extern_();
+ break;
+ case MIRType::Pointer:
+        // Instance function args can now be uninterpreted pointers (e.g., for
+        // PostBarrier and PostBarrierFilter), so we simply treat them like an
+        // integer of the same size.
+ t = ValType::fromMIRType(TargetWordMIRType());
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ passArg(t, peek(numNonInstanceArgs - i), &baselineCall);
+ }
+ CodeOffset raOffset =
+ builtinInstanceMethodCall(builtin, instanceArg, baselineCall);
+ if (!createStackMap("emitInstanceCall", raOffset)) {
+ return false;
+ }
+
+ endCall(baselineCall, stackSpace);
+
+ popValueStackBy(numNonInstanceArgs);
+
+  // Note that many clients of emitInstanceCall currently assume that pushing
+  // the result here does not destroy ReturnReg.
+ //
+ // Furthermore, clients assume that if builtin.retType != MIRType::None, the
+ // callee will have returned a result and left it in ReturnReg for us to
+ // find, and that that register will not be destroyed here (or above).
+
+ // For the return type only, MIRType::None is used to indicate that the
+ // call doesn't return a result, that is, returns a C/C++ "void".
+
+ if (builtin.retType != MIRType::None) {
+ pushReturnValueOfCall(baselineCall, builtin.retType);
+ }
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Reference types.
+
+bool BaseCompiler::emitRefFunc() {
+ return emitInstanceCallOp<uint32_t>(SASigRefFunc,
+ [this](uint32_t* funcIndex) -> bool {
+ return iter_.readRefFunc(funcIndex);
+ });
+}
+
+bool BaseCompiler::emitRefNull() {
+ RefType type;
+ if (!iter_.readRefNull(&type)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ pushRef(NULLREF_VALUE);
+ return true;
+}
+
+bool BaseCompiler::emitRefIsNull() {
+ Nothing nothing;
+ if (!iter_.readRefIsNull(&nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegRef r = popRef();
+ RegI32 rd = narrowRef(r);
+
+ masm.cmpPtrSet(Assembler::Equal, r, ImmWord(NULLREF_VALUE), rd);
+ pushI32(rd);
+ return true;
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+bool BaseCompiler::emitRefAsNonNull() {
+ Nothing nothing;
+ if (!iter_.readRefAsNonNull(&nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegRef rp = popRef();
+ Label ok;
+ masm.branchTestPtr(Assembler::NonZero, rp, rp, &ok);
+ trap(Trap::NullPointerDereference);
+ masm.bind(&ok);
+ pushRef(rp);
+
+ return true;
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Atomic operations.
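+//
+// Each emitter below validates the access through the iterator, builds a
+// MemoryAccessDesc carrying the required synchronization (full barriers for
+// cmpxchg/RMW/xchg, load/store ordering for atomic loads and stores), and
+// hands code generation off to the shared atomic* helpers.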
+
+bool BaseCompiler::emitAtomicCmpXchg(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused{};
+ if (!iter_.readAtomicCmpXchg(&addr, type, Scalar::byteSize(viewType), &unused,
+ &unused)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Full());
+ atomicCmpXchg(&access, type);
+ return true;
+}
+
+bool BaseCompiler::emitAtomicLoad(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readAtomicLoad(&addr, type, Scalar::byteSize(viewType))) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Load());
+ atomicLoad(&access, type);
+ return true;
+}
+
+bool BaseCompiler::emitAtomicRMW(ValType type, Scalar::Type viewType,
+ AtomicOp op) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType),
+ &unused_value)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Full());
+ atomicRMW(&access, type, op);
+ return true;
+}
+
+bool BaseCompiler::emitAtomicStore(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readAtomicStore(&addr, type, Scalar::byteSize(viewType),
+ &unused_value)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Store());
+ atomicStore(&access, type);
+ return true;
+}
+
+bool BaseCompiler::emitAtomicXchg(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType),
+ &unused_value)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Full());
+ atomicXchg(&access, type);
+ return true;
+}
+
+bool BaseCompiler::emitWait(ValType type, uint32_t byteSize) {
+ Nothing nothing;
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readWait(&addr, type, byteSize, &nothing, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(
+ type.kind() == ValType::I32 ? Scalar::Int32 : Scalar::Int64, addr.align,
+ addr.offset, bytecodeOffset());
+ return atomicWait(type, &access);
+}
+
+bool BaseCompiler::emitWake() {
+ Nothing nothing;
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readWake(&addr, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
+ bytecodeOffset());
+ return atomicWake(&access);
+}
+
+bool BaseCompiler::emitFence() {
+ if (!iter_.readFence()) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ masm.memoryBarrier(MembarFull);
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Bulk memory operations.
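+//
+// memory.copy and memory.fill are emitted inline only for a 32-bit memory
+// when the relevant operands are constants and the length is non-zero and no
+// larger than MaxInlineMemoryCopyLength / MaxInlineMemoryFillLength; every
+// other case, including 64-bit memories, goes through an instance call.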
+
+bool BaseCompiler::emitMemoryGrow() {
+ return emitInstanceCallOp(
+ !usesMemory() || isMem32() ? SASigMemoryGrowM32 : SASigMemoryGrowM64,
+ [this]() -> bool {
+ Nothing arg;
+ return iter_.readMemoryGrow(&arg);
+ });
+}
+
+bool BaseCompiler::emitMemorySize() {
+ return emitInstanceCallOp(
+ !usesMemory() || isMem32() ? SASigMemorySizeM32 : SASigMemorySizeM64,
+ [this]() -> bool { return iter_.readMemorySize(); });
+}
+
+bool BaseCompiler::emitMemCopy() {
+ uint32_t dstMemOrTableIndex = 0;
+ uint32_t srcMemOrTableIndex = 0;
+ Nothing nothing;
+ if (!iter_.readMemOrTableCopy(true, &dstMemOrTableIndex, &nothing,
+ &srcMemOrTableIndex, &nothing, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+
+ if (isMem32()) {
+ int32_t signedLength;
+ if (peekConst(&signedLength) && signedLength != 0 &&
+ uint32_t(signedLength) <= MaxInlineMemoryCopyLength) {
+ memCopyInlineM32();
+ return true;
+ }
+ }
+
+ return memCopyCall();
+}
+
+bool BaseCompiler::memCopyCall() {
+ pushHeapBase();
+ return emitInstanceCall(
+ usesSharedMemory()
+ ? (isMem32() ? SASigMemCopySharedM32 : SASigMemCopySharedM64)
+ : (isMem32() ? SASigMemCopyM32 : SASigMemCopyM64));
+}
+
+bool BaseCompiler::emitMemFill() {
+ Nothing nothing;
+ if (!iter_.readMemFill(&nothing, &nothing, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+
+ if (isMem32()) {
+ int32_t signedLength;
+ int32_t signedValue;
+ if (peek2xConst(&signedLength, &signedValue) && signedLength != 0 &&
+ uint32_t(signedLength) <= MaxInlineMemoryFillLength) {
+ memFillInlineM32();
+ return true;
+ }
+ }
+ return memFillCall();
+}
+
+bool BaseCompiler::memFillCall() {
+ pushHeapBase();
+ return emitInstanceCall(
+ usesSharedMemory()
+ ? (isMem32() ? SASigMemFillSharedM32 : SASigMemFillSharedM64)
+ : (isMem32() ? SASigMemFillM32 : SASigMemFillM64));
+}
+
+bool BaseCompiler::emitMemInit() {
+ return emitInstanceCallOp<uint32_t>(
+ (!usesMemory() || isMem32() ? SASigMemInitM32 : SASigMemInitM64),
+ [this](uint32_t* segIndex) -> bool {
+ Nothing nothing;
+ if (iter_.readMemOrTableInit(/*isMem*/ true, segIndex, nullptr,
+ &nothing, &nothing, &nothing)) {
+ return true;
+ }
+ return false;
+ });
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Bulk table operations.
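+//
+// table.copy, table.init, and table.fill always go through instance calls,
+// whereas table.get and table.set below are emitted inline for tables whose
+// representation is TableRepr::Ref.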
+
+bool BaseCompiler::emitTableCopy() {
+ uint32_t dstMemOrTableIndex = 0;
+ uint32_t srcMemOrTableIndex = 0;
+ Nothing nothing;
+ if (!iter_.readMemOrTableCopy(false, &dstMemOrTableIndex, &nothing,
+ &srcMemOrTableIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ pushI32(dstMemOrTableIndex);
+ pushI32(srcMemOrTableIndex);
+ return emitInstanceCall(SASigTableCopy);
+}
+
+bool BaseCompiler::emitTableInit() {
+ return emitInstanceCallOp<uint32_t, uint32_t>(
+ SASigTableInit,
+ [this](uint32_t* segIndex, uint32_t* dstTableIndex) -> bool {
+ Nothing nothing;
+ return iter_.readMemOrTableInit(/*isMem*/ false, segIndex,
+ dstTableIndex, &nothing, &nothing,
+ &nothing);
+ });
+}
+
+bool BaseCompiler::emitTableFill() {
+ // fill(start:u32, val:ref, len:u32, table:u32) -> void
+ return emitInstanceCallOp<uint32_t>(
+ SASigTableFill, [this](uint32_t* tableIndex) -> bool {
+ Nothing nothing;
+ return iter_.readTableFill(tableIndex, &nothing, &nothing, &nothing);
+ });
+}
+
+bool BaseCompiler::emitMemDiscard() {
+ Nothing nothing;
+ if (!iter_.readMemDiscard(&nothing, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+
+ pushHeapBase();
+ return emitInstanceCall(
+ usesSharedMemory()
+ ? (isMem32() ? SASigMemDiscardSharedM32 : SASigMemDiscardSharedM64)
+ : (isMem32() ? SASigMemDiscardM32 : SASigMemDiscardM64));
+}
+
+bool BaseCompiler::emitTableGet() {
+ uint32_t tableIndex;
+ Nothing nothing;
+ if (!iter_.readTableGet(&tableIndex, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ if (moduleEnv_.tables[tableIndex].elemType.tableRepr() == TableRepr::Ref) {
+ return emitTableGetAnyRef(tableIndex);
+ }
+ pushI32(tableIndex);
+ // get(index:u32, table:u32) -> AnyRef
+ return emitInstanceCall(SASigTableGet);
+}
+
+bool BaseCompiler::emitTableGrow() {
+ // grow(initValue:anyref, delta:u32, table:u32) -> u32
+ return emitInstanceCallOp<uint32_t>(
+ SASigTableGrow, [this](uint32_t* tableIndex) -> bool {
+ Nothing nothing;
+ return iter_.readTableGrow(tableIndex, &nothing, &nothing);
+ });
+}
+
+bool BaseCompiler::emitTableSet() {
+ uint32_t tableIndex;
+ Nothing nothing;
+ if (!iter_.readTableSet(&tableIndex, &nothing, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ if (moduleEnv_.tables[tableIndex].elemType.tableRepr() == TableRepr::Ref) {
+ return emitTableSetAnyRef(tableIndex);
+ }
+ pushI32(tableIndex);
+ // set(index:u32, value:ref, table:u32) -> void
+ return emitInstanceCall(SASigTableSet);
+}
+
+bool BaseCompiler::emitTableSize() {
+ uint32_t tableIndex;
+ if (!iter_.readTableSize(&tableIndex)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+
+ RegPtr instance = needPtr();
+ RegI32 length = needI32();
+
+ fr.loadInstancePtr(instance);
+ loadTableLength(tableIndex, instance, length);
+
+ pushI32(length);
+ freePtr(instance);
+ return true;
+}
+
+void BaseCompiler::emitTableBoundsCheck(uint32_t tableIndex, RegI32 index,
+ RegPtr instance) {
+ Label ok;
+ masm.wasmBoundsCheck32(
+ Assembler::Condition::Below, index,
+ addressOfTableField(tableIndex, offsetof(TableInstanceData, length),
+ instance),
+ &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&ok);
+}
+
+bool BaseCompiler::emitTableGetAnyRef(uint32_t tableIndex) {
+ RegPtr instance = needPtr();
+ RegPtr elements = needPtr();
+ RegI32 index = popI32();
+
+ fr.loadInstancePtr(instance);
+ emitTableBoundsCheck(tableIndex, index, instance);
+ loadTableElements(tableIndex, instance, elements);
+ masm.loadPtr(BaseIndex(elements, index, ScalePointer), elements);
+
+ pushRef(RegRef(elements));
+ freeI32(index);
+ freePtr(instance);
+
+ return true;
+}
+
+bool BaseCompiler::emitTableSetAnyRef(uint32_t tableIndex) {
+  // Create a temporary for valueAddr that is not in the prebarrier register
+  // and can be consumed by the barrier operation.
+ RegPtr valueAddr = RegPtr(PreBarrierReg);
+ needPtr(valueAddr);
+
+ RegPtr instance = needPtr();
+ RegPtr elements = needPtr();
+ RegRef value = popRef();
+ RegI32 index = popI32();
+
+  // x86 is one register too short for this operation, so shuffle `value`
+  // back onto the stack until it is needed.
+#ifdef JS_CODEGEN_X86
+ pushRef(value);
+#endif
+
+ fr.loadInstancePtr(instance);
+ emitTableBoundsCheck(tableIndex, index, instance);
+ loadTableElements(tableIndex, instance, elements);
+ masm.computeEffectiveAddress(BaseIndex(elements, index, ScalePointer),
+ valueAddr);
+
+ freeI32(index);
+ freePtr(elements);
+ freePtr(instance);
+
+#ifdef JS_CODEGEN_X86
+ value = popRef();
+#endif
+
+ if (!emitBarrieredStore(Nothing(), valueAddr, value, PreBarrierKind::Normal,
+ PostBarrierKind::Precise)) {
+ return false;
+ }
+ freeRef(value);
+ return true;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Data and element segment management.
+
+bool BaseCompiler::emitDataOrElemDrop(bool isData) {
+ return emitInstanceCallOp<uint32_t>(
+ isData ? SASigDataDrop : SASigElemDrop, [&](uint32_t* segIndex) -> bool {
+ return iter_.readDataOrElemDrop(isData, segIndex);
+ });
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// General object support.
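+//
+// Write-barrier protocol: emitPreBarrier runs before an existing reference is
+// overwritten, and a post-barrier (precise or imprecise) runs after the new
+// value has been stored so that the store buffer stays consistent.
+// emitBarrieredStore composes the two around the actual store.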
+
+void BaseCompiler::emitPreBarrier(RegPtr valueAddr) {
+ Label skipBarrier;
+ ScratchPtr scratch(*this);
+
+#ifdef RABALDR_PIN_INSTANCE
+ Register instance(InstanceReg);
+#else
+ Register instance(scratch);
+ fr.loadInstancePtr(instance);
+#endif
+
+ EmitWasmPreBarrierGuard(masm, instance, scratch, valueAddr,
+ /*valueOffset=*/0, &skipBarrier, nullptr);
+
+#ifndef RABALDR_PIN_INSTANCE
+ fr.loadInstancePtr(instance);
+#endif
+#ifdef JS_CODEGEN_ARM64
+ // The prebarrier stub assumes the PseudoStackPointer is set up. It is OK
+ // to just move the sp to x28 here because x28 is not being used by the
+ // baseline compiler and need not be saved or restored.
+ MOZ_ASSERT(!GeneralRegisterSet::All().hasRegisterIndex(x28.asUnsized()));
+ masm.Mov(x28, sp);
+#endif
+ // The prebarrier call preserves all volatile registers
+ EmitWasmPreBarrierCall(masm, instance, scratch, valueAddr, /*valueOffset=*/0);
+
+ masm.bind(&skipBarrier);
+}
+
+bool BaseCompiler::emitPostBarrierImprecise(const Maybe<RegRef>& object,
+ RegPtr valueAddr, RegRef value) {
+ // We must force a sync before the guard so that locals are in a consistent
+ // location for whether or not the post-barrier call is taken.
+ sync();
+
+ // Emit a guard to skip the post-barrier call if it is not needed.
+ Label skipBarrier;
+ RegPtr otherScratch = needPtr();
+ EmitWasmPostBarrierGuard(masm, object, otherScratch, value, &skipBarrier);
+ freePtr(otherScratch);
+
+ // Push `object` and `value` to preserve them across the call.
+ if (object) {
+ pushRef(*object);
+ }
+ pushRef(value);
+
+  // `valueAddr` is a raw pointer to the cell within some GC object or
+  // instance area. We ensure that the GC will not run while the post-barrier
+  // call is active, so it is safe to push it as a uintptr_t value.
+ pushPtr(valueAddr);
+ if (!emitInstanceCall(SASigPostBarrier)) {
+ return false;
+ }
+
+ // Restore `object` and `value`.
+ popRef(value);
+ if (object) {
+ popRef(*object);
+ }
+
+ masm.bind(&skipBarrier);
+ return true;
+}
+
+bool BaseCompiler::emitPostBarrierPrecise(const Maybe<RegRef>& object,
+ RegPtr valueAddr, RegRef prevValue,
+ RegRef value) {
+ // Push `object` and `value` to preserve them across the call.
+ if (object) {
+ pushRef(*object);
+ }
+ pushRef(value);
+
+ // Push the arguments and call the precise post-barrier
+ pushPtr(valueAddr);
+ pushRef(prevValue);
+ if (!emitInstanceCall(SASigPostBarrierPrecise)) {
+ return false;
+ }
+
+ // Restore `object` and `value`.
+ popRef(value);
+ if (object) {
+ popRef(*object);
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitBarrieredStore(const Maybe<RegRef>& object,
+ RegPtr valueAddr, RegRef value,
+ PreBarrierKind preBarrierKind,
+ PostBarrierKind postBarrierKind) {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ // The pre-barrier preserves all allocated registers.
+ if (preBarrierKind == PreBarrierKind::Normal) {
+ emitPreBarrier(valueAddr);
+ }
+
+ // The precise post-barrier requires the previous value stored in the field,
+ // in order to know if the previous store buffer entry needs to be removed.
+ RegRef prevValue;
+ if (postBarrierKind == PostBarrierKind::Precise) {
+ prevValue = needRef();
+ masm.loadPtr(Address(valueAddr, 0), prevValue);
+ }
+
+ // Store the value
+ masm.storePtr(value, Address(valueAddr, 0));
+
+ // The post-barrier preserves object and value.
+ if (postBarrierKind == PostBarrierKind::Precise) {
+ return emitPostBarrierPrecise(object, valueAddr, prevValue, value);
+ }
+ return emitPostBarrierImprecise(object, valueAddr, value);
+}
+
+void BaseCompiler::emitBarrieredClear(RegPtr valueAddr) {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ // The pre-barrier preserves all allocated registers.
+ emitPreBarrier(valueAddr);
+
+ // Store null
+ masm.storePtr(ImmWord(0), Address(valueAddr, 0));
+
+ // No post-barrier is needed, as null does not require a store buffer entry
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// GC proposal.
+
+#ifdef ENABLE_WASM_GC
+
+RegPtr BaseCompiler::loadTypeDefInstanceData(uint32_t typeIndex) {
+ RegPtr rp = needPtr();
+ RegPtr instance;
+# ifndef RABALDR_PIN_INSTANCE
+ instance = rp;
+ fr.loadInstancePtr(instance);
+# else
+ // We can use the pinned instance register.
+ instance = RegPtr(InstanceReg);
+# endif
+ masm.computeEffectiveAddress(
+ Address(instance, Instance::offsetInData(
+ moduleEnv_.offsetOfTypeDefInstanceData(typeIndex))),
+ rp);
+ return rp;
+}
+
+RegPtr BaseCompiler::loadSuperTypeVector(uint32_t typeIndex) {
+ RegPtr rp = needPtr();
+ RegPtr instance;
+# ifndef RABALDR_PIN_INSTANCE
+ // We need to load the instance register, but can use the destination
+ // register as a temporary.
+ instance = rp;
+ fr.loadInstancePtr(rp);
+# else
+ // We can use the pinned instance register.
+ instance = RegPtr(InstanceReg);
+# endif
+ masm.loadPtr(
+ Address(instance, Instance::offsetInData(
+ moduleEnv_.offsetOfSuperTypeVector(typeIndex))),
+ rp);
+ return rp;
+}
+
+/* static */
+void BaseCompiler::SignalNullCheck::emitNullCheck(BaseCompiler* bc, RegRef rp) {
+ Label ok;
+ MacroAssembler& masm = bc->masm;
+ masm.branchTestPtr(Assembler::NonZero, rp, rp, &ok);
+ bc->trap(Trap::NullPointerDereference);
+ masm.bind(&ok);
+}
+
+/* static */
+void BaseCompiler::SignalNullCheck::emitTrapSite(BaseCompiler* bc) {
+ wasm::BytecodeOffset trapOffset(bc->bytecodeOffset());
+ MacroAssembler& masm = bc->masm;
+ masm.append(wasm::Trap::NullPointerDereference,
+ wasm::TrapSite(masm.currentOffset(), trapOffset));
+}
+
+RegPtr BaseCompiler::emitGcArrayGetData(RegRef rp) {
+ // `rp` points at a WasmArrayObject. Return a reg holding the value of its
+ // `data_` field.
+ RegPtr rdata = needPtr();
+ masm.loadPtr(Address(rp, WasmArrayObject::offsetOfData()), rdata);
+ return rdata;
+}
+
+template <typename NullCheckPolicy>
+RegI32 BaseCompiler::emitGcArrayGetNumElements(RegRef rp) {
+ // `rp` points at a WasmArrayObject. Return a reg holding the value of its
+ // `numElements_` field.
+ STATIC_ASSERT_WASMARRAYELEMENTS_NUMELEMENTS_IS_U32;
+ RegI32 numElements = needI32();
+ NullCheckPolicy::emitTrapSite(this);
+ masm.load32(Address(rp, WasmArrayObject::offsetOfNumElements()), numElements);
+ return numElements;
+}
+
+void BaseCompiler::emitGcArrayBoundsCheck(RegI32 index, RegI32 numElements) {
+ Label inBounds;
+ masm.branch32(Assembler::Below, index, numElements, &inBounds);
+ masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&inBounds);
+}
+
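+// emitGcGet widens i8/i16 fields to i32 on load according to the requested
+// FieldWideningOp; wider field kinds are loaded at their natural size. The
+// NullCheckPolicy decides whether a NullPointerDereference trap site is
+// registered at the access itself (SignalNullCheck), letting the hardware
+// fault double as the null check, or whether no check is emitted
+// (NoNullCheck).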
+template <typename T, typename NullCheckPolicy>
+void BaseCompiler::emitGcGet(FieldType type, FieldWideningOp wideningOp,
+ const T& src) {
+ switch (type.kind()) {
+ case FieldType::I8: {
+ MOZ_ASSERT(wideningOp != FieldWideningOp::None);
+ RegI32 r = needI32();
+ NullCheckPolicy::emitTrapSite(this);
+ if (wideningOp == FieldWideningOp::Unsigned) {
+ masm.load8ZeroExtend(src, r);
+ } else {
+ masm.load8SignExtend(src, r);
+ }
+ pushI32(r);
+ break;
+ }
+ case FieldType::I16: {
+ MOZ_ASSERT(wideningOp != FieldWideningOp::None);
+ RegI32 r = needI32();
+ NullCheckPolicy::emitTrapSite(this);
+ if (wideningOp == FieldWideningOp::Unsigned) {
+ masm.load16ZeroExtend(src, r);
+ } else {
+ masm.load16SignExtend(src, r);
+ }
+ pushI32(r);
+ break;
+ }
+ case FieldType::I32: {
+ MOZ_ASSERT(wideningOp == FieldWideningOp::None);
+ RegI32 r = needI32();
+ NullCheckPolicy::emitTrapSite(this);
+ masm.load32(src, r);
+ pushI32(r);
+ break;
+ }
+ case FieldType::I64: {
+ MOZ_ASSERT(wideningOp == FieldWideningOp::None);
+ RegI64 r = needI64();
+ NullCheckPolicy::emitTrapSite(this);
+ masm.load64(src, r);
+ pushI64(r);
+ break;
+ }
+ case FieldType::F32: {
+ MOZ_ASSERT(wideningOp == FieldWideningOp::None);
+ RegF32 r = needF32();
+ NullCheckPolicy::emitTrapSite(this);
+ masm.loadFloat32(src, r);
+ pushF32(r);
+ break;
+ }
+ case FieldType::F64: {
+ MOZ_ASSERT(wideningOp == FieldWideningOp::None);
+ RegF64 r = needF64();
+ NullCheckPolicy::emitTrapSite(this);
+ masm.loadDouble(src, r);
+ pushF64(r);
+ break;
+ }
+# ifdef ENABLE_WASM_SIMD
+ case FieldType::V128: {
+ MOZ_ASSERT(wideningOp == FieldWideningOp::None);
+ RegV128 r = needV128();
+ NullCheckPolicy::emitTrapSite(this);
+ masm.loadUnalignedSimd128(src, r);
+ pushV128(r);
+ break;
+ }
+# endif
+ case FieldType::Ref: {
+ MOZ_ASSERT(wideningOp == FieldWideningOp::None);
+ RegRef r = needRef();
+ NullCheckPolicy::emitTrapSite(this);
+ masm.loadPtr(src, r);
+ pushRef(r);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected field type");
+ }
+ }
+}
+
+template <typename T, typename NullCheckPolicy>
+void BaseCompiler::emitGcSetScalar(const T& dst, FieldType type, AnyReg value) {
+ NullCheckPolicy::emitTrapSite(this);
+ switch (type.kind()) {
+ case FieldType::I8: {
+ masm.store8(value.i32(), dst);
+ break;
+ }
+ case FieldType::I16: {
+ masm.store16(value.i32(), dst);
+ break;
+ }
+ case FieldType::I32: {
+ masm.store32(value.i32(), dst);
+ break;
+ }
+ case FieldType::I64: {
+ masm.store64(value.i64(), dst);
+ break;
+ }
+ case FieldType::F32: {
+ masm.storeFloat32(value.f32(), dst);
+ break;
+ }
+ case FieldType::F64: {
+ masm.storeDouble(value.f64(), dst);
+ break;
+ }
+# ifdef ENABLE_WASM_SIMD
+ case FieldType::V128: {
+ masm.storeUnalignedSimd128(value.v128(), dst);
+ break;
+ }
+# endif
+ default: {
+ MOZ_CRASH("Unexpected field type");
+ }
+ }
+}
+
+template <typename NullCheckPolicy>
+bool BaseCompiler::emitGcStructSet(RegRef object, RegPtr areaBase,
+ uint32_t areaOffset, FieldType fieldType,
+ AnyReg value,
+ PreBarrierKind preBarrierKind) {
+ // Easy path if the field is a scalar
+ if (!fieldType.isRefRepr()) {
+ emitGcSetScalar<Address, NullCheckPolicy>(Address(areaBase, areaOffset),
+ fieldType, value);
+ freeAny(value);
+ return true;
+ }
+
+  // Create a temporary for valueAddr that is not in the prebarrier register
+  // and can be consumed by the barrier operation.
+ RegPtr valueAddr = RegPtr(PreBarrierReg);
+ needPtr(valueAddr);
+ masm.computeEffectiveAddress(Address(areaBase, areaOffset), valueAddr);
+
+ NullCheckPolicy::emitNullCheck(this, object);
+
+ // emitBarrieredStore preserves object and value
+ if (!emitBarrieredStore(Some(object), valueAddr, value.ref(), preBarrierKind,
+ PostBarrierKind::Imprecise)) {
+ return false;
+ }
+ freeRef(value.ref());
+
+ return true;
+}
+
+bool BaseCompiler::emitGcArraySet(RegRef object, RegPtr data, RegI32 index,
+ const ArrayType& arrayType, AnyReg value,
+ PreBarrierKind preBarrierKind) {
+ // Try to use a base index store instruction if the field type fits in a
+ // shift immediate. If not we shift the index manually and then unshift
+ // it after the store. We don't use an extra register for this because we
+ // don't have any to spare on x86.
+ uint32_t shift = arrayType.elementType_.indexingShift();
+ Scale scale;
+ bool shiftedIndex = false;
+ if (IsShiftInScaleRange(shift)) {
+ scale = ShiftToScale(shift);
+ } else {
+ masm.lshiftPtr(Imm32(shift), index);
+ scale = TimesOne;
+ shiftedIndex = true;
+ }
+ auto unshiftIndex = mozilla::MakeScopeExit([&] {
+ if (shiftedIndex) {
+ masm.rshiftPtr(Imm32(shift), index);
+ }
+ });
+
+ // Easy path if the field is a scalar
+ if (!arrayType.elementType_.isRefRepr()) {
+ emitGcSetScalar<BaseIndex, NoNullCheck>(BaseIndex(data, index, scale, 0),
+ arrayType.elementType_, value);
+ return true;
+ }
+
+  // Create a temporary for valueAddr that is not in the prebarrier register
+  // and can be consumed by the barrier operation.
+ RegPtr valueAddr = RegPtr(PreBarrierReg);
+ needPtr(valueAddr);
+ masm.computeEffectiveAddress(BaseIndex(data, index, scale, 0), valueAddr);
+
+ // Save state for after barriered write
+ pushPtr(data);
+ pushI32(index);
+
+ // emitBarrieredStore preserves object and value
+ if (!emitBarrieredStore(Some(object), valueAddr, value.ref(), preBarrierKind,
+ PostBarrierKind::Imprecise)) {
+ return false;
+ }
+
+ // Restore state
+ popI32(index);
+ popPtr(data);
+
+ return true;
+}
+
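+// struct.new pops one initializer per field, with the last field's value on
+// top of the stack, which is why the loop below walks the fields in reverse.
+// Each field lands either in the object's inline data area or, for structs
+// larger than WasmStructObject_MaxInlineBytes, in an out-of-line area reached
+// through offsetOfOutlineData(); fieldOffsetToAreaAndOffset decides which.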
+bool BaseCompiler::emitStructNew() {
+ uint32_t typeIndex;
+ BaseNothingVector args{};
+ if (!iter_.readStructNew(&typeIndex, &args)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const StructType& structType = (*moduleEnv_.types)[typeIndex].structType();
+
+ // Allocate an uninitialized struct. This requires the type definition
+ // for the struct to be pushed on the stack. This will trap on OOM.
+ pushPtr(loadTypeDefInstanceData(typeIndex));
+ if (!emitInstanceCall(SASigStructNewUninit)) {
+ return false;
+ }
+
+ // Optimization opportunity: Iterate backward to pop arguments off the
+ // stack. This will generate more instructions than we want, since we
+ // really only need to pop the stack once at the end, not for every element,
+ // but to do better we need a bit more machinery to load elements off the
+ // stack into registers.
+
+ bool isOutlineStruct = structType.size_ > WasmStructObject_MaxInlineBytes;
+
+ // Reserve this register early if we will need it so that it is not taken by
+ // any register used in this function.
+ needPtr(RegPtr(PreBarrierReg));
+
+ RegRef object = popRef();
+ RegPtr outlineBase = isOutlineStruct ? needPtr() : RegPtr();
+
+ // Free the barrier reg after we've allocated all registers
+ freePtr(RegPtr(PreBarrierReg));
+
+ // Optimization opportunity: when the value being stored is a known
+  // zero/null we need not store anything. This case may be somewhat common
+ // because struct.new forces a value to be specified for every field.
+
+ // Optimization opportunity: this loop reestablishes the outline base pointer
+ // every iteration, which really isn't very clever. It would be better to
+ // establish it once before we start, then re-set it if/when we transition
+ // from the out-of-line area back to the in-line area. That would however
+ // require making ::emitGcStructSet preserve that register, which it
+ // currently doesn't.
+
+ uint32_t fieldIndex = structType.fields_.length();
+ while (fieldIndex-- > 0) {
+ const StructField& field = structType.fields_[fieldIndex];
+ FieldType fieldType = field.type;
+ uint32_t fieldOffset = field.offset;
+
+ bool areaIsOutline;
+ uint32_t areaOffset;
+ WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
+ &areaIsOutline, &areaOffset);
+
+ // Reserve the barrier reg if we might need it for this store
+ if (fieldType.isRefRepr()) {
+ needPtr(RegPtr(PreBarrierReg));
+ }
+ AnyReg value = popAny();
+ // Free the barrier reg now that we've loaded the value
+ if (fieldType.isRefRepr()) {
+ freePtr(RegPtr(PreBarrierReg));
+ }
+
+ if (areaIsOutline) {
+ // Load the outline data pointer
+ masm.loadPtr(Address(object, WasmStructObject::offsetOfOutlineData()),
+ outlineBase);
+
+ // Consumes value and outline data, object is preserved by this call.
+ if (!emitGcStructSet<NoNullCheck>(object, outlineBase, areaOffset,
+ fieldType, value,
+ PreBarrierKind::None)) {
+ return false;
+ }
+ } else {
+ // Consumes value. object is unchanged by this call.
+ if (!emitGcStructSet<NoNullCheck>(
+ object, RegPtr(object),
+ WasmStructObject::offsetOfInlineData() + areaOffset, fieldType,
+ value, PreBarrierKind::None)) {
+ return false;
+ }
+ }
+ }
+
+ if (isOutlineStruct) {
+ freePtr(outlineBase);
+ }
+ pushRef(object);
+
+ return true;
+}
+
+bool BaseCompiler::emitStructNewDefault() {
+ uint32_t typeIndex;
+ if (!iter_.readStructNewDefault(&typeIndex)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+
+ // Allocate a default initialized struct. This requires the type definition
+ // for the struct to be pushed on the stack. This will trap on OOM.
+ pushPtr(loadTypeDefInstanceData(typeIndex));
+ return emitInstanceCall(SASigStructNew);
+}
+
+bool BaseCompiler::emitStructGet(FieldWideningOp wideningOp) {
+ uint32_t typeIndex;
+ uint32_t fieldIndex;
+ Nothing nothing;
+ if (!iter_.readStructGet(&typeIndex, &fieldIndex, wideningOp, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const StructType& structType = (*moduleEnv_.types)[typeIndex].structType();
+
+ // Decide whether we're accessing inline or outline, and at what offset
+ FieldType fieldType = structType.fields_[fieldIndex].type;
+ uint32_t fieldOffset = structType.fields_[fieldIndex].offset;
+
+ bool areaIsOutline;
+ uint32_t areaOffset;
+ WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
+ &areaIsOutline, &areaOffset);
+
+ RegRef object = popRef();
+ if (areaIsOutline) {
+ RegPtr outlineBase = needPtr();
+ SignalNullCheck::emitTrapSite(this);
+ masm.loadPtr(Address(object, WasmStructObject::offsetOfOutlineData()),
+ outlineBase);
+ // Load the value
+ emitGcGet<Address, NoNullCheck>(fieldType, wideningOp,
+ Address(outlineBase, areaOffset));
+ freePtr(outlineBase);
+ } else {
+ // Load the value
+ emitGcGet<Address, SignalNullCheck>(
+ fieldType, wideningOp,
+ Address(object, WasmStructObject::offsetOfInlineData() + areaOffset));
+ }
+ freeRef(object);
+
+ return true;
+}
+
+bool BaseCompiler::emitStructSet() {
+ uint32_t typeIndex;
+ uint32_t fieldIndex;
+ Nothing nothing;
+ if (!iter_.readStructSet(&typeIndex, &fieldIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const StructType& structType = (*moduleEnv_.types)[typeIndex].structType();
+ const StructField& structField = structType.fields_[fieldIndex];
+
+ // Decide whether we're accessing inline or outline, and at what offset
+ FieldType fieldType = structType.fields_[fieldIndex].type;
+ uint32_t fieldOffset = structType.fields_[fieldIndex].offset;
+
+ bool areaIsOutline;
+ uint32_t areaOffset;
+ WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
+ &areaIsOutline, &areaOffset);
+
+ // Reserve this register early if we will need it so that it is not taken by
+ // any register used in this function.
+ if (structField.type.isRefRepr()) {
+ needPtr(RegPtr(PreBarrierReg));
+ }
+
+ RegPtr outlineBase = areaIsOutline ? needPtr() : RegPtr();
+ AnyReg value = popAny();
+ RegRef object = popRef();
+
+ // Free the barrier reg after we've allocated all registers
+ if (structField.type.isRefRepr()) {
+ freePtr(RegPtr(PreBarrierReg));
+ }
+
+ // Make outlineBase point at the first byte of the relevant area
+ if (areaIsOutline) {
+ SignalNullCheck::emitTrapSite(this);
+ masm.loadPtr(Address(object, WasmStructObject::offsetOfOutlineData()),
+ outlineBase);
+ if (!emitGcStructSet<NoNullCheck>(object, outlineBase, areaOffset,
+ fieldType, value,
+ PreBarrierKind::Normal)) {
+ return false;
+ }
+ } else {
+ // Consumes value. object is unchanged by this call.
+ if (!emitGcStructSet<SignalNullCheck>(
+ object, RegPtr(object),
+ WasmStructObject::offsetOfInlineData() + areaOffset, fieldType,
+ value, PreBarrierKind::Normal)) {
+ return false;
+ }
+ }
+
+ if (areaIsOutline) {
+ freePtr(outlineBase);
+ }
+ freeRef(object);
+
+ return true;
+}
+
+bool BaseCompiler::emitArrayNew() {
+ uint32_t typeIndex;
+ Nothing nothing;
+ if (!iter_.readArrayNew(&typeIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
+
+ // Allocate an uninitialized array. This requires the type definition
+ // for the array to be pushed on the stack. This will trap on OOM.
+ pushPtr(loadTypeDefInstanceData(typeIndex));
+ if (!emitInstanceCall(SASigArrayNewUninit)) {
+ return false;
+ }
+
+ // Reserve this register early if we will need it so that it is not taken by
+ // any register used in this function.
+ if (arrayType.elementType_.isRefRepr()) {
+ needPtr(RegPtr(PreBarrierReg));
+ }
+
+ RegRef rp = popRef();
+ AnyReg value = popAny();
+
+ // Acquire the data pointer from the object
+ RegPtr rdata = emitGcArrayGetData(rp);
+
+ // Acquire the number of elements
+ RegI32 numElements = emitGcArrayGetNumElements<NoNullCheck>(rp);
+
+ // Free the barrier reg after we've allocated all registers
+ if (arrayType.elementType_.isRefRepr()) {
+ freePtr(RegPtr(PreBarrierReg));
+ }
+
+ // Perform an initialization loop using `numElements` as the loop variable,
+ // counting down to zero.
+ Label done;
+ Label loop;
+ // Skip initialization if numElements = 0
+ masm.branch32(Assembler::Equal, numElements, Imm32(0), &done);
+ masm.bind(&loop);
+
+ // Move to the next element
+ masm.sub32(Imm32(1), numElements);
+
+ // Assign value to array[numElements]. All registers are preserved
+ if (!emitGcArraySet(rp, rdata, numElements, arrayType, value,
+ PreBarrierKind::None)) {
+ return false;
+ }
+
+ // Loop back if there are still elements to initialize
+ masm.branch32(Assembler::GreaterThan, numElements, Imm32(0), &loop);
+ masm.bind(&done);
+
+ freeI32(numElements);
+ freeAny(value);
+ freePtr(rdata);
+ pushRef(rp);
+
+ return true;
+}
+
+bool BaseCompiler::emitArrayNewFixed() {
+ uint32_t typeIndex, numElements;
+ BaseNothingVector nothings{};
+ if (!iter_.readArrayNewFixed(&typeIndex, &numElements, &nothings)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
+
+ // At this point, the top section of the value stack contains the values to
+ // be used to initialise the array, with index 0 as the topmost value. Push
+ // the required number of elements and the required type on, since the call
+ // to SASigArrayNew will use them.
+ pushI32(numElements);
+ pushPtr(loadTypeDefInstanceData(typeIndex));
+ if (!emitInstanceCall(SASigArrayNew)) {
+ return false;
+ }
+
+ // Reserve this register early if we will need it so that it is not taken by
+ // any register used in this function.
+ bool avoidPreBarrierReg = arrayType.elementType_.isRefRepr();
+ if (avoidPreBarrierReg) {
+ needPtr(RegPtr(PreBarrierReg));
+ }
+
+ // Get hold of the pointer to the array, as created by SASigArrayNew.
+ RegRef rp = popRef();
+
+ // Acquire the data pointer from the object
+ RegPtr rdata = emitGcArrayGetData(rp);
+
+ // Free the barrier reg if we previously reserved it.
+ if (avoidPreBarrierReg) {
+ freePtr(RegPtr(PreBarrierReg));
+ }
+
+ // These together ensure that the max value of `index` in the loop below
+ // remains comfortably below the 2^31 boundary. See comments on equivalent
+ // assertions in EmitArrayNewFixed in WasmIonCompile.cpp for explanation.
+ static_assert(16 /* sizeof v128 */ * MaxFunctionBytes <=
+ MaxArrayPayloadBytes);
+ MOZ_RELEASE_ASSERT(numElements <= MaxFunctionBytes);
+
+ // Generate straight-line initialization code. We could do better here if
+ // there was a version of ::emitGcArraySet that took `index` as a `uint32_t`
+ // rather than a general value-in-a-reg.
+ for (uint32_t forwardIndex = 0; forwardIndex < numElements; forwardIndex++) {
+ uint32_t reverseIndex = numElements - forwardIndex - 1;
+ if (avoidPreBarrierReg) {
+ needPtr(RegPtr(PreBarrierReg));
+ }
+ AnyReg value = popAny();
+ pushI32(reverseIndex);
+ RegI32 index = popI32();
+ if (avoidPreBarrierReg) {
+ freePtr(RegPtr(PreBarrierReg));
+ }
+ if (!emitGcArraySet(rp, rdata, index, arrayType, value,
+ PreBarrierKind::None)) {
+ return false;
+ }
+ freeI32(index);
+ freeAny(value);
+ }
+
+ freePtr(rdata);
+
+ pushRef(rp);
+ return true;
+}
+
+bool BaseCompiler::emitArrayNewDefault() {
+ uint32_t typeIndex;
+ Nothing nothing;
+ if (!iter_.readArrayNewDefault(&typeIndex, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Allocate a default initialized array. This requires the type definition
+ // for the array to be pushed on the stack. This will trap on OOM.
+ pushPtr(loadTypeDefInstanceData(typeIndex));
+ return emitInstanceCall(SASigArrayNew);
+}
+
+bool BaseCompiler::emitArrayNewData() {
+ uint32_t typeIndex, segIndex;
+ Nothing nothing;
+ if (!iter_.readArrayNewData(&typeIndex, &segIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ pushPtr(loadTypeDefInstanceData(typeIndex));
+ pushI32(int32_t(segIndex));
+
+ // The call removes 4 items from the stack: the segment byte offset and
+ // number of elements (operands to array.new_data), and the type index and
+ // seg index as pushed above.
+ return emitInstanceCall(SASigArrayNewData);
+}
+
+bool BaseCompiler::emitArrayNewElem() {
+ uint32_t typeIndex, segIndex;
+ Nothing nothing;
+ if (!iter_.readArrayNewElem(&typeIndex, &segIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ pushPtr(loadTypeDefInstanceData(typeIndex));
+ pushI32(int32_t(segIndex));
+
+ // The call removes 4 items from the stack: the segment element offset and
+ // number of elements (operands to array.new_elem), and the type index and
+ // seg index as pushed above.
+ return emitInstanceCall(SASigArrayNewElem);
+}
+
+bool BaseCompiler::emitArrayGet(FieldWideningOp wideningOp) {
+ uint32_t typeIndex;
+ Nothing nothing;
+ if (!iter_.readArrayGet(&typeIndex, wideningOp, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
+
+ RegI32 index = popI32();
+ RegRef rp = popRef();
+
+ // Acquire the number of elements
+ RegI32 numElements = emitGcArrayGetNumElements<SignalNullCheck>(rp);
+
+ // Bounds check the index
+ emitGcArrayBoundsCheck(index, numElements);
+ freeI32(numElements);
+
+ // Acquire the data pointer from the object
+ RegPtr rdata = emitGcArrayGetData(rp);
+
+ // Load the value
+ uint32_t shift = arrayType.elementType_.indexingShift();
+ if (IsShiftInScaleRange(shift)) {
+ emitGcGet<BaseIndex, NoNullCheck>(
+ arrayType.elementType_, wideningOp,
+ BaseIndex(rdata, index, ShiftToScale(shift), 0));
+ } else {
+ masm.lshiftPtr(Imm32(shift), index);
+ emitGcGet<BaseIndex, NoNullCheck>(arrayType.elementType_, wideningOp,
+ BaseIndex(rdata, index, TimesOne, 0));
+ }
+
+ freePtr(rdata);
+ freeRef(rp);
+ freeI32(index);
+
+ return true;
+}
+
+bool BaseCompiler::emitArraySet() {
+ uint32_t typeIndex;
+ Nothing nothing;
+ if (!iter_.readArraySet(&typeIndex, &nothing, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
+
+ // Reserve this register early if we will need it so that it is not taken by
+ // any register used in this function.
+ if (arrayType.elementType_.isRefRepr()) {
+ needPtr(RegPtr(PreBarrierReg));
+ }
+
+ AnyReg value = popAny();
+ RegI32 index = popI32();
+ RegRef rp = popRef();
+
+ // Acquire the number of elements
+ RegI32 numElements = emitGcArrayGetNumElements<SignalNullCheck>(rp);
+
+ // Bounds check the index
+ emitGcArrayBoundsCheck(index, numElements);
+ freeI32(numElements);
+
+ // Acquire the data pointer from the object
+ RegPtr rdata = emitGcArrayGetData(rp);
+
+ // Free the barrier reg after we've allocated all registers
+ if (arrayType.elementType_.isRefRepr()) {
+ freePtr(RegPtr(PreBarrierReg));
+ }
+
+ // All registers are preserved. This isn't strictly necessary, as we'll just
+ // be freeing them all after this is done. But this is needed for repeated
+ // assignments used in array.new/new_default.
+ if (!emitGcArraySet(rp, rdata, index, arrayType, value,
+ PreBarrierKind::Normal)) {
+ return false;
+ }
+
+ freePtr(rdata);
+ freeRef(rp);
+ freeI32(index);
+ freeAny(value);
+
+ return true;
+}
+
+bool BaseCompiler::emitArrayLen(bool decodeIgnoredTypeIndex) {
+ Nothing nothing;
+ if (!iter_.readArrayLen(decodeIgnoredTypeIndex, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegRef rp = popRef();
+
+ // Acquire the number of elements
+ RegI32 numElements = emitGcArrayGetNumElements<SignalNullCheck>(rp);
+ pushI32(numElements);
+
+ freeRef(rp);
+
+ return true;
+}
+
+bool BaseCompiler::emitArrayCopy() {
+ int32_t elemSize;
+ bool elemsAreRefTyped;
+ Nothing nothing;
+ if (!iter_.readArrayCopy(&elemSize, &elemsAreRefTyped, &nothing, &nothing,
+ &nothing, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+  // readArrayCopy writes the element size in bytes to `elemSize`. The code
+  // generated here needs to pass it on to the instance function and also to
+  // indicate whether the element type is reference-typed, which it does by
+  // negating the size.
+ //
+ // The value stack currently looks like this:
+ //
+ // (top) [numElements, srcIndex, srcArray, dstIndex, dstArray, ..]
+ //
+ // So push `(possibly negated) elemSize` on it, giving
+ //
+ // (top) [elemSize, numElements, srcIndex, srcArray, dstIndex, dstArray, ..]
+ //
+ // and generate a call to the helper.
+
+ MOZ_ASSERT_IF(elemsAreRefTyped, elemSize == sizeof(void*));
+ MOZ_ASSERT_IF(!elemsAreRefTyped, elemSize == 1 || elemSize == 2 ||
+ elemSize == 4 || elemSize == 8 ||
+ elemSize == 16);
+ bool avoidPreBarrierReg = elemsAreRefTyped;
+
+ // Reserve this register early if we will need it so that it is not taken by
+ // any register used in this function.
+ if (avoidPreBarrierReg) {
+ needPtr(RegPtr(PreBarrierReg));
+ }
+
+ // The helper needs to know the element size.
+ pushI32(elemsAreRefTyped ? -elemSize : elemSize);
+
+ if (avoidPreBarrierReg) {
+ freePtr(RegPtr(PreBarrierReg));
+ }
+
+ return emitInstanceCall(SASigArrayCopy);
+}
+
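+// ref.test and ref.cast share the same subtype check (branchGcRefType):
+// ref.test materializes the outcome as an i32 0/1, while ref.cast leaves the
+// reference on the stack and traps with Trap::BadCast when the check fails.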
+void BaseCompiler::emitRefTestCommon(RefType sourceType, RefType destType) {
+ Label success;
+ Label join;
+ RegRef object = popRef();
+ RegI32 result = needI32();
+
+ branchGcRefType(object, sourceType, destType, &success, /*onSuccess=*/true);
+ masm.xor32(result, result);
+ masm.jump(&join);
+ masm.bind(&success);
+ masm.move32(Imm32(1), result);
+ masm.bind(&join);
+
+ pushI32(result);
+ freeRef(object);
+}
+
+void BaseCompiler::emitRefCastCommon(RefType sourceType, RefType destType) {
+ RegRef object = popRef();
+
+ Label success;
+ branchGcRefType(object, sourceType, destType, &success, /*onSuccess=*/true);
+ masm.wasmTrap(Trap::BadCast, bytecodeOffset());
+ masm.bind(&success);
+ pushRef(object);
+}
+
+bool BaseCompiler::emitRefTestV5() {
+ Nothing nothing;
+ RefType sourceType;
+ uint32_t typeIndex;
+ if (!iter_.readRefTestV5(&sourceType, &typeIndex, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const TypeDef& typeDef = moduleEnv_.types->type(typeIndex);
+ RefType destType = RefType::fromTypeDef(&typeDef, /*nullable=*/false);
+ emitRefTestCommon(sourceType, destType);
+
+ return true;
+}
+
+void BaseCompiler::branchGcRefType(RegRef object, RefType sourceType,
+ RefType destType, Label* label,
+ bool onSuccess) {
+ RegPtr superSuperTypeVector;
+ if (MacroAssembler::needSuperSuperTypeVectorForBranchWasmGcRefType(
+ destType)) {
+ uint32_t typeIndex = moduleEnv_.types->indexOf(*destType.typeDef());
+ superSuperTypeVector = loadSuperTypeVector(typeIndex);
+ }
+ RegI32 scratch1 = MacroAssembler::needScratch1ForBranchWasmGcRefType(destType)
+ ? needI32()
+ : RegI32::Invalid();
+ RegI32 scratch2 = MacroAssembler::needScratch2ForBranchWasmGcRefType(destType)
+ ? needI32()
+ : RegI32::Invalid();
+
+ masm.branchWasmGcObjectIsRefType(object, sourceType, destType, label,
+ onSuccess, superSuperTypeVector, scratch1,
+ scratch2);
+
+ if (scratch2.isValid()) {
+ freeI32(scratch2);
+ }
+ if (scratch1.isValid()) {
+ freeI32(scratch1);
+ }
+ if (superSuperTypeVector.isValid()) {
+ freePtr(superSuperTypeVector);
+ }
+}
+
+bool BaseCompiler::emitRefCastV5() {
+ Nothing nothing;
+ RefType sourceType;
+ uint32_t typeIndex;
+ if (!iter_.readRefCastV5(&sourceType, &typeIndex, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const TypeDef& typeDef = moduleEnv_.types->type(typeIndex);
+ RefType destType = RefType::fromTypeDef(&typeDef, /*nullable=*/true);
+ emitRefCastCommon(sourceType, destType);
+
+ return true;
+}
+
+bool BaseCompiler::emitRefTest(bool nullable) {
+ Nothing nothing;
+ RefType sourceType;
+ RefType destType;
+ if (!iter_.readRefTest(nullable, &sourceType, &destType, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ emitRefTestCommon(sourceType, destType);
+
+ return true;
+}
+
+bool BaseCompiler::emitRefCast(bool nullable) {
+ Nothing nothing;
+ RefType sourceType;
+ RefType destType;
+ if (!iter_.readRefCast(nullable, &sourceType, &destType, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ emitRefCastCommon(sourceType, destType);
+
+ return true;
+}
+
+bool BaseCompiler::emitBrOnCastCommon(bool onSuccess,
+ uint32_t labelRelativeDepth,
+ const ResultType& labelType,
+ RefType sourceType, RefType destType) {
+ Control& target = controlItem(labelRelativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ // 3. br_if $l : [T*, ref] -> [T*, ref]
+ BranchState b(&target.label, target.stackHeight, InvertBranch(false),
+ labelType);
+
+ // Don't allocate the result register used in the branch
+ if (b.hasBlockResults()) {
+ needIntegerResultRegisters(b.resultType);
+ }
+
+ // Create a copy of the ref for passing to the br_on_cast label,
+ // the original ref is used for casting in the condition.
+ RegRef object = popRef();
+ RegRef objectCondition = needRef();
+ moveRef(object, objectCondition);
+ pushRef(object);
+
+ if (b.hasBlockResults()) {
+ freeIntegerResultRegisters(b.resultType);
+ }
+
+ if (!jumpConditionalWithResults(&b, objectCondition, sourceType, destType,
+ onSuccess)) {
+ return false;
+ }
+ freeRef(objectCondition);
+
+ return true;
+}
+
+bool BaseCompiler::emitBrOnCast() {
+ MOZ_ASSERT(!hasLatentOp());
+
+ bool onSuccess;
+ uint32_t labelRelativeDepth;
+ RefType sourceType;
+ RefType destType;
+ ResultType labelType;
+ BaseNothingVector unused_values{};
+ if (!iter_.readBrOnCast(&onSuccess, &labelRelativeDepth, &sourceType,
+ &destType, &labelType, &unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ return emitBrOnCastCommon(onSuccess, labelRelativeDepth, labelType,
+ sourceType, destType);
+}
+
+bool BaseCompiler::emitBrOnCastV5(bool onSuccess) {
+ MOZ_ASSERT(!hasLatentOp());
+
+ uint32_t labelRelativeDepth;
+ RefType sourceType;
+ uint32_t castTypeIndex;
+ ResultType labelType;
+ BaseNothingVector unused_values{};
+ if (onSuccess
+ ? !iter_.readBrOnCastV5(&labelRelativeDepth, &sourceType,
+ &castTypeIndex, &labelType, &unused_values)
+ : !iter_.readBrOnCastFailV5(&labelRelativeDepth, &sourceType,
+ &castTypeIndex, &labelType,
+ &unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const TypeDef& typeDef = moduleEnv_.types->type(castTypeIndex);
+ RefType destType = RefType::fromTypeDef(&typeDef, false);
+ return emitBrOnCastCommon(onSuccess, labelRelativeDepth, labelType,
+ sourceType, destType);
+}
+
+bool BaseCompiler::emitBrOnCastHeapV5(bool onSuccess, bool nullable) {
+ MOZ_ASSERT(!hasLatentOp());
+
+ uint32_t labelRelativeDepth;
+ RefType sourceType;
+ RefType destType;
+ ResultType labelType;
+ BaseNothingVector unused_values{};
+ if (onSuccess ? !iter_.readBrOnCastHeapV5(nullable, &labelRelativeDepth,
+ &sourceType, &destType, &labelType,
+ &unused_values)
+ : !iter_.readBrOnCastFailHeapV5(nullable, &labelRelativeDepth,
+ &sourceType, &destType,
+ &labelType, &unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ return emitBrOnCastCommon(onSuccess, labelRelativeDepth, labelType,
+ sourceType, destType);
+}
+
+bool BaseCompiler::emitRefAsStructV5() {
+ Nothing nothing;
+ return iter_.readConversion(ValType(RefType::any()),
+ ValType(RefType::struct_().asNonNullable()),
+ &nothing);
+}
+
+bool BaseCompiler::emitBrOnNonStructV5() {
+ MOZ_ASSERT(!hasLatentOp());
+
+ uint32_t labelRelativeDepth;
+ ResultType labelType;
+ BaseNothingVector unused_values{};
+ if (!iter_.readBrOnNonStructV5(&labelRelativeDepth, &labelType,
+ &unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ Control& target = controlItem(labelRelativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ BranchState b(&target.label, target.stackHeight, InvertBranch(false),
+ labelType);
+ if (b.hasBlockResults()) {
+ needResultRegisters(b.resultType);
+ }
+ RegI32 condition = needI32();
+ masm.move32(Imm32(1), condition);
+ if (b.hasBlockResults()) {
+ freeResultRegisters(b.resultType);
+ }
+ if (!jumpConditionalWithResults(&b, Assembler::Equal, condition, Imm32(0))) {
+ return false;
+ }
+ freeI32(condition);
+ return true;
+}
+
+bool BaseCompiler::emitExternInternalize() {
+ // extern.internalize is a no-op because anyref and extern share the same
+ // representation
+ Nothing nothing;
+ return iter_.readRefConversion(RefType::extern_(), RefType::any(), &nothing);
+}
+
+bool BaseCompiler::emitExternExternalize() {
+ // extern.externalize is a no-op because anyref and extern share the same
+ // representation
+ Nothing nothing;
+ return iter_.readRefConversion(RefType::any(), RefType::extern_(), &nothing);
+}
+
+#endif // ENABLE_WASM_GC
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// SIMD and Relaxed SIMD.
+
+#ifdef ENABLE_WASM_SIMD
+
+// Emitter trampolines used by abstracted SIMD operations. Naming here follows
+// the SIMD spec pretty closely.
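+// In these helpers `rs` names a source operand and `rsd` an operand that is
+// both a source and the destination; platform-specific variants additionally
+// take whatever temporaries their MacroAssembler lowering requires.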
+
+static void AndV128(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.bitwiseAndSimd128(rs, rsd);
+}
+
+static void OrV128(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.bitwiseOrSimd128(rs, rsd);
+}
+
+static void XorV128(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.bitwiseXorSimd128(rs, rsd);
+}
+
+static void AddI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt8x16(rsd, rs, rsd);
+}
+
+static void AddI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt16x8(rsd, rs, rsd);
+}
+
+static void AddI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt32x4(rsd, rs, rsd);
+}
+
+static void AddF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addFloat32x4(rsd, rs, rsd);
+}
+
+static void AddI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt64x2(rsd, rs, rsd);
+}
+
+static void AddF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addFloat64x2(rsd, rs, rsd);
+}
+
+static void AddSatI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addSatInt8x16(rsd, rs, rsd);
+}
+
+static void AddSatUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAddSatInt8x16(rsd, rs, rsd);
+}
+
+static void AddSatI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addSatInt16x8(rsd, rs, rsd);
+}
+
+static void AddSatUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAddSatInt16x8(rsd, rs, rsd);
+}
+
+static void SubI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt8x16(rsd, rs, rsd);
+}
+
+static void SubI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt16x8(rsd, rs, rsd);
+}
+
+static void SubI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt32x4(rsd, rs, rsd);
+}
+
+static void SubF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subFloat32x4(rsd, rs, rsd);
+}
+
+static void SubI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt64x2(rsd, rs, rsd);
+}
+
+static void SubF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subFloat64x2(rsd, rs, rsd);
+}
+
+static void SubSatI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subSatInt8x16(rsd, rs, rsd);
+}
+
+static void SubSatUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedSubSatInt8x16(rsd, rs, rsd);
+}
+
+static void SubSatI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subSatInt16x8(rsd, rs, rsd);
+}
+
+static void SubSatUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedSubSatInt16x8(rsd, rs, rsd);
+}
+
+static void MulI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulInt16x8(rsd, rs, rsd);
+}
+
+static void MulI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulInt32x4(rsd, rs, rsd);
+}
+
+static void MulF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulFloat32x4(rsd, rs, rsd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void MulI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.mulInt64x2(rsd, rs, rsd, temp);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void MulI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.mulInt64x2(rsd, rs, rsd, temp1, temp2);
+}
+# endif
+
+static void MulF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulFloat64x2(rsd, rs, rsd);
+}
+
+static void DivF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.divFloat32x4(rsd, rs, rsd);
+}
+
+static void DivF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.divFloat64x2(rsd, rs, rsd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void MinF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.minFloat32x4(rsd, rs, rsd, temp1, temp2);
+}
+
+static void MinF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.minFloat64x2(rsd, rs, rsd, temp1, temp2);
+}
+
+static void MaxF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.maxFloat32x4(rsd, rs, rsd, temp1, temp2);
+}
+
+static void MaxF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.maxFloat64x2(rsd, rs, rsd, temp1, temp2);
+}
+
+static void PMinF32x4(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMinFloat32x4(rsd, rs);
+}
+
+static void PMinF64x2(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMinFloat64x2(rsd, rs);
+}
+
+static void PMaxF32x4(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMaxFloat32x4(rsd, rs);
+}
+
+static void PMaxF64x2(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMaxFloat64x2(rsd, rs);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void MinF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minFloat32x4(rs, rsd);
+}
+
+static void MinF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minFloat64x2(rs, rsd);
+}
+
+static void MaxF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxFloat32x4(rs, rsd);
+}
+
+static void MaxF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxFloat64x2(rs, rsd);
+}
+
+static void PMinF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMinFloat32x4(rs, rsd);
+}
+
+static void PMinF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMinFloat64x2(rs, rsd);
+}
+
+static void PMaxF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMaxFloat32x4(rs, rsd);
+}
+
+static void PMaxF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMaxFloat64x2(rs, rsd);
+}
+# endif
+
+static void DotI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.widenDotInt16x8(rsd, rs, rsd);
+}
+
+static void ExtMulLowI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extMulLowInt8x16(rsd, rs, rsd);
+}
+
+static void ExtMulHighI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extMulHighInt8x16(rsd, rs, rsd);
+}
+
+static void ExtMulLowUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedExtMulLowInt8x16(rsd, rs, rsd);
+}
+
+static void ExtMulHighUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedExtMulHighInt8x16(rsd, rs, rsd);
+}
+
+static void ExtMulLowI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extMulLowInt16x8(rsd, rs, rsd);
+}
+
+static void ExtMulHighI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extMulHighInt16x8(rsd, rs, rsd);
+}
+
+static void ExtMulLowUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedExtMulLowInt16x8(rsd, rs, rsd);
+}
+
+static void ExtMulHighUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedExtMulHighInt16x8(rsd, rs, rsd);
+}
+
+static void ExtMulLowI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extMulLowInt32x4(rsd, rs, rsd);
+}
+
+static void ExtMulHighI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extMulHighInt32x4(rsd, rs, rsd);
+}
+
+static void ExtMulLowUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedExtMulLowInt32x4(rsd, rs, rsd);
+}
+
+static void ExtMulHighUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedExtMulHighInt32x4(rsd, rs, rsd);
+}
+
+static void Q15MulrSatS(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.q15MulrSatInt16x8(rsd, rs, rsd);
+}
+
+static void CmpI8x16(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt8x16(cond, rs, rsd);
+}
+
+static void CmpI16x8(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt16x8(cond, rs, rsd);
+}
+
+static void CmpI32x4(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt32x4(cond, rs, rsd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void CmpI64x2ForEquality(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareForEqualityInt64x2(cond, rsd, rs, rsd);
+}
+
+static void CmpI64x2ForOrdering(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd, RegV128 temp1,
+ RegV128 temp2) {
+ masm.compareForOrderingInt64x2(cond, rsd, rs, rsd, temp1, temp2);
+}
+# else
+static void CmpI64x2ForEquality(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt64x2(cond, rs, rsd);
+}
+
+static void CmpI64x2ForOrdering(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt64x2(cond, rs, rsd);
+}
+# endif // JS_CODEGEN_X86 || JS_CODEGEN_X64
+
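+// The unsigned comparisons reuse the same compare entry points as the signed
+// ones; signedness is carried entirely by the condition code the caller
+// passes in.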
+static void CmpUI8x16(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt8x16(cond, rs, rsd);
+}
+
+static void CmpUI16x8(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt16x8(cond, rs, rsd);
+}
+
+static void CmpUI32x4(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt32x4(cond, rs, rsd);
+}
+
+static void CmpF32x4(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareFloat32x4(cond, rs, rsd);
+}
+
+static void CmpF64x2(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareFloat64x2(cond, rs, rsd);
+}
+
+static void NegI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt8x16(rs, rd);
+}
+
+static void NegI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt16x8(rs, rd);
+}
+
+static void NegI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt32x4(rs, rd);
+}
+
+static void NegI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt64x2(rs, rd);
+}
+
+static void NegF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negFloat32x4(rs, rd);
+}
+
+static void NegF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negFloat64x2(rs, rd);
+}
+
+static void AbsF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absFloat32x4(rs, rd);
+}
+
+static void AbsF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absFloat64x2(rs, rd);
+}
+
+static void SqrtF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.sqrtFloat32x4(rs, rd);
+}
+
+static void SqrtF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.sqrtFloat64x2(rs, rd);
+}
+
+static void CeilF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.ceilFloat32x4(rs, rd);
+}
+
+static void FloorF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.floorFloat32x4(rs, rd);
+}
+
+static void TruncF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.truncFloat32x4(rs, rd);
+}
+
+static void NearestF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.nearestFloat32x4(rs, rd);
+}
+
+static void CeilF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.ceilFloat64x2(rs, rd);
+}
+
+static void FloorF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.floorFloat64x2(rs, rd);
+}
+
+static void TruncF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.truncFloat64x2(rs, rd);
+}
+
+static void NearestF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.nearestFloat64x2(rs, rd);
+}
+
+static void NotV128(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.bitwiseNotSimd128(rs, rd);
+}
+
+static void ExtAddPairwiseI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extAddPairwiseInt8x16(rs, rsd);
+}
+
+static void ExtAddPairwiseUI8x16(MacroAssembler& masm, RegV128 rs,
+ RegV128 rsd) {
+ masm.unsignedExtAddPairwiseInt8x16(rs, rsd);
+}
+
+static void ExtAddPairwiseI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.extAddPairwiseInt16x8(rs, rsd);
+}
+
+static void ExtAddPairwiseUI16x8(MacroAssembler& masm, RegV128 rs,
+ RegV128 rsd) {
+ masm.unsignedExtAddPairwiseInt16x8(rs, rsd);
+}
+
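+// Wasm takes SIMD shift counts modulo the lane width in bits;
+// MustMaskShiftCountSimd128 reports the mask to apply when the target's
+// shift instructions do not perform that masking themselves.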
+static void ShiftOpMask(MacroAssembler& masm, SimdOp op, RegI32 in,
+ RegI32 out) {
+ int32_t maskBits;
+
+ masm.mov(in, out);
+ if (MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
+ masm.and32(Imm32(maskBits), out);
+ }
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void ShiftLeftI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp1, RegV128 temp2) {
+ ShiftOpMask(masm, SimdOp::I8x16Shl, rs, temp1);
+ masm.leftShiftInt8x16(temp1, rsd, temp2);
+}
+
+static void ShiftLeftI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I16x8Shl, rs, temp);
+ masm.leftShiftInt16x8(temp, rsd);
+}
+
+static void ShiftLeftI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I32x4Shl, rs, temp);
+ masm.leftShiftInt32x4(temp, rsd);
+}
+
+static void ShiftLeftI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I64x2Shl, rs, temp);
+ masm.leftShiftInt64x2(temp, rsd);
+}
+
+static void ShiftRightI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp1, RegV128 temp2) {
+ ShiftOpMask(masm, SimdOp::I8x16ShrS, rs, temp1);
+ masm.rightShiftInt8x16(temp1, rsd, temp2);
+}
+
+static void ShiftRightUI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp1, RegV128 temp2) {
+ ShiftOpMask(masm, SimdOp::I8x16ShrU, rs, temp1);
+ masm.unsignedRightShiftInt8x16(temp1, rsd, temp2);
+}
+
+static void ShiftRightI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I16x8ShrS, rs, temp);
+ masm.rightShiftInt16x8(temp, rsd);
+}
+
+static void ShiftRightUI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I16x8ShrU, rs, temp);
+ masm.unsignedRightShiftInt16x8(temp, rsd);
+}
+
+static void ShiftRightI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I32x4ShrS, rs, temp);
+ masm.rightShiftInt32x4(temp, rsd);
+}
+
+static void ShiftRightUI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I32x4ShrU, rs, temp);
+ masm.unsignedRightShiftInt32x4(temp, rsd);
+}
+
+static void ShiftRightUI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I64x2ShrU, rs, temp);
+ masm.unsignedRightShiftInt64x2(temp, rsd);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void ShiftLeftI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I8x16Shl, rs, temp);
+ masm.leftShiftInt8x16(rsd, temp, rsd);
+}
+
+static void ShiftLeftI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I16x8Shl, rs, temp);
+ masm.leftShiftInt16x8(rsd, temp, rsd);
+}
+
+static void ShiftLeftI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I32x4Shl, rs, temp);
+ masm.leftShiftInt32x4(rsd, temp, rsd);
+}
+
+static void ShiftLeftI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I64x2Shl, rs, temp);
+ masm.leftShiftInt64x2(rsd, temp, rsd);
+}
+
+static void ShiftRightI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I8x16ShrS, rs, temp);
+ masm.rightShiftInt8x16(rsd, temp, rsd);
+}
+
+static void ShiftRightUI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I8x16ShrU, rs, temp);
+ masm.unsignedRightShiftInt8x16(rsd, temp, rsd);
+}
+
+static void ShiftRightI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I16x8ShrS, rs, temp);
+ masm.rightShiftInt16x8(rsd, temp, rsd);
+}
+
+static void ShiftRightUI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I16x8ShrU, rs, temp);
+ masm.unsignedRightShiftInt16x8(rsd, temp, rsd);
+}
+
+static void ShiftRightI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I32x4ShrS, rs, temp);
+ masm.rightShiftInt32x4(rsd, temp, rsd);
+}
+
+static void ShiftRightUI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I32x4ShrU, rs, temp);
+ masm.unsignedRightShiftInt32x4(rsd, temp, rsd);
+}
+
+static void ShiftRightI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I64x2ShrS, rs, temp);
+ masm.rightShiftInt64x2(rsd, temp, rsd);
+}
+
+static void ShiftRightUI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ ShiftOpMask(masm, SimdOp::I64x2ShrU, rs, temp);
+ masm.unsignedRightShiftInt64x2(rsd, temp, rsd);
+}
+# endif
+
+static void AverageUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAverageInt8x16(rsd, rs, rsd);
+}
+
+static void AverageUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAverageInt16x8(rsd, rs, rsd);
+}
+
+static void MinI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minInt8x16(rsd, rs, rsd);
+}
+
+static void MinUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMinInt8x16(rsd, rs, rsd);
+}
+
+static void MaxI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxInt8x16(rsd, rs, rsd);
+}
+
+static void MaxUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMaxInt8x16(rsd, rs, rsd);
+}
+
+static void MinI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minInt16x8(rsd, rs, rsd);
+}
+
+static void MinUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMinInt16x8(rsd, rs, rsd);
+}
+
+static void MaxI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxInt16x8(rsd, rs, rsd);
+}
+
+static void MaxUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMaxInt16x8(rsd, rs, rsd);
+}
+
+static void MinI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minInt32x4(rsd, rs, rsd);
+}
+
+static void MinUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMinInt32x4(rsd, rs, rsd);
+}
+
+static void MaxI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxInt32x4(rsd, rs, rsd);
+}
+
+static void MaxUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMaxInt32x4(rsd, rs, rsd);
+}
+
+static void NarrowI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.narrowInt16x8(rsd, rs, rsd);
+}
+
+static void NarrowUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedNarrowInt16x8(rsd, rs, rsd);
+}
+
+static void NarrowI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.narrowInt32x4(rsd, rs, rsd);
+}
+
+static void NarrowUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedNarrowInt32x4(rsd, rs, rsd);
+}
+
+static void WidenLowI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenLowInt8x16(rs, rd);
+}
+
+static void WidenHighI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenHighInt8x16(rs, rd);
+}
+
+static void WidenLowUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenLowInt8x16(rs, rd);
+}
+
+static void WidenHighUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenHighInt8x16(rs, rd);
+}
+
+static void WidenLowI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenLowInt16x8(rs, rd);
+}
+
+static void WidenHighI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenHighInt16x8(rs, rd);
+}
+
+static void WidenLowUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenLowInt16x8(rs, rd);
+}
+
+static void WidenHighUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenHighInt16x8(rs, rd);
+}
+
+static void WidenLowI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenLowInt32x4(rs, rd);
+}
+
+static void WidenHighI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenHighInt32x4(rs, rd);
+}
+
+static void WidenLowUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenLowInt32x4(rs, rd);
+}
+
+static void WidenHighUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenHighInt32x4(rs, rd);
+}
+
+# if defined(JS_CODEGEN_ARM64)
+static void PopcntI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.popcntInt8x16(rs, rd);
+}
+# else
+static void PopcntI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd,
+ RegV128 temp) {
+ masm.popcntInt8x16(rs, rd, temp);
+}
+# endif // JS_CODEGEN_ARM64
+
+static void AbsI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absInt8x16(rs, rd);
+}
+
+static void AbsI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absInt16x8(rs, rd);
+}
+
+static void AbsI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absInt32x4(rs, rd);
+}
+
+static void AbsI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absInt64x2(rs, rd);
+}
+
+static void ExtractLaneI8x16(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.extractLaneInt8x16(laneIndex, rs, rd);
+}
+
+static void ExtractLaneUI8x16(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.unsignedExtractLaneInt8x16(laneIndex, rs, rd);
+}
+
+static void ExtractLaneI16x8(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.extractLaneInt16x8(laneIndex, rs, rd);
+}
+
+static void ExtractLaneUI16x8(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.unsignedExtractLaneInt16x8(laneIndex, rs, rd);
+}
+
+static void ExtractLaneI32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.extractLaneInt32x4(laneIndex, rs, rd);
+}
+
+static void ExtractLaneI64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI64 rd) {
+ masm.extractLaneInt64x2(laneIndex, rs, rd);
+}
+
+static void ExtractLaneF32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegF32 rd) {
+ masm.extractLaneFloat32x4(laneIndex, rs, rd);
+}
+
+static void ExtractLaneF64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegF64 rd) {
+ masm.extractLaneFloat64x2(laneIndex, rs, rd);
+}
+
+static void ReplaceLaneI8x16(MacroAssembler& masm, uint32_t laneIndex,
+ RegI32 rs, RegV128 rsd) {
+ masm.replaceLaneInt8x16(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneI16x8(MacroAssembler& masm, uint32_t laneIndex,
+ RegI32 rs, RegV128 rsd) {
+ masm.replaceLaneInt16x8(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneI32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegI32 rs, RegV128 rsd) {
+ masm.replaceLaneInt32x4(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneI64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegI64 rs, RegV128 rsd) {
+ masm.replaceLaneInt64x2(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneF32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegF32 rs, RegV128 rsd) {
+ masm.replaceLaneFloat32x4(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneF64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegF64 rs, RegV128 rsd) {
+ masm.replaceLaneFloat64x2(laneIndex, rs, rsd);
+}
+
+static void SplatI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rd) {
+ masm.splatX16(rs, rd);
+}
+
+static void SplatI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rd) {
+ masm.splatX8(rs, rd);
+}
+
+static void SplatI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rd) {
+ masm.splatX4(rs, rd);
+}
+
+static void SplatI64x2(MacroAssembler& masm, RegI64 rs, RegV128 rd) {
+ masm.splatX2(rs, rd);
+}
+
+static void SplatF32x4(MacroAssembler& masm, RegF32 rs, RegV128 rd) {
+ masm.splatX4(rs, rd);
+}
+
+static void SplatF64x2(MacroAssembler& masm, RegF64 rs, RegV128 rd) {
+ masm.splatX2(rs, rd);
+}
+
+// This is the same op independent of lanes: it tests for any nonzero bit.
+static void AnyTrue(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.anyTrueSimd128(rs, rd);
+}
+
+static void AllTrueI8x16(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.allTrueInt8x16(rs, rd);
+}
+
+static void AllTrueI16x8(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.allTrueInt16x8(rs, rd);
+}
+
+static void AllTrueI32x4(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.allTrueInt32x4(rs, rd);
+}
+
+static void AllTrueI64x2(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.allTrueInt64x2(rs, rd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void BitmaskI8x16(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.bitmaskInt8x16(rs, rd);
+}
+
+static void BitmaskI16x8(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.bitmaskInt16x8(rs, rd);
+}
+
+static void BitmaskI32x4(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.bitmaskInt32x4(rs, rd);
+}
+
+static void BitmaskI64x2(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.bitmaskInt64x2(rs, rd);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void BitmaskI8x16(MacroAssembler& masm, RegV128 rs, RegI32 rd,
+ RegV128 temp) {
+ masm.bitmaskInt8x16(rs, rd, temp);
+}
+
+static void BitmaskI16x8(MacroAssembler& masm, RegV128 rs, RegI32 rd,
+ RegV128 temp) {
+ masm.bitmaskInt16x8(rs, rd, temp);
+}
+
+static void BitmaskI32x4(MacroAssembler& masm, RegV128 rs, RegI32 rd,
+ RegV128 temp) {
+ masm.bitmaskInt32x4(rs, rd, temp);
+}
+
+static void BitmaskI64x2(MacroAssembler& masm, RegV128 rs, RegI32 rd,
+ RegV128 temp) {
+ masm.bitmaskInt64x2(rs, rd, temp);
+}
+# endif
+
+static void Swizzle(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.swizzleInt8x16(rsd, rs, rsd);
+}
+
+static void ConvertI32x4ToF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.convertInt32x4ToFloat32x4(rs, rd);
+}
+
+static void ConvertUI32x4ToF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedConvertInt32x4ToFloat32x4(rs, rd);
+}
+
+static void ConvertF32x4ToI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.truncSatFloat32x4ToInt32x4(rs, rd);
+}
+
+# if defined(JS_CODEGEN_ARM64)
+static void ConvertF32x4ToUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedTruncSatFloat32x4ToInt32x4(rs, rd);
+}
+# else
+static void ConvertF32x4ToUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd,
+ RegV128 temp) {
+ masm.unsignedTruncSatFloat32x4ToInt32x4(rs, rd, temp);
+}
+# endif // JS_CODEGEN_ARM64
+
+static void ConvertI32x4ToF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.convertInt32x4ToFloat64x2(rs, rd);
+}
+
+static void ConvertUI32x4ToF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedConvertInt32x4ToFloat64x2(rs, rd);
+}
+
+static void ConvertF64x2ToI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd,
+ RegV128 temp) {
+ masm.truncSatFloat64x2ToInt32x4(rs, rd, temp);
+}
+
+static void ConvertF64x2ToUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd,
+ RegV128 temp) {
+ masm.unsignedTruncSatFloat64x2ToInt32x4(rs, rd, temp);
+}
+
+static void DemoteF64x2ToF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.convertFloat64x2ToFloat32x4(rs, rd);
+}
+
+static void PromoteF32x4ToF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.convertFloat32x4ToFloat64x2(rs, rd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void BitselectV128(MacroAssembler& masm, RegV128 rhs, RegV128 control,
+ RegV128 lhsDest, RegV128 temp) {
+ // Ideally, we would have temp=control, and we can probably get away with
+ // just doing that, but don't worry about it yet.
+ masm.bitwiseSelectSimd128(control, lhsDest, rhs, lhsDest, temp);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void BitselectV128(MacroAssembler& masm, RegV128 rhs, RegV128 control,
+ RegV128 lhsDest, RegV128 temp) {
+ // The masm interface is not great for the baseline compiler here, but it's
+ // optimal for Ion, so just work around it.
+ masm.moveSimd128(control, temp);
+ masm.bitwiseSelectSimd128(lhsDest, rhs, temp);
+ masm.moveSimd128(temp, lhsDest);
+}
+# endif
+
+# ifdef ENABLE_WASM_RELAXED_SIMD
+static void RelaxedFmaF32x4(MacroAssembler& masm, RegV128 rs1, RegV128 rs2,
+ RegV128 rsd) {
+ masm.fmaFloat32x4(rs1, rs2, rsd);
+}
+
+static void RelaxedFnmaF32x4(MacroAssembler& masm, RegV128 rs1, RegV128 rs2,
+ RegV128 rsd) {
+ masm.fnmaFloat32x4(rs1, rs2, rsd);
+}
+
+static void RelaxedFmaF64x2(MacroAssembler& masm, RegV128 rs1, RegV128 rs2,
+ RegV128 rsd) {
+ masm.fmaFloat64x2(rs1, rs2, rsd);
+}
+
+static void RelaxedFnmaF64x2(MacroAssembler& masm, RegV128 rs1, RegV128 rs2,
+ RegV128 rsd) {
+ masm.fnmaFloat64x2(rs1, rs2, rsd);
+}
+
+static void RelaxedSwizzle(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.swizzleInt8x16Relaxed(rsd, rs, rsd);
+}
+
+static void RelaxedMinF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minFloat32x4Relaxed(rs, rsd);
+}
+
+static void RelaxedMaxF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxFloat32x4Relaxed(rs, rsd);
+}
+
+static void RelaxedMinF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minFloat64x2Relaxed(rs, rsd);
+}
+
+static void RelaxedMaxF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxFloat64x2Relaxed(rs, rsd);
+}
+
+static void RelaxedConvertF32x4ToI32x4(MacroAssembler& masm, RegV128 rs,
+ RegV128 rd) {
+ masm.truncFloat32x4ToInt32x4Relaxed(rs, rd);
+}
+
+static void RelaxedConvertF32x4ToUI32x4(MacroAssembler& masm, RegV128 rs,
+ RegV128 rd) {
+ masm.unsignedTruncFloat32x4ToInt32x4Relaxed(rs, rd);
+}
+
+static void RelaxedConvertF64x2ToI32x4(MacroAssembler& masm, RegV128 rs,
+ RegV128 rd) {
+ masm.truncFloat64x2ToInt32x4Relaxed(rs, rd);
+}
+
+static void RelaxedConvertF64x2ToUI32x4(MacroAssembler& masm, RegV128 rs,
+ RegV128 rd) {
+ masm.unsignedTruncFloat64x2ToInt32x4Relaxed(rs, rd);
+}
+
+static void RelaxedQ15MulrS(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.q15MulrInt16x8Relaxed(rsd, rs, rsd);
+}
+
+static void DotI8x16I7x16S(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.dotInt8x16Int7x16(rsd, rs, rsd);
+}
+
+void BaseCompiler::emitDotI8x16I7x16AddS() {
+ RegV128 rsd = popV128();
+ RegV128 rs0, rs1;
+ pop2xV128(&rs0, &rs1);
+# if defined(JS_CODEGEN_ARM64)
+ RegV128 temp = needV128();
+ masm.dotInt8x16Int7x16ThenAdd(rs0, rs1, rsd, temp);
+ freeV128(temp);
+# else
+ masm.dotInt8x16Int7x16ThenAdd(rs0, rs1, rsd);
+# endif
+ freeV128(rs1);
+ freeV128(rs0);
+ pushV128(rsd);
+}
+# endif // ENABLE_WASM_RELAXED_SIMD
+
+void BaseCompiler::emitVectorAndNot() {
+ // We want x & ~y but the available operation is ~x & y, so reverse the
+ // operands.
+ RegV128 r, rs;
+ pop2xV128(&r, &rs);
+ masm.bitwiseNotAndSimd128(r, rs);
+ freeV128(r);
+ pushV128(rs);
+}
+
+bool BaseCompiler::emitLoadSplat(Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoadSplat(Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ loadSplat(&access);
+ return true;
+}
+
+bool BaseCompiler::emitLoadZero(Scalar::Type viewType) {
+ // LoadZero has the structure of LoadSplat, so reuse the reader.
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoadSplat(Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ loadZero(&access);
+ return true;
+}
+
+bool BaseCompiler::emitLoadExtend(Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoadExtend(&addr)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ MemoryAccessDesc access(Scalar::Int64, addr.align, addr.offset,
+ bytecodeOffset());
+ loadExtend(&access, viewType);
+ return true;
+}
+
+bool BaseCompiler::emitLoadLane(uint32_t laneSize) {
+ Nothing nothing;
+ LinearMemoryAddress<Nothing> addr;
+ uint32_t laneIndex;
+ if (!iter_.readLoadLane(laneSize, &addr, &laneIndex, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ Scalar::Type viewType;
+ switch (laneSize) {
+ case 1:
+ viewType = Scalar::Uint8;
+ break;
+ case 2:
+ viewType = Scalar::Uint16;
+ break;
+ case 4:
+ viewType = Scalar::Int32;
+ break;
+ case 8:
+ viewType = Scalar::Int64;
+ break;
+ default:
+ MOZ_CRASH("unsupported laneSize");
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ loadLane(&access, laneIndex);
+ return true;
+}
+
+bool BaseCompiler::emitStoreLane(uint32_t laneSize) {
+ Nothing nothing;
+ LinearMemoryAddress<Nothing> addr;
+ uint32_t laneIndex;
+ if (!iter_.readStoreLane(laneSize, &addr, &laneIndex, &nothing)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ Scalar::Type viewType;
+ switch (laneSize) {
+ case 1:
+ viewType = Scalar::Uint8;
+ break;
+ case 2:
+ viewType = Scalar::Uint16;
+ break;
+ case 4:
+ viewType = Scalar::Int32;
+ break;
+ case 8:
+ viewType = Scalar::Int64;
+ break;
+ default:
+ MOZ_CRASH("unsupported laneSize");
+ }
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ storeLane(&access, laneIndex);
+ return true;
+}
+
+bool BaseCompiler::emitVectorShuffle() {
+ Nothing unused_a, unused_b;
+ V128 shuffleMask;
+
+ if (!iter_.readVectorShuffle(&unused_a, &unused_b, &shuffleMask)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegV128 rd, rs;
+ pop2xV128(&rd, &rs);
+
+ masm.shuffleInt8x16(shuffleMask.bytes, rs, rd);
+
+ freeV128(rs);
+ pushV128(rd);
+
+ return true;
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
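+// SSE/AVX2 have no packed arithmetic right shift for 64-bit lanes, so shift
+// each lane individually through a scalar register.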
+bool BaseCompiler::emitVectorShiftRightI64x2() {
+ Nothing unused_a, unused_b;
+
+ if (!iter_.readVectorShift(&unused_a, &unused_b)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegI32 count = popI32RhsForShiftI64();
+ RegV128 lhsDest = popV128();
+ RegI64 tmp = needI64();
+ masm.and32(Imm32(63), count);
+ masm.extractLaneInt64x2(0, lhsDest, tmp);
+ masm.rshift64Arithmetic(count, tmp);
+ masm.replaceLaneInt64x2(0, tmp, lhsDest);
+ masm.extractLaneInt64x2(1, lhsDest, tmp);
+ masm.rshift64Arithmetic(count, tmp);
+ masm.replaceLaneInt64x2(1, tmp, lhsDest);
+ freeI64(tmp);
+ freeI32(count);
+ pushV128(lhsDest);
+
+ return true;
+}
+# endif
+#endif // ENABLE_WASM_SIMD
+
+#ifdef ENABLE_WASM_RELAXED_SIMD
+bool BaseCompiler::emitVectorLaneSelect() {
+ Nothing unused_a, unused_b, unused_c;
+
+ if (!iter_.readTernary(ValType::V128, &unused_a, &unused_b, &unused_c)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
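+ // The mask is pinned to vmm0, matching the SSE4.1 variable blends, which
+ // read their mask implicitly from xmm0.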
+ RegV128 mask = popV128(RegV128(vmm0));
+ RegV128 rhsDest = popV128();
+ RegV128 lhs = popV128();
+ masm.laneSelectSimd128(mask, lhs, rhsDest, rhsDest);
+ freeV128(lhs);
+ freeV128(mask);
+ pushV128(rhsDest);
+# elif defined(JS_CODEGEN_ARM64)
+ RegV128 maskDest = popV128();
+ RegV128 rhs = popV128();
+ RegV128 lhs = popV128();
+ masm.laneSelectSimd128(maskDest, lhs, rhs, maskDest);
+ freeV128(lhs);
+ freeV128(rhs);
+ pushV128(maskDest);
+# endif
+
+ return true;
+}
+#endif // ENABLE_WASM_RELAXED_SIMD
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// "Intrinsics" - magically imported functions for internal use.
+
+bool BaseCompiler::emitIntrinsic() {
+ const Intrinsic* intrinsic;
+
+ BaseNothingVector params;
+ if (!iter_.readIntrinsic(&intrinsic, &params)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // The final parameter of an intrinsic is implicitly the heap base
+ pushHeapBase();
+
+ // Call the intrinsic
+ return emitInstanceCall(intrinsic->signature);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Function bodies - main opcode dispatch loop.
+
+bool BaseCompiler::emitBody() {
+ AutoCreatedBy acb(masm, "(wasm)BaseCompiler::emitBody");
+
+ MOZ_ASSERT(stackMapGenerator_.framePushedAtEntryToBody.isSome());
+
+ if (!iter_.startFunction(func_.index, locals_)) {
+ return false;
+ }
+
+ initControl(controlItem(), ResultType::Empty());
+
+ for (;;) {
+ Nothing unused_a, unused_b, unused_c;
+ (void)unused_a;
+ (void)unused_b;
+ (void)unused_c;
+
+#ifdef DEBUG
+ performRegisterLeakCheck();
+ assertStackInvariants();
+#endif
+
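+// Dispatch macros: each one reads a single opcode's operands through the
+// iterator and, when the code is live, calls the corresponding emitter,
+// either a named BaseCompiler method or emitBinop/emitUnop/emitTernary with
+// the given arguments.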
+#define dispatchBinary0(doEmit, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && \
+ (deadCode_ || (doEmit(), true))
+
+#define dispatchBinary1(arg1, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && \
+ (deadCode_ || (emitBinop(arg1), true))
+
+#define dispatchBinary2(arg1, arg2, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && \
+ (deadCode_ || (emitBinop(arg1, arg2), true))
+
+#define dispatchBinary3(arg1, arg2, arg3, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && \
+ (deadCode_ || (emitBinop(arg1, arg2, arg3), true))
+
+#define dispatchUnary0(doEmit, type) \
+ iter_.readUnary(type, &unused_a) && (deadCode_ || (doEmit(), true))
+
+#define dispatchUnary1(arg1, type) \
+ iter_.readUnary(type, &unused_a) && (deadCode_ || (emitUnop(arg1), true))
+
+#define dispatchUnary2(arg1, arg2, type) \
+ iter_.readUnary(type, &unused_a) && \
+ (deadCode_ || (emitUnop(arg1, arg2), true))
+
+#define dispatchTernary0(doEmit, type) \
+ iter_.readTernary(type, &unused_a, &unused_b, &unused_c) && \
+ (deadCode_ || (doEmit(), true))
+
+#define dispatchTernary1(arg1, type) \
+ iter_.readTernary(type, &unused_a, &unused_b, &unused_c) && \
+ (deadCode_ || (emitTernary(arg1), true))
+
+#define dispatchTernary2(arg1, type) \
+ iter_.readTernary(type, &unused_a, &unused_b, &unused_c) && \
+ (deadCode_ || (emitTernaryResultLast(arg1), true))
+
+#define dispatchComparison0(doEmit, operandType, compareOp) \
+ iter_.readComparison(operandType, &unused_a, &unused_b) && \
+ (deadCode_ || (doEmit(compareOp, operandType), true))
+
+#define dispatchConversion0(doEmit, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && \
+ (deadCode_ || (doEmit(), true))
+
+#define dispatchConversion1(arg1, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && \
+ (deadCode_ || (emitUnop(arg1), true))
+
+#define dispatchConversionOOM(doEmit, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && (deadCode_ || doEmit())
+
+#define dispatchCalloutConversionOOM(doEmit, symbol, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && \
+ (deadCode_ || doEmit(symbol, inType, outType))
+
+#define dispatchIntDivCallout(doEmit, symbol, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && \
+ (deadCode_ || doEmit(symbol, type))
+
+#define dispatchVectorBinary(op) \
+ iter_.readBinary(ValType::V128, &unused_a, &unused_b) && \
+ (deadCode_ || (emitBinop(op), true))
+
+#define dispatchVectorUnary(op) \
+ iter_.readUnary(ValType::V128, &unused_a) && \
+ (deadCode_ || (emitUnop(op), true))
+
+#define dispatchVectorComparison(op, compareOp) \
+ iter_.readBinary(ValType::V128, &unused_a, &unused_b) && \
+ (deadCode_ || (emitBinop(compareOp, op), true))
+
+#define dispatchVectorVariableShift(op) \
+ iter_.readVectorShift(&unused_a, &unused_b) && \
+ (deadCode_ || (emitBinop(op), true))
+
+#define dispatchExtractLane(op, outType, laneLimit) \
+ iter_.readExtractLane(outType, laneLimit, &laneIndex, &unused_a) && \
+ (deadCode_ || (emitUnop(laneIndex, op), true))
+
+#define dispatchReplaceLane(op, inType, laneLimit) \
+ iter_.readReplaceLane(inType, laneLimit, &laneIndex, &unused_a, \
+ &unused_b) && \
+ (deadCode_ || (emitBinop(laneIndex, op), true))
+
+#define dispatchSplat(op, inType) \
+ iter_.readConversion(inType, ValType::V128, &unused_a) && \
+ (deadCode_ || (emitUnop(op), true))
+
+#define dispatchVectorReduction(op) \
+ iter_.readConversion(ValType::V128, ValType::I32, &unused_a) && \
+ (deadCode_ || (emitUnop(op), true))
+
+#ifdef DEBUG
+ // Check that the number of ref-typed entries in the operand stack matches
+ // reality.
+# define CHECK_POINTER_COUNT \
+ do { \
+ MOZ_ASSERT(countMemRefsOnStk() == stackMapGenerator_.memRefsOnStk); \
+ } while (0)
+#else
+# define CHECK_POINTER_COUNT \
+ do { \
+ } while (0)
+#endif
+
+#define CHECK(E) \
+ if (!(E)) return false
+#define NEXT() \
+ { \
+ CHECK_POINTER_COUNT; \
+ continue; \
+ }
+#define CHECK_NEXT(E) \
+ if (!(E)) return false; \
+ { \
+ CHECK_POINTER_COUNT; \
+ continue; \
+ }
+
+ // Opcodes that push more than MaxPushesPerOpcode (anything with multiple
+ // results) will perform additional reservation.
+ CHECK(stk_.reserve(stk_.length() + MaxPushesPerOpcode));
+
+ OpBytes op{};
+ CHECK(iter_.readOp(&op));
+
+ // When compilerEnv_.debugEnabled(), some operators get a breakpoint site.
+ if (compilerEnv_.debugEnabled() && op.shouldHaveBreakpoint()) {
+ if (previousBreakablePoint_ != masm.currentOffset()) {
+ // TODO: sync only the registers that can be clobbered by the exit
+ // prologue/epilogue, or disable those registers for use in the
+ // baseline compiler when compilerEnv_.debugEnabled() is set.
+ sync();
+
+ insertBreakablePoint(CallSiteDesc::Breakpoint);
+ if (!createStackMap("debug: per-insn breakpoint")) {
+ return false;
+ }
+ previousBreakablePoint_ = masm.currentOffset();
+ }
+ }
+
+ // Going below framePushedAtEntryToBody would imply that we've popped
+ // part of the frame created by beginFunction() off the machine stack.
+ MOZ_ASSERT(masm.framePushed() >=
+ stackMapGenerator_.framePushedAtEntryToBody.value());
+
+ // At this point we're definitely not generating code for a function call.
+ MOZ_ASSERT(
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.isNothing());
+
+ switch (op.b0) {
+ case uint16_t(Op::End):
+ if (!emitEnd()) {
+ return false;
+ }
+ if (iter_.controlStackEmpty()) {
+ return true;
+ }
+ NEXT();
+
+ // Control opcodes
+ case uint16_t(Op::Nop):
+ CHECK_NEXT(iter_.readNop());
+ case uint16_t(Op::Drop):
+ CHECK_NEXT(emitDrop());
+ case uint16_t(Op::Block):
+ CHECK_NEXT(emitBlock());
+ case uint16_t(Op::Loop):
+ CHECK_NEXT(emitLoop());
+ case uint16_t(Op::If):
+ CHECK_NEXT(emitIf());
+ case uint16_t(Op::Else):
+ CHECK_NEXT(emitElse());
+ case uint16_t(Op::Try):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitTry());
+ case uint16_t(Op::Catch):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitCatch());
+ case uint16_t(Op::CatchAll):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitCatchAll());
+ case uint16_t(Op::Delegate):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK(emitDelegate());
+ iter_.popDelegate();
+ NEXT();
+ case uint16_t(Op::Throw):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitThrow());
+ case uint16_t(Op::Rethrow):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitRethrow());
+ case uint16_t(Op::Br):
+ CHECK_NEXT(emitBr());
+ case uint16_t(Op::BrIf):
+ CHECK_NEXT(emitBrIf());
+ case uint16_t(Op::BrTable):
+ CHECK_NEXT(emitBrTable());
+ case uint16_t(Op::Return):
+ CHECK_NEXT(emitReturn());
+ case uint16_t(Op::Unreachable):
+ CHECK(iter_.readUnreachable());
+ if (!deadCode_) {
+ trap(Trap::Unreachable);
+ deadCode_ = true;
+ }
+ NEXT();
+
+ // Calls
+ case uint16_t(Op::Call):
+ CHECK_NEXT(emitCall());
+ case uint16_t(Op::CallIndirect):
+ CHECK_NEXT(emitCallIndirect());
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint16_t(Op::CallRef):
+ if (!moduleEnv_.functionReferencesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitCallRef());
+#endif
+
+ // Locals and globals
+ case uint16_t(Op::LocalGet):
+ CHECK_NEXT(emitGetLocal());
+ case uint16_t(Op::LocalSet):
+ CHECK_NEXT(emitSetLocal());
+ case uint16_t(Op::LocalTee):
+ CHECK_NEXT(emitTeeLocal());
+ case uint16_t(Op::GlobalGet):
+ CHECK_NEXT(emitGetGlobal());
+ case uint16_t(Op::GlobalSet):
+ CHECK_NEXT(emitSetGlobal());
+ case uint16_t(Op::TableGet):
+ CHECK_NEXT(emitTableGet());
+ case uint16_t(Op::TableSet):
+ CHECK_NEXT(emitTableSet());
+
+ // Select
+ case uint16_t(Op::SelectNumeric):
+ CHECK_NEXT(emitSelect(/*typed*/ false));
+ case uint16_t(Op::SelectTyped):
+ CHECK_NEXT(emitSelect(/*typed*/ true));
+
+ // I32
+ case uint16_t(Op::I32Const): {
+ int32_t i32;
+ CHECK(iter_.readI32Const(&i32));
+ if (!deadCode_) {
+ pushI32(i32);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::I32Add):
+ CHECK_NEXT(dispatchBinary2(AddI32, AddImmI32, ValType::I32));
+ case uint16_t(Op::I32Sub):
+ CHECK_NEXT(dispatchBinary2(SubI32, SubImmI32, ValType::I32));
+ case uint16_t(Op::I32Mul):
+ CHECK_NEXT(dispatchBinary1(MulI32, ValType::I32));
+ case uint16_t(Op::I32DivS):
+ CHECK_NEXT(dispatchBinary0(emitQuotientI32, ValType::I32));
+ case uint16_t(Op::I32DivU):
+ CHECK_NEXT(dispatchBinary0(emitQuotientU32, ValType::I32));
+ case uint16_t(Op::I32RemS):
+ CHECK_NEXT(dispatchBinary0(emitRemainderI32, ValType::I32));
+ case uint16_t(Op::I32RemU):
+ CHECK_NEXT(dispatchBinary0(emitRemainderU32, ValType::I32));
+ case uint16_t(Op::I32Eqz):
+ CHECK_NEXT(dispatchConversion0(emitEqzI32, ValType::I32, ValType::I32));
+ case uint16_t(Op::I32TruncF32S):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI32<0>, ValType::F32,
+ ValType::I32));
+ case uint16_t(Op::I32TruncF32U):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI32<TRUNC_UNSIGNED>,
+ ValType::F32, ValType::I32));
+ case uint16_t(Op::I32TruncF64S):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI32<0>, ValType::F64,
+ ValType::I32));
+ case uint16_t(Op::I32TruncF64U):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI32<TRUNC_UNSIGNED>,
+ ValType::F64, ValType::I32));
+ case uint16_t(Op::I32WrapI64):
+ CHECK_NEXT(
+ dispatchConversion1(WrapI64ToI32, ValType::I64, ValType::I32));
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK_NEXT(dispatchConversion1(ReinterpretF32AsI32, ValType::F32,
+ ValType::I32));
+ case uint16_t(Op::I32Clz):
+ CHECK_NEXT(dispatchUnary1(ClzI32, ValType::I32));
+ case uint16_t(Op::I32Ctz):
+ CHECK_NEXT(dispatchUnary1(CtzI32, ValType::I32));
+ case uint16_t(Op::I32Popcnt):
+ CHECK_NEXT(dispatchUnary2(PopcntI32, PopcntTemp, ValType::I32));
+ case uint16_t(Op::I32Or):
+ CHECK_NEXT(dispatchBinary2(OrI32, OrImmI32, ValType::I32));
+ case uint16_t(Op::I32And):
+ CHECK_NEXT(dispatchBinary2(AndI32, AndImmI32, ValType::I32));
+ case uint16_t(Op::I32Xor):
+ CHECK_NEXT(dispatchBinary2(XorI32, XorImmI32, ValType::I32));
+ case uint16_t(Op::I32Shl):
+ CHECK_NEXT(dispatchBinary3(
+ ShlI32, ShlImmI32, &BaseCompiler::popI32RhsForShift, ValType::I32));
+ case uint16_t(Op::I32ShrS):
+ CHECK_NEXT(dispatchBinary3(
+ ShrI32, ShrImmI32, &BaseCompiler::popI32RhsForShift, ValType::I32));
+ case uint16_t(Op::I32ShrU):
+ CHECK_NEXT(dispatchBinary3(ShrUI32, ShrUImmI32,
+ &BaseCompiler::popI32RhsForShift,
+ ValType::I32));
+ case uint16_t(Op::I32Load8S):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Load8U):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint8));
+ case uint16_t(Op::I32Load16S):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Load16U):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint16));
+ case uint16_t(Op::I32Load):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I32Store8):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Store16):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Store):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I32Rotr):
+ CHECK_NEXT(dispatchBinary3(RotrI32, RotrImmI32,
+ &BaseCompiler::popI32RhsForRotate,
+ ValType::I32));
+ case uint16_t(Op::I32Rotl):
+ CHECK_NEXT(dispatchBinary3(RotlI32, RotlImmI32,
+ &BaseCompiler::popI32RhsForRotate,
+ ValType::I32));
+
+ // I64
+ case uint16_t(Op::I64Const): {
+ int64_t i64;
+ CHECK(iter_.readI64Const(&i64));
+ if (!deadCode_) {
+ pushI64(i64);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::I64Add):
+ CHECK_NEXT(dispatchBinary2(AddI64, AddImmI64, ValType::I64));
+ case uint16_t(Op::I64Sub):
+ CHECK_NEXT(dispatchBinary2(SubI64, SubImmI64, ValType::I64));
+ case uint16_t(Op::I64Mul):
+ CHECK_NEXT(dispatchBinary0(emitMultiplyI64, ValType::I64));
+ case uint16_t(Op::I64DivS):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(
+ emitDivOrModI64BuiltinCall, SymbolicAddress::DivI64, ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary0(emitQuotientI64, ValType::I64));
+#endif
+ case uint16_t(Op::I64DivU):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(emitDivOrModI64BuiltinCall,
+ SymbolicAddress::UDivI64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary0(emitQuotientU64, ValType::I64));
+#endif
+ case uint16_t(Op::I64RemS):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(
+ emitDivOrModI64BuiltinCall, SymbolicAddress::ModI64, ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary0(emitRemainderI64, ValType::I64));
+#endif
+ case uint16_t(Op::I64RemU):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(emitDivOrModI64BuiltinCall,
+ SymbolicAddress::UModI64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary0(emitRemainderU64, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncF32S):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(
+ dispatchCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToInt64,
+ ValType::F32, ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI64<0>, ValType::F32,
+ ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncF32U):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToUint64, ValType::F32,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI64<TRUNC_UNSIGNED>,
+ ValType::F32, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncF64S):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(
+ dispatchCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToInt64,
+ ValType::F64, ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI64<0>, ValType::F64,
+ ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncF64U):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToUint64, ValType::F64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI64<TRUNC_UNSIGNED>,
+ ValType::F64, ValType::I64));
+#endif
+ case uint16_t(Op::I64ExtendI32S):
+ CHECK_NEXT(dispatchConversion0(emitExtendI32ToI64, ValType::I32,
+ ValType::I64));
+ case uint16_t(Op::I64ExtendI32U):
+ CHECK_NEXT(dispatchConversion0(emitExtendU32ToI64, ValType::I32,
+ ValType::I64));
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK_NEXT(dispatchConversion1(ReinterpretF64AsI64, ValType::F64,
+ ValType::I64));
+ case uint16_t(Op::I64Or):
+ CHECK_NEXT(dispatchBinary2(OrI64, OrImmI64, ValType::I64));
+ case uint16_t(Op::I64And):
+ CHECK_NEXT(dispatchBinary2(AndI64, AndImmI64, ValType::I64));
+ case uint16_t(Op::I64Xor):
+ CHECK_NEXT(dispatchBinary2(XorI64, XorImmI64, ValType::I64));
+ case uint16_t(Op::I64Shl):
+ CHECK_NEXT(dispatchBinary3(
+ ShlI64, ShlImmI64, &BaseCompiler::popI64RhsForShift, ValType::I64));
+ case uint16_t(Op::I64ShrS):
+ CHECK_NEXT(dispatchBinary3(
+ ShrI64, ShrImmI64, &BaseCompiler::popI64RhsForShift, ValType::I64));
+ case uint16_t(Op::I64ShrU):
+ CHECK_NEXT(dispatchBinary3(ShrUI64, ShrUImmI64,
+ &BaseCompiler::popI64RhsForShift,
+ ValType::I64));
+ case uint16_t(Op::I64Rotr):
+ CHECK_NEXT(dispatchBinary0(emitRotrI64, ValType::I64));
+ case uint16_t(Op::I64Rotl):
+ CHECK_NEXT(dispatchBinary0(emitRotlI64, ValType::I64));
+ case uint16_t(Op::I64Clz):
+ CHECK_NEXT(dispatchUnary1(ClzI64, ValType::I64));
+ case uint16_t(Op::I64Ctz):
+ CHECK_NEXT(dispatchUnary1(CtzI64, ValType::I64));
+ case uint16_t(Op::I64Popcnt):
+ CHECK_NEXT(dispatchUnary2(PopcntI64, PopcntTemp, ValType::I64));
+ case uint16_t(Op::I64Eqz):
+ CHECK_NEXT(dispatchConversion0(emitEqzI64, ValType::I64, ValType::I32));
+ case uint16_t(Op::I64Load8S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Load16S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Load32S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Load8U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint8));
+ case uint16_t(Op::I64Load16U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint16));
+ case uint16_t(Op::I64Load32U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint32));
+ case uint16_t(Op::I64Load):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int64));
+ case uint16_t(Op::I64Store8):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Store16):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Store32):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Store):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int64));
+
+ // F32
+ case uint16_t(Op::F32Const): {
+ float f32;
+ CHECK(iter_.readF32Const(&f32));
+ if (!deadCode_) {
+ pushF32(f32);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::F32Add):
+ CHECK_NEXT(dispatchBinary1(AddF32, ValType::F32));
+ case uint16_t(Op::F32Sub):
+ CHECK_NEXT(dispatchBinary1(SubF32, ValType::F32));
+ case uint16_t(Op::F32Mul):
+ CHECK_NEXT(dispatchBinary1(MulF32, ValType::F32));
+ case uint16_t(Op::F32Div):
+ CHECK_NEXT(dispatchBinary1(DivF32, ValType::F32));
+ case uint16_t(Op::F32Min):
+ CHECK_NEXT(dispatchBinary1(MinF32, ValType::F32));
+ case uint16_t(Op::F32Max):
+ CHECK_NEXT(dispatchBinary1(MaxF32, ValType::F32));
+ case uint16_t(Op::F32Neg):
+ CHECK_NEXT(dispatchUnary1(NegateF32, ValType::F32));
+ case uint16_t(Op::F32Abs):
+ CHECK_NEXT(dispatchUnary1(AbsF32, ValType::F32));
+ case uint16_t(Op::F32Sqrt):
+ CHECK_NEXT(dispatchUnary1(SqrtF32, ValType::F32));
+ case uint16_t(Op::F32Ceil):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::CeilF, ValType::F32));
+ case uint16_t(Op::F32Floor):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::FloorF, ValType::F32));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK_NEXT(
+ dispatchConversion1(ConvertF64ToF32, ValType::F64, ValType::F32));
+ case uint16_t(Op::F32ConvertI32S):
+ CHECK_NEXT(
+ dispatchConversion1(ConvertI32ToF32, ValType::I32, ValType::F32));
+ case uint16_t(Op::F32ConvertI32U):
+ CHECK_NEXT(
+ dispatchConversion1(ConvertU32ToF32, ValType::I32, ValType::F32));
+ case uint16_t(Op::F32ConvertI64S):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Int64ToFloat32,
+ ValType::I64, ValType::F32));
+#else
+ CHECK_NEXT(
+ dispatchConversion1(ConvertI64ToF32, ValType::I64, ValType::F32));
+#endif
+ case uint16_t(Op::F32ConvertI64U):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Uint64ToFloat32,
+ ValType::I64, ValType::F32));
+#else
+ CHECK_NEXT(dispatchConversion0(emitConvertU64ToF32, ValType::I64,
+ ValType::F32));
+#endif
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK_NEXT(dispatchConversion1(ReinterpretI32AsF32, ValType::I32,
+ ValType::F32));
+ case uint16_t(Op::F32Load):
+ CHECK_NEXT(emitLoad(ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F32Store):
+ CHECK_NEXT(emitStore(ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F32CopySign):
+ CHECK_NEXT(dispatchBinary1(CopysignF32, ValType::F32));
+ case uint16_t(Op::F32Nearest):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntF,
+ ValType::F32));
+ case uint16_t(Op::F32Trunc):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::TruncF, ValType::F32));
+
+ // F64
+ case uint16_t(Op::F64Const): {
+ double f64;
+ CHECK(iter_.readF64Const(&f64));
+ if (!deadCode_) {
+ pushF64(f64);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::F64Add):
+ CHECK_NEXT(dispatchBinary1(AddF64, ValType::F64));
+ case uint16_t(Op::F64Sub):
+ CHECK_NEXT(dispatchBinary1(SubF64, ValType::F64));
+ case uint16_t(Op::F64Mul):
+ CHECK_NEXT(dispatchBinary1(MulF64, ValType::F64));
+ case uint16_t(Op::F64Div):
+ CHECK_NEXT(dispatchBinary1(DivF64, ValType::F64));
+ case uint16_t(Op::F64Min):
+ CHECK_NEXT(dispatchBinary1(MinF64, ValType::F64));
+ case uint16_t(Op::F64Max):
+ CHECK_NEXT(dispatchBinary1(MaxF64, ValType::F64));
+ case uint16_t(Op::F64Neg):
+ CHECK_NEXT(dispatchUnary1(NegateF64, ValType::F64));
+ case uint16_t(Op::F64Abs):
+ CHECK_NEXT(dispatchUnary1(AbsF64, ValType::F64));
+ case uint16_t(Op::F64Sqrt):
+ CHECK_NEXT(dispatchUnary1(SqrtF64, ValType::F64));
+ case uint16_t(Op::F64Ceil):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::CeilD, ValType::F64));
+ case uint16_t(Op::F64Floor):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::FloorD, ValType::F64));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK_NEXT(
+ dispatchConversion1(ConvertF32ToF64, ValType::F32, ValType::F64));
+ case uint16_t(Op::F64ConvertI32S):
+ CHECK_NEXT(
+ dispatchConversion1(ConvertI32ToF64, ValType::I32, ValType::F64));
+ case uint16_t(Op::F64ConvertI32U):
+ CHECK_NEXT(
+ dispatchConversion1(ConvertU32ToF64, ValType::I32, ValType::F64));
+ case uint16_t(Op::F64ConvertI64S):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Int64ToDouble,
+ ValType::I64, ValType::F64));
+#else
+ CHECK_NEXT(
+ dispatchConversion1(ConvertI64ToF64, ValType::I64, ValType::F64));
+#endif
+ case uint16_t(Op::F64ConvertI64U):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Uint64ToDouble,
+ ValType::I64, ValType::F64));
+#else
+ CHECK_NEXT(dispatchConversion0(emitConvertU64ToF64, ValType::I64,
+ ValType::F64));
+#endif
+ case uint16_t(Op::F64Load):
+ CHECK_NEXT(emitLoad(ValType::F64, Scalar::Float64));
+ case uint16_t(Op::F64Store):
+ CHECK_NEXT(emitStore(ValType::F64, Scalar::Float64));
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK_NEXT(dispatchConversion1(ReinterpretI64AsF64, ValType::I64,
+ ValType::F64));
+ case uint16_t(Op::F64CopySign):
+ CHECK_NEXT(dispatchBinary1(CopysignF64, ValType::F64));
+ case uint16_t(Op::F64Nearest):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntD,
+ ValType::F64));
+ case uint16_t(Op::F64Trunc):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::TruncD, ValType::F64));
+
+ // Comparisons
+ case uint16_t(Op::I32Eq):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::Equal));
+ case uint16_t(Op::I32Ne):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::NotEqual));
+ case uint16_t(Op::I32LtS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::LessThan));
+ case uint16_t(Op::I32LeS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::LessThanOrEqual));
+ case uint16_t(Op::I32GtS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::GreaterThan));
+ case uint16_t(Op::I32GeS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::GreaterThanOrEqual));
+ case uint16_t(Op::I32LtU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::Below));
+ case uint16_t(Op::I32LeU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::BelowOrEqual));
+ case uint16_t(Op::I32GtU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::Above));
+ case uint16_t(Op::I32GeU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI32, ValType::I32,
+ Assembler::AboveOrEqual));
+ case uint16_t(Op::I64Eq):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::Equal));
+ case uint16_t(Op::I64Ne):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::NotEqual));
+ case uint16_t(Op::I64LtS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::LessThan));
+ case uint16_t(Op::I64LeS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::LessThanOrEqual));
+ case uint16_t(Op::I64GtS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::GreaterThan));
+ case uint16_t(Op::I64GeS):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::GreaterThanOrEqual));
+ case uint16_t(Op::I64LtU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::Below));
+ case uint16_t(Op::I64LeU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::BelowOrEqual));
+ case uint16_t(Op::I64GtU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::Above));
+ case uint16_t(Op::I64GeU):
+ CHECK_NEXT(dispatchComparison0(emitCompareI64, ValType::I64,
+ Assembler::AboveOrEqual));
+ case uint16_t(Op::F32Eq):
+ CHECK_NEXT(dispatchComparison0(emitCompareF32, ValType::F32,
+ Assembler::DoubleEqual));
+ case uint16_t(Op::F32Ne):
+ CHECK_NEXT(dispatchComparison0(emitCompareF32, ValType::F32,
+ Assembler::DoubleNotEqualOrUnordered));
+ case uint16_t(Op::F32Lt):
+ CHECK_NEXT(dispatchComparison0(emitCompareF32, ValType::F32,
+ Assembler::DoubleLessThan));
+ case uint16_t(Op::F32Le):
+ CHECK_NEXT(dispatchComparison0(emitCompareF32, ValType::F32,
+ Assembler::DoubleLessThanOrEqual));
+ case uint16_t(Op::F32Gt):
+ CHECK_NEXT(dispatchComparison0(emitCompareF32, ValType::F32,
+ Assembler::DoubleGreaterThan));
+ case uint16_t(Op::F32Ge):
+ CHECK_NEXT(dispatchComparison0(emitCompareF32, ValType::F32,
+ Assembler::DoubleGreaterThanOrEqual));
+ case uint16_t(Op::F64Eq):
+ CHECK_NEXT(dispatchComparison0(emitCompareF64, ValType::F64,
+ Assembler::DoubleEqual));
+ case uint16_t(Op::F64Ne):
+ CHECK_NEXT(dispatchComparison0(emitCompareF64, ValType::F64,
+ Assembler::DoubleNotEqualOrUnordered));
+ case uint16_t(Op::F64Lt):
+ CHECK_NEXT(dispatchComparison0(emitCompareF64, ValType::F64,
+ Assembler::DoubleLessThan));
+ case uint16_t(Op::F64Le):
+ CHECK_NEXT(dispatchComparison0(emitCompareF64, ValType::F64,
+ Assembler::DoubleLessThanOrEqual));
+ case uint16_t(Op::F64Gt):
+ CHECK_NEXT(dispatchComparison0(emitCompareF64, ValType::F64,
+ Assembler::DoubleGreaterThan));
+ case uint16_t(Op::F64Ge):
+ CHECK_NEXT(dispatchComparison0(emitCompareF64, ValType::F64,
+ Assembler::DoubleGreaterThanOrEqual));
+
+ // Sign extensions
+ case uint16_t(Op::I32Extend8S):
+ CHECK_NEXT(
+ dispatchConversion1(ExtendI32_8, ValType::I32, ValType::I32));
+ case uint16_t(Op::I32Extend16S):
+ CHECK_NEXT(
+ dispatchConversion1(ExtendI32_16, ValType::I32, ValType::I32));
+ case uint16_t(Op::I64Extend8S):
+ CHECK_NEXT(
+ dispatchConversion0(emitExtendI64_8, ValType::I64, ValType::I64));
+ case uint16_t(Op::I64Extend16S):
+ CHECK_NEXT(
+ dispatchConversion0(emitExtendI64_16, ValType::I64, ValType::I64));
+ case uint16_t(Op::I64Extend32S):
+ CHECK_NEXT(
+ dispatchConversion0(emitExtendI64_32, ValType::I64, ValType::I64));
+
+ // Memory Related
+ case uint16_t(Op::MemoryGrow):
+ CHECK_NEXT(emitMemoryGrow());
+ case uint16_t(Op::MemorySize):
+ CHECK_NEXT(emitMemorySize());
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint16_t(Op::RefAsNonNull):
+ if (!moduleEnv_.functionReferencesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitRefAsNonNull());
+ case uint16_t(Op::BrOnNull):
+ if (!moduleEnv_.functionReferencesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitBrOnNull());
+ case uint16_t(Op::BrOnNonNull):
+ if (!moduleEnv_.functionReferencesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitBrOnNonNull());
+#endif
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::RefEq):
+ if (!moduleEnv_.gcEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchComparison0(emitCompareRef, RefType::eq(),
+ Assembler::Equal));
+#endif
+ case uint16_t(Op::RefFunc):
+ CHECK_NEXT(emitRefFunc());
+ break;
+ case uint16_t(Op::RefNull):
+ CHECK_NEXT(emitRefNull());
+ break;
+ case uint16_t(Op::RefIsNull):
+ CHECK_NEXT(emitRefIsNull());
+ break;
+
+#ifdef ENABLE_WASM_GC
+ // "GC" operations
+ case uint16_t(Op::GcPrefix): {
+ if (!moduleEnv_.gcEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew):
+ CHECK_NEXT(emitStructNew());
+ case uint32_t(GcOp::StructNewDefault):
+ CHECK_NEXT(emitStructNewDefault());
+ case uint32_t(GcOp::StructGet):
+ CHECK_NEXT(emitStructGet(FieldWideningOp::None));
+ case uint32_t(GcOp::StructGetS):
+ CHECK_NEXT(emitStructGet(FieldWideningOp::Signed));
+ case uint32_t(GcOp::StructGetU):
+ CHECK_NEXT(emitStructGet(FieldWideningOp::Unsigned));
+ case uint32_t(GcOp::StructSet):
+ CHECK_NEXT(emitStructSet());
+ case uint32_t(GcOp::ArrayNew):
+ CHECK_NEXT(emitArrayNew());
+ case uint32_t(GcOp::ArrayNewFixed):
+ CHECK_NEXT(emitArrayNewFixed());
+ case uint32_t(GcOp::ArrayNewDefault):
+ CHECK_NEXT(emitArrayNewDefault());
+ case uint32_t(GcOp::ArrayNewData):
+ CHECK_NEXT(emitArrayNewData());
+ case uint32_t(GcOp::ArrayInitFromElemStaticV5):
+ case uint32_t(GcOp::ArrayNewElem):
+ CHECK_NEXT(emitArrayNewElem());
+ case uint32_t(GcOp::ArrayGet):
+ CHECK_NEXT(emitArrayGet(FieldWideningOp::None));
+ case uint32_t(GcOp::ArrayGetS):
+ CHECK_NEXT(emitArrayGet(FieldWideningOp::Signed));
+ case uint32_t(GcOp::ArrayGetU):
+ CHECK_NEXT(emitArrayGet(FieldWideningOp::Unsigned));
+ case uint32_t(GcOp::ArraySet):
+ CHECK_NEXT(emitArraySet());
+ case uint32_t(GcOp::ArrayLenWithTypeIndex):
+ CHECK_NEXT(emitArrayLen(/*decodeIgnoredTypeIndex=*/true));
+ case uint32_t(GcOp::ArrayLen):
+ CHECK_NEXT(emitArrayLen(/*decodeIgnoredTypeIndex=*/false));
+ case uint32_t(GcOp::ArrayCopy):
+ CHECK_NEXT(emitArrayCopy());
+ case uint32_t(GcOp::RefTestV5):
+ CHECK_NEXT(emitRefTestV5());
+ case uint32_t(GcOp::RefCastV5):
+ CHECK_NEXT(emitRefCastV5());
+ case uint32_t(GcOp::RefTest):
+ CHECK_NEXT(emitRefTest(/*nullable=*/false));
+ case uint32_t(GcOp::RefTestNull):
+ CHECK_NEXT(emitRefTest(/*nullable=*/true));
+ case uint32_t(GcOp::RefCast):
+ CHECK_NEXT(emitRefCast(/*nullable=*/false));
+ case uint32_t(GcOp::RefCastNull):
+ CHECK_NEXT(emitRefCast(/*nullable=*/true));
+ case uint32_t(GcOp::BrOnCast):
+ CHECK_NEXT(emitBrOnCast());
+ case uint32_t(GcOp::BrOnCastV5):
+ CHECK_NEXT(emitBrOnCastV5(/*onSuccess=*/true));
+ case uint32_t(GcOp::BrOnCastFailV5):
+ CHECK_NEXT(emitBrOnCastV5(/*onSuccess=*/false));
+ case uint32_t(GcOp::BrOnCastHeapV5):
+ CHECK_NEXT(
+ emitBrOnCastHeapV5(/*onSuccess=*/true, /*nullable=*/false));
+ case uint32_t(GcOp::BrOnCastHeapNullV5):
+ CHECK_NEXT(
+ emitBrOnCastHeapV5(/*onSuccess=*/true, /*nullable=*/true));
+ case uint32_t(GcOp::BrOnCastFailHeapV5):
+ CHECK_NEXT(
+ emitBrOnCastHeapV5(/*onSuccess=*/false, /*nullable=*/false));
+ case uint32_t(GcOp::BrOnCastFailHeapNullV5):
+ CHECK_NEXT(
+ emitBrOnCastHeapV5(/*onSuccess=*/false, /*nullable=*/true));
+ case uint32_t(GcOp::RefAsStructV5):
+ CHECK_NEXT(emitRefAsStructV5());
+ case uint32_t(GcOp::BrOnNonStructV5):
+ CHECK_NEXT(emitBrOnNonStructV5());
+          case uint32_t(GcOp::ExternInternalize):
+            CHECK_NEXT(emitExternInternalize());
+          case uint32_t(GcOp::ExternExternalize):
+            CHECK_NEXT(emitExternExternalize());
+ default:
+ break;
+ } // switch (op.b1)
+ return iter_.unrecognizedOpcode(&op);
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ // SIMD operations
+ case uint16_t(Op::SimdPrefix): {
+ uint32_t laneIndex;
+ if (!moduleEnv_.simdAvailable()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(SimdOp::I8x16ExtractLaneS):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI8x16, ValType::I32, 16));
+ case uint32_t(SimdOp::I8x16ExtractLaneU):
+ CHECK_NEXT(
+ dispatchExtractLane(ExtractLaneUI8x16, ValType::I32, 16));
+ case uint32_t(SimdOp::I16x8ExtractLaneS):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI16x8, ValType::I32, 8));
+ case uint32_t(SimdOp::I16x8ExtractLaneU):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneUI16x8, ValType::I32, 8));
+ case uint32_t(SimdOp::I32x4ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI32x4, ValType::I32, 4));
+ case uint32_t(SimdOp::I64x2ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI64x2, ValType::I64, 2));
+ case uint32_t(SimdOp::F32x4ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneF32x4, ValType::F32, 4));
+ case uint32_t(SimdOp::F64x2ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneF64x2, ValType::F64, 2));
+ case uint32_t(SimdOp::I8x16Splat):
+ CHECK_NEXT(dispatchSplat(SplatI8x16, ValType::I32));
+ case uint32_t(SimdOp::I16x8Splat):
+ CHECK_NEXT(dispatchSplat(SplatI16x8, ValType::I32));
+ case uint32_t(SimdOp::I32x4Splat):
+ CHECK_NEXT(dispatchSplat(SplatI32x4, ValType::I32));
+ case uint32_t(SimdOp::I64x2Splat):
+ CHECK_NEXT(dispatchSplat(SplatI64x2, ValType::I64));
+ case uint32_t(SimdOp::F32x4Splat):
+ CHECK_NEXT(dispatchSplat(SplatF32x4, ValType::F32));
+ case uint32_t(SimdOp::F64x2Splat):
+ CHECK_NEXT(dispatchSplat(SplatF64x2, ValType::F64));
+ case uint32_t(SimdOp::V128AnyTrue):
+ CHECK_NEXT(dispatchVectorReduction(AnyTrue));
+ case uint32_t(SimdOp::I8x16AllTrue):
+ CHECK_NEXT(dispatchVectorReduction(AllTrueI8x16));
+ case uint32_t(SimdOp::I16x8AllTrue):
+ CHECK_NEXT(dispatchVectorReduction(AllTrueI16x8));
+ case uint32_t(SimdOp::I32x4AllTrue):
+ CHECK_NEXT(dispatchVectorReduction(AllTrueI32x4));
+ case uint32_t(SimdOp::I64x2AllTrue):
+ CHECK_NEXT(dispatchVectorReduction(AllTrueI64x2));
+ case uint32_t(SimdOp::I8x16Bitmask):
+ CHECK_NEXT(dispatchVectorReduction(BitmaskI8x16));
+ case uint32_t(SimdOp::I16x8Bitmask):
+ CHECK_NEXT(dispatchVectorReduction(BitmaskI16x8));
+ case uint32_t(SimdOp::I32x4Bitmask):
+ CHECK_NEXT(dispatchVectorReduction(BitmaskI32x4));
+ case uint32_t(SimdOp::I64x2Bitmask):
+ CHECK_NEXT(dispatchVectorReduction(BitmaskI64x2));
+ case uint32_t(SimdOp::I8x16ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI8x16, ValType::I32, 16));
+ case uint32_t(SimdOp::I16x8ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI16x8, ValType::I32, 8));
+ case uint32_t(SimdOp::I32x4ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI32x4, ValType::I32, 4));
+ case uint32_t(SimdOp::I64x2ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI64x2, ValType::I64, 2));
+ case uint32_t(SimdOp::F32x4ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneF32x4, ValType::F32, 4));
+ case uint32_t(SimdOp::F64x2ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneF64x2, ValType::F64, 2));
+ case uint32_t(SimdOp::I8x16Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16, Assembler::Equal));
+ case uint32_t(SimdOp::I8x16Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16, Assembler::NotEqual));
+ case uint32_t(SimdOp::I8x16LtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16, Assembler::LessThan));
+ case uint32_t(SimdOp::I8x16LtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI8x16, Assembler::Below));
+ case uint32_t(SimdOp::I8x16GtS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI8x16, Assembler::GreaterThan));
+ case uint32_t(SimdOp::I8x16GtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI8x16, Assembler::Above));
+ case uint32_t(SimdOp::I8x16LeS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI8x16, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::I8x16LeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI8x16, Assembler::BelowOrEqual));
+ case uint32_t(SimdOp::I8x16GeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::I8x16GeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI8x16, Assembler::AboveOrEqual));
+ case uint32_t(SimdOp::I16x8Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8, Assembler::Equal));
+ case uint32_t(SimdOp::I16x8Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8, Assembler::NotEqual));
+ case uint32_t(SimdOp::I16x8LtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8, Assembler::LessThan));
+ case uint32_t(SimdOp::I16x8LtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI16x8, Assembler::Below));
+ case uint32_t(SimdOp::I16x8GtS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI16x8, Assembler::GreaterThan));
+ case uint32_t(SimdOp::I16x8GtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI16x8, Assembler::Above));
+ case uint32_t(SimdOp::I16x8LeS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI16x8, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::I16x8LeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI16x8, Assembler::BelowOrEqual));
+ case uint32_t(SimdOp::I16x8GeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::I16x8GeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI16x8, Assembler::AboveOrEqual));
+ case uint32_t(SimdOp::I32x4Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4, Assembler::Equal));
+ case uint32_t(SimdOp::I32x4Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4, Assembler::NotEqual));
+ case uint32_t(SimdOp::I32x4LtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4, Assembler::LessThan));
+ case uint32_t(SimdOp::I32x4LtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI32x4, Assembler::Below));
+ case uint32_t(SimdOp::I32x4GtS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI32x4, Assembler::GreaterThan));
+ case uint32_t(SimdOp::I32x4GtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI32x4, Assembler::Above));
+ case uint32_t(SimdOp::I32x4LeS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI32x4, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::I32x4LeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI32x4, Assembler::BelowOrEqual));
+ case uint32_t(SimdOp::I32x4GeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::I32x4GeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI32x4, Assembler::AboveOrEqual));
+ case uint32_t(SimdOp::I64x2Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpI64x2ForEquality,
+ Assembler::Equal));
+ case uint32_t(SimdOp::I64x2Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpI64x2ForEquality,
+ Assembler::NotEqual));
+ case uint32_t(SimdOp::I64x2LtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI64x2ForOrdering,
+ Assembler::LessThan));
+ case uint32_t(SimdOp::I64x2GtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI64x2ForOrdering,
+ Assembler::GreaterThan));
+ case uint32_t(SimdOp::I64x2LeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI64x2ForOrdering,
+ Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::I64x2GeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI64x2ForOrdering,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::F32x4Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4, Assembler::Equal));
+ case uint32_t(SimdOp::F32x4Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4, Assembler::NotEqual));
+ case uint32_t(SimdOp::F32x4Lt):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4, Assembler::LessThan));
+ case uint32_t(SimdOp::F32x4Gt):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF32x4, Assembler::GreaterThan));
+ case uint32_t(SimdOp::F32x4Le):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF32x4, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::F32x4Ge):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::F64x2Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2, Assembler::Equal));
+ case uint32_t(SimdOp::F64x2Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2, Assembler::NotEqual));
+ case uint32_t(SimdOp::F64x2Lt):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2, Assembler::LessThan));
+ case uint32_t(SimdOp::F64x2Gt):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF64x2, Assembler::GreaterThan));
+ case uint32_t(SimdOp::F64x2Le):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF64x2, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::F64x2Ge):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::V128And):
+ CHECK_NEXT(dispatchVectorBinary(AndV128));
+ case uint32_t(SimdOp::V128Or):
+ CHECK_NEXT(dispatchVectorBinary(OrV128));
+ case uint32_t(SimdOp::V128Xor):
+ CHECK_NEXT(dispatchVectorBinary(XorV128));
+ case uint32_t(SimdOp::V128AndNot):
+ CHECK_NEXT(dispatchBinary0(emitVectorAndNot, ValType::V128));
+ case uint32_t(SimdOp::I8x16AvgrU):
+ CHECK_NEXT(dispatchVectorBinary(AverageUI8x16));
+ case uint32_t(SimdOp::I16x8AvgrU):
+ CHECK_NEXT(dispatchVectorBinary(AverageUI16x8));
+ case uint32_t(SimdOp::I8x16Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI8x16));
+ case uint32_t(SimdOp::I8x16AddSatS):
+ CHECK_NEXT(dispatchVectorBinary(AddSatI8x16));
+ case uint32_t(SimdOp::I8x16AddSatU):
+ CHECK_NEXT(dispatchVectorBinary(AddSatUI8x16));
+ case uint32_t(SimdOp::I8x16Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI8x16));
+ case uint32_t(SimdOp::I8x16SubSatS):
+ CHECK_NEXT(dispatchVectorBinary(SubSatI8x16));
+ case uint32_t(SimdOp::I8x16SubSatU):
+ CHECK_NEXT(dispatchVectorBinary(SubSatUI8x16));
+ case uint32_t(SimdOp::I8x16MinS):
+ CHECK_NEXT(dispatchVectorBinary(MinI8x16));
+ case uint32_t(SimdOp::I8x16MinU):
+ CHECK_NEXT(dispatchVectorBinary(MinUI8x16));
+ case uint32_t(SimdOp::I8x16MaxS):
+ CHECK_NEXT(dispatchVectorBinary(MaxI8x16));
+ case uint32_t(SimdOp::I8x16MaxU):
+ CHECK_NEXT(dispatchVectorBinary(MaxUI8x16));
+ case uint32_t(SimdOp::I16x8Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI16x8));
+ case uint32_t(SimdOp::I16x8AddSatS):
+ CHECK_NEXT(dispatchVectorBinary(AddSatI16x8));
+ case uint32_t(SimdOp::I16x8AddSatU):
+ CHECK_NEXT(dispatchVectorBinary(AddSatUI16x8));
+ case uint32_t(SimdOp::I16x8Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI16x8));
+ case uint32_t(SimdOp::I16x8SubSatS):
+ CHECK_NEXT(dispatchVectorBinary(SubSatI16x8));
+ case uint32_t(SimdOp::I16x8SubSatU):
+ CHECK_NEXT(dispatchVectorBinary(SubSatUI16x8));
+ case uint32_t(SimdOp::I16x8Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulI16x8));
+ case uint32_t(SimdOp::I16x8MinS):
+ CHECK_NEXT(dispatchVectorBinary(MinI16x8));
+ case uint32_t(SimdOp::I16x8MinU):
+ CHECK_NEXT(dispatchVectorBinary(MinUI16x8));
+ case uint32_t(SimdOp::I16x8MaxS):
+ CHECK_NEXT(dispatchVectorBinary(MaxI16x8));
+ case uint32_t(SimdOp::I16x8MaxU):
+ CHECK_NEXT(dispatchVectorBinary(MaxUI16x8));
+ case uint32_t(SimdOp::I32x4Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI32x4));
+ case uint32_t(SimdOp::I32x4Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI32x4));
+ case uint32_t(SimdOp::I32x4Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulI32x4));
+ case uint32_t(SimdOp::I32x4MinS):
+ CHECK_NEXT(dispatchVectorBinary(MinI32x4));
+ case uint32_t(SimdOp::I32x4MinU):
+ CHECK_NEXT(dispatchVectorBinary(MinUI32x4));
+ case uint32_t(SimdOp::I32x4MaxS):
+ CHECK_NEXT(dispatchVectorBinary(MaxI32x4));
+ case uint32_t(SimdOp::I32x4MaxU):
+ CHECK_NEXT(dispatchVectorBinary(MaxUI32x4));
+ case uint32_t(SimdOp::I64x2Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI64x2));
+ case uint32_t(SimdOp::I64x2Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI64x2));
+ case uint32_t(SimdOp::I64x2Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulI64x2));
+ case uint32_t(SimdOp::F32x4Add):
+ CHECK_NEXT(dispatchVectorBinary(AddF32x4));
+ case uint32_t(SimdOp::F32x4Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubF32x4));
+ case uint32_t(SimdOp::F32x4Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulF32x4));
+ case uint32_t(SimdOp::F32x4Div):
+ CHECK_NEXT(dispatchVectorBinary(DivF32x4));
+ case uint32_t(SimdOp::F32x4Min):
+ CHECK_NEXT(dispatchVectorBinary(MinF32x4));
+ case uint32_t(SimdOp::F32x4Max):
+ CHECK_NEXT(dispatchVectorBinary(MaxF32x4));
+ case uint32_t(SimdOp::F64x2Add):
+ CHECK_NEXT(dispatchVectorBinary(AddF64x2));
+ case uint32_t(SimdOp::F64x2Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubF64x2));
+ case uint32_t(SimdOp::F64x2Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulF64x2));
+ case uint32_t(SimdOp::F64x2Div):
+ CHECK_NEXT(dispatchVectorBinary(DivF64x2));
+ case uint32_t(SimdOp::F64x2Min):
+ CHECK_NEXT(dispatchVectorBinary(MinF64x2));
+ case uint32_t(SimdOp::F64x2Max):
+ CHECK_NEXT(dispatchVectorBinary(MaxF64x2));
+ case uint32_t(SimdOp::I8x16NarrowI16x8S):
+ CHECK_NEXT(dispatchVectorBinary(NarrowI16x8));
+ case uint32_t(SimdOp::I8x16NarrowI16x8U):
+ CHECK_NEXT(dispatchVectorBinary(NarrowUI16x8));
+ case uint32_t(SimdOp::I16x8NarrowI32x4S):
+ CHECK_NEXT(dispatchVectorBinary(NarrowI32x4));
+ case uint32_t(SimdOp::I16x8NarrowI32x4U):
+ CHECK_NEXT(dispatchVectorBinary(NarrowUI32x4));
+ case uint32_t(SimdOp::I8x16Swizzle):
+ CHECK_NEXT(dispatchVectorBinary(Swizzle));
+ case uint32_t(SimdOp::F32x4PMax):
+ CHECK_NEXT(dispatchVectorBinary(PMaxF32x4));
+ case uint32_t(SimdOp::F32x4PMin):
+ CHECK_NEXT(dispatchVectorBinary(PMinF32x4));
+ case uint32_t(SimdOp::F64x2PMax):
+ CHECK_NEXT(dispatchVectorBinary(PMaxF64x2));
+ case uint32_t(SimdOp::F64x2PMin):
+ CHECK_NEXT(dispatchVectorBinary(PMinF64x2));
+ case uint32_t(SimdOp::I32x4DotI16x8S):
+ CHECK_NEXT(dispatchVectorBinary(DotI16x8));
+ case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulLowI8x16));
+ case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulHighI8x16));
+ case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulLowUI8x16));
+ case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulHighUI8x16));
+ case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulLowI16x8));
+ case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulHighI16x8));
+ case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulLowUI16x8));
+ case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulHighUI16x8));
+ case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulLowI32x4));
+ case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulHighI32x4));
+ case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulLowUI32x4));
+ case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
+ CHECK_NEXT(dispatchVectorBinary(ExtMulHighUI32x4));
+ case uint32_t(SimdOp::I16x8Q15MulrSatS):
+ CHECK_NEXT(dispatchVectorBinary(Q15MulrSatS));
+ case uint32_t(SimdOp::I8x16Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI8x16));
+ case uint32_t(SimdOp::I16x8Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI16x8));
+ case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowI8x16));
+ case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighI8x16));
+ case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowUI8x16));
+ case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighUI8x16));
+ case uint32_t(SimdOp::I32x4Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI32x4));
+ case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowI16x8));
+ case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighI16x8));
+ case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowUI16x8));
+ case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighUI16x8));
+ case uint32_t(SimdOp::I32x4TruncSatF32x4S):
+ CHECK_NEXT(dispatchVectorUnary(ConvertF32x4ToI32x4));
+ case uint32_t(SimdOp::I32x4TruncSatF32x4U):
+ CHECK_NEXT(dispatchVectorUnary(ConvertF32x4ToUI32x4));
+ case uint32_t(SimdOp::I64x2Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI64x2));
+ case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowI32x4));
+ case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighI32x4));
+ case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowUI32x4));
+ case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighUI32x4));
+ case uint32_t(SimdOp::F32x4Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsF32x4));
+ case uint32_t(SimdOp::F32x4Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegF32x4));
+ case uint32_t(SimdOp::F32x4Sqrt):
+ CHECK_NEXT(dispatchVectorUnary(SqrtF32x4));
+ case uint32_t(SimdOp::F32x4ConvertI32x4S):
+ CHECK_NEXT(dispatchVectorUnary(ConvertI32x4ToF32x4));
+ case uint32_t(SimdOp::F32x4ConvertI32x4U):
+ CHECK_NEXT(dispatchVectorUnary(ConvertUI32x4ToF32x4));
+ case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
+ CHECK_NEXT(dispatchVectorUnary(DemoteF64x2ToF32x4));
+ case uint32_t(SimdOp::F64x2PromoteLowF32x4):
+ CHECK_NEXT(dispatchVectorUnary(PromoteF32x4ToF64x2));
+ case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
+ CHECK_NEXT(dispatchVectorUnary(ConvertI32x4ToF64x2));
+ case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
+ CHECK_NEXT(dispatchVectorUnary(ConvertUI32x4ToF64x2));
+ case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
+ CHECK_NEXT(dispatchVectorUnary(ConvertF64x2ToI32x4));
+ case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
+ CHECK_NEXT(dispatchVectorUnary(ConvertF64x2ToUI32x4));
+ case uint32_t(SimdOp::F64x2Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsF64x2));
+ case uint32_t(SimdOp::F64x2Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegF64x2));
+ case uint32_t(SimdOp::F64x2Sqrt):
+ CHECK_NEXT(dispatchVectorUnary(SqrtF64x2));
+ case uint32_t(SimdOp::V128Not):
+ CHECK_NEXT(dispatchVectorUnary(NotV128));
+ case uint32_t(SimdOp::I8x16Popcnt):
+ CHECK_NEXT(dispatchVectorUnary(PopcntI8x16));
+ case uint32_t(SimdOp::I8x16Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsI8x16));
+ case uint32_t(SimdOp::I16x8Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsI16x8));
+ case uint32_t(SimdOp::I32x4Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsI32x4));
+ case uint32_t(SimdOp::I64x2Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsI64x2));
+ case uint32_t(SimdOp::F32x4Ceil):
+ CHECK_NEXT(dispatchVectorUnary(CeilF32x4));
+ case uint32_t(SimdOp::F32x4Floor):
+ CHECK_NEXT(dispatchVectorUnary(FloorF32x4));
+ case uint32_t(SimdOp::F32x4Trunc):
+ CHECK_NEXT(dispatchVectorUnary(TruncF32x4));
+ case uint32_t(SimdOp::F32x4Nearest):
+ CHECK_NEXT(dispatchVectorUnary(NearestF32x4));
+ case uint32_t(SimdOp::F64x2Ceil):
+ CHECK_NEXT(dispatchVectorUnary(CeilF64x2));
+ case uint32_t(SimdOp::F64x2Floor):
+ CHECK_NEXT(dispatchVectorUnary(FloorF64x2));
+ case uint32_t(SimdOp::F64x2Trunc):
+ CHECK_NEXT(dispatchVectorUnary(TruncF64x2));
+ case uint32_t(SimdOp::F64x2Nearest):
+ CHECK_NEXT(dispatchVectorUnary(NearestF64x2));
+ case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
+ CHECK_NEXT(dispatchVectorUnary(ExtAddPairwiseI8x16));
+ case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
+ CHECK_NEXT(dispatchVectorUnary(ExtAddPairwiseUI8x16));
+ case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
+ CHECK_NEXT(dispatchVectorUnary(ExtAddPairwiseI16x8));
+ case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
+ CHECK_NEXT(dispatchVectorUnary(ExtAddPairwiseUI16x8));
+ case uint32_t(SimdOp::I8x16Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI8x16));
+ case uint32_t(SimdOp::I8x16ShrS):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightI8x16));
+ case uint32_t(SimdOp::I8x16ShrU):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightUI8x16));
+ case uint32_t(SimdOp::I16x8Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI16x8));
+ case uint32_t(SimdOp::I16x8ShrS):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightI16x8));
+ case uint32_t(SimdOp::I16x8ShrU):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightUI16x8));
+ case uint32_t(SimdOp::I32x4Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI32x4));
+ case uint32_t(SimdOp::I32x4ShrS):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightI32x4));
+ case uint32_t(SimdOp::I32x4ShrU):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightUI32x4));
+ case uint32_t(SimdOp::I64x2Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI64x2));
+ case uint32_t(SimdOp::I64x2ShrS):
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ CHECK_NEXT(emitVectorShiftRightI64x2());
+# else
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightI64x2));
+# endif
+ case uint32_t(SimdOp::I64x2ShrU):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightUI64x2));
+ case uint32_t(SimdOp::V128Bitselect):
+ CHECK_NEXT(dispatchTernary1(BitselectV128, ValType::V128));
+ case uint32_t(SimdOp::I8x16Shuffle):
+ CHECK_NEXT(emitVectorShuffle());
+ case uint32_t(SimdOp::V128Const): {
+ V128 v128;
+ CHECK(iter_.readV128Const(&v128));
+ if (!deadCode_) {
+ pushV128(v128);
+ }
+ NEXT();
+ }
+ case uint32_t(SimdOp::V128Load):
+ CHECK_NEXT(emitLoad(ValType::V128, Scalar::Simd128));
+ case uint32_t(SimdOp::V128Load8Splat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Uint8));
+ case uint32_t(SimdOp::V128Load16Splat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Uint16));
+ case uint32_t(SimdOp::V128Load32Splat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Uint32));
+ case uint32_t(SimdOp::V128Load64Splat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Int64));
+ case uint32_t(SimdOp::V128Load8x8S):
+ CHECK_NEXT(emitLoadExtend(Scalar::Int8));
+ case uint32_t(SimdOp::V128Load8x8U):
+ CHECK_NEXT(emitLoadExtend(Scalar::Uint8));
+ case uint32_t(SimdOp::V128Load16x4S):
+ CHECK_NEXT(emitLoadExtend(Scalar::Int16));
+ case uint32_t(SimdOp::V128Load16x4U):
+ CHECK_NEXT(emitLoadExtend(Scalar::Uint16));
+ case uint32_t(SimdOp::V128Load32x2S):
+ CHECK_NEXT(emitLoadExtend(Scalar::Int32));
+ case uint32_t(SimdOp::V128Load32x2U):
+ CHECK_NEXT(emitLoadExtend(Scalar::Uint32));
+ case uint32_t(SimdOp::V128Load32Zero):
+ CHECK_NEXT(emitLoadZero(Scalar::Float32));
+ case uint32_t(SimdOp::V128Load64Zero):
+ CHECK_NEXT(emitLoadZero(Scalar::Float64));
+ case uint32_t(SimdOp::V128Store):
+ CHECK_NEXT(emitStore(ValType::V128, Scalar::Simd128));
+ case uint32_t(SimdOp::V128Load8Lane):
+ CHECK_NEXT(emitLoadLane(1));
+ case uint32_t(SimdOp::V128Load16Lane):
+ CHECK_NEXT(emitLoadLane(2));
+ case uint32_t(SimdOp::V128Load32Lane):
+ CHECK_NEXT(emitLoadLane(4));
+ case uint32_t(SimdOp::V128Load64Lane):
+ CHECK_NEXT(emitLoadLane(8));
+ case uint32_t(SimdOp::V128Store8Lane):
+ CHECK_NEXT(emitStoreLane(1));
+ case uint32_t(SimdOp::V128Store16Lane):
+ CHECK_NEXT(emitStoreLane(2));
+ case uint32_t(SimdOp::V128Store32Lane):
+ CHECK_NEXT(emitStoreLane(4));
+ case uint32_t(SimdOp::V128Store64Lane):
+ CHECK_NEXT(emitStoreLane(8));
+# ifdef ENABLE_WASM_RELAXED_SIMD
+ case uint32_t(SimdOp::F32x4RelaxedFma):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchTernary2(RelaxedFmaF32x4, ValType::V128));
+ case uint32_t(SimdOp::F32x4RelaxedFnma):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchTernary2(RelaxedFnmaF32x4, ValType::V128));
+ case uint32_t(SimdOp::F64x2RelaxedFma):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchTernary2(RelaxedFmaF64x2, ValType::V128));
+ case uint32_t(SimdOp::F64x2RelaxedFnma):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchTernary2(RelaxedFnmaF64x2, ValType::V128));
+ break;
+ case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
+ case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
+ case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
+ case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitVectorLaneSelect());
+ case uint32_t(SimdOp::F32x4RelaxedMin):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorBinary(RelaxedMinF32x4));
+ case uint32_t(SimdOp::F32x4RelaxedMax):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorBinary(RelaxedMaxF32x4));
+ case uint32_t(SimdOp::F64x2RelaxedMin):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorBinary(RelaxedMinF64x2));
+ case uint32_t(SimdOp::F64x2RelaxedMax):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorBinary(RelaxedMaxF64x2));
+ case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorUnary(RelaxedConvertF32x4ToI32x4));
+ case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorUnary(RelaxedConvertF32x4ToUI32x4));
+ case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorUnary(RelaxedConvertF64x2ToI32x4));
+ case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorUnary(RelaxedConvertF64x2ToUI32x4));
+ case uint32_t(SimdOp::I8x16RelaxedSwizzle):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorBinary(RelaxedSwizzle));
+ case uint32_t(SimdOp::I16x8RelaxedQ15MulrS):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorBinary(RelaxedQ15MulrS));
+ case uint32_t(SimdOp::I16x8DotI8x16I7x16S):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchVectorBinary(DotI8x16I7x16S));
+ case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS):
+ if (!moduleEnv_.v128RelaxedEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchTernary0(emitDotI8x16I7x16AddS, ValType::V128));
+# endif
+ default:
+ break;
+ } // switch (op.b1)
+ return iter_.unrecognizedOpcode(&op);
+ }
+#endif // ENABLE_WASM_SIMD
+
+ // "Miscellaneous" operations
+ case uint16_t(Op::MiscPrefix): {
+ switch (op.b1) {
+ case uint32_t(MiscOp::I32TruncSatF32S):
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF32ToI32<TRUNC_SATURATING>,
+ ValType::F32, ValType::I32));
+ case uint32_t(MiscOp::I32TruncSatF32U):
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF32ToI32<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F32, ValType::I32));
+ case uint32_t(MiscOp::I32TruncSatF64S):
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF64ToI32<TRUNC_SATURATING>,
+ ValType::F64, ValType::I32));
+ case uint32_t(MiscOp::I32TruncSatF64U):
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF64ToI32<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F64, ValType::I32));
+ case uint32_t(MiscOp::I64TruncSatF32S):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToInt64, ValType::F32,
+ ValType::I64));
+#else
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF32ToI64<TRUNC_SATURATING>,
+ ValType::F32, ValType::I64));
+#endif
+ case uint32_t(MiscOp::I64TruncSatF32U):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToUint64, ValType::F32,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF32ToI64<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F32, ValType::I64));
+#endif
+ case uint32_t(MiscOp::I64TruncSatF64S):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToInt64, ValType::F64,
+ ValType::I64));
+#else
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF64ToI64<TRUNC_SATURATING>,
+ ValType::F64, ValType::I64));
+#endif
+ case uint32_t(MiscOp::I64TruncSatF64U):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToUint64, ValType::F64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF64ToI64<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F64, ValType::I64));
+#endif
+ case uint32_t(MiscOp::MemoryCopy):
+ CHECK_NEXT(emitMemCopy());
+ case uint32_t(MiscOp::DataDrop):
+ CHECK_NEXT(emitDataOrElemDrop(/*isData=*/true));
+ case uint32_t(MiscOp::MemoryFill):
+ CHECK_NEXT(emitMemFill());
+#ifdef ENABLE_WASM_MEMORY_CONTROL
+ case uint32_t(MiscOp::MemoryDiscard): {
+ if (!moduleEnv_.memoryControlEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitMemDiscard());
+ }
+#endif
+ case uint32_t(MiscOp::MemoryInit):
+ CHECK_NEXT(emitMemInit());
+ case uint32_t(MiscOp::TableCopy):
+ CHECK_NEXT(emitTableCopy());
+ case uint32_t(MiscOp::ElemDrop):
+ CHECK_NEXT(emitDataOrElemDrop(/*isData=*/false));
+ case uint32_t(MiscOp::TableInit):
+ CHECK_NEXT(emitTableInit());
+ case uint32_t(MiscOp::TableFill):
+ CHECK_NEXT(emitTableFill());
+ case uint32_t(MiscOp::TableGrow):
+ CHECK_NEXT(emitTableGrow());
+ case uint32_t(MiscOp::TableSize):
+ CHECK_NEXT(emitTableSize());
+ default:
+ break;
+ } // switch (op.b1)
+ return iter_.unrecognizedOpcode(&op);
+ }
+
+ // Thread operations
+ case uint16_t(Op::ThreadPrefix): {
+ // Though thread ops can be used on nonshared memories, we make them
+ // unavailable if shared memory has been disabled in the prefs, for
+        // maximum predictability, safety, and consistency with JS.
+ if (moduleEnv_.sharedMemoryEnabled() == Shareable::False) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(ThreadOp::Wake):
+ CHECK_NEXT(emitWake());
+
+ case uint32_t(ThreadOp::I32Wait):
+ CHECK_NEXT(emitWait(ValType::I32, 4));
+ case uint32_t(ThreadOp::I64Wait):
+ CHECK_NEXT(emitWait(ValType::I64, 8));
+ case uint32_t(ThreadOp::Fence):
+ CHECK_NEXT(emitFence());
+
+ case uint32_t(ThreadOp::I32AtomicLoad):
+ CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicLoad):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicLoad8U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicLoad16U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad8U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicLoad16U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad32U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicStore):
+ CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicStore):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicStore8U):
+ CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicStore16U):
+ CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore8U):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicStore16U):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore32U):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicAdd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAddOp));
+
+ case uint32_t(ThreadOp::I32AtomicSub):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchSubOp));
+
+ case uint32_t(ThreadOp::I32AtomicAnd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAndOp));
+
+ case uint32_t(ThreadOp::I32AtomicOr):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchOrOp));
+
+ case uint32_t(ThreadOp::I32AtomicXor):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchXorOp));
+
+ case uint32_t(ThreadOp::I32AtomicXchg):
+ CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicXchg):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicXchg8U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicXchg16U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg8U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicXchg16U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg32U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicCmpXchg):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint32));
+
+ default:
+ return iter_.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ // asm.js and other private operations
+ case uint16_t(Op::MozPrefix): {
+ if (op.b1 != uint32_t(MozOp::Intrinsic) ||
+ !moduleEnv_.intrinsicsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ // private intrinsic operations
+ CHECK_NEXT(emitIntrinsic());
+ }
+
+ default:
+ return iter_.unrecognizedOpcode(&op);
+ }
+
+#undef CHECK
+#undef NEXT
+#undef CHECK_NEXT
+#undef CHECK_POINTER_COUNT
+#undef dispatchBinary0
+#undef dispatchBinary1
+#undef dispatchBinary2
+#undef dispatchBinary3
+#undef dispatchUnary0
+#undef dispatchUnary1
+#undef dispatchUnary2
+#undef dispatchComparison0
+#undef dispatchConversion0
+#undef dispatchConversion1
+#undef dispatchConversionOOM
+#undef dispatchCalloutConversionOOM
+#undef dispatchIntDivCallout
+#undef dispatchVectorBinary
+#undef dispatchVectorUnary
+#undef dispatchVectorComparison
+#undef dispatchExtractLane
+#undef dispatchReplaceLane
+#undef dispatchSplat
+#undef dispatchVectorReduction
+
+ MOZ_CRASH("unreachable");
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Various helpers.
+
+void BaseCompiler::assertResultRegistersAvailable(ResultType type) {
+#ifdef DEBUG
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ if (!result.inRegister()) {
+ return;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ MOZ_ASSERT(isAvailableI32(RegI32(result.gpr())));
+ break;
+ case ValType::I64:
+ MOZ_ASSERT(isAvailableI64(RegI64(result.gpr64())));
+ break;
+ case ValType::V128:
+# ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(isAvailableV128(RegV128(result.fpr())));
+ break;
+# else
+ MOZ_CRASH("No SIMD support");
+# endif
+ case ValType::F32:
+ MOZ_ASSERT(isAvailableF32(RegF32(result.fpr())));
+ break;
+ case ValType::F64:
+ MOZ_ASSERT(isAvailableF64(RegF64(result.fpr())));
+ break;
+ case ValType::Ref:
+ MOZ_ASSERT(isAvailableRef(RegRef(result.gpr())));
+ break;
+ }
+ }
+#endif
+}
+
+void BaseCompiler::saveTempPtr(const RegPtr& r) {
+ MOZ_ASSERT(!ra.isAvailablePtr(r));
+ fr.pushGPR(r);
+ ra.freePtr(r);
+ MOZ_ASSERT(ra.isAvailablePtr(r));
+}
+
+void BaseCompiler::restoreTempPtr(const RegPtr& r) {
+ MOZ_ASSERT(ra.isAvailablePtr(r));
+ ra.needPtr(r);
+ fr.popGPR(r);
+ MOZ_ASSERT(!ra.isAvailablePtr(r));
+}
+
+#ifdef DEBUG
+void BaseCompiler::performRegisterLeakCheck() {
+ BaseRegAlloc::LeakCheck check(ra);
+ for (auto& item : stk_) {
+ switch (item.kind_) {
+ case Stk::RegisterI32:
+ check.addKnownI32(item.i32reg());
+ break;
+ case Stk::RegisterI64:
+ check.addKnownI64(item.i64reg());
+ break;
+ case Stk::RegisterF32:
+ check.addKnownF32(item.f32reg());
+ break;
+ case Stk::RegisterF64:
+ check.addKnownF64(item.f64reg());
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case Stk::RegisterV128:
+ check.addKnownV128(item.v128reg());
+ break;
+# endif
+ case Stk::RegisterRef:
+ check.addKnownRef(item.refReg());
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void BaseCompiler::assertStackInvariants() const {
+ if (deadCode_) {
+ // Nonlocal control flow can pass values in stack locations in a way that
+ // isn't accounted for by the value stack. In dead code, which occurs
+    // after unconditional nonlocal control flow, there is no invariant to
+ // assert.
+ return;
+ }
+ size_t size = 0;
+ for (const Stk& v : stk_) {
+ switch (v.kind()) {
+ case Stk::MemRef:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI32:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI64:
+ size += BaseStackFrame::StackSizeOfInt64;
+ break;
+ case Stk::MemF64:
+ size += BaseStackFrame::StackSizeOfDouble;
+ break;
+ case Stk::MemF32:
+ size += BaseStackFrame::StackSizeOfFloat;
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ size += BaseStackFrame::StackSizeOfV128;
+ break;
+# endif
+ default:
+ MOZ_ASSERT(!v.isMem());
+ break;
+ }
+ }
+ MOZ_ASSERT(size == fr.dynamicHeight());
+}
+
+void BaseCompiler::showStack(const char* who) const {
+ fprintf(stderr, "Stack at %s {{\n", who);
+ size_t n = 0;
+ for (const Stk& elem : stk_) {
+ fprintf(stderr, " [%zu] ", n++);
+ elem.showStackElem();
+ fprintf(stderr, "\n");
+ }
+ fprintf(stderr, "}}\n");
+}
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Main compilation logic.
+
+bool BaseCompiler::emitFunction() {
+ AutoCreatedBy acb(masm, "(wasm)BaseCompiler::emitFunction");
+
+ if (!beginFunction()) {
+ return false;
+ }
+
+ if (!emitBody()) {
+ return false;
+ }
+
+ if (!endFunction()) {
+ return false;
+ }
+
+ return true;
+}
+
+BaseCompiler::BaseCompiler(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ const FuncCompileInput& func,
+ const ValTypeVector& locals,
+ const RegisterOffsets& trapExitLayout,
+ size_t trapExitLayoutNumWords, Decoder& decoder,
+ StkVector& stkSource, TempAllocator* alloc,
+ MacroAssembler* masm, StackMaps* stackMaps)
+ : // Environment
+ moduleEnv_(moduleEnv),
+ compilerEnv_(compilerEnv),
+ func_(func),
+ locals_(locals),
+ previousBreakablePoint_(UINT32_MAX),
+ stkSource_(stkSource),
+ // Output-only data structures
+ alloc_(alloc->fallible()),
+ masm(*masm),
+ // Compilation state
+ decoder_(decoder),
+ iter_(moduleEnv, decoder),
+ fr(*masm),
+ stackMapGenerator_(stackMaps, trapExitLayout, trapExitLayoutNumWords,
+ *masm),
+ deadCode_(false),
+ bceSafe_(0),
+ latentOp_(LatentOp::None),
+ latentType_(ValType::I32),
+ latentIntCmp_(Assembler::Equal),
+ latentDoubleCmp_(Assembler::DoubleEqual) {
+ // Our caller, BaselineCompileFunctions, will lend us the vector contents to
+ // use for the eval stack. To get hold of those contents, we'll temporarily
+  // install an empty one in its place.
+ MOZ_ASSERT(stk_.empty());
+ stk_.swap(stkSource_);
+
+ // Assuming that previously processed wasm functions are well formed, the
+  // eval stack should now be empty. But empty it anyway; any non-emptiness
+ // at this point will cause chaos.
+ stk_.clear();
+}
+
+BaseCompiler::~BaseCompiler() {
+ stk_.swap(stkSource_);
+ // We've returned the eval stack vector contents to our caller,
+ // BaselineCompileFunctions. We expect the vector we get in return to be
+ // empty since that's what we swapped for the stack vector in our
+ // constructor.
+ MOZ_ASSERT(stk_.empty());
+}
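+
+// Illustrative summary of the borrowed-stack pattern set up by the
+// constructor and destructor above. This is only a condensed sketch of what
+// BaselineCompileFunctions (later in this file) actually does; argument
+// lists are elided:
+//
+//   StkVector stk;                    // caller-owned, reused per function
+//   for (const FuncCompileInput& func : inputs) {
+//     BaseCompiler f(..., stk, ...);  // ctor borrows stk's storage via swap
+//     if (!f.init() || !f.emitFunction()) return false;
+//   }                                 // dtor swaps the storage back into stk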
+
+bool BaseCompiler::init() {
+ // We may lift this restriction in the future.
+ MOZ_ASSERT_IF(usesMemory() && isMem64(), !moduleEnv_.hugeMemoryEnabled());
+ // asm.js is not supported in baseline
+ MOZ_ASSERT(!moduleEnv_.isAsmJS());
+ // Only asm.js modules have call site line numbers
+ MOZ_ASSERT(func_.callSiteLineNums.empty());
+
+ ra.init(this);
+
+ if (!SigD_.append(ValType::F64)) {
+ return false;
+ }
+ if (!SigF_.append(ValType::F32)) {
+ return false;
+ }
+
+ ArgTypeVector args(funcType());
+ return fr.setupLocals(locals_, args, compilerEnv_.debugEnabled(),
+ &localInfo_);
+}
+
+FuncOffsets BaseCompiler::finish() {
+ MOZ_ASSERT(iter_.done(), "all bytes must be consumed");
+ MOZ_ASSERT(stk_.empty());
+ MOZ_ASSERT(stackMapGenerator_.memRefsOnStk == 0);
+
+ masm.flushBuffer();
+
+ return offsets_;
+}
+
+} // namespace wasm
+} // namespace js
+
+bool js::wasm::BaselinePlatformSupport() {
+#if defined(JS_CODEGEN_ARM)
+ // Simplifying assumption: require SDIV and UDIV.
+ //
+ // I have no good data on ARM populations allowing me to say that
+ // X% of devices in the market implement SDIV and UDIV. However,
+ // they are definitely implemented on the Cortex-A7 and Cortex-A15
+ // and on all ARMv8 systems.
+ if (!HasIDIV()) {
+ return false;
+ }
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
+ defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_LOONG64)
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo,
+ const FuncCompileInputVector& inputs,
+ CompiledCode* code,
+ UniqueChars* error) {
+ MOZ_ASSERT(compilerEnv.tier() == Tier::Baseline);
+ MOZ_ASSERT(moduleEnv.kind == ModuleKind::Wasm);
+
+ // The MacroAssembler will sometimes access the jitContext.
+
+ TempAllocator alloc(&lifo);
+ JitContext jitContext;
+ MOZ_ASSERT(IsCompilingWasm());
+ WasmMacroAssembler masm(alloc, moduleEnv);
+
+ // Swap in already-allocated empty vectors to avoid malloc/free.
+ MOZ_ASSERT(code->empty());
+ if (!code->swap(masm)) {
+ return false;
+ }
+
+ // Create a description of the stack layout created by GenerateTrapExit().
+ RegisterOffsets trapExitLayout;
+ size_t trapExitLayoutNumWords;
+ GenerateTrapExitRegisterOffsets(&trapExitLayout, &trapExitLayoutNumWords);
+
+ // The compiler's operand stack. We reuse it across all functions so as to
+ // avoid malloc/free. Presize it to 128 elements in the hope of avoiding
+ // reallocation later.
+ StkVector stk;
+ if (!stk.reserve(128)) {
+ return false;
+ }
+
+ for (const FuncCompileInput& func : inputs) {
+ Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+ // Build the local types vector.
+
+ ValTypeVector locals;
+ if (!locals.appendAll(moduleEnv.funcs[func.index].type->args())) {
+ return false;
+ }
+ if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
+ return false;
+ }
+
+ // One-pass baseline compilation.
+
+ BaseCompiler f(moduleEnv, compilerEnv, func, locals, trapExitLayout,
+ trapExitLayoutNumWords, d, stk, &alloc, &masm,
+ &code->stackMaps);
+ if (!f.init()) {
+ return false;
+ }
+ if (!f.emitFunction()) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode,
+ f.finish())) {
+ return false;
+ }
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
diff --git a/js/src/wasm/WasmBaselineCompile.h b/js/src/wasm/WasmBaselineCompile.h
new file mode 100644
index 0000000000..cd4f74e560
--- /dev/null
+++ b/js/src/wasm/WasmBaselineCompile.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef asmjs_wasm_baseline_compile_h
+#define asmjs_wasm_baseline_compile_h
+
+#include "jit/ABIArgGenerator.h"
+#include "wasm/WasmGenerator.h"
+
+namespace js {
+namespace wasm {
+
+// Return whether BaselineCompileFunctions can generate code on the current
+// device. Usually you do *not* want to call this; you want
+// BaselineAvailable().
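+//
+// A rough call-site sketch, hedged: BaselineAvailable() is declared
+// elsewhere, and the JSContext* argument shown here is an assumption, not
+// part of this header:
+//
+//   if (BaselineAvailable(cx)) {
+//     // ... the baseline tier may be selected ...
+//   }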
+[[nodiscard]] bool BaselinePlatformSupport();
+
+// Generate adequate code quickly.
+[[nodiscard]] bool BaselineCompileFunctions(
+ const ModuleEnvironment& moduleEnv, const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo, const FuncCompileInputVector& inputs, CompiledCode* code,
+ UniqueChars* error);
+
+// BaseLocalIter iterates over a vector of local types and provides the
+// offsets from the Frame address for those locals, along with associated
+// data.
+//
+// The implementation of BaseLocalIter is the property of the BaseStackFrame,
+// but it is also exposed for use by, e.g., the debugger.
+class BaseLocalIter {
+ private:
+ using ConstValTypeRange = mozilla::Range<const ValType>;
+
+ const ValTypeVector& locals_;
+ const ArgTypeVector& args_;
+ jit::WasmABIArgIter<ArgTypeVector> argsIter_;
+ size_t index_;
+ int32_t frameSize_;
+ int32_t nextFrameSize_;
+ int32_t frameOffset_;
+ int32_t stackResultPointerOffset_;
+ jit::MIRType mirType_;
+ bool done_;
+
+ void settle();
+ int32_t pushLocal(size_t nbytes);
+
+ public:
+ BaseLocalIter(const ValTypeVector& locals, const ArgTypeVector& args,
+ bool debugEnabled);
+ void operator++(int);
+ bool done() const { return done_; }
+
+ jit::MIRType mirType() const {
+ MOZ_ASSERT(!done_);
+ return mirType_;
+ }
+ int32_t frameOffset() const {
+ MOZ_ASSERT(!done_);
+ MOZ_ASSERT(frameOffset_ != INT32_MAX);
+ return frameOffset_;
+ }
+ size_t index() const {
+ MOZ_ASSERT(!done_);
+ return index_;
+ }
+ // The size in bytes taken up by the previous `index_` locals, also including
+ // fixed allocations like the DebugFrame and "hidden" locals like a spilled
+ // stack results pointer.
+ int32_t frameSize() const { return frameSize_; }
+
+ int32_t stackResultPointerOffset() const {
+ MOZ_ASSERT(args_.hasSyntheticStackResultPointerArg());
+ MOZ_ASSERT(stackResultPointerOffset_ != INT32_MAX);
+ return stackResultPointerOffset_;
+ }
+
+#ifdef DEBUG
+ bool isArg() const {
+ MOZ_ASSERT(!done_);
+ return !argsIter_.done();
+ }
+#endif
+};
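+
+// A hypothetical usage sketch (not taken from any real caller): walking the
+// frame offsets of a function's locals with the iterator declared above.
+// `locals` and `args` are assumed to be already-populated vectors.
+//
+//   for (BaseLocalIter i(locals, args, /*debugEnabled=*/false); !i.done();
+//        i++) {
+//     printf("local %zu at frame offset %d\n", i.index(), i.frameOffset());
+//   }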
+
+} // namespace wasm
+} // namespace js
+
+#endif // asmjs_wasm_baseline_compile_h
diff --git a/js/src/wasm/WasmBinary.cpp b/js/src/wasm/WasmBinary.cpp
new file mode 100644
index 0000000000..ccf50f9d2d
--- /dev/null
+++ b/js/src/wasm/WasmBinary.cpp
@@ -0,0 +1,335 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBinary.h"
+
+#include "js/Printf.h"
+#include "wasm/WasmValidate.h"
+
+using namespace js;
+using namespace js::wasm;
+
+// Decoder implementation.
+
+bool Decoder::failf(const char* msg, ...) {
+ va_list ap;
+ va_start(ap, msg);
+ UniqueChars str(JS_vsmprintf(msg, ap));
+ va_end(ap);
+ if (!str) {
+ return false;
+ }
+
+ return fail(str.get());
+}
+
+void Decoder::warnf(const char* msg, ...) {
+ if (!warnings_) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, msg);
+ UniqueChars str(JS_vsmprintf(msg, ap));
+ va_end(ap);
+ if (!str) {
+ return;
+ }
+
+ (void)warnings_->append(std::move(str));
+}
+
+bool Decoder::fail(size_t errorOffset, const char* msg) {
+ MOZ_ASSERT(error_);
+ UniqueChars strWithOffset(JS_smprintf("at offset %zu: %s", errorOffset, msg));
+ if (!strWithOffset) {
+ return false;
+ }
+
+ *error_ = std::move(strWithOffset);
+ return false;
+}
+
+bool Decoder::readSectionHeader(uint8_t* id, SectionRange* range) {
+ if (!readFixedU8(id)) {
+ return false;
+ }
+
+ uint32_t size;
+ if (!readVarU32(&size)) {
+ return false;
+ }
+
+ range->start = currentOffset();
+ range->size = size;
+ return true;
+}
+
+bool Decoder::startSection(SectionId id, ModuleEnvironment* env,
+ MaybeSectionRange* range, const char* sectionName) {
+ MOZ_ASSERT(!*range);
+
+ // Record state at beginning of section to allow rewinding to this point
+ // if, after skipping through several custom sections, we don't find the
+ // section 'id'.
+ const uint8_t* const initialCur = cur_;
+ const size_t initialCustomSectionsLength = env->customSections.length();
+
+ // Maintain a pointer to the current section that gets updated as custom
+ // sections are skipped.
+ const uint8_t* currentSectionStart = cur_;
+
+ // Only start a section with 'id', skipping any custom sections before it.
+
+ uint8_t idValue;
+ if (!readFixedU8(&idValue)) {
+ goto rewind;
+ }
+
+ while (idValue != uint8_t(id)) {
+ if (idValue != uint8_t(SectionId::Custom)) {
+ goto rewind;
+ }
+
+ // Rewind to the beginning of the current section since this is what
+ // skipCustomSection() assumes.
+ cur_ = currentSectionStart;
+ if (!skipCustomSection(env)) {
+ return false;
+ }
+
+ // Having successfully skipped a custom section, consider the next
+ // section.
+ currentSectionStart = cur_;
+ if (!readFixedU8(&idValue)) {
+ goto rewind;
+ }
+ }
+
+ // Don't check the size since the range of bytes being decoded might not
+ // contain the section body. (This is currently the case when streaming: the
+ // code section header is decoded with the module environment bytes, while
+ // the body of the code section is streamed in separately.)
+
+ uint32_t size;
+ if (!readVarU32(&size)) {
+ goto fail;
+ }
+
+ range->emplace();
+ (*range)->start = currentOffset();
+ (*range)->size = size;
+ return true;
+
+rewind:
+ cur_ = initialCur;
+ env->customSections.shrinkTo(initialCustomSectionsLength);
+ return true;
+
+fail:
+ return failf("failed to start %s section", sectionName);
+}
+
+bool Decoder::finishSection(const SectionRange& range,
+ const char* sectionName) {
+ if (resilientMode_) {
+ return true;
+ }
+ if (range.size != currentOffset() - range.start) {
+ return failf("byte size mismatch in %s section", sectionName);
+ }
+ return true;
+}
+
+bool Decoder::startCustomSection(const char* expected, size_t expectedLength,
+ ModuleEnvironment* env,
+ MaybeSectionRange* range) {
+ // Record state at beginning of section to allow rewinding to this point
+ // if, after skipping through several custom sections, we don't find the
+ // custom section named 'expected'.
+ const uint8_t* const initialCur = cur_;
+ const size_t initialCustomSectionsLength = env->customSections.length();
+
+ while (true) {
+ // Try to start a custom section. If we can't, rewind to the beginning
+ // since we may have skipped several custom sections already looking for
+ // 'expected'.
+ if (!startSection(SectionId::Custom, env, range, "custom")) {
+ return false;
+ }
+ if (!*range) {
+ goto rewind;
+ }
+
+ if (bytesRemain() < (*range)->size) {
+ goto fail;
+ }
+
+ CustomSectionEnv sec;
+ if (!readVarU32(&sec.nameLength) || sec.nameLength > bytesRemain()) {
+ goto fail;
+ }
+
+ sec.nameOffset = currentOffset();
+ sec.payloadOffset = sec.nameOffset + sec.nameLength;
+
+ uint32_t payloadEnd = (*range)->start + (*range)->size;
+ if (sec.payloadOffset > payloadEnd) {
+ goto fail;
+ }
+
+ sec.payloadLength = payloadEnd - sec.payloadOffset;
+
+ // Now that we have a valid custom section, record its offsets in the
+ // metadata which can be queried by the user via Module.customSections.
+ // Note: after an entry is appended, it may be popped if this loop or
+ // the loop in startSection needs to rewind.
+ if (!env->customSections.append(sec)) {
+ return false;
+ }
+
+ // If this is the expected custom section, we're done.
+ if (!expected || (expectedLength == sec.nameLength &&
+ !memcmp(cur_, expected, sec.nameLength))) {
+ cur_ += sec.nameLength;
+ return true;
+ }
+
+ // Otherwise, blindly skip the custom section and keep looking.
+ skipAndFinishCustomSection(**range);
+ range->reset();
+ }
+ MOZ_CRASH("unreachable");
+
+rewind:
+ cur_ = initialCur;
+ env->customSections.shrinkTo(initialCustomSectionsLength);
+ return true;
+
+fail:
+ return fail("failed to start custom section");
+}
+
+void Decoder::finishCustomSection(const char* name, const SectionRange& range) {
+ MOZ_ASSERT(cur_ >= beg_);
+ MOZ_ASSERT(cur_ <= end_);
+
+ if (error_ && *error_) {
+ warnf("in the '%s' custom section: %s", name, error_->get());
+ skipAndFinishCustomSection(range);
+ return;
+ }
+
+ uint32_t actualSize = currentOffset() - range.start;
+ if (range.size != actualSize) {
+ if (actualSize < range.size) {
+ warnf("in the '%s' custom section: %" PRIu32 " unconsumed bytes", name,
+ uint32_t(range.size - actualSize));
+ } else {
+ warnf("in the '%s' custom section: %" PRIu32
+ " bytes consumed past the end",
+ name, uint32_t(actualSize - range.size));
+ }
+ skipAndFinishCustomSection(range);
+ return;
+ }
+
+ // Nothing to do! (cf. skipAndFinishCustomSection())
+}
+
+void Decoder::skipAndFinishCustomSection(const SectionRange& range) {
+ MOZ_ASSERT(cur_ >= beg_);
+ MOZ_ASSERT(cur_ <= end_);
+ cur_ = (beg_ + (range.start - offsetInModule_)) + range.size;
+ MOZ_ASSERT(cur_ <= end_);
+ clearError();
+}
+
+bool Decoder::skipCustomSection(ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!startCustomSection(nullptr, 0, env, &range)) {
+ return false;
+ }
+ if (!range) {
+ return fail("expected custom section");
+ }
+
+ skipAndFinishCustomSection(*range);
+ return true;
+}
+
+bool Decoder::startNameSubsection(NameType nameType,
+ Maybe<uint32_t>* endOffset) {
+ MOZ_ASSERT(!*endOffset);
+
+ const uint8_t* const initialPosition = cur_;
+
+ uint8_t nameTypeValue;
+ if (!readFixedU8(&nameTypeValue)) {
+ goto rewind;
+ }
+
+ if (nameTypeValue != uint8_t(nameType)) {
+ goto rewind;
+ }
+
+ uint32_t payloadLength;
+ if (!readVarU32(&payloadLength) || payloadLength > bytesRemain()) {
+ return fail("bad name subsection payload length");
+ }
+
+ *endOffset = Some(currentOffset() + payloadLength);
+ return true;
+
+rewind:
+ cur_ = initialPosition;
+ return true;
+}
+
+bool Decoder::finishNameSubsection(uint32_t endOffset) {
+ uint32_t actual = currentOffset();
+ if (endOffset != actual) {
+ return failf("bad name subsection length (endOffset: %" PRIu32
+ ", actual: %" PRIu32 ")",
+ endOffset, actual);
+ }
+
+ return true;
+}
+
+bool Decoder::skipNameSubsection() {
+ uint8_t nameTypeValue;
+ if (!readFixedU8(&nameTypeValue)) {
+ return fail("unable to read name subsection id");
+ }
+
+ switch (nameTypeValue) {
+ case uint8_t(NameType::Module):
+ case uint8_t(NameType::Function):
+ return fail("out of order name subsections");
+ default:
+ break;
+ }
+
+ uint32_t payloadLength;
+ if (!readVarU32(&payloadLength) || !readBytes(payloadLength)) {
+ return fail("bad name subsection payload length");
+ }
+
+ return true;
+}
diff --git a/js/src/wasm/WasmBinary.h b/js/src/wasm/WasmBinary.h
new file mode 100644
index 0000000000..04015c55a4
--- /dev/null
+++ b/js/src/wasm/WasmBinary.h
@@ -0,0 +1,890 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_binary_h
+#define wasm_binary_h
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include <type_traits>
+
+#include "js/WasmFeatures.h"
+
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmCompileArgs.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValType.h"
+
+namespace js {
+namespace wasm {
+
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+
+struct ModuleEnvironment;
+
+// The Opcode compactly and safely represents the primary opcode plus any
+// extension, with convenient predicates and accessors.
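+//
+// The packing is simple: the low 8 bits always hold the primary opcode byte
+// (which may be a prefix byte), and for prefixed opcodes the upper 24 bits
+// hold the sub-opcode, as established by the constructors below.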
+
+class Opcode {
+ uint32_t bits_;
+
+ public:
+ MOZ_IMPLICIT Opcode(Op op) : bits_(uint32_t(op)) {
+ static_assert(size_t(Op::Limit) == 256, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
+ }
+ MOZ_IMPLICIT Opcode(MiscOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::MiscPrefix)) {
+ static_assert(size_t(MiscOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(MiscOp::Limit));
+ }
+ MOZ_IMPLICIT Opcode(ThreadOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::ThreadPrefix)) {
+ static_assert(size_t(ThreadOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(ThreadOp::Limit));
+ }
+ MOZ_IMPLICIT Opcode(MozOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::MozPrefix)) {
+ static_assert(size_t(MozOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(MozOp::Limit));
+ }
+ MOZ_IMPLICIT Opcode(SimdOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::SimdPrefix)) {
+ static_assert(size_t(SimdOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(SimdOp::Limit));
+ }
+
+ bool isOp() const { return bits_ < uint32_t(Op::FirstPrefix); }
+ bool isMisc() const { return (bits_ & 255) == uint32_t(Op::MiscPrefix); }
+ bool isThread() const { return (bits_ & 255) == uint32_t(Op::ThreadPrefix); }
+ bool isMoz() const { return (bits_ & 255) == uint32_t(Op::MozPrefix); }
+ bool isSimd() const { return (bits_ & 255) == uint32_t(Op::SimdPrefix); }
+
+ Op asOp() const {
+ MOZ_ASSERT(isOp());
+ return Op(bits_);
+ }
+ MiscOp asMisc() const {
+ MOZ_ASSERT(isMisc());
+ return MiscOp(bits_ >> 8);
+ }
+ ThreadOp asThread() const {
+ MOZ_ASSERT(isThread());
+ return ThreadOp(bits_ >> 8);
+ }
+ MozOp asMoz() const {
+ MOZ_ASSERT(isMoz());
+ return MozOp(bits_ >> 8);
+ }
+ SimdOp asSimd() const {
+ MOZ_ASSERT(isSimd());
+ return SimdOp(bits_ >> 8);
+ }
+
+ uint32_t bits() const { return bits_; }
+
+ bool operator==(const Opcode& that) const { return bits_ == that.bits_; }
+ bool operator!=(const Opcode& that) const { return bits_ != that.bits_; }
+};
+
+// This struct captures the bytecode offset of a section's payload (so not
+// including the header) and the size of the payload.
+
+struct SectionRange {
+ uint32_t start;
+ uint32_t size;
+
+ uint32_t end() const { return start + size; }
+ bool operator==(const SectionRange& rhs) const {
+ return start == rhs.start && size == rhs.size;
+ }
+};
+
+using MaybeSectionRange = Maybe<SectionRange>;
+
+// The Encoder class appends bytes to the Bytes object it is given during
+// construction. The client is responsible for the Bytes's lifetime and must
+// keep the Bytes alive as long as the Encoder is used.
+
+class Encoder {
+ Bytes& bytes_;
+
+ template <class T>
+ [[nodiscard]] bool write(const T& v) {
+ return bytes_.append(reinterpret_cast<const uint8_t*>(&v), sizeof(T));
+ }
+
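+ // Unsigned LEB128: emit 7 bits per byte, least-significant group first, and
+ // set the high bit on every byte except the last. For example, 624485
+ // (0x98765) encodes as the three bytes 0xE5 0x8E 0x26.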
+ template <typename UInt>
+ [[nodiscard]] bool writeVarU(UInt i) {
+ do {
+ uint8_t byte = i & 0x7f;
+ i >>= 7;
+ if (i != 0) {
+ byte |= 0x80;
+ }
+ if (!bytes_.append(byte)) {
+ return false;
+ }
+ } while (i != 0);
+ return true;
+ }
+
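+ // Signed LEB128: also 7 bits per byte, but emission stops once the remaining
+ // value is a pure sign extension of bit 0x40 of the last byte. For example,
+ // -123456 encodes as the three bytes 0xC0 0xBB 0x78.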
+ template <typename SInt>
+ [[nodiscard]] bool writeVarS(SInt i) {
+ bool done;
+ do {
+ uint8_t byte = i & 0x7f;
+ i >>= 7;
+ done = ((i == 0) && !(byte & 0x40)) || ((i == -1) && (byte & 0x40));
+ if (!done) {
+ byte |= 0x80;
+ }
+ if (!bytes_.append(byte)) {
+ return false;
+ }
+ } while (!done);
+ return true;
+ }
+
+ void patchVarU32(size_t offset, uint32_t patchBits, uint32_t assertBits) {
+ do {
+ uint8_t assertByte = assertBits & 0x7f;
+ uint8_t patchByte = patchBits & 0x7f;
+ assertBits >>= 7;
+ patchBits >>= 7;
+ if (assertBits != 0) {
+ assertByte |= 0x80;
+ patchByte |= 0x80;
+ }
+ MOZ_ASSERT(assertByte == bytes_[offset]);
+ (void)assertByte;
+ bytes_[offset] = patchByte;
+ offset++;
+ } while (assertBits != 0);
+ }
+
+ void patchFixedU7(size_t offset, uint8_t patchBits, uint8_t assertBits) {
+ MOZ_ASSERT(patchBits <= uint8_t(INT8_MAX));
+ patchFixedU8(offset, patchBits, assertBits);
+ }
+
+ void patchFixedU8(size_t offset, uint8_t patchBits, uint8_t assertBits) {
+ MOZ_ASSERT(bytes_[offset] == assertBits);
+ bytes_[offset] = patchBits;
+ }
+
+ uint32_t varU32ByteLength(size_t offset) const {
+ size_t start = offset;
+ while (bytes_[offset] & 0x80) {
+ offset++;
+ }
+ return offset - start + 1;
+ }
+
+ public:
+ explicit Encoder(Bytes& bytes) : bytes_(bytes) { MOZ_ASSERT(empty()); }
+
+ size_t currentOffset() const { return bytes_.length(); }
+ bool empty() const { return currentOffset() == 0; }
+
+ // Fixed-size encoding operations simply copy the literal bytes (without
+ // attempting to align).
+
+ [[nodiscard]] bool writeFixedU7(uint8_t i) {
+ MOZ_ASSERT(i <= uint8_t(INT8_MAX));
+ return writeFixedU8(i);
+ }
+ [[nodiscard]] bool writeFixedU8(uint8_t i) { return write<uint8_t>(i); }
+ [[nodiscard]] bool writeFixedU32(uint32_t i) { return write<uint32_t>(i); }
+ [[nodiscard]] bool writeFixedF32(float f) { return write<float>(f); }
+ [[nodiscard]] bool writeFixedF64(double d) { return write<double>(d); }
+
+ // Variable-length encodings that all use LEB128.
+
+ [[nodiscard]] bool writeVarU32(uint32_t i) { return writeVarU<uint32_t>(i); }
+ [[nodiscard]] bool writeVarS32(int32_t i) { return writeVarS<int32_t>(i); }
+ [[nodiscard]] bool writeVarU64(uint64_t i) { return writeVarU<uint64_t>(i); }
+ [[nodiscard]] bool writeVarS64(int64_t i) { return writeVarS<int64_t>(i); }
+ [[nodiscard]] bool writeValType(ValType type) {
+ static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ // writeValType is only used by asm.js, which doesn't use type
+ // references.
+ MOZ_RELEASE_ASSERT(!type.isTypeRef(), "NYI");
+ TypeCode tc = type.packed().typeCode();
+ MOZ_ASSERT(size_t(tc) < size_t(TypeCode::Limit));
+ return writeFixedU8(uint8_t(tc));
+ }
+ [[nodiscard]] bool writeOp(Opcode opcode) {
+ // The Opcode constructor has asserted that `opcode` is meaningful, so no
+ // further correctness checking is necessary here.
+ uint32_t bits = opcode.bits();
+ if (!writeFixedU8(bits & 255)) {
+ return false;
+ }
+ if (opcode.isOp()) {
+ return true;
+ }
+ return writeVarU32(bits >> 8);
+ }
+
+ // Fixed-length encodings that allow back-patching.
+
+ [[nodiscard]] bool writePatchableFixedU7(size_t* offset) {
+ *offset = bytes_.length();
+ return writeFixedU8(UINT8_MAX);
+ }
+ void patchFixedU7(size_t offset, uint8_t patchBits) {
+ return patchFixedU7(offset, patchBits, UINT8_MAX);
+ }
+
+ // Variable-length encodings that allow back-patching.
+
+ [[nodiscard]] bool writePatchableVarU32(size_t* offset) {
+ *offset = bytes_.length();
+ return writeVarU32(UINT32_MAX);
+ }
+ void patchVarU32(size_t offset, uint32_t patchBits) {
+ return patchVarU32(offset, patchBits, UINT32_MAX);
+ }
+
+ // Byte ranges start with an LEB128 length followed by an arbitrary sequence
+ // of bytes. When used for strings, bytes are to be interpreted as utf8.
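+ // For example, writeBytes("name", 4) emits the length byte 0x04 followed by
+ // the four bytes 'n', 'a', 'm', 'e'.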
+
+ [[nodiscard]] bool writeBytes(const void* bytes, uint32_t numBytes) {
+ return writeVarU32(numBytes) &&
+ bytes_.append(reinterpret_cast<const uint8_t*>(bytes), numBytes);
+ }
+
+ // A "section" is a contiguous range of bytes that stores its own size so
+ // that it may be trivially skipped without examining the payload. Sections
+ // require backpatching since the size of the section is only known at the
+ // end while the size's varU32 must be stored at the beginning. The section's
+ // one-byte id is written immediately before the section length.
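+ //
+ // A minimal usage sketch (for illustration only; `e` is an Encoder):
+ //
+ //   size_t offset;
+ //   if (!e.startSection(SectionId::Type, &offset)) return false;
+ //   // ... write the section payload ...
+ //   e.finishSection(offset);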
+
+ [[nodiscard]] bool startSection(SectionId id, size_t* offset) {
+ MOZ_ASSERT(uint32_t(id) < 128);
+ return writeVarU32(uint32_t(id)) && writePatchableVarU32(offset);
+ }
+ void finishSection(size_t offset) {
+ return patchVarU32(offset,
+ bytes_.length() - offset - varU32ByteLength(offset));
+ }
+};
+
+// The Decoder class decodes the bytes in the range it is given during
+// construction. The client is responsible for keeping the byte range alive as
+// long as the Decoder is used.
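+//
+// A minimal usage sketch (for illustration; `bytes` is a Bytes buffer and
+// `error` is a UniqueChars, both owned by the caller):
+//
+//   Decoder d(bytes, /* offsetInModule = */ 0, &error);
+//   uint8_t id;
+//   SectionRange range;
+//   if (!d.readSectionHeader(&id, &range)) {
+//     return d.fail("expected section header");
+//   }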
+
+class Decoder {
+ const uint8_t* const beg_;
+ const uint8_t* const end_;
+ const uint8_t* cur_;
+ const size_t offsetInModule_;
+ UniqueChars* error_;
+ UniqueCharsVector* warnings_;
+ bool resilientMode_;
+
+ template <class T>
+ [[nodiscard]] bool read(T* out) {
+ if (bytesRemain() < sizeof(T)) {
+ return false;
+ }
+ memcpy((void*)out, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ return true;
+ }
+
+ template <class T>
+ T uncheckedRead() {
+ MOZ_ASSERT(bytesRemain() >= sizeof(T));
+ T ret;
+ memcpy(&ret, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ return ret;
+ }
+
+ template <class T>
+ void uncheckedRead(T* ret) {
+ MOZ_ASSERT(bytesRemain() >= sizeof(T));
+ memcpy(ret, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ }
+
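+ // The LEB128 readers below reject overlong encodings: the final byte of a
+ // maximum-length encoding may not set bits beyond the type's width, and for
+ // signed values it must be a proper sign extension.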
+ template <typename UInt>
+ [[nodiscard]] bool readVarU(UInt* out) {
+ DebugOnly<const uint8_t*> before = cur_;
+ const unsigned numBits = sizeof(UInt) * CHAR_BIT;
+ const unsigned remainderBits = numBits % 7;
+ const unsigned numBitsInSevens = numBits - remainderBits;
+ UInt u = 0;
+ uint8_t byte;
+ UInt shift = 0;
+ do {
+ if (!readFixedU8(&byte)) {
+ return false;
+ }
+ if (!(byte & 0x80)) {
+ *out = u | UInt(byte) << shift;
+ return true;
+ }
+ u |= UInt(byte & 0x7F) << shift;
+ shift += 7;
+ } while (shift != numBitsInSevens);
+ if (!readFixedU8(&byte) || (byte & (unsigned(-1) << remainderBits))) {
+ return false;
+ }
+ *out = u | (UInt(byte) << numBitsInSevens);
+ MOZ_ASSERT_IF(sizeof(UInt) == 4,
+ unsigned(cur_ - before) <= MaxVarU32DecodedBytes);
+ return true;
+ }
+
+ template <typename SInt>
+ [[nodiscard]] bool readVarS(SInt* out) {
+ using UInt = std::make_unsigned_t<SInt>;
+ const unsigned numBits = sizeof(SInt) * CHAR_BIT;
+ const unsigned remainderBits = numBits % 7;
+ const unsigned numBitsInSevens = numBits - remainderBits;
+ SInt s = 0;
+ uint8_t byte;
+ unsigned shift = 0;
+ do {
+ if (!readFixedU8(&byte)) {
+ return false;
+ }
+ s |= SInt(byte & 0x7f) << shift;
+ shift += 7;
+ if (!(byte & 0x80)) {
+ if (byte & 0x40) {
+ s |= UInt(-1) << shift;
+ }
+ *out = s;
+ return true;
+ }
+ } while (shift < numBitsInSevens);
+ if (!remainderBits || !readFixedU8(&byte) || (byte & 0x80)) {
+ return false;
+ }
+ uint8_t mask = 0x7f & (uint8_t(-1) << remainderBits);
+ if ((byte & mask) != ((byte & (1 << (remainderBits - 1))) ? mask : 0)) {
+ return false;
+ }
+ *out = s | UInt(byte) << shift;
+ return true;
+ }
+
+ public:
+ Decoder(const uint8_t* begin, const uint8_t* end, size_t offsetInModule,
+ UniqueChars* error, UniqueCharsVector* warnings = nullptr,
+ bool resilientMode = false)
+ : beg_(begin),
+ end_(end),
+ cur_(begin),
+ offsetInModule_(offsetInModule),
+ error_(error),
+ warnings_(warnings),
+ resilientMode_(resilientMode) {
+ MOZ_ASSERT(begin <= end);
+ }
+ explicit Decoder(const Bytes& bytes, size_t offsetInModule = 0,
+ UniqueChars* error = nullptr,
+ UniqueCharsVector* warnings = nullptr)
+ : beg_(bytes.begin()),
+ end_(bytes.end()),
+ cur_(bytes.begin()),
+ offsetInModule_(offsetInModule),
+ error_(error),
+ warnings_(warnings),
+ resilientMode_(false) {}
+
+ // These convenience functions use currentOffset() as the errorOffset.
+ bool fail(const char* msg) { return fail(currentOffset(), msg); }
+ bool failf(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);
+ void warnf(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);
+
+ // Report an error at the given offset (relative to the whole module).
+ bool fail(size_t errorOffset, const char* msg);
+
+ UniqueChars* error() { return error_; }
+
+ void clearError() {
+ if (error_) {
+ error_->reset();
+ }
+ }
+
+ bool done() const {
+ MOZ_ASSERT(cur_ <= end_);
+ return cur_ == end_;
+ }
+ bool resilientMode() const { return resilientMode_; }
+
+ size_t bytesRemain() const {
+ MOZ_ASSERT(end_ >= cur_);
+ return size_t(end_ - cur_);
+ }
+ // pos must be a value previously returned from currentPosition.
+ void rollbackPosition(const uint8_t* pos) { cur_ = pos; }
+ const uint8_t* currentPosition() const { return cur_; }
+ size_t beginOffset() const { return offsetInModule_; }
+ size_t currentOffset() const { return offsetInModule_ + (cur_ - beg_); }
+ const uint8_t* begin() const { return beg_; }
+ const uint8_t* end() const { return end_; }
+
+ // Peek at the next byte, if it exists, without advancing the position.
+
+ bool peekByte(uint8_t* byte) {
+ if (done()) {
+ return false;
+ }
+ *byte = *cur_;
+ return true;
+ }
+
+ // Fixed-size encoding operations simply copy the literal bytes (without
+ // attempting to align).
+
+ [[nodiscard]] bool readFixedU8(uint8_t* i) { return read<uint8_t>(i); }
+ [[nodiscard]] bool readFixedU32(uint32_t* u) { return read<uint32_t>(u); }
+ [[nodiscard]] bool readFixedF32(float* f) { return read<float>(f); }
+ [[nodiscard]] bool readFixedF64(double* d) { return read<double>(d); }
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] bool readFixedV128(V128* d) {
+ for (unsigned i = 0; i < 16; i++) {
+ if (!read<uint8_t>(d->bytes + i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+#endif
+
+ // Variable-length encodings that all use LEB128.
+
+ [[nodiscard]] bool readVarU32(uint32_t* out) {
+ return readVarU<uint32_t>(out);
+ }
+ [[nodiscard]] bool readVarS32(int32_t* out) { return readVarS<int32_t>(out); }
+ [[nodiscard]] bool readVarU64(uint64_t* out) {
+ return readVarU<uint64_t>(out);
+ }
+ [[nodiscard]] bool readVarS64(int64_t* out) { return readVarS<int64_t>(out); }
+
+ // Value and reference types
+
+ [[nodiscard]] ValType uncheckedReadValType(const TypeContext& types);
+
+ template <class T>
+ [[nodiscard]] bool readPackedType(const TypeContext& types,
+ const FeatureArgs& features, T* type);
+
+ [[nodiscard]] bool readValType(const TypeContext& types,
+ const FeatureArgs& features, ValType* type);
+
+ [[nodiscard]] bool readFieldType(const TypeContext& types,
+ const FeatureArgs& features,
+ FieldType* type);
+
+ [[nodiscard]] bool readHeapType(const TypeContext& types,
+ const FeatureArgs& features, bool nullable,
+ RefType* type);
+
+ [[nodiscard]] bool readRefType(const TypeContext& types,
+ const FeatureArgs& features, RefType* type);
+
+ // Instruction opcode
+
+ [[nodiscard]] bool readOp(OpBytes* op);
+
+ // Instruction immediates for constant instructions
+
+ [[nodiscard]] bool readBinary() { return true; }
+ [[nodiscard]] bool readTypeIndex(uint32_t* typeIndex);
+ [[nodiscard]] bool readGlobalIndex(uint32_t* globalIndex);
+ [[nodiscard]] bool readFuncIndex(uint32_t* funcIndex);
+ [[nodiscard]] bool readI32Const(int32_t* i32);
+ [[nodiscard]] bool readI64Const(int64_t* i64);
+ [[nodiscard]] bool readF32Const(float* f32);
+ [[nodiscard]] bool readF64Const(double* f64);
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] bool readV128Const(V128* value);
+#endif
+ [[nodiscard]] bool readRefNull(const TypeContext& types,
+ const FeatureArgs& features, RefType* type);
+
+ // See writeBytes comment.
+
+ [[nodiscard]] bool readBytes(uint32_t numBytes,
+ const uint8_t** bytes = nullptr) {
+ if (bytes) {
+ *bytes = cur_;
+ }
+ if (bytesRemain() < numBytes) {
+ return false;
+ }
+ cur_ += numBytes;
+ return true;
+ }
+
+ // See "section" description in Encoder.
+
+ [[nodiscard]] bool readSectionHeader(uint8_t* id, SectionRange* range);
+
+ [[nodiscard]] bool startSection(SectionId id, ModuleEnvironment* env,
+ MaybeSectionRange* range,
+ const char* sectionName);
+ [[nodiscard]] bool finishSection(const SectionRange& range,
+ const char* sectionName);
+
+ // Custom sections do not cause validation errors unless the error is in
+ // the section header itself.
+
+ [[nodiscard]] bool startCustomSection(const char* expected,
+ size_t expectedLength,
+ ModuleEnvironment* env,
+ MaybeSectionRange* range);
+
+ template <size_t NameSizeWith0>
+ [[nodiscard]] bool startCustomSection(const char (&name)[NameSizeWith0],
+ ModuleEnvironment* env,
+ MaybeSectionRange* range) {
+ MOZ_ASSERT(name[NameSizeWith0 - 1] == '\0');
+ return startCustomSection(name, NameSizeWith0 - 1, env, range);
+ }
+
+ void finishCustomSection(const char* name, const SectionRange& range);
+ void skipAndFinishCustomSection(const SectionRange& range);
+
+ [[nodiscard]] bool skipCustomSection(ModuleEnvironment* env);
+
+ // The Name section has its own optional subsections.
+
+ [[nodiscard]] bool startNameSubsection(NameType nameType,
+ Maybe<uint32_t>* endOffset);
+ [[nodiscard]] bool finishNameSubsection(uint32_t endOffset);
+ [[nodiscard]] bool skipNameSubsection();
+
+ // The infallible "unchecked" decoding functions can be used when we are
+ // sure that the bytes are well-formed (by construction or due to previous
+ // validation).
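+ // (In debug builds, uncheckedRead() still asserts that enough bytes remain.)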
+
+ uint8_t uncheckedReadFixedU8() { return uncheckedRead<uint8_t>(); }
+ uint32_t uncheckedReadFixedU32() { return uncheckedRead<uint32_t>(); }
+ void uncheckedReadFixedF32(float* out) { uncheckedRead<float>(out); }
+ void uncheckedReadFixedF64(double* out) { uncheckedRead<double>(out); }
+ template <typename UInt>
+ UInt uncheckedReadVarU() {
+ static const unsigned numBits = sizeof(UInt) * CHAR_BIT;
+ static const unsigned remainderBits = numBits % 7;
+ static const unsigned numBitsInSevens = numBits - remainderBits;
+ UInt decoded = 0;
+ uint32_t shift = 0;
+ do {
+ uint8_t byte = *cur_++;
+ if (!(byte & 0x80)) {
+ return decoded | (UInt(byte) << shift);
+ }
+ decoded |= UInt(byte & 0x7f) << shift;
+ shift += 7;
+ } while (shift != numBitsInSevens);
+ uint8_t byte = *cur_++;
+ MOZ_ASSERT(!(byte & 0xf0));
+ return decoded | (UInt(byte) << numBitsInSevens);
+ }
+ uint32_t uncheckedReadVarU32() { return uncheckedReadVarU<uint32_t>(); }
+ int32_t uncheckedReadVarS32() {
+ int32_t i32 = 0;
+ MOZ_ALWAYS_TRUE(readVarS32(&i32));
+ return i32;
+ }
+ uint64_t uncheckedReadVarU64() { return uncheckedReadVarU<uint64_t>(); }
+ int64_t uncheckedReadVarS64() {
+ int64_t i64 = 0;
+ MOZ_ALWAYS_TRUE(readVarS64(&i64));
+ return i64;
+ }
+ Op uncheckedReadOp() {
+ static_assert(size_t(Op::Limit) == 256, "fits");
+ uint8_t u8 = uncheckedReadFixedU8();
+ return u8 != UINT8_MAX ? Op(u8) : Op(uncheckedReadFixedU8() + UINT8_MAX);
+ }
+};
+
+// Value and reference types
+
+inline ValType Decoder::uncheckedReadValType(const TypeContext& types) {
+ uint8_t code = uncheckedReadFixedU8();
+ switch (code) {
+ case uint8_t(TypeCode::FuncRef):
+ case uint8_t(TypeCode::ExternRef):
+ return RefType::fromTypeCode(TypeCode(code), true);
+ case uint8_t(TypeCode::Ref):
+ case uint8_t(TypeCode::NullableRef): {
+ bool nullable = code == uint8_t(TypeCode::NullableRef);
+
+ uint8_t nextByte;
+ peekByte(&nextByte);
+
+ if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
+ uint8_t code = uncheckedReadFixedU8();
+ return RefType::fromTypeCode(TypeCode(code), nullable);
+ }
+
+ int32_t x = uncheckedReadVarS32();
+ const TypeDef* typeDef = &types.type(x);
+ return RefType::fromTypeDef(typeDef, nullable);
+ }
+ default:
+ return ValType::fromNonRefTypeCode(TypeCode(code));
+ }
+}
+
+template <class T>
+inline bool Decoder::readPackedType(const TypeContext& types,
+ const FeatureArgs& features, T* type) {
+ static_assert(uint8_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ uint8_t code;
+ if (!readFixedU8(&code)) {
+ return fail("expected type code");
+ }
+ switch (code) {
+ case uint8_t(TypeCode::V128): {
+#ifdef ENABLE_WASM_SIMD
+ if (!features.simd) {
+ return fail("v128 not enabled");
+ }
+ *type = T::fromNonRefTypeCode(TypeCode(code));
+ return true;
+#else
+ break;
+#endif
+ }
+ case uint8_t(TypeCode::FuncRef):
+ case uint8_t(TypeCode::ExternRef): {
+ *type = RefType::fromTypeCode(TypeCode(code), true);
+ return true;
+ }
+ case uint8_t(TypeCode::Ref):
+ case uint8_t(TypeCode::NullableRef): {
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ if (!features.functionReferences) {
+ return fail("(ref T) types not enabled");
+ }
+ bool nullable = code == uint8_t(TypeCode::NullableRef);
+ RefType refType;
+ if (!readHeapType(types, features, nullable, &refType)) {
+ return false;
+ }
+ *type = refType;
+ return true;
+#else
+ break;
+#endif
+ }
+ case uint8_t(TypeCode::AnyRef):
+ case uint8_t(TypeCode::EqRef):
+ case uint8_t(TypeCode::StructRef):
+ case uint8_t(TypeCode::ArrayRef):
+ case uint8_t(TypeCode::NullFuncRef):
+ case uint8_t(TypeCode::NullExternRef):
+ case uint8_t(TypeCode::NullAnyRef): {
+#ifdef ENABLE_WASM_GC
+ if (!features.gc) {
+ return fail("gc types not enabled");
+ }
+ *type = RefType::fromTypeCode(TypeCode(code), true);
+ return true;
+#else
+ break;
+#endif
+ }
+ default: {
+ if (!T::isValidTypeCode(TypeCode(code))) {
+ break;
+ }
+ *type = T::fromNonRefTypeCode(TypeCode(code));
+ return true;
+ }
+ }
+ return fail("bad type");
+}
+
+inline bool Decoder::readValType(const TypeContext& types,
+ const FeatureArgs& features, ValType* type) {
+ return readPackedType<ValType>(types, features, type);
+}
+
+inline bool Decoder::readFieldType(const TypeContext& types,
+ const FeatureArgs& features,
+ FieldType* type) {
+ return readPackedType<FieldType>(types, features, type);
+}
+
+inline bool Decoder::readHeapType(const TypeContext& types,
+ const FeatureArgs& features, bool nullable,
+ RefType* type) {
+ uint8_t nextByte;
+ if (!peekByte(&nextByte)) {
+ return fail("expected heap type code");
+ }
+
+ if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
+ uint8_t code;
+ if (!readFixedU8(&code)) {
+ return false;
+ }
+
+ switch (code) {
+ case uint8_t(TypeCode::FuncRef):
+ case uint8_t(TypeCode::ExternRef):
+ *type = RefType::fromTypeCode(TypeCode(code), nullable);
+ return true;
+#ifdef ENABLE_WASM_GC
+ case uint8_t(TypeCode::AnyRef):
+ case uint8_t(TypeCode::EqRef):
+ case uint8_t(TypeCode::StructRef):
+ case uint8_t(TypeCode::ArrayRef):
+ case uint8_t(TypeCode::NullFuncRef):
+ case uint8_t(TypeCode::NullExternRef):
+ case uint8_t(TypeCode::NullAnyRef):
+ if (!features.gc) {
+ return fail("gc types not enabled");
+ }
+ *type = RefType::fromTypeCode(TypeCode(code), nullable);
+ return true;
+#endif
+ default:
+ return fail("invalid heap type");
+ }
+ }
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ if (features.functionReferences) {
+ int32_t x;
+ if (!readVarS32(&x) || x < 0 || uint32_t(x) >= types.length()) {
+ return fail("invalid heap type index");
+ }
+ const TypeDef* typeDef = &types.type(x);
+ *type = RefType::fromTypeDef(typeDef, nullable);
+ return true;
+ }
+#endif
+ return fail("invalid heap type");
+}
+
+inline bool Decoder::readRefType(const TypeContext& types,
+ const FeatureArgs& features, RefType* type) {
+ ValType valType;
+ if (!readValType(types, features, &valType)) {
+ return false;
+ }
+ if (!valType.isRefType()) {
+ return fail("bad type");
+ }
+ *type = valType.refType();
+ return true;
+}
+
+// Instruction opcode
+
+inline bool Decoder::readOp(OpBytes* op) {
+ static_assert(size_t(Op::Limit) == 256, "fits");
+ uint8_t u8;
+ if (!readFixedU8(&u8)) {
+ return false;
+ }
+ op->b0 = u8;
+ if (MOZ_LIKELY(!IsPrefixByte(u8))) {
+ return true;
+ }
+ return readVarU32(&op->b1);
+}
+
+// Instruction immediates for constant instructions
+
+inline bool Decoder::readTypeIndex(uint32_t* typeIndex) {
+ if (!readVarU32(typeIndex)) {
+ return fail("unable to read type index");
+ }
+ return true;
+}
+
+inline bool Decoder::readGlobalIndex(uint32_t* globalIndex) {
+ if (!readVarU32(globalIndex)) {
+ return fail("unable to read global index");
+ }
+ return true;
+}
+
+inline bool Decoder::readFuncIndex(uint32_t* funcIndex) {
+ if (!readVarU32(funcIndex)) {
+ return fail("unable to read function index");
+ }
+ return true;
+}
+
+inline bool Decoder::readI32Const(int32_t* i32) {
+ if (!readVarS32(i32)) {
+ return fail("failed to read I32 constant");
+ }
+ return true;
+}
+
+inline bool Decoder::readI64Const(int64_t* i64) {
+ if (!readVarS64(i64)) {
+ return fail("failed to read I64 constant");
+ }
+ return true;
+}
+
+inline bool Decoder::readF32Const(float* f32) {
+ if (!readFixedF32(f32)) {
+ return fail("failed to read F32 constant");
+ }
+ return true;
+}
+
+inline bool Decoder::readF64Const(double* f64) {
+ if (!readFixedF64(f64)) {
+ return fail("failed to read F64 constant");
+ }
+ return true;
+}
+
+#ifdef ENABLE_WASM_SIMD
+inline bool Decoder::readV128Const(V128* value) {
+ if (!readFixedV128(value)) {
+ return fail("unable to read V128 constant");
+ }
+ return true;
+}
+#endif
+
+inline bool Decoder::readRefNull(const TypeContext& types,
+ const FeatureArgs& features, RefType* type) {
+ return readHeapType(types, features, true, type);
+}
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_binary_h
diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
new file mode 100644
index 0000000000..86cc45a9ed
--- /dev/null
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -0,0 +1,1980 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBuiltins.h"
+
+#include "mozilla/Atomics.h"
+
+#include "fdlibm.h"
+#include "jslibmath.h"
+#include "jsmath.h"
+
+#include "gc/Allocator.h"
+#include "jit/AtomicOperations.h"
+#include "jit/InlinableNatives.h"
+#include "jit/MacroAssembler.h"
+#include "jit/Simulator.h"
+#include "js/experimental/JitInfo.h" // JSJitInfo
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::AutoCheckRecursionLimit
+#include "threading/Mutex.h"
+#include "util/Memory.h"
+#include "util/Poison.h"
+#include "vm/BigIntType.h"
+#include "vm/ErrorObject.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmDebugFrame.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmStubs.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "vm/ErrorObject-inl.h"
+#include "vm/Stack-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace jit;
+using namespace wasm;
+
+using mozilla::HashGeneric;
+using mozilla::MakeEnumeratedRange;
+
+static const unsigned BUILTIN_THUNK_LIFO_SIZE = 64 * 1024;
+
+// ============================================================================
+// WebAssembly builtin C++ functions called from wasm code to implement internal
+// wasm operations: type descriptions.
+
+// Some abbreviations, for the sake of conciseness.
+#define _F64 MIRType::Double
+#define _F32 MIRType::Float32
+#define _I32 MIRType::Int32
+#define _I64 MIRType::Int64
+#define _PTR MIRType::Pointer
+#define _RoN MIRType::RefOrNull
+#define _VOID MIRType::None
+#define _END MIRType::None
+#define _Infallible FailureMode::Infallible
+#define _FailOnNegI32 FailureMode::FailOnNegI32
+#define _FailOnNullPtr FailureMode::FailOnNullPtr
+#define _FailOnInvalidRef FailureMode::FailOnInvalidRef
+
+namespace js {
+namespace wasm {
+
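+// Each SymbolicAddressSignature below reads as: the symbolic address, the
+// return MIRType, the failure mode, the argument count, and the argument
+// MIRTypes terminated by _END. For example, SASigPowD describes an infallible
+// builtin taking two doubles and returning a double.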
+const SymbolicAddressSignature SASigSinNativeD = {
+ SymbolicAddress::SinNativeD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigSinFdlibmD = {
+ SymbolicAddress::SinFdlibmD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigCosNativeD = {
+ SymbolicAddress::CosNativeD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigCosFdlibmD = {
+ SymbolicAddress::CosFdlibmD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigTanNativeD = {
+ SymbolicAddress::TanNativeD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigTanFdlibmD = {
+ SymbolicAddress::TanFdlibmD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigASinD = {
+ SymbolicAddress::ASinD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigACosD = {
+ SymbolicAddress::ACosD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigATanD = {
+ SymbolicAddress::ATanD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigCeilD = {
+ SymbolicAddress::CeilD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigCeilF = {
+ SymbolicAddress::CeilF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigFloorD = {
+ SymbolicAddress::FloorD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigFloorF = {
+ SymbolicAddress::FloorF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigTruncD = {
+ SymbolicAddress::TruncD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigTruncF = {
+ SymbolicAddress::TruncF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigNearbyIntD = {
+ SymbolicAddress::NearbyIntD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigNearbyIntF = {
+ SymbolicAddress::NearbyIntF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigExpD = {
+ SymbolicAddress::ExpD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigLogD = {
+ SymbolicAddress::LogD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigPowD = {
+ SymbolicAddress::PowD, _F64, _Infallible, 2, {_F64, _F64, _END}};
+const SymbolicAddressSignature SASigATan2D = {
+ SymbolicAddress::ATan2D, _F64, _Infallible, 2, {_F64, _F64, _END}};
+const SymbolicAddressSignature SASigMemoryGrowM32 = {
+ SymbolicAddress::MemoryGrowM32, _I32, _Infallible, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigMemoryGrowM64 = {
+ SymbolicAddress::MemoryGrowM64, _I64, _Infallible, 2, {_PTR, _I64, _END}};
+const SymbolicAddressSignature SASigMemorySizeM32 = {
+ SymbolicAddress::MemorySizeM32, _I32, _Infallible, 1, {_PTR, _END}};
+const SymbolicAddressSignature SASigMemorySizeM64 = {
+ SymbolicAddress::MemorySizeM64, _I64, _Infallible, 1, {_PTR, _END}};
+const SymbolicAddressSignature SASigWaitI32M32 = {
+ SymbolicAddress::WaitI32M32,
+ _I32,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _I32, _I64, _END}};
+const SymbolicAddressSignature SASigWaitI32M64 = {
+ SymbolicAddress::WaitI32M64,
+ _I32,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I64, _I32, _I64, _END}};
+const SymbolicAddressSignature SASigWaitI64M32 = {
+ SymbolicAddress::WaitI64M32,
+ _I32,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _I64, _I64, _END}};
+const SymbolicAddressSignature SASigWaitI64M64 = {
+ SymbolicAddress::WaitI64M64,
+ _I32,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I64, _I64, _I64, _END}};
+const SymbolicAddressSignature SASigWakeM32 = {
+ SymbolicAddress::WakeM32, _I32, _FailOnNegI32, 3, {_PTR, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigWakeM64 = {
+ SymbolicAddress::WakeM64, _I32, _FailOnNegI32, 3, {_PTR, _I64, _I32, _END}};
+const SymbolicAddressSignature SASigMemCopyM32 = {
+ SymbolicAddress::MemCopyM32,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemCopySharedM32 = {
+ SymbolicAddress::MemCopySharedM32,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemCopyM64 = {
+ SymbolicAddress::MemCopyM64,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I64, _I64, _I64, _PTR, _END}};
+const SymbolicAddressSignature SASigMemCopySharedM64 = {
+ SymbolicAddress::MemCopySharedM64,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I64, _I64, _I64, _PTR, _END}};
+const SymbolicAddressSignature SASigDataDrop = {
+ SymbolicAddress::DataDrop, _VOID, _FailOnNegI32, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigMemFillM32 = {
+ SymbolicAddress::MemFillM32,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemFillSharedM32 = {
+ SymbolicAddress::MemFillSharedM32,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemFillM64 = {
+ SymbolicAddress::MemFillM64,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I64, _I32, _I64, _PTR, _END}};
+const SymbolicAddressSignature SASigMemFillSharedM64 = {
+ SymbolicAddress::MemFillSharedM64,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I64, _I32, _I64, _PTR, _END}};
+const SymbolicAddressSignature SASigMemDiscardM32 = {
+ SymbolicAddress::MemDiscardM32,
+ _VOID,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemDiscardSharedM32 = {
+ SymbolicAddress::MemDiscardSharedM32,
+ _VOID,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemDiscardM64 = {
+ SymbolicAddress::MemDiscardM64,
+ _VOID,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I64, _I64, _PTR, _END}};
+const SymbolicAddressSignature SASigMemDiscardSharedM64 = {
+ SymbolicAddress::MemDiscardSharedM64,
+ _VOID,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I64, _I64, _PTR, _END}};
+const SymbolicAddressSignature SASigMemInitM32 = {
+ SymbolicAddress::MemInitM32,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigMemInitM64 = {
+ SymbolicAddress::MemInitM64,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I64, _I32, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableCopy = {
+ SymbolicAddress::TableCopy,
+ _VOID,
+ _FailOnNegI32,
+ 6,
+ {_PTR, _I32, _I32, _I32, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigElemDrop = {
+ SymbolicAddress::ElemDrop, _VOID, _FailOnNegI32, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigTableFill = {
+ SymbolicAddress::TableFill,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _RoN, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableGet = {SymbolicAddress::TableGet,
+ _RoN,
+ _FailOnInvalidRef,
+ 3,
+ {_PTR, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableGrow = {
+ SymbolicAddress::TableGrow,
+ _I32,
+ _Infallible,
+ 4,
+ {_PTR, _RoN, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableInit = {
+ SymbolicAddress::TableInit,
+ _VOID,
+ _FailOnNegI32,
+ 6,
+ {_PTR, _I32, _I32, _I32, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableSet = {SymbolicAddress::TableSet,
+ _VOID,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _RoN, _I32, _END}};
+const SymbolicAddressSignature SASigTableSize = {
+ SymbolicAddress::TableSize, _I32, _Infallible, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigRefFunc = {
+ SymbolicAddress::RefFunc, _RoN, _FailOnInvalidRef, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigPostBarrier = {
+ SymbolicAddress::PostBarrier, _VOID, _Infallible, 2, {_PTR, _PTR, _END}};
+const SymbolicAddressSignature SASigPostBarrierPrecise = {
+ SymbolicAddress::PostBarrierPrecise,
+ _VOID,
+ _Infallible,
+ 3,
+ {_PTR, _PTR, _RoN, _END}};
+const SymbolicAddressSignature SASigPostBarrierPreciseWithOffset = {
+ SymbolicAddress::PostBarrierPreciseWithOffset,
+ _VOID,
+ _Infallible,
+ 4,
+ {_PTR, _PTR, _I32, _RoN, _END}};
+const SymbolicAddressSignature SASigExceptionNew = {
+ SymbolicAddress::ExceptionNew, _RoN, _FailOnNullPtr, 2, {_PTR, _RoN, _END}};
+const SymbolicAddressSignature SASigThrowException = {
+ SymbolicAddress::ThrowException,
+ _VOID,
+ _FailOnNegI32,
+ 2,
+ {_PTR, _RoN, _END}};
+const SymbolicAddressSignature SASigStructNew = {
+ SymbolicAddress::StructNew, _RoN, _FailOnNullPtr, 2, {_PTR, _PTR, _END}};
+const SymbolicAddressSignature SASigStructNewUninit = {
+ SymbolicAddress::StructNewUninit,
+ _RoN,
+ _FailOnNullPtr,
+ 2,
+ {_PTR, _PTR, _END}};
+const SymbolicAddressSignature SASigArrayNew = {SymbolicAddress::ArrayNew,
+ _RoN,
+ _FailOnNullPtr,
+ 3,
+ {_PTR, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigArrayNewUninit = {
+ SymbolicAddress::ArrayNewUninit,
+ _RoN,
+ _FailOnNullPtr,
+ 3,
+ {_PTR, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigArrayNewData = {
+ SymbolicAddress::ArrayNewData,
+ _RoN,
+ _FailOnNullPtr,
+ 5,
+ {_PTR, _I32, _I32, _PTR, _I32, _END}};
+const SymbolicAddressSignature SASigArrayNewElem = {
+ SymbolicAddress::ArrayNewElem,
+ _RoN,
+ _FailOnNullPtr,
+ 5,
+ {_PTR, _I32, _I32, _PTR, _I32, _END}};
+const SymbolicAddressSignature SASigArrayCopy = {
+ SymbolicAddress::ArrayCopy,
+ _VOID,
+ _FailOnNegI32,
+ 7,
+ {_PTR, _RoN, _I32, _RoN, _I32, _I32, _I32, _END}};
+
+#define DECL_SAS_FOR_INTRINSIC(op, export, sa_name, abitype, entry, idx) \
+ const SymbolicAddressSignature SASig##sa_name = { \
+ SymbolicAddress::sa_name, _VOID, _FailOnNegI32, \
+ DECLARE_INTRINSIC_PARAM_TYPES_##op};
+
+FOR_EACH_INTRINSIC(DECL_SAS_FOR_INTRINSIC)
+#undef DECL_SAS_FOR_INTRINSIC
+
+} // namespace wasm
+} // namespace js
+
+#undef _F64
+#undef _F32
+#undef _I32
+#undef _I64
+#undef _PTR
+#undef _RoN
+#undef _VOID
+#undef _END
+#undef _Infallible
+#undef _FailOnNegI32
+#undef _FailOnNullPtr
+
+#ifdef DEBUG
+ABIArgType ToABIType(FailureMode mode) {
+ switch (mode) {
+ case FailureMode::FailOnNegI32:
+ return ArgType_Int32;
+ case FailureMode::FailOnNullPtr:
+ case FailureMode::FailOnInvalidRef:
+ return ArgType_General;
+ default:
+ MOZ_CRASH("unexpected failure mode");
+ }
+}
+
+ABIArgType ToABIType(MIRType type) {
+ switch (type) {
+ case MIRType::None:
+ case MIRType::Int32:
+ return ArgType_Int32;
+ case MIRType::Int64:
+ return ArgType_Int64;
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ return ArgType_General;
+ case MIRType::Float32:
+ return ArgType_Float32;
+ case MIRType::Double:
+ return ArgType_Float64;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+ABIFunctionType ToABIType(const SymbolicAddressSignature& sig) {
+ MOZ_ASSERT_IF(sig.failureMode != FailureMode::Infallible,
+ ToABIType(sig.failureMode) == ToABIType(sig.retType));
+ int abiType = ToABIType(sig.retType) << RetType_Shift;
+ for (int i = 0; i < sig.numArgs; i++) {
+ abiType |= (ToABIType(sig.argTypes[i]) << (ArgType_Shift * (i + 1)));
+ }
+ return ABIFunctionType(abiType);
+}
+#endif
+
+// ============================================================================
+// WebAssembly builtin C++ functions called from wasm code to implement internal
+// wasm operations: implementations.
+
+#if defined(JS_CODEGEN_ARM)
+extern "C" {
+
+extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);
+
+extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
+}
+#endif
+
+// This utility function can only be called for builtins that are called
+// directly from wasm code.
+static JitActivation* CallingActivation(JSContext* cx) {
+ Activation* act = cx->activation();
+ MOZ_ASSERT(act->asJit()->hasWasmExitFP());
+ return act->asJit();
+}
+
+static bool WasmHandleDebugTrap() {
+ JSContext* cx = TlsContext.get(); // Cold code
+ JitActivation* activation = CallingActivation(cx);
+ Frame* fp = activation->wasmExitFP();
+ Instance* instance = GetNearestEffectiveInstance(fp);
+ const Code& code = instance->code();
+ MOZ_ASSERT(code.metadata().debugEnabled);
+
+ // The debug trap stub is the innermost frame. Its return address is the
+ // actual trap site.
+ const CallSite* site = code.lookupCallSite(fp->returnAddress());
+ MOZ_ASSERT(site);
+
+ // Advance to the actual trapping frame.
+ fp = fp->wasmCaller();
+ DebugFrame* debugFrame = DebugFrame::from(fp);
+
+ if (site->kind() == CallSite::EnterFrame) {
+ if (!instance->debug().enterFrameTrapsEnabled()) {
+ return true;
+ }
+ debugFrame->setIsDebuggee();
+ debugFrame->observe(cx);
+ if (!DebugAPI::onEnterFrame(cx, debugFrame)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // Ignoring forced return because changing code execution order is
+ // not yet implemented in the wasm baseline.
+ // TODO properly handle forced return and resume wasm execution.
+ JS_ReportErrorASCII(cx,
+ "Unexpected resumption value from onEnterFrame");
+ }
+ return false;
+ }
+ return true;
+ }
+ if (site->kind() == CallSite::LeaveFrame) {
+ if (!debugFrame->updateReturnJSValue(cx)) {
+ return false;
+ }
+ bool ok = DebugAPI::onLeaveFrame(cx, debugFrame, nullptr, true);
+ debugFrame->leave(cx);
+ return ok;
+ }
+
+ DebugState& debug = instance->debug();
+ MOZ_ASSERT(debug.hasBreakpointTrapAtOffset(site->lineOrBytecode()));
+ if (debug.stepModeEnabled(debugFrame->funcIndex())) {
+ if (!DebugAPI::onSingleStep(cx)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // TODO properly handle forced return.
+ JS_ReportErrorASCII(cx,
+ "Unexpected resumption value from onSingleStep");
+ }
+ return false;
+ }
+ }
+ if (debug.hasBreakpointSite(site->lineOrBytecode())) {
+ if (!DebugAPI::onTrap(cx)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // TODO properly handle forced return.
+ JS_ReportErrorASCII(
+ cx, "Unexpected resumption value from breakpoint handler");
+ }
+ return false;
+ }
+ }
+ return true;
+}
+
+// Check if the pending exception, if any, is catchable by wasm.
+static bool HasCatchableException(JitActivation* activation, JSContext* cx,
+ MutableHandleValue exn) {
+ if (!cx->isExceptionPending()) {
+ return false;
+ }
+
+ // Traps are generally not catchable as wasm exceptions. The only case in
+ // which they are catchable is for Trap::ThrowReported, which the wasm
+ // compiler uses to throw exceptions and is the source of exceptions from C++.
+ if (activation->isWasmTrapping() &&
+ activation->wasmTrapData().trap != Trap::ThrowReported) {
+ return false;
+ }
+
+ if (cx->isThrowingOverRecursed() || cx->isThrowingOutOfMemory()) {
+ return false;
+ }
+
+ // Write the exception out here to exn to avoid having to get the pending
+ // exception and checking for OOM multiple times.
+ if (cx->getPendingException(exn)) {
+ // Check if a JS exception originated from a wasm trap.
+ if (exn.isObject() && exn.toObject().is<ErrorObject>()) {
+ ErrorObject& err = exn.toObject().as<ErrorObject>();
+ if (err.fromWasmTrap()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ MOZ_ASSERT(cx->isThrowingOutOfMemory());
+ return false;
+}
+
+// Unwind the entire activation in response to a thrown exception. This function
+// is responsible for notifying the debugger of each unwound frame. The return
+// value is the new stack address which the calling stub will set to the sp
+// register before executing a return instruction.
+//
+// This function will also look for try-catch handlers and, if not trapping or
+// throwing an uncatchable exception, will write the handler info in the return
+// argument and return true.
+//
+// Returns false if a handler isn't found or shouldn't be used (e.g., traps).
+
+bool wasm::HandleThrow(JSContext* cx, WasmFrameIter& iter,
+ jit::ResumeFromException* rfe) {
+ // WasmFrameIter iterates down wasm frames in the activation starting at
+ // JitActivation::wasmExitFP(). Calling WasmFrameIter::startUnwinding pops
+ // JitActivation::wasmExitFP() once each time WasmFrameIter is incremented,
+ // ultimately leaving exit FP null when the WasmFrameIter is done(). This
+ // is necessary to prevent a DebugFrame from being observed again after we
+ // just called onLeaveFrame (which would lead to the frame being re-added
+ // to the map of live frames, right as it becomes trash).
+
+ MOZ_ASSERT(CallingActivation(cx) == iter.activation());
+ MOZ_ASSERT(!iter.done());
+ iter.setUnwind(WasmFrameIter::Unwind::True);
+
+ // Live wasm code on the stack is kept alive (in TraceJitActivation) by
+ // marking the instance of every wasm::Frame found by WasmFrameIter.
+ // However, as explained above, we're popping frames while iterating which
+ // means that a GC during this loop could collect the code of frames whose
+ // code is still on the stack. This is actually mostly fine: as soon as we
+ // return to the throw stub, the entire stack will be popped as a whole,
+ // returning to the C++ caller. However, we must keep alive the throw stub
+ // itself, which is owned by the innermost instance.
+ Rooted<WasmInstanceObject*> keepAlive(cx, iter.instance()->object());
+
+ JitActivation* activation = CallingActivation(cx);
+ RootedValue exn(cx);
+ bool hasCatchableException = HasCatchableException(activation, cx, &exn);
+
+ for (; !iter.done(); ++iter) {
+ // Wasm code can enter same-compartment realms, so reset cx->realm to
+ // this frame's realm.
+ cx->setRealmForJitExceptionHandler(iter.instance()->realm());
+
+ // Only look for an exception handler if there's a catchable exception.
+ if (hasCatchableException) {
+ const wasm::Code& code = iter.instance()->code();
+ const uint8_t* pc = iter.resumePCinCurrentFrame();
+ Tier tier;
+ const wasm::TryNote* tryNote = code.lookupTryNote((void*)pc, &tier);
+
+ if (tryNote) {
+ cx->clearPendingException();
+ RootedAnyRef ref(cx, AnyRef::null());
+ if (!BoxAnyRef(cx, exn, &ref)) {
+ MOZ_ASSERT(cx->isThrowingOutOfMemory());
+ hasCatchableException = false;
+ continue;
+ }
+
+ MOZ_ASSERT(iter.instance() == iter.instance());
+ iter.instance()->setPendingException(ref);
+
+ rfe->kind = ExceptionResumeKind::WasmCatch;
+ rfe->framePointer = (uint8_t*)iter.frame();
+ rfe->instance = iter.instance();
+
+ rfe->stackPointer =
+ (uint8_t*)(rfe->framePointer - tryNote->landingPadFramePushed());
+ rfe->target =
+ iter.instance()->codeBase(tier) + tryNote->landingPadEntryPoint();
+
+ // Make sure to clear trapping state if we got here due to a trap.
+ if (activation->isWasmTrapping()) {
+ activation->finishWasmTrap();
+ }
+
+ return true;
+ }
+ }
+
+ if (!iter.debugEnabled()) {
+ continue;
+ }
+
+ DebugFrame* frame = iter.debugFrame();
+ frame->clearReturnJSValue();
+
+ // Assume ResumeMode::Terminate if no exception is pending --
+ // no onExceptionUnwind handlers need to be fired.
+ if (cx->isExceptionPending()) {
+ if (!DebugAPI::onExceptionUnwind(cx, frame)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // Unexpected trap return -- raising error since throw recovery
+ // is not yet implemented in the wasm baseline.
+ // TODO properly handle forced return and resume wasm execution.
+ JS_ReportErrorASCII(
+ cx, "Unexpected resumption value from onExceptionUnwind");
+ }
+ }
+ }
+
+ bool ok = DebugAPI::onLeaveFrame(cx, frame, nullptr, false);
+ if (ok) {
+ // Unexpected success from the handler onLeaveFrame -- raising error
+ // since throw recovery is not yet implemented in the wasm baseline.
+ // TODO properly handle success and resume wasm execution.
+ JS_ReportErrorASCII(cx, "Unexpected success from onLeaveFrame");
+ }
+ frame->leave(cx);
+ }
+
+ MOZ_ASSERT(!cx->activation()->asJit()->isWasmTrapping(),
+ "unwinding clears the trapping state");
+
+  // If no handler was found, exit wasm via ret().
+  // FailInstanceReg signals to the wasm stub that it should do a failure
+  // return.
+ rfe->kind = ExceptionResumeKind::Wasm;
+ rfe->framePointer = (uint8_t*)iter.unwoundCallerFP();
+ rfe->stackPointer = (uint8_t*)iter.unwoundAddressOfReturnAddress();
+ rfe->instance = (Instance*)FailInstanceReg;
+ rfe->target = nullptr;
+ return false;
+}
+
+static void* WasmHandleThrow(jit::ResumeFromException* rfe) {
+ JSContext* cx = TlsContext.get(); // Cold code
+ JitActivation* activation = CallingActivation(cx);
+ WasmFrameIter iter(activation);
+  // We can ignore the return value here because the throw stub code
+  // can just check the resume kind to see whether a handler was found.
+ HandleThrow(cx, iter, rfe);
+ return rfe;
+}
+
+// Has the same return-value convention as WasmHandleTrap() below.
+static void* CheckInterrupt(JSContext* cx, JitActivation* activation) {
+ ResetInterruptState(cx);
+
+ if (!CheckForInterrupt(cx)) {
+ return nullptr;
+ }
+
+ void* resumePC = activation->wasmTrapData().resumePC;
+ activation->finishWasmTrap();
+ return resumePC;
+}
+
+// The calling convention between this function and its caller in the stub
+// generated by GenerateTrapExit() is:
+// - return nullptr if the stub should jump to the throw stub to unwind
+// the activation;
+// - return the (non-null) resumePC that should be jumped if execution should
+// resume after the trap.
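+//
+// For example, Trap::Unreachable always reports an error and returns nullptr,
+// so the stub unwinds through the throw stub, whereas Trap::CheckInterrupt may
+// return the resumePC recorded in wasmTrapData(), letting execution continue
+// once the interrupt has been serviced.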
+static void* WasmHandleTrap() {
+ JSContext* cx = TlsContext.get(); // Cold code
+ JitActivation* activation = CallingActivation(cx);
+
+ switch (activation->wasmTrapData().trap) {
+ case Trap::Unreachable: {
+ ReportTrapError(cx, JSMSG_WASM_UNREACHABLE);
+ return nullptr;
+ }
+ case Trap::IntegerOverflow: {
+ ReportTrapError(cx, JSMSG_WASM_INTEGER_OVERFLOW);
+ return nullptr;
+ }
+ case Trap::InvalidConversionToInteger: {
+ ReportTrapError(cx, JSMSG_WASM_INVALID_CONVERSION);
+ return nullptr;
+ }
+ case Trap::IntegerDivideByZero: {
+ ReportTrapError(cx, JSMSG_WASM_INT_DIVIDE_BY_ZERO);
+ return nullptr;
+ }
+ case Trap::IndirectCallToNull: {
+ ReportTrapError(cx, JSMSG_WASM_IND_CALL_TO_NULL);
+ return nullptr;
+ }
+ case Trap::IndirectCallBadSig: {
+ ReportTrapError(cx, JSMSG_WASM_IND_CALL_BAD_SIG);
+ return nullptr;
+ }
+ case Trap::NullPointerDereference: {
+ ReportTrapError(cx, JSMSG_WASM_DEREF_NULL);
+ return nullptr;
+ }
+ case Trap::BadCast: {
+ ReportTrapError(cx, JSMSG_WASM_BAD_CAST);
+ return nullptr;
+ }
+ case Trap::OutOfBounds: {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return nullptr;
+ }
+ case Trap::UnalignedAccess: {
+ ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
+ return nullptr;
+ }
+ case Trap::CheckInterrupt:
+ return CheckInterrupt(cx, activation);
+ case Trap::StackOverflow: {
+ // Instance::setInterrupt() causes a fake stack overflow. Since
+ // Instance::setInterrupt() is called racily, it's possible for a real
+ // stack overflow to trap, followed by a racy call to setInterrupt().
+ // Thus, we must check for a real stack overflow first before we
+ // CheckInterrupt() and possibly resume execution.
+ AutoCheckRecursionLimit recursion(cx);
+ if (!recursion.check(cx)) {
+ return nullptr;
+ }
+ if (activation->wasmExitInstance()->isInterrupted()) {
+ return CheckInterrupt(cx, activation);
+ }
+ ReportTrapError(cx, JSMSG_OVER_RECURSED);
+ return nullptr;
+ }
+ case Trap::ThrowReported:
+ // Error was already reported under another name.
+ return nullptr;
+ case Trap::Limit:
+ break;
+ }
+
+ MOZ_CRASH("unexpected trap");
+}
+
+static void WasmReportV128JSCall() {
+ JSContext* cx = TlsContext.get(); // Cold code
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+}
+
+static int32_t CoerceInPlace_ToInt32(Value* rawVal) {
+ JSContext* cx = TlsContext.get(); // Cold code
+
+ int32_t i32;
+ RootedValue val(cx, *rawVal);
+ if (!ToInt32(cx, val, &i32)) {
+ *rawVal = PoisonedObjectValue(0x42);
+ return false;
+ }
+
+ *rawVal = Int32Value(i32);
+ return true;
+}
+
+static int32_t CoerceInPlace_ToBigInt(Value* rawVal) {
+ JSContext* cx = TlsContext.get(); // Cold code
+
+ RootedValue val(cx, *rawVal);
+ BigInt* bi = ToBigInt(cx, val);
+ if (!bi) {
+ *rawVal = PoisonedObjectValue(0x43);
+ return false;
+ }
+
+ *rawVal = BigIntValue(bi);
+ return true;
+}
+
+static int32_t CoerceInPlace_ToNumber(Value* rawVal) {
+ JSContext* cx = TlsContext.get(); // Cold code
+
+ double dbl;
+ RootedValue val(cx, *rawVal);
+ if (!ToNumber(cx, val, &dbl)) {
+ *rawVal = PoisonedObjectValue(0x42);
+ return false;
+ }
+
+ *rawVal = DoubleValue(dbl);
+ return true;
+}
+
+static void* BoxValue_Anyref(Value* rawVal) {
+ JSContext* cx = TlsContext.get(); // Cold code
+ RootedValue val(cx, *rawVal);
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!BoxAnyRef(cx, val, &result)) {
+ return nullptr;
+ }
+ return result.get().forCompiledCode();
+}
+
+static int32_t CoerceInPlace_JitEntry(int funcExportIndex, Instance* instance,
+ Value* argv) {
+ JSContext* cx = TlsContext.get(); // Cold code
+
+ const Code& code = instance->code();
+ const FuncExport& fe =
+ code.metadata(code.stableTier()).funcExports[funcExportIndex];
+ const FuncType& funcType = code.metadata().getFuncExportType(fe);
+
+ for (size_t i = 0; i < funcType.args().length(); i++) {
+ HandleValue arg = HandleValue::fromMarkedLocation(&argv[i]);
+ switch (funcType.args()[i].kind()) {
+ case ValType::I32: {
+ int32_t i32;
+ if (!ToInt32(cx, arg, &i32)) {
+ return false;
+ }
+ argv[i] = Int32Value(i32);
+ break;
+ }
+ case ValType::I64: {
+ // In this case we store a BigInt value as there is no value type
+ // corresponding directly to an I64. The conversion to I64 happens
+ // in the JIT entry stub.
+ BigInt* bigint = ToBigInt(cx, arg);
+ if (!bigint) {
+ return false;
+ }
+ argv[i] = BigIntValue(bigint);
+ break;
+ }
+ case ValType::F32:
+ case ValType::F64: {
+ double dbl;
+ if (!ToNumber(cx, arg, &dbl)) {
+ return false;
+ }
+ // No need to convert double-to-float for f32, it's done inline
+ // in the wasm stub later.
+ argv[i] = DoubleValue(dbl);
+ break;
+ }
+ case ValType::Ref: {
+ // Guarded against by temporarilyUnsupportedReftypeForEntry()
+ MOZ_RELEASE_ASSERT(funcType.args()[i].refType().isExtern());
+ // Leave Object and Null alone, we will unbox inline. All we need
+ // to do is convert other values to an Object representation.
+ if (!arg.isObjectOrNull()) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!BoxAnyRef(cx, arg, &result)) {
+ return false;
+ }
+ argv[i].setObject(*result.get().asJSObject());
+ }
+ break;
+ }
+ case ValType::V128: {
+ // Guarded against by hasV128ArgOrRet()
+ MOZ_CRASH("unexpected input argument in CoerceInPlace_JitEntry");
+ }
+ default: {
+ MOZ_CRASH("unexpected input argument in CoerceInPlace_JitEntry");
+ }
+ }
+ }
+
+ return true;
+}
+
+// Allocate a BigInt without GC; this corresponds to the similar VMFunction.
+static BigInt* AllocateBigIntTenuredNoGC() {
+ JSContext* cx = TlsContext.get(); // Cold code (the caller is elaborate)
+
+ return cx->newCell<BigInt, NoGC>(gc::Heap::Tenured);
+}
+
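+// The I64 helpers below take each 64-bit operand as a (hi, lo) pair of 32-bit
+// halves so that they remain callable from 32-bit targets which pass int64
+// values as register pairs; callers are expected to have already ruled out the
+// division-by-zero and overflow cases asserted below.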
+static int64_t DivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ int64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ int64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(x != INT64_MIN || y != -1);
+ MOZ_ASSERT(y != 0);
+ return x / y;
+}
+
+static int64_t UDivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(y != 0);
+ return int64_t(x / y);
+}
+
+static int64_t ModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ int64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ int64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(x != INT64_MIN || y != -1);
+ MOZ_ASSERT(y != 0);
+ return x % y;
+}
+
+static int64_t UModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(y != 0);
+ return int64_t(x % y);
+}
+
+static int64_t TruncateDoubleToInt64(double input) {
+  // Note: INT64_MAX is not exactly representable in double; double(INT64_MAX)
+  // rounds up to INT64_MAX + 1 (2^63). Inputs at or above that bound
+  // therefore also take the failure value.
+ if (input >= double(INT64_MAX) || input < double(INT64_MIN) ||
+ std::isnan(input)) {
+ return int64_t(0x8000000000000000);
+ }
+ return int64_t(input);
+}
+
+static uint64_t TruncateDoubleToUint64(double input) {
+  // Note: UINT64_MAX is not exactly representable in double;
+  // double(UINT64_MAX) rounds up to UINT64_MAX + 1 (2^64). Inputs at or above
+  // that bound therefore also take the failure value.
+ if (input >= double(UINT64_MAX) || input <= -1.0 || std::isnan(input)) {
+ return int64_t(0x8000000000000000);
+ }
+ return uint64_t(input);
+}
+
+static int64_t SaturatingTruncateDoubleToInt64(double input) {
+ // Handle in-range values (except INT64_MIN).
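+  // Note that -double(INT64_MIN) is exactly 2^63, so this accepts every double
+  // whose magnitude is strictly below 2^63; INT64_MIN itself (-2^63) fails the
+  // test and is handled by the negative-overflow case below.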
+ if (fabs(input) < -double(INT64_MIN)) {
+ return int64_t(input);
+ }
+ // Handle NaN.
+ if (std::isnan(input)) {
+ return 0;
+ }
+ // Handle positive overflow.
+ if (input > 0) {
+ return INT64_MAX;
+ }
+ // Handle negative overflow.
+ return INT64_MIN;
+}
+
+static uint64_t SaturatingTruncateDoubleToUint64(double input) {
+ // Handle positive overflow.
+ if (input >= -double(INT64_MIN) * 2.0) {
+ return UINT64_MAX;
+ }
+ // Handle in-range values.
+ if (input > -1.0) {
+ return uint64_t(input);
+ }
+ // Handle NaN and negative overflow.
+ return 0;
+}
+
+static double Int64ToDouble(int32_t x_hi, uint32_t x_lo) {
+ int64_t x = int64_t((uint64_t(x_hi) << 32)) + int64_t(x_lo);
+ return double(x);
+}
+
+static float Int64ToFloat32(int32_t x_hi, uint32_t x_lo) {
+ int64_t x = int64_t((uint64_t(x_hi) << 32)) + int64_t(x_lo);
+ return float(x);
+}
+
+static double Uint64ToDouble(int32_t x_hi, uint32_t x_lo) {
+ uint64_t x = (uint64_t(x_hi) << 32) + uint64_t(x_lo);
+ return double(x);
+}
+
+static float Uint64ToFloat32(int32_t x_hi, uint32_t x_lo) {
+ uint64_t x = (uint64_t(x_hi) << 32) + uint64_t(x_lo);
+ return float(x);
+}
+
+template <class F>
+static inline void* FuncCast(F* funcPtr, ABIFunctionType abiType) {
+ void* pf = JS_FUNC_TO_DATA_PTR(void*, funcPtr);
+#ifdef JS_SIMULATOR
+ pf = Simulator::RedirectNativeFunction(pf, abiType);
+#endif
+ return pf;
+}
+
+#ifdef WASM_CODEGEN_DEBUG
+void wasm::PrintI32(int32_t val) { fprintf(stderr, "i32(%d) ", val); }
+
+void wasm::PrintPtr(uint8_t* val) { fprintf(stderr, "ptr(%p) ", val); }
+
+void wasm::PrintF32(float val) { fprintf(stderr, "f32(%f) ", val); }
+
+void wasm::PrintF64(double val) { fprintf(stderr, "f64(%lf) ", val); }
+
+void wasm::PrintText(const char* out) { fprintf(stderr, "%s", out); }
+#endif
+
+void* wasm::AddressOf(SymbolicAddress imm, ABIFunctionType* abiType) {
+ // See NeedsBuiltinThunk for a classification of the different names here.
+ switch (imm) {
+ case SymbolicAddress::HandleDebugTrap:
+ *abiType = Args_General0;
+ return FuncCast(WasmHandleDebugTrap, *abiType);
+ case SymbolicAddress::HandleThrow:
+ *abiType = Args_General1;
+ return FuncCast(WasmHandleThrow, *abiType);
+ case SymbolicAddress::HandleTrap:
+ *abiType = Args_General0;
+ return FuncCast(WasmHandleTrap, *abiType);
+ case SymbolicAddress::ReportV128JSCall:
+ *abiType = Args_General0;
+ return FuncCast(WasmReportV128JSCall, *abiType);
+ case SymbolicAddress::CallImport_General:
+ *abiType = Args_Int32_GeneralInt32Int32General;
+ return FuncCast(Instance::callImport_general, *abiType);
+ case SymbolicAddress::CoerceInPlace_ToInt32:
+ *abiType = Args_General1;
+ return FuncCast(CoerceInPlace_ToInt32, *abiType);
+ case SymbolicAddress::CoerceInPlace_ToBigInt:
+ *abiType = Args_General1;
+ return FuncCast(CoerceInPlace_ToBigInt, *abiType);
+ case SymbolicAddress::CoerceInPlace_ToNumber:
+ *abiType = Args_General1;
+ return FuncCast(CoerceInPlace_ToNumber, *abiType);
+ case SymbolicAddress::CoerceInPlace_JitEntry:
+ *abiType = Args_General3;
+ return FuncCast(CoerceInPlace_JitEntry, *abiType);
+ case SymbolicAddress::ToInt32:
+ *abiType = Args_Int_Double;
+ return FuncCast<int32_t(double)>(JS::ToInt32, *abiType);
+ case SymbolicAddress::BoxValue_Anyref:
+ *abiType = Args_General1;
+ return FuncCast(BoxValue_Anyref, *abiType);
+ case SymbolicAddress::AllocateBigInt:
+ *abiType = Args_General0;
+ return FuncCast(AllocateBigIntTenuredNoGC, *abiType);
+ case SymbolicAddress::DivI64:
+ *abiType = Args_General4;
+ return FuncCast(DivI64, *abiType);
+ case SymbolicAddress::UDivI64:
+ *abiType = Args_General4;
+ return FuncCast(UDivI64, *abiType);
+ case SymbolicAddress::ModI64:
+ *abiType = Args_General4;
+ return FuncCast(ModI64, *abiType);
+ case SymbolicAddress::UModI64:
+ *abiType = Args_General4;
+ return FuncCast(UModI64, *abiType);
+ case SymbolicAddress::TruncateDoubleToUint64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(TruncateDoubleToUint64, *abiType);
+ case SymbolicAddress::TruncateDoubleToInt64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(TruncateDoubleToInt64, *abiType);
+ case SymbolicAddress::SaturatingTruncateDoubleToUint64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(SaturatingTruncateDoubleToUint64, *abiType);
+ case SymbolicAddress::SaturatingTruncateDoubleToInt64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(SaturatingTruncateDoubleToInt64, *abiType);
+ case SymbolicAddress::Uint64ToDouble:
+ *abiType = Args_Double_IntInt;
+ return FuncCast(Uint64ToDouble, *abiType);
+ case SymbolicAddress::Uint64ToFloat32:
+ *abiType = Args_Float32_IntInt;
+ return FuncCast(Uint64ToFloat32, *abiType);
+ case SymbolicAddress::Int64ToDouble:
+ *abiType = Args_Double_IntInt;
+ return FuncCast(Int64ToDouble, *abiType);
+ case SymbolicAddress::Int64ToFloat32:
+ *abiType = Args_Float32_IntInt;
+ return FuncCast(Int64ToFloat32, *abiType);
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ *abiType = Args_General2;
+ return FuncCast(__aeabi_idivmod, *abiType);
+ case SymbolicAddress::aeabi_uidivmod:
+ *abiType = Args_General2;
+ return FuncCast(__aeabi_uidivmod, *abiType);
+#endif
+ case SymbolicAddress::ModD:
+ *abiType = Args_Double_DoubleDouble;
+ return FuncCast(NumberMod, *abiType);
+ case SymbolicAddress::SinNativeD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(sin, *abiType);
+ case SymbolicAddress::SinFdlibmD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::sin, *abiType);
+ case SymbolicAddress::CosNativeD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(cos, *abiType);
+ case SymbolicAddress::CosFdlibmD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::cos, *abiType);
+ case SymbolicAddress::TanNativeD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(tan, *abiType);
+ case SymbolicAddress::TanFdlibmD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::tan, *abiType);
+ case SymbolicAddress::ASinD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::asin, *abiType);
+ case SymbolicAddress::ACosD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::acos, *abiType);
+ case SymbolicAddress::ATanD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::atan, *abiType);
+ case SymbolicAddress::CeilD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::ceil, *abiType);
+ case SymbolicAddress::CeilF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::ceilf, *abiType);
+ case SymbolicAddress::FloorD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::floor, *abiType);
+ case SymbolicAddress::FloorF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::floorf, *abiType);
+ case SymbolicAddress::TruncD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::trunc, *abiType);
+ case SymbolicAddress::TruncF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::truncf, *abiType);
+ case SymbolicAddress::NearbyIntD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::nearbyint, *abiType);
+ case SymbolicAddress::NearbyIntF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::nearbyintf, *abiType);
+ case SymbolicAddress::ExpD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::exp, *abiType);
+ case SymbolicAddress::LogD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::log, *abiType);
+ case SymbolicAddress::PowD:
+ *abiType = Args_Double_DoubleDouble;
+ return FuncCast(ecmaPow, *abiType);
+ case SymbolicAddress::ATan2D:
+ *abiType = Args_Double_DoubleDouble;
+ return FuncCast(ecmaAtan2, *abiType);
+
+ case SymbolicAddress::MemoryGrowM32:
+ *abiType = Args_Int32_GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemoryGrowM32));
+ return FuncCast(Instance::memoryGrow_m32, *abiType);
+ case SymbolicAddress::MemoryGrowM64:
+ *abiType = Args_Int64_GeneralInt64;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemoryGrowM64));
+ return FuncCast(Instance::memoryGrow_m64, *abiType);
+ case SymbolicAddress::MemorySizeM32:
+ *abiType = Args_Int32_General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemorySizeM32));
+ return FuncCast(Instance::memorySize_m32, *abiType);
+ case SymbolicAddress::MemorySizeM64:
+ *abiType = Args_Int64_General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemorySizeM64));
+ return FuncCast(Instance::memorySize_m64, *abiType);
+ case SymbolicAddress::WaitI32M32:
+ *abiType = Args_Int32_GeneralInt32Int32Int64;
+ MOZ_ASSERT(*abiType == ToABIType(SASigWaitI32M32));
+ return FuncCast(Instance::wait_i32_m32, *abiType);
+ case SymbolicAddress::WaitI32M64:
+ *abiType = Args_Int32_GeneralInt64Int32Int64;
+ MOZ_ASSERT(*abiType == ToABIType(SASigWaitI32M64));
+ return FuncCast(Instance::wait_i32_m64, *abiType);
+ case SymbolicAddress::WaitI64M32:
+ *abiType = Args_Int32_GeneralInt32Int64Int64;
+ MOZ_ASSERT(*abiType == ToABIType(SASigWaitI64M32));
+ return FuncCast(Instance::wait_i64_m32, *abiType);
+ case SymbolicAddress::WaitI64M64:
+ *abiType = Args_Int32_GeneralInt64Int64Int64;
+ MOZ_ASSERT(*abiType == ToABIType(SASigWaitI64M64));
+ return FuncCast(Instance::wait_i64_m64, *abiType);
+ case SymbolicAddress::WakeM32:
+ *abiType = Args_Int32_GeneralInt32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigWakeM32));
+ return FuncCast(Instance::wake_m32, *abiType);
+ case SymbolicAddress::WakeM64:
+ *abiType = Args_Int32_GeneralInt64Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigWakeM64));
+ return FuncCast(Instance::wake_m64, *abiType);
+ case SymbolicAddress::MemCopyM32:
+ *abiType = Args_Int32_GeneralInt32Int32Int32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemCopyM32));
+ return FuncCast(Instance::memCopy_m32, *abiType);
+ case SymbolicAddress::MemCopySharedM32:
+ *abiType = Args_Int32_GeneralInt32Int32Int32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemCopySharedM32));
+ return FuncCast(Instance::memCopyShared_m32, *abiType);
+ case SymbolicAddress::MemCopyM64:
+ *abiType = Args_Int32_GeneralInt64Int64Int64General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemCopyM64));
+ return FuncCast(Instance::memCopy_m64, *abiType);
+ case SymbolicAddress::MemCopySharedM64:
+ *abiType = Args_Int32_GeneralInt64Int64Int64General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemCopySharedM64));
+ return FuncCast(Instance::memCopyShared_m64, *abiType);
+ case SymbolicAddress::DataDrop:
+ *abiType = Args_Int32_GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigDataDrop));
+ return FuncCast(Instance::dataDrop, *abiType);
+ case SymbolicAddress::MemFillM32:
+ *abiType = Args_Int32_GeneralInt32Int32Int32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemFillM32));
+ return FuncCast(Instance::memFill_m32, *abiType);
+ case SymbolicAddress::MemFillSharedM32:
+ *abiType = Args_Int32_GeneralInt32Int32Int32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemFillSharedM32));
+ return FuncCast(Instance::memFillShared_m32, *abiType);
+ case SymbolicAddress::MemFillM64:
+ *abiType = Args_Int32_GeneralInt64Int32Int64General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemFillM64));
+ return FuncCast(Instance::memFill_m64, *abiType);
+ case SymbolicAddress::MemFillSharedM64:
+ *abiType = Args_Int32_GeneralInt64Int32Int64General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemFillSharedM64));
+ return FuncCast(Instance::memFillShared_m64, *abiType);
+ case SymbolicAddress::MemDiscardM32:
+ *abiType = Args_Int32_GeneralInt32Int32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemDiscardM32));
+ return FuncCast(Instance::memDiscard_m32, *abiType);
+ case SymbolicAddress::MemDiscardSharedM32:
+ *abiType = Args_Int32_GeneralInt32Int32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemDiscardSharedM32));
+ return FuncCast(Instance::memDiscardShared_m32, *abiType);
+ case SymbolicAddress::MemDiscardM64:
+ *abiType = Args_Int32_GeneralInt64Int64General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemDiscardM64));
+ return FuncCast(Instance::memDiscard_m64, *abiType);
+ case SymbolicAddress::MemDiscardSharedM64:
+ *abiType = Args_Int32_GeneralInt64Int64General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemDiscardSharedM64));
+ return FuncCast(Instance::memDiscardShared_m64, *abiType);
+ case SymbolicAddress::MemInitM32:
+ *abiType = Args_Int32_GeneralInt32Int32Int32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemInitM32));
+ return FuncCast(Instance::memInit_m32, *abiType);
+ case SymbolicAddress::MemInitM64:
+ *abiType = Args_Int32_GeneralInt64Int32Int32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemInitM64));
+ return FuncCast(Instance::memInit_m64, *abiType);
+ case SymbolicAddress::TableCopy:
+ *abiType = Args_Int32_GeneralInt32Int32Int32Int32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableCopy));
+ return FuncCast(Instance::tableCopy, *abiType);
+ case SymbolicAddress::ElemDrop:
+ *abiType = Args_Int32_GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigElemDrop));
+ return FuncCast(Instance::elemDrop, *abiType);
+ case SymbolicAddress::TableFill:
+ *abiType = Args_Int32_GeneralInt32GeneralInt32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableFill));
+ return FuncCast(Instance::tableFill, *abiType);
+ case SymbolicAddress::TableInit:
+ *abiType = Args_Int32_GeneralInt32Int32Int32Int32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableInit));
+ return FuncCast(Instance::tableInit, *abiType);
+ case SymbolicAddress::TableGet:
+ *abiType = Args_General_GeneralInt32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableGet));
+ return FuncCast(Instance::tableGet, *abiType);
+ case SymbolicAddress::TableGrow:
+ *abiType = Args_Int32_GeneralGeneralInt32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableGrow));
+ return FuncCast(Instance::tableGrow, *abiType);
+ case SymbolicAddress::TableSet:
+ *abiType = Args_Int32_GeneralInt32GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableSet));
+ return FuncCast(Instance::tableSet, *abiType);
+ case SymbolicAddress::TableSize:
+ *abiType = Args_Int32_GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableSize));
+ return FuncCast(Instance::tableSize, *abiType);
+ case SymbolicAddress::RefFunc:
+ *abiType = Args_General_GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigRefFunc));
+ return FuncCast(Instance::refFunc, *abiType);
+ case SymbolicAddress::PostBarrier:
+ *abiType = Args_Int32_GeneralGeneral;
+ MOZ_ASSERT(*abiType == ToABIType(SASigPostBarrier));
+ return FuncCast(Instance::postBarrier, *abiType);
+ case SymbolicAddress::PostBarrierPrecise:
+ *abiType = Args_Int32_GeneralGeneralGeneral;
+ MOZ_ASSERT(*abiType == ToABIType(SASigPostBarrierPrecise));
+ return FuncCast(Instance::postBarrierPrecise, *abiType);
+ case SymbolicAddress::PostBarrierPreciseWithOffset:
+ *abiType = Args_Int32_GeneralGeneralInt32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigPostBarrierPreciseWithOffset));
+ return FuncCast(Instance::postBarrierPreciseWithOffset, *abiType);
+ case SymbolicAddress::StructNew:
+ *abiType = Args_General2;
+ MOZ_ASSERT(*abiType == ToABIType(SASigStructNew));
+ return FuncCast(Instance::structNew, *abiType);
+ case SymbolicAddress::StructNewUninit:
+ *abiType = Args_General2;
+ MOZ_ASSERT(*abiType == ToABIType(SASigStructNewUninit));
+ return FuncCast(Instance::structNewUninit, *abiType);
+ case SymbolicAddress::ArrayNew:
+ *abiType = Args_General_GeneralInt32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigArrayNew));
+ return FuncCast(Instance::arrayNew, *abiType);
+ case SymbolicAddress::ArrayNewUninit:
+ *abiType = Args_General_GeneralInt32General;
+ MOZ_ASSERT(*abiType == ToABIType(SASigArrayNewUninit));
+ return FuncCast(Instance::arrayNewUninit, *abiType);
+ case SymbolicAddress::ArrayNewData:
+ *abiType = Args_General_GeneralInt32Int32GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigArrayNewData));
+ return FuncCast(Instance::arrayNewData, *abiType);
+ case SymbolicAddress::ArrayNewElem:
+ *abiType = Args_General_GeneralInt32Int32GeneralInt32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigArrayNewElem));
+ return FuncCast(Instance::arrayNewElem, *abiType);
+ case SymbolicAddress::ArrayCopy:
+ *abiType = Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32;
+ MOZ_ASSERT(*abiType == ToABIType(SASigArrayCopy));
+ return FuncCast(Instance::arrayCopy, *abiType);
+
+ case SymbolicAddress::ExceptionNew:
+ *abiType = Args_General2;
+ MOZ_ASSERT(*abiType == ToABIType(SASigExceptionNew));
+ return FuncCast(Instance::exceptionNew, *abiType);
+ case SymbolicAddress::ThrowException:
+ *abiType = Args_Int32_GeneralGeneral;
+ MOZ_ASSERT(*abiType == ToABIType(SASigThrowException));
+ return FuncCast(Instance::throwException, *abiType);
+
+#ifdef WASM_CODEGEN_DEBUG
+ case SymbolicAddress::PrintI32:
+ *abiType = Args_General1;
+ return FuncCast(PrintI32, *abiType);
+ case SymbolicAddress::PrintPtr:
+ *abiType = Args_General1;
+ return FuncCast(PrintPtr, *abiType);
+ case SymbolicAddress::PrintF32:
+ *abiType = Args_Int_Float32;
+ return FuncCast(PrintF32, *abiType);
+ case SymbolicAddress::PrintF64:
+ *abiType = Args_Int_Double;
+ return FuncCast(PrintF64, *abiType);
+ case SymbolicAddress::PrintText:
+ *abiType = Args_General1;
+ return FuncCast(PrintText, *abiType);
+#endif
+#define DECL_SAS_TYPE_AND_FN(op, export, sa_name, abitype, entry, idx) \
+ case SymbolicAddress::sa_name: \
+ *abiType = abitype; \
+ return FuncCast(entry, *abiType);
+ FOR_EACH_INTRINSIC(DECL_SAS_TYPE_AND_FN)
+#undef DECL_SAS_TYPE_AND_FN
+ case SymbolicAddress::Limit:
+ break;
+ }
+
+ MOZ_CRASH("Bad SymbolicAddress");
+}
+
+bool wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode) {
+ switch (callee) {
+ case SymbolicAddress::FloorD:
+ case SymbolicAddress::FloorF:
+ *mode = jit::RoundingMode::Down;
+ return true;
+ case SymbolicAddress::CeilD:
+ case SymbolicAddress::CeilF:
+ *mode = jit::RoundingMode::Up;
+ return true;
+ case SymbolicAddress::TruncD:
+ case SymbolicAddress::TruncF:
+ *mode = jit::RoundingMode::TowardsZero;
+ return true;
+ case SymbolicAddress::NearbyIntD:
+ case SymbolicAddress::NearbyIntF:
+ *mode = jit::RoundingMode::NearestTiesToEven;
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool wasm::NeedsBuiltinThunk(SymbolicAddress sym) {
+ // Also see "The Wasm Builtin ABIs" in WasmFrame.h.
+ switch (sym) {
+ // No thunk, because they do their work within the activation
+ case SymbolicAddress::HandleThrow: // GenerateThrowStub
+ case SymbolicAddress::HandleTrap: // GenerateTrapExit
+ return false;
+
+ // No thunk, because some work has to be done within the activation before
+ // the activation exit: when called, arbitrary wasm registers are live and
+ // must be saved, and the stack pointer may not be aligned for any ABI.
+ case SymbolicAddress::HandleDebugTrap: // GenerateDebugTrapStub
+
+ // No thunk, because their caller manages the activation exit explicitly
+ case SymbolicAddress::CallImport_General: // GenerateImportInterpExit
+ case SymbolicAddress::CoerceInPlace_ToInt32: // GenerateImportJitExit
+ case SymbolicAddress::CoerceInPlace_ToNumber: // GenerateImportJitExit
+ case SymbolicAddress::CoerceInPlace_ToBigInt: // GenerateImportJitExit
+ case SymbolicAddress::BoxValue_Anyref: // GenerateImportJitExit
+ return false;
+
+#ifdef WASM_CODEGEN_DEBUG
+ // No thunk, because they call directly into C++ code that does not interact
+ // with the rest of the VM at all.
+ case SymbolicAddress::PrintI32: // Debug stub printers
+ case SymbolicAddress::PrintPtr:
+ case SymbolicAddress::PrintF32:
+ case SymbolicAddress::PrintF64:
+ case SymbolicAddress::PrintText:
+ return false;
+#endif
+
+ // Everyone else gets a thunk to handle the exit from the activation
+ case SymbolicAddress::ToInt32:
+ case SymbolicAddress::DivI64:
+ case SymbolicAddress::UDivI64:
+ case SymbolicAddress::ModI64:
+ case SymbolicAddress::UModI64:
+ case SymbolicAddress::TruncateDoubleToUint64:
+ case SymbolicAddress::TruncateDoubleToInt64:
+ case SymbolicAddress::SaturatingTruncateDoubleToUint64:
+ case SymbolicAddress::SaturatingTruncateDoubleToInt64:
+ case SymbolicAddress::Uint64ToDouble:
+ case SymbolicAddress::Uint64ToFloat32:
+ case SymbolicAddress::Int64ToDouble:
+ case SymbolicAddress::Int64ToFloat32:
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ case SymbolicAddress::aeabi_uidivmod:
+#endif
+ case SymbolicAddress::AllocateBigInt:
+ case SymbolicAddress::ModD:
+ case SymbolicAddress::SinNativeD:
+ case SymbolicAddress::SinFdlibmD:
+ case SymbolicAddress::CosNativeD:
+ case SymbolicAddress::CosFdlibmD:
+ case SymbolicAddress::TanNativeD:
+ case SymbolicAddress::TanFdlibmD:
+ case SymbolicAddress::ASinD:
+ case SymbolicAddress::ACosD:
+ case SymbolicAddress::ATanD:
+ case SymbolicAddress::CeilD:
+ case SymbolicAddress::CeilF:
+ case SymbolicAddress::FloorD:
+ case SymbolicAddress::FloorF:
+ case SymbolicAddress::TruncD:
+ case SymbolicAddress::TruncF:
+ case SymbolicAddress::NearbyIntD:
+ case SymbolicAddress::NearbyIntF:
+ case SymbolicAddress::ExpD:
+ case SymbolicAddress::LogD:
+ case SymbolicAddress::PowD:
+ case SymbolicAddress::ATan2D:
+ case SymbolicAddress::MemoryGrowM32:
+ case SymbolicAddress::MemoryGrowM64:
+ case SymbolicAddress::MemorySizeM32:
+ case SymbolicAddress::MemorySizeM64:
+ case SymbolicAddress::WaitI32M32:
+ case SymbolicAddress::WaitI32M64:
+ case SymbolicAddress::WaitI64M32:
+ case SymbolicAddress::WaitI64M64:
+ case SymbolicAddress::WakeM32:
+ case SymbolicAddress::WakeM64:
+ case SymbolicAddress::CoerceInPlace_JitEntry:
+ case SymbolicAddress::ReportV128JSCall:
+ case SymbolicAddress::MemCopyM32:
+ case SymbolicAddress::MemCopySharedM32:
+ case SymbolicAddress::MemCopyM64:
+ case SymbolicAddress::MemCopySharedM64:
+ case SymbolicAddress::DataDrop:
+ case SymbolicAddress::MemFillM32:
+ case SymbolicAddress::MemFillSharedM32:
+ case SymbolicAddress::MemFillM64:
+ case SymbolicAddress::MemFillSharedM64:
+ case SymbolicAddress::MemDiscardM32:
+ case SymbolicAddress::MemDiscardSharedM32:
+ case SymbolicAddress::MemDiscardM64:
+ case SymbolicAddress::MemDiscardSharedM64:
+ case SymbolicAddress::MemInitM32:
+ case SymbolicAddress::MemInitM64:
+ case SymbolicAddress::TableCopy:
+ case SymbolicAddress::ElemDrop:
+ case SymbolicAddress::TableFill:
+ case SymbolicAddress::TableGet:
+ case SymbolicAddress::TableGrow:
+ case SymbolicAddress::TableInit:
+ case SymbolicAddress::TableSet:
+ case SymbolicAddress::TableSize:
+ case SymbolicAddress::RefFunc:
+ case SymbolicAddress::PostBarrier:
+ case SymbolicAddress::PostBarrierPrecise:
+ case SymbolicAddress::PostBarrierPreciseWithOffset:
+ case SymbolicAddress::ExceptionNew:
+ case SymbolicAddress::ThrowException:
+ case SymbolicAddress::StructNew:
+ case SymbolicAddress::StructNewUninit:
+ case SymbolicAddress::ArrayNew:
+ case SymbolicAddress::ArrayNewUninit:
+ case SymbolicAddress::ArrayNewData:
+ case SymbolicAddress::ArrayNewElem:
+ case SymbolicAddress::ArrayCopy:
+#define OP(op, export, sa_name, abitype, entry, idx) \
+ case SymbolicAddress::sa_name:
+ FOR_EACH_INTRINSIC(OP)
+#undef OP
+ return true;
+
+ case SymbolicAddress::Limit:
+ break;
+ }
+
+ MOZ_CRASH("unexpected symbolic address");
+}
+
+// ============================================================================
+// [SMDOC] JS Fast Wasm Imports
+//
+// JS builtins that can be imported by wasm modules and called efficiently
+// through thunks. These thunks conform to the internal wasm ABI and thus can be
+// patched in for import calls. Calling a JS builtin through a thunk is much
+// faster than calling out through the generic import call trampoline which will
+// end up in the slowest C++ Instance::callImport path.
+//
+// Each JS builtin can have several overloads. These must all be enumerated in
+// PopulateTypedNatives() so they can be included in the process-wide thunk set.
+// In addition to the traditional overloading based on types, every builtin
+// can also have a version implemented by fdlibm or the native math library.
+// This is useful for fingerprinting resistance.
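+//
+// As a concrete example of the machinery below: a module that imports Math.sin
+// with an (f64) -> f64 signature is matched by MaybeGetBuiltinThunk() against
+// the MathSin overload registered with Args_Double_Double, and the fdlibm
+// variant of that overload is preferred when the realm resists fingerprinting
+// (or when sin/cos/tan are configured to use fdlibm).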
+
+#define FOR_EACH_SIN_COS_TAN_NATIVE(_) \
+ _(math_sin, MathSin) \
+ _(math_tan, MathTan) \
+ _(math_cos, MathCos)
+
+#define FOR_EACH_UNARY_NATIVE(_) \
+ _(math_exp, MathExp) \
+ _(math_log, MathLog) \
+ _(math_asin, MathASin) \
+ _(math_atan, MathATan) \
+ _(math_acos, MathACos) \
+ _(math_log10, MathLog10) \
+ _(math_log2, MathLog2) \
+ _(math_log1p, MathLog1P) \
+ _(math_expm1, MathExpM1) \
+ _(math_sinh, MathSinH) \
+ _(math_tanh, MathTanH) \
+ _(math_cosh, MathCosH) \
+ _(math_asinh, MathASinH) \
+ _(math_atanh, MathATanH) \
+ _(math_acosh, MathACosH) \
+ _(math_sign, MathSign) \
+ _(math_trunc, MathTrunc) \
+ _(math_cbrt, MathCbrt)
+
+#define FOR_EACH_BINARY_NATIVE(_) \
+ _(ecmaAtan2, MathATan2) \
+ _(ecmaHypot, MathHypot) \
+ _(ecmaPow, MathPow)
+
+#define DEFINE_SIN_COS_TAN_FLOAT_WRAPPER(func, _) \
+ static float func##_native_impl_f32(float x) { \
+ return float(func##_native_impl(double(x))); \
+ } \
+ static float func##_fdlibm_impl_f32(float x) { \
+ return float(func##_fdlibm_impl(double(x))); \
+ }
+
+#define DEFINE_UNARY_FLOAT_WRAPPER(func, _) \
+ static float func##_impl_f32(float x) { \
+ return float(func##_impl(double(x))); \
+ }
+
+#define DEFINE_BINARY_FLOAT_WRAPPER(func, _) \
+ static float func##_f32(float x, float y) { \
+ return float(func(double(x), double(y))); \
+ }
+
+FOR_EACH_SIN_COS_TAN_NATIVE(DEFINE_SIN_COS_TAN_FLOAT_WRAPPER)
+FOR_EACH_UNARY_NATIVE(DEFINE_UNARY_FLOAT_WRAPPER)
+FOR_EACH_BINARY_NATIVE(DEFINE_BINARY_FLOAT_WRAPPER)
+
+#undef DEFINE_UNARY_FLOAT_WRAPPER
+#undef DEFINE_BINARY_FLOAT_WRAPPER
+
+struct TypedNative {
+ InlinableNative native;
+ ABIFunctionType abiType;
+ enum class FdlibmImpl : uint8_t { No, Yes } fdlibm;
+
+ TypedNative(InlinableNative native, ABIFunctionType abiType,
+ FdlibmImpl fdlibm)
+ : native(native), abiType(abiType), fdlibm(fdlibm) {}
+
+ using Lookup = TypedNative;
+ static HashNumber hash(const Lookup& l) {
+ return HashGeneric(uint32_t(l.native), uint32_t(l.abiType),
+ uint32_t(l.fdlibm));
+ }
+ static bool match(const TypedNative& lhs, const Lookup& rhs) {
+ return lhs.native == rhs.native && lhs.abiType == rhs.abiType &&
+ lhs.fdlibm == rhs.fdlibm;
+ }
+};
+
+using TypedNativeToFuncPtrMap =
+ HashMap<TypedNative, void*, TypedNative, SystemAllocPolicy>;
+
+static bool PopulateTypedNatives(TypedNativeToFuncPtrMap* typedNatives) {
+#define ADD_OVERLOAD(funcName, native, abiType, fdlibm) \
+ if (!typedNatives->putNew(TypedNative(InlinableNative::native, abiType, \
+ TypedNative::FdlibmImpl::fdlibm), \
+ FuncCast(funcName, abiType))) \
+ return false;
+
+#define ADD_SIN_COS_TAN_OVERLOADS(funcName, native) \
+ ADD_OVERLOAD(funcName##_native_impl, native, Args_Double_Double, No) \
+ ADD_OVERLOAD(funcName##_fdlibm_impl, native, Args_Double_Double, Yes) \
+ ADD_OVERLOAD(funcName##_native_impl_f32, native, Args_Float32_Float32, No) \
+ ADD_OVERLOAD(funcName##_fdlibm_impl_f32, native, Args_Float32_Float32, Yes)
+
+#define ADD_UNARY_OVERLOADS(funcName, native) \
+ ADD_OVERLOAD(funcName##_impl, native, Args_Double_Double, No) \
+ ADD_OVERLOAD(funcName##_impl_f32, native, Args_Float32_Float32, No)
+
+#define ADD_BINARY_OVERLOADS(funcName, native) \
+ ADD_OVERLOAD(funcName, native, Args_Double_DoubleDouble, No) \
+ ADD_OVERLOAD(funcName##_f32, native, Args_Float32_Float32Float32, No)
+
+ FOR_EACH_SIN_COS_TAN_NATIVE(ADD_SIN_COS_TAN_OVERLOADS)
+ FOR_EACH_UNARY_NATIVE(ADD_UNARY_OVERLOADS)
+ FOR_EACH_BINARY_NATIVE(ADD_BINARY_OVERLOADS)
+
+#undef ADD_UNARY_OVERLOADS
+#undef ADD_BINARY_OVERLOADS
+
+ return true;
+}
+
+#undef FOR_EACH_UNARY_NATIVE
+#undef FOR_EACH_BINARY_NATIVE
+
+// ============================================================================
+// [SMDOC] Process-wide builtin thunk set
+//
+// Thunks are inserted between wasm calls and the C++ callee and achieve two
+// things:
+// - bridging the few differences between the internal wasm ABI and the
+// external native ABI (viz. float returns on x86 and soft-fp ARM)
+// - executing an exit prologue/epilogue which in turn allows any profiling
+// iterator to see the full stack up to the wasm operation that called out
+//
+// Thunks are created for two kinds of C++ callees, enumerated above:
+// - SymbolicAddress: for statically compiled calls in the wasm module
+// - Imported JS builtins: optimized calls to imports
+//
+// All thunks are created up front, lazily, when the first wasm module is
+// compiled in the process. Thunks are kept alive until the JS engine shuts down
+// in the process. No thunks are created at runtime after initialization. This
+// simple scheme allows several simplifications:
+// - no reference counting to keep thunks alive
+// - no problems toggling W^X permissions which, because of multiple executing
+// threads, would require each thunk allocation to be on its own page
+// The cost for creating all thunks at once is relatively low since all thunks
+// fit within the smallest executable-code allocation quantum (64k).
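+//
+// The lookup itself is cheap: SymbolicAddressTarget() below either returns the
+// raw C++ function pointer (when NeedsBuiltinThunk() is false) or indexes
+// symbolicAddressToCodeRange to find the thunk's entry point inside the single
+// process-wide code allocation.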
+
+using TypedNativeToCodeRangeMap =
+ HashMap<TypedNative, uint32_t, TypedNative, SystemAllocPolicy>;
+
+using SymbolicAddressToCodeRangeArray =
+ EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, uint32_t>;
+
+struct BuiltinThunks {
+ uint8_t* codeBase;
+ size_t codeSize;
+ CodeRangeVector codeRanges;
+ TypedNativeToCodeRangeMap typedNativeToCodeRange;
+ SymbolicAddressToCodeRangeArray symbolicAddressToCodeRange;
+ uint32_t provisionalLazyJitEntryOffset;
+
+ BuiltinThunks() : codeBase(nullptr), codeSize(0) {}
+
+ ~BuiltinThunks() {
+ if (codeBase) {
+ DeallocateExecutableMemory(codeBase, codeSize);
+ }
+ }
+};
+
+Mutex initBuiltinThunks(mutexid::WasmInitBuiltinThunks);
+Atomic<const BuiltinThunks*> builtinThunks;
+
+bool wasm::EnsureBuiltinThunksInitialized() {
+ LockGuard<Mutex> guard(initBuiltinThunks);
+ if (builtinThunks) {
+ return true;
+ }
+
+ auto thunks = MakeUnique<BuiltinThunks>();
+ if (!thunks) {
+ return false;
+ }
+
+ LifoAlloc lifo(BUILTIN_THUNK_LIFO_SIZE);
+ TempAllocator tempAlloc(&lifo);
+ WasmMacroAssembler masm(tempAlloc);
+ AutoCreatedBy acb(masm, "wasm::EnsureBuiltinThunksInitialized");
+
+ for (auto sym : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ if (!NeedsBuiltinThunk(sym)) {
+ thunks->symbolicAddressToCodeRange[sym] = UINT32_MAX;
+ continue;
+ }
+
+ uint32_t codeRangeIndex = thunks->codeRanges.length();
+ thunks->symbolicAddressToCodeRange[sym] = codeRangeIndex;
+
+ ABIFunctionType abiType;
+ void* funcPtr = AddressOf(sym, &abiType);
+
+ ExitReason exitReason(sym);
+
+ CallableOffsets offsets;
+ if (!GenerateBuiltinThunk(masm, abiType, exitReason, funcPtr, &offsets)) {
+ return false;
+ }
+ if (!thunks->codeRanges.emplaceBack(CodeRange::BuiltinThunk, offsets)) {
+ return false;
+ }
+ }
+
+ TypedNativeToFuncPtrMap typedNatives;
+ if (!PopulateTypedNatives(&typedNatives)) {
+ return false;
+ }
+
+ for (TypedNativeToFuncPtrMap::Range r = typedNatives.all(); !r.empty();
+ r.popFront()) {
+ TypedNative typedNative = r.front().key();
+
+ uint32_t codeRangeIndex = thunks->codeRanges.length();
+ if (!thunks->typedNativeToCodeRange.putNew(typedNative, codeRangeIndex)) {
+ return false;
+ }
+
+ ABIFunctionType abiType = typedNative.abiType;
+ void* funcPtr = r.front().value();
+
+ ExitReason exitReason = ExitReason::Fixed::BuiltinNative;
+
+ CallableOffsets offsets;
+ if (!GenerateBuiltinThunk(masm, abiType, exitReason, funcPtr, &offsets)) {
+ return false;
+ }
+ if (!thunks->codeRanges.emplaceBack(CodeRange::BuiltinThunk, offsets)) {
+ return false;
+ }
+ }
+
+ // Provisional lazy JitEntry stub: This is a shared stub that can be installed
+ // in the jit-entry jump table. It uses the JIT ABI and when invoked will
+ // retrieve (via TlsContext()) and invoke the context-appropriate
+ // invoke-from-interpreter jit stub, thus serving as the initial, unoptimized
+ // jit-entry stub for any exported wasm function that has a jit-entry.
+
+#ifdef DEBUG
+ // We need to allow this machine code to bake in a C++ code pointer, so we
+ // disable the wasm restrictions while generating this stub.
+ JitContext jitContext;
+ bool oldFlag = jitContext.setIsCompilingWasm(false);
+#endif
+
+ Offsets provisionalLazyJitEntryOffsets;
+ if (!GenerateProvisionalLazyJitEntryStub(masm,
+ &provisionalLazyJitEntryOffsets)) {
+ return false;
+ }
+ thunks->provisionalLazyJitEntryOffset = provisionalLazyJitEntryOffsets.begin;
+
+#ifdef DEBUG
+ jitContext.setIsCompilingWasm(oldFlag);
+#endif
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ size_t allocSize = AlignBytes(masm.bytesNeeded(), ExecutableCodePageSize);
+
+ thunks->codeSize = allocSize;
+ thunks->codeBase = (uint8_t*)AllocateExecutableMemory(
+ allocSize, ProtectionSetting::Writable, MemCheckKind::MakeUndefined);
+ if (!thunks->codeBase) {
+ return false;
+ }
+
+ masm.executableCopy(thunks->codeBase);
+ memset(thunks->codeBase + masm.bytesNeeded(), 0,
+ allocSize - masm.bytesNeeded());
+
+ masm.processCodeLabels(thunks->codeBase);
+ PatchDebugSymbolicAccesses(thunks->codeBase, masm);
+
+ MOZ_ASSERT(masm.callSites().empty());
+ MOZ_ASSERT(masm.callSiteTargets().empty());
+ MOZ_ASSERT(masm.trapSites().empty());
+ MOZ_ASSERT(masm.tryNotes().empty());
+
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(thunks->codeBase,
+ thunks->codeSize)) {
+ return false;
+ }
+
+ builtinThunks = thunks.release();
+ return true;
+}
+
+void wasm::ReleaseBuiltinThunks() {
+ if (builtinThunks) {
+ const BuiltinThunks* ptr = builtinThunks;
+ js_delete(const_cast<BuiltinThunks*>(ptr));
+ builtinThunks = nullptr;
+ }
+}
+
+void* wasm::SymbolicAddressTarget(SymbolicAddress sym) {
+ MOZ_ASSERT(builtinThunks);
+
+ ABIFunctionType abiType;
+ void* funcPtr = AddressOf(sym, &abiType);
+
+ if (!NeedsBuiltinThunk(sym)) {
+ return funcPtr;
+ }
+
+ const BuiltinThunks& thunks = *builtinThunks;
+ uint32_t codeRangeIndex = thunks.symbolicAddressToCodeRange[sym];
+ return thunks.codeBase + thunks.codeRanges[codeRangeIndex].begin();
+}
+
+void* wasm::ProvisionalLazyJitEntryStub() {
+ MOZ_ASSERT(builtinThunks);
+
+ const BuiltinThunks& thunks = *builtinThunks;
+ return thunks.codeBase + thunks.provisionalLazyJitEntryOffset;
+}
+
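+// Map a wasm function type onto the packed ABIFunctionType encoding used for
+// the typed-native thunks: the result type is placed at RetType_Shift and
+// argument i at ArgType_Shift * (i + 1). For example, an (f64, f64) -> f64
+// signature is encoded as
+//   (ArgType_Float64 << RetType_Shift) |
+//   (ArgType_Float64 << ArgType_Shift) |
+//   (ArgType_Float64 << (ArgType_Shift * 2)),
+// which is expected to coincide with Args_Double_DoubleDouble. Only f32/f64
+// signatures are accepted; anything else returns Nothing().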
+static Maybe<ABIFunctionType> ToBuiltinABIFunctionType(
+ const FuncType& funcType) {
+ const ValTypeVector& args = funcType.args();
+ const ValTypeVector& results = funcType.results();
+
+ if (results.length() != 1) {
+ return Nothing();
+ }
+
+ uint32_t abiType;
+ switch (results[0].kind()) {
+ case ValType::F32:
+ abiType = ArgType_Float32 << RetType_Shift;
+ break;
+ case ValType::F64:
+ abiType = ArgType_Float64 << RetType_Shift;
+ break;
+ default:
+ return Nothing();
+ }
+
+ if ((args.length() + 1) > (sizeof(uint32_t) * 8 / ArgType_Shift)) {
+ return Nothing();
+ }
+
+ for (size_t i = 0; i < args.length(); i++) {
+ switch (args[i].kind()) {
+ case ValType::F32:
+ abiType |= (ArgType_Float32 << (ArgType_Shift * (i + 1)));
+ break;
+ case ValType::F64:
+ abiType |= (ArgType_Float64 << (ArgType_Shift * (i + 1)));
+ break;
+ default:
+ return Nothing();
+ }
+ }
+
+ return Some(ABIFunctionType(abiType));
+}
+
+void* wasm::MaybeGetBuiltinThunk(JSFunction* f, const FuncType& funcType) {
+ MOZ_ASSERT(builtinThunks);
+
+ if (!f->isNativeFun() || !f->hasJitInfo() ||
+ f->jitInfo()->type() != JSJitInfo::InlinableNative) {
+ return nullptr;
+ }
+
+ Maybe<ABIFunctionType> abiType = ToBuiltinABIFunctionType(funcType);
+ if (!abiType) {
+ return nullptr;
+ }
+
+ const BuiltinThunks& thunks = *builtinThunks;
+
+  // If this function should resist fingerprinting, first try to look up
+  // the fdlibm version. If that version doesn't exist we still fall back to
+  // the normal native.
+ if (math_use_fdlibm_for_sin_cos_tan() ||
+ f->realm()->behaviors().shouldResistFingerprinting()) {
+ TypedNative typedNative(f->jitInfo()->inlinableNative, *abiType,
+ TypedNative::FdlibmImpl::Yes);
+ auto p =
+ thunks.typedNativeToCodeRange.readonlyThreadsafeLookup(typedNative);
+ if (p) {
+ return thunks.codeBase + thunks.codeRanges[p->value()].begin();
+ }
+ }
+
+ TypedNative typedNative(f->jitInfo()->inlinableNative, *abiType,
+ TypedNative::FdlibmImpl::No);
+ auto p = thunks.typedNativeToCodeRange.readonlyThreadsafeLookup(typedNative);
+ if (!p) {
+ return nullptr;
+ }
+
+ return thunks.codeBase + thunks.codeRanges[p->value()].begin();
+}
+
+bool wasm::LookupBuiltinThunk(void* pc, const CodeRange** codeRange,
+ uint8_t** codeBase) {
+ if (!builtinThunks) {
+ return false;
+ }
+
+ const BuiltinThunks& thunks = *builtinThunks;
+ if (pc < thunks.codeBase || pc >= thunks.codeBase + thunks.codeSize) {
+ return false;
+ }
+
+ *codeBase = thunks.codeBase;
+
+ CodeRange::OffsetInCode target((uint8_t*)pc - thunks.codeBase);
+ *codeRange = LookupInSorted(thunks.codeRanges, target);
+
+ return !!*codeRange;
+}
diff --git a/js/src/wasm/WasmBuiltins.h b/js/src/wasm/WasmBuiltins.h
new file mode 100644
index 0000000000..0d876f439d
--- /dev/null
+++ b/js/src/wasm/WasmBuiltins.h
@@ -0,0 +1,324 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_builtins_h
+#define wasm_builtins_h
+
+#include "intgemm/IntegerGemmIntrinsic.h"
+#include "jit/IonTypes.h"
+#include "wasm/WasmIntrinsicGenerated.h"
+
+namespace js {
+namespace jit {
+struct ResumeFromException;
+}
+namespace wasm {
+
+class WasmFrameIter;
+class CodeRange;
+class FuncType;
+
+// A wasm::SymbolicAddress represents a pointer to a well-known function/global
+// that is embedded in wasm code. Since wasm code is serialized and later
+// deserialized into a different address space, symbolic addresses must be used
+// for *all* pointers into the address space. The MacroAssembler records a list
+// of all SymbolicAddresses and the offsets of their use in the code for later
+// patching during static linking.
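+//
+// For example, a memory.grow on a 32-bit memory is compiled as a call through
+// SymbolicAddress::MemoryGrowM32; at link time AddressOf() in WasmBuiltins.cpp
+// resolves that entry to Instance::memoryGrow_m32 (normally reached through
+// its builtin thunk, see SymbolicAddressTarget()).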
+
+enum class SymbolicAddress {
+ ToInt32,
+#if defined(JS_CODEGEN_ARM)
+ aeabi_idivmod,
+ aeabi_uidivmod,
+#endif
+ ModD,
+ SinNativeD,
+ SinFdlibmD,
+ CosNativeD,
+ CosFdlibmD,
+ TanNativeD,
+ TanFdlibmD,
+ ASinD,
+ ACosD,
+ ATanD,
+ CeilD,
+ CeilF,
+ FloorD,
+ FloorF,
+ TruncD,
+ TruncF,
+ NearbyIntD,
+ NearbyIntF,
+ ExpD,
+ LogD,
+ PowD,
+ ATan2D,
+ HandleDebugTrap,
+ HandleThrow,
+ HandleTrap,
+ ReportV128JSCall,
+ CallImport_General,
+ CoerceInPlace_ToInt32,
+ CoerceInPlace_ToNumber,
+ CoerceInPlace_JitEntry,
+ CoerceInPlace_ToBigInt,
+ AllocateBigInt,
+ BoxValue_Anyref,
+ DivI64,
+ UDivI64,
+ ModI64,
+ UModI64,
+ TruncateDoubleToInt64,
+ TruncateDoubleToUint64,
+ SaturatingTruncateDoubleToInt64,
+ SaturatingTruncateDoubleToUint64,
+ Uint64ToFloat32,
+ Uint64ToDouble,
+ Int64ToFloat32,
+ Int64ToDouble,
+ MemoryGrowM32,
+ MemoryGrowM64,
+ MemorySizeM32,
+ MemorySizeM64,
+ WaitI32M32,
+ WaitI32M64,
+ WaitI64M32,
+ WaitI64M64,
+ WakeM32,
+ WakeM64,
+ MemCopyM32,
+ MemCopySharedM32,
+ MemCopyM64,
+ MemCopySharedM64,
+ DataDrop,
+ MemFillM32,
+ MemFillSharedM32,
+ MemFillM64,
+ MemFillSharedM64,
+ MemDiscardM32,
+ MemDiscardSharedM32,
+ MemDiscardM64,
+ MemDiscardSharedM64,
+ MemInitM32,
+ MemInitM64,
+ TableCopy,
+ ElemDrop,
+ TableFill,
+ TableGet,
+ TableGrow,
+ TableInit,
+ TableSet,
+ TableSize,
+ RefFunc,
+ PostBarrier,
+ PostBarrierPrecise,
+ PostBarrierPreciseWithOffset,
+ ExceptionNew,
+ ThrowException,
+ StructNew,
+ StructNewUninit,
+ ArrayNew,
+ ArrayNewUninit,
+ ArrayNewData,
+ ArrayNewElem,
+ ArrayCopy,
+#define DECL_INTRINSIC_SA(op, export, sa_name, abitype, entry, idx) sa_name,
+ FOR_EACH_INTRINSIC(DECL_INTRINSIC_SA)
+#undef DECL_INTRINSIC_SA
+#ifdef WASM_CODEGEN_DEBUG
+ PrintI32,
+ PrintPtr,
+ PrintF32,
+ PrintF64,
+ PrintText,
+#endif
+ Limit
+};
+
+// The FailureMode indicates whether, immediately after a call to a builtin
+// returns, the return value should be checked against an error condition
+// (and if so, which one) which signals that the C++ callee has already
+// reported an error and thus wasm needs to wasmTrap(Trap::ThrowReported).
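+//
+// For example, a builtin whose C++ implementation reports an error and then
+// returns a negative int32 would be declared with FailOnNegI32, while one that
+// returns a pointer would use FailOnNullPtr; Infallible builtins skip the
+// check entirely.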
+
+enum class FailureMode : uint8_t {
+ Infallible,
+ FailOnNegI32,
+ FailOnNullPtr,
+ FailOnInvalidRef
+};
+
+// SymbolicAddressSignature carries type information for a function referred
+// to by a SymbolicAddress. In order that |argTypes| can be written out as a
+// static initialiser, it has to have fixed length. At present
+// SymbolicAddressSignature is used to describe functions with at most 14
+// arguments, so |argTypes| has 15 entries in order to allow the last value to
+// be MIRType::None, in the hope of catching any accidental overruns of the
+// defined section of the array.
+
+static constexpr size_t SymbolicAddressSignatureMaxArgs = 14;
+
+struct SymbolicAddressSignature {
+ // The SymbolicAddress that is described.
+ const SymbolicAddress identity;
+ // The return type, or MIRType::None to denote 'void'.
+ const jit::MIRType retType;
+ // The failure mode, which is checked by masm.wasmCallBuiltinInstanceMethod.
+ const FailureMode failureMode;
+ // The number of arguments, 0 .. SymbolicAddressSignatureMaxArgs only.
+ const uint8_t numArgs;
+ // The argument types; SymbolicAddressSignatureMaxArgs + 1 guard, which
+ // should be MIRType::None.
+ const jit::MIRType argTypes[SymbolicAddressSignatureMaxArgs + 1];
+};
+
+// The 32 in this assertion is derived as follows: SymbolicAddress is probably
+// size-4 aligned-4, but it's at the start of the struct, so there's no
+// alignment hole before it. All other components (MIRType and uint8_t) are
+// size-1 aligned-1, and there are 18 in total, so it is reasonable to assume
+// that they also don't create any alignment holes. Hence it is also
+// reasonable to assume that the actual size is 1 * 4 + 18 * 1 == 22. The
+// worst-plausible-case rounding will take that up to 32. Hence, the
+// assertion uses 32.
+
+static_assert(sizeof(SymbolicAddressSignature) <= 32,
+ "SymbolicAddressSignature unexpectedly large");
+
+// These provide argument type information for a subset of the SymbolicAddress
+// targets, for which type info is needed to generate correct stackmaps.
+
+extern const SymbolicAddressSignature SASigSinNativeD;
+extern const SymbolicAddressSignature SASigSinFdlibmD;
+extern const SymbolicAddressSignature SASigCosNativeD;
+extern const SymbolicAddressSignature SASigCosFdlibmD;
+extern const SymbolicAddressSignature SASigTanNativeD;
+extern const SymbolicAddressSignature SASigTanFdlibmD;
+extern const SymbolicAddressSignature SASigASinD;
+extern const SymbolicAddressSignature SASigACosD;
+extern const SymbolicAddressSignature SASigATanD;
+extern const SymbolicAddressSignature SASigCeilD;
+extern const SymbolicAddressSignature SASigCeilF;
+extern const SymbolicAddressSignature SASigFloorD;
+extern const SymbolicAddressSignature SASigFloorF;
+extern const SymbolicAddressSignature SASigTruncD;
+extern const SymbolicAddressSignature SASigTruncF;
+extern const SymbolicAddressSignature SASigNearbyIntD;
+extern const SymbolicAddressSignature SASigNearbyIntF;
+extern const SymbolicAddressSignature SASigExpD;
+extern const SymbolicAddressSignature SASigLogD;
+extern const SymbolicAddressSignature SASigPowD;
+extern const SymbolicAddressSignature SASigATan2D;
+extern const SymbolicAddressSignature SASigMemoryGrowM32;
+extern const SymbolicAddressSignature SASigMemoryGrowM64;
+extern const SymbolicAddressSignature SASigMemorySizeM32;
+extern const SymbolicAddressSignature SASigMemorySizeM64;
+extern const SymbolicAddressSignature SASigWaitI32M32;
+extern const SymbolicAddressSignature SASigWaitI32M64;
+extern const SymbolicAddressSignature SASigWaitI64M32;
+extern const SymbolicAddressSignature SASigWaitI64M64;
+extern const SymbolicAddressSignature SASigWakeM32;
+extern const SymbolicAddressSignature SASigWakeM64;
+extern const SymbolicAddressSignature SASigMemCopyM32;
+extern const SymbolicAddressSignature SASigMemCopySharedM32;
+extern const SymbolicAddressSignature SASigMemCopyM64;
+extern const SymbolicAddressSignature SASigMemCopySharedM64;
+extern const SymbolicAddressSignature SASigDataDrop;
+extern const SymbolicAddressSignature SASigMemFillM32;
+extern const SymbolicAddressSignature SASigMemFillSharedM32;
+extern const SymbolicAddressSignature SASigMemFillM64;
+extern const SymbolicAddressSignature SASigMemFillSharedM64;
+extern const SymbolicAddressSignature SASigMemDiscardM32;
+extern const SymbolicAddressSignature SASigMemDiscardSharedM32;
+extern const SymbolicAddressSignature SASigMemDiscardM64;
+extern const SymbolicAddressSignature SASigMemDiscardSharedM64;
+extern const SymbolicAddressSignature SASigMemInitM32;
+extern const SymbolicAddressSignature SASigMemInitM64;
+extern const SymbolicAddressSignature SASigTableCopy;
+extern const SymbolicAddressSignature SASigElemDrop;
+extern const SymbolicAddressSignature SASigTableFill;
+extern const SymbolicAddressSignature SASigTableGet;
+extern const SymbolicAddressSignature SASigTableGrow;
+extern const SymbolicAddressSignature SASigTableInit;
+extern const SymbolicAddressSignature SASigTableSet;
+extern const SymbolicAddressSignature SASigTableSize;
+extern const SymbolicAddressSignature SASigRefFunc;
+extern const SymbolicAddressSignature SASigPostBarrier;
+extern const SymbolicAddressSignature SASigPostBarrierPrecise;
+extern const SymbolicAddressSignature SASigPostBarrierPreciseWithOffset;
+extern const SymbolicAddressSignature SASigExceptionNew;
+extern const SymbolicAddressSignature SASigThrowException;
+extern const SymbolicAddressSignature SASigStructNew;
+extern const SymbolicAddressSignature SASigStructNewUninit;
+extern const SymbolicAddressSignature SASigArrayNew;
+extern const SymbolicAddressSignature SASigArrayNewUninit;
+extern const SymbolicAddressSignature SASigArrayNewData;
+extern const SymbolicAddressSignature SASigArrayNewElem;
+extern const SymbolicAddressSignature SASigArrayCopy;
+#define EXT_INTR_SA_DECL(op, export, sa_name, abitype, entry, idx) \
+ extern const SymbolicAddressSignature SASig##sa_name;
+FOR_EACH_INTRINSIC(EXT_INTR_SA_DECL)
+#undef EXT_INTR_SA_DECL
+
+bool IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
+
+// A SymbolicAddress that NeedsBuiltinThunk() will call through a thunk to the
+// C++ function. This will be true for all normal calls from normal wasm
+// function code. Only calls to C++ from other exits/thunks do not need a thunk.
+// See "The Wasm-builtin ABIs in WasmFrame.h".
+
+bool NeedsBuiltinThunk(SymbolicAddress sym);
+
+// This function queries whether pc is in one of the process's builtin thunks
+// and, if so, returns the CodeRange and pointer to the code segment that the
+// CodeRange is relative to.
+
+bool LookupBuiltinThunk(void* pc, const CodeRange** codeRange,
+ uint8_t** codeBase);
+
+// EnsureBuiltinThunksInitialized() must be called, and must succeed, before
+// SymbolicAddressTarget() or MaybeGetBuiltinThunk(). This function creates all
+// thunks for the process. ReleaseBuiltinThunks() should be called before
+// ReleaseProcessExecutableMemory() so that the latter can assert that all
+// executable code has been released.
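+//
+// The expected call order is thus: EnsureBuiltinThunksInitialized() when the
+// first module is compiled, then SymbolicAddressTarget() and
+// MaybeGetBuiltinThunk() while linking and instantiating modules, and finally
+// ReleaseBuiltinThunks() at engine shutdown, before
+// ReleaseProcessExecutableMemory().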
+
+bool EnsureBuiltinThunksInitialized();
+
+bool HandleThrow(JSContext* cx, WasmFrameIter& iter,
+ jit::ResumeFromException* rfe);
+
+void* SymbolicAddressTarget(SymbolicAddress sym);
+
+void* ProvisionalLazyJitEntryStub();
+
+void* MaybeGetBuiltinThunk(JSFunction* f, const FuncType& funcType);
+
+void ReleaseBuiltinThunks();
+
+void* AddressOf(SymbolicAddress imm, jit::ABIFunctionType* abiType);
+
+#ifdef WASM_CODEGEN_DEBUG
+void PrintI32(int32_t val);
+void PrintF32(float val);
+void PrintF64(double val);
+void PrintPtr(uint8_t* val);
+void PrintText(const char* out);
+#endif
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_builtins_h
diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp
new file mode 100644
index 0000000000..063d2a020c
--- /dev/null
+++ b/js/src/wasm/WasmCode.cpp
@@ -0,0 +1,1253 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCode.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/BinarySearch.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/Sprintf.h"
+
+#include <algorithm>
+
+#include "jsnum.h"
+
+#include "jit/Disassemble.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/MacroAssembler.h"
+#include "jit/PerfSpewer.h"
+#include "util/Poison.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+#include "wasm/WasmModule.h"
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmUtility.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::BinarySearch;
+using mozilla::BinarySearchIf;
+using mozilla::MakeEnumeratedRange;
+using mozilla::PodAssign;
+
+size_t LinkData::SymbolicLinkArray::sizeOfExcludingThis(
+ MallocSizeOf mallocSizeOf) const {
+ size_t size = 0;
+ for (const Uint32Vector& offsets : *this) {
+ size += offsets.sizeOfExcludingThis(mallocSizeOf);
+ }
+ return size;
+}
+
+CodeSegment::~CodeSegment() {
+ if (unregisterOnDestroy_) {
+ UnregisterCodeSegment(this);
+ }
+}
+
+static uint32_t RoundupCodeLength(uint32_t codeLength) {
+ // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
+ return RoundUp(codeLength, ExecutableCodePageSize);
+}
+
+UniqueCodeBytes wasm::AllocateCodeBytes(uint32_t codeLength) {
+ if (codeLength > MaxCodeBytesPerProcess) {
+ return nullptr;
+ }
+
+ static_assert(MaxCodeBytesPerProcess <= INT32_MAX, "rounding won't overflow");
+ uint32_t roundedCodeLength = RoundupCodeLength(codeLength);
+
+ void* p =
+ AllocateExecutableMemory(roundedCodeLength, ProtectionSetting::Writable,
+ MemCheckKind::MakeUndefined);
+
+  // If the allocation failed and the embedding gives us a last-ditch attempt
+  // to purge all memory (which, in gecko, does a purging GC/CC/GC), do that
+  // and then retry the allocation.
+ if (!p) {
+ if (OnLargeAllocationFailure) {
+ OnLargeAllocationFailure();
+ p = AllocateExecutableMemory(roundedCodeLength,
+ ProtectionSetting::Writable,
+ MemCheckKind::MakeUndefined);
+ }
+ }
+
+ if (!p) {
+ return nullptr;
+ }
+
+ // Zero the padding.
+ memset(((uint8_t*)p) + codeLength, 0, roundedCodeLength - codeLength);
+
+ // We account for the bytes allocated in WasmModuleObject::create, where we
+ // have the necessary JSContext.
+
+ return UniqueCodeBytes((uint8_t*)p, FreeCode(roundedCodeLength));
+}
+
+bool CodeSegment::initialize(const CodeTier& codeTier) {
+ MOZ_ASSERT(!initialized());
+ codeTier_ = &codeTier;
+ MOZ_ASSERT(initialized());
+
+  // In the case of tiering, RegisterCodeSegment() immediately makes this code
+  // segment live for access from other threads executing the containing
+  // module. So only call it once the CodeSegment is fully initialized.
+ if (!RegisterCodeSegment(this)) {
+ return false;
+ }
+
+  // This bool is only used by the destructor, which cannot be called racily,
+  // so it is not a problem to mutate it after RegisterCodeSegment().
+ MOZ_ASSERT(!unregisterOnDestroy_);
+ unregisterOnDestroy_ = true;
+ return true;
+}
+
+const Code& CodeSegment::code() const {
+ MOZ_ASSERT(codeTier_);
+ return codeTier_->code();
+}
+
+void CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const {
+ *code += RoundupCodeLength(length());
+}
+
+void FreeCode::operator()(uint8_t* bytes) {
+ MOZ_ASSERT(codeLength);
+ MOZ_ASSERT(codeLength == RoundupCodeLength(codeLength));
+
+#ifdef MOZ_VTUNE
+ vtune::UnmarkBytes(bytes, codeLength);
+#endif
+ DeallocateExecutableMemory(bytes, codeLength);
+}
+
+bool wasm::StaticallyLink(const ModuleSegment& ms, const LinkData& linkData) {
+ for (LinkData::InternalLink link : linkData.internalLinks) {
+ CodeLabel label;
+ label.patchAt()->bind(link.patchAtOffset);
+ label.target()->bind(link.targetOffset);
+#ifdef JS_CODELABEL_LINKMODE
+ label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
+#endif
+ Assembler::Bind(ms.base(), label);
+ }
+
+ if (!EnsureBuiltinThunksInitialized()) {
+ return false;
+ }
+
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ const Uint32Vector& offsets = linkData.symbolicLinks[imm];
+ if (offsets.empty()) {
+ continue;
+ }
+
+ void* target = SymbolicAddressTarget(imm);
+ for (uint32_t offset : offsets) {
+ uint8_t* patchAt = ms.base() + offset;
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr(target),
+ PatchedImmPtr((void*)-1));
+ }
+ }
+
+ return true;
+}
+
+void wasm::StaticallyUnlink(uint8_t* base, const LinkData& linkData) {
+ for (LinkData::InternalLink link : linkData.internalLinks) {
+ CodeLabel label;
+ label.patchAt()->bind(link.patchAtOffset);
+ label.target()->bind(-size_t(base)); // to reset immediate to null
+#ifdef JS_CODELABEL_LINKMODE
+ label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
+#endif
+ Assembler::Bind(base, label);
+ }
+
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ const Uint32Vector& offsets = linkData.symbolicLinks[imm];
+ if (offsets.empty()) {
+ continue;
+ }
+
+ void* target = SymbolicAddressTarget(imm);
+ for (uint32_t offset : offsets) {
+ uint8_t* patchAt = base + offset;
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr((void*)-1),
+ PatchedImmPtr(target));
+ }
+ }
+}
+
+static bool AppendToString(const char* str, UTF8Bytes* bytes) {
+ return bytes->append(str, strlen(str)) && bytes->append('\0');
+}
+
+static void SendCodeRangesToProfiler(const ModuleSegment& ms,
+ const Metadata& metadata,
+ const CodeRangeVector& codeRanges) {
+ bool enabled = false;
+ enabled |= PerfEnabled();
+#ifdef MOZ_VTUNE
+ enabled |= vtune::IsProfilingActive();
+#endif
+ if (!enabled) {
+ return;
+ }
+
+ for (const CodeRange& codeRange : codeRanges) {
+ if (!codeRange.hasFuncIndex()) {
+ continue;
+ }
+
+ uintptr_t start = uintptr_t(ms.base() + codeRange.begin());
+ uintptr_t size = codeRange.end() - codeRange.begin();
+
+ UTF8Bytes name;
+ if (!metadata.getFuncNameStandalone(codeRange.funcIndex(), &name)) {
+ return;
+ }
+
+ // Avoid "unused" warnings
+ (void)start;
+ (void)size;
+
+ if (PerfEnabled()) {
+ const char* file = metadata.filename.get();
+ if (codeRange.isFunction()) {
+ if (!name.append('\0')) {
+ return;
+ }
+ unsigned line = codeRange.funcLineOrBytecode();
+ CollectPerfSpewerWasmFunctionMap(start, size, file, line, name.begin());
+ } else if (codeRange.isInterpEntry()) {
+ if (!AppendToString(" slow entry", &name)) {
+ return;
+ }
+ CollectPerfSpewerWasmMap(start, size, file, name.begin());
+ } else if (codeRange.isJitEntry()) {
+ if (!AppendToString(" fast entry", &name)) {
+ return;
+ }
+ CollectPerfSpewerWasmMap(start, size, file, name.begin());
+ } else if (codeRange.isImportInterpExit()) {
+ if (!AppendToString(" slow exit", &name)) {
+ return;
+ }
+ CollectPerfSpewerWasmMap(start, size, file, name.begin());
+ } else if (codeRange.isImportJitExit()) {
+ if (!AppendToString(" fast exit", &name)) {
+ return;
+ }
+ CollectPerfSpewerWasmMap(start, size, file, name.begin());
+ } else {
+ MOZ_CRASH("unhandled perf hasFuncIndex type");
+ }
+ }
+#ifdef MOZ_VTUNE
+ if (!vtune::IsProfilingActive()) {
+ continue;
+ }
+ if (!codeRange.isFunction()) {
+ continue;
+ }
+ if (!name.append('\0')) {
+ return;
+ }
+ vtune::MarkWasm(vtune::GenerateUniqueMethodID(), name.begin(), (void*)start,
+ size);
+#endif
+ }
+}
+
+ModuleSegment::ModuleSegment(Tier tier, UniqueCodeBytes codeBytes,
+ uint32_t codeLength, const LinkData& linkData)
+ : CodeSegment(std::move(codeBytes), codeLength, CodeSegment::Kind::Module),
+ tier_(tier),
+ trapCode_(base() + linkData.trapOffset) {}
+
+/* static */
+UniqueModuleSegment ModuleSegment::create(Tier tier, MacroAssembler& masm,
+ const LinkData& linkData) {
+ uint32_t codeLength = masm.bytesNeeded();
+
+ UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
+ if (!codeBytes) {
+ return nullptr;
+ }
+
+ masm.executableCopy(codeBytes.get());
+
+ return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
+ linkData);
+}
+
+/* static */
+UniqueModuleSegment ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes,
+ const LinkData& linkData) {
+ uint32_t codeLength = unlinkedBytes.length();
+
+ UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
+ if (!codeBytes) {
+ return nullptr;
+ }
+
+ memcpy(codeBytes.get(), unlinkedBytes.begin(), codeLength);
+
+ return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
+ linkData);
+}
+
+bool ModuleSegment::initialize(const CodeTier& codeTier,
+ const LinkData& linkData,
+ const Metadata& metadata,
+ const MetadataTier& metadataTier) {
+ if (!StaticallyLink(*this, linkData)) {
+ return false;
+ }
+
+ // Optimized compilation finishes on a background thread, so we must make sure
+ // to flush the icaches of all the executing threads.
+ // Reprotect the whole region to avoid having separate RW and RX mappings.
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(
+ base(), RoundupCodeLength(length()))) {
+ return false;
+ }
+
+ SendCodeRangesToProfiler(*this, metadata, metadataTier.codeRanges);
+
+ // See comments in CodeSegment::initialize() for why this must be last.
+ return CodeSegment::initialize(codeTier);
+}
+
+void ModuleSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* code, size_t* data) const {
+ CodeSegment::addSizeOfMisc(mallocSizeOf, code);
+ *data += mallocSizeOf(this);
+}
+
+const CodeRange* ModuleSegment::lookupRange(const void* pc) const {
+ return codeTier().lookupRange(pc);
+}
+
+size_t CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(get());
+}
+
+size_t MetadataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return funcToCodeRange.sizeOfExcludingThis(mallocSizeOf) +
+ codeRanges.sizeOfExcludingThis(mallocSizeOf) +
+ callSites.sizeOfExcludingThis(mallocSizeOf) +
+ tryNotes.sizeOfExcludingThis(mallocSizeOf) +
+ trapSites.sizeOfExcludingThis(mallocSizeOf) +
+ funcImports.sizeOfExcludingThis(mallocSizeOf) +
+ funcExports.sizeOfExcludingThis(mallocSizeOf);
+}
+
+UniqueLazyStubSegment LazyStubSegment::create(const CodeTier& codeTier,
+ size_t length) {
+ UniqueCodeBytes codeBytes = AllocateCodeBytes(length);
+ if (!codeBytes) {
+ return nullptr;
+ }
+
+ auto segment = js::MakeUnique<LazyStubSegment>(std::move(codeBytes), length);
+ if (!segment || !segment->initialize(codeTier)) {
+ return nullptr;
+ }
+
+ return segment;
+}
+
+bool LazyStubSegment::hasSpace(size_t bytes) const {
+ MOZ_ASSERT(AlignBytesNeeded(bytes) == bytes);
+ return bytes <= length() && usedBytes_ <= length() - bytes;
+}
+
+bool LazyStubSegment::addStubs(const Metadata& metadata, size_t codeLength,
+ const Uint32Vector& funcExportIndices,
+ const FuncExportVector& funcExports,
+ const CodeRangeVector& codeRanges,
+ uint8_t** codePtr,
+ size_t* indexFirstInsertedCodeRange) {
+ MOZ_ASSERT(hasSpace(codeLength));
+
+ size_t offsetInSegment = usedBytes_;
+ *codePtr = base() + usedBytes_;
+ usedBytes_ += codeLength;
+
+ *indexFirstInsertedCodeRange = codeRanges_.length();
+
+ if (!codeRanges_.reserve(codeRanges_.length() + 2 * codeRanges.length())) {
+ return false;
+ }
+
+ size_t i = 0;
+ for (uint32_t funcExportIndex : funcExportIndices) {
+ const FuncExport& fe = funcExports[funcExportIndex];
+ const FuncType& funcType = metadata.getFuncExportType(fe);
+ const CodeRange& interpRange = codeRanges[i];
+ MOZ_ASSERT(interpRange.isInterpEntry());
+ MOZ_ASSERT(interpRange.funcIndex() ==
+ funcExports[funcExportIndex].funcIndex());
+
+ codeRanges_.infallibleAppend(interpRange);
+ codeRanges_.back().offsetBy(offsetInSegment);
+ i++;
+
+ if (!funcType.canHaveJitEntry()) {
+ continue;
+ }
+
+ const CodeRange& jitRange = codeRanges[i];
+ MOZ_ASSERT(jitRange.isJitEntry());
+ MOZ_ASSERT(jitRange.funcIndex() == interpRange.funcIndex());
+
+ codeRanges_.infallibleAppend(jitRange);
+ codeRanges_.back().offsetBy(offsetInSegment);
+ i++;
+ }
+
+ return true;
+}
+
+const CodeRange* LazyStubSegment::lookupRange(const void* pc) const {
+ // Do not search if the search will not find anything. There can be many
+ // segments, each with many entries.
+ if (pc < base() || pc >= base() + length()) {
+ return nullptr;
+ }
+ return LookupInSorted(codeRanges_,
+ CodeRange::OffsetInCode((uint8_t*)pc - base()));
+}
+
+void LazyStubSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const {
+ CodeSegment::addSizeOfMisc(mallocSizeOf, code);
+ *data += codeRanges_.sizeOfExcludingThis(mallocSizeOf);
+ *data += mallocSizeOf(this);
+}
+
+// When allocating a single stub to a page, we should not always place the stub
+// at the beginning of the page as the stubs will tend to thrash the icache by
+// creating conflicts (everything ends up in the same cache set). Instead,
+// locate stubs at different line offsets up to 3/4 the system page size (the
+// code allocation quantum).
+//
+// This may be called on background threads, hence the atomic.
+
+static void PadCodeForSingleStub(MacroAssembler& masm) {
+ // Assume 64B icache line size
+ static uint8_t zeroes[64];
+
+  // The counter serves only to spread the code out; it has no other meaning
+  // and can wrap around.
+ static mozilla::Atomic<uint32_t, mozilla::MemoryOrdering::ReleaseAcquire>
+ counter(0);
+
+ uint32_t maxPadLines = ((gc::SystemPageSize() * 3) / 4) / sizeof(zeroes);
+ uint32_t padLines = counter++ % maxPadLines;
+ for (uint32_t i = 0; i < padLines; i++) {
+ masm.appendRawCode(zeroes, sizeof(zeroes));
+ }
+}
+
+static constexpr unsigned LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE = 8 * 1024;
+
+bool LazyStubTier::createManyEntryStubs(const Uint32Vector& funcExportIndices,
+ const Metadata& metadata,
+ const CodeTier& codeTier,
+ size_t* stubSegmentIndex) {
+ MOZ_ASSERT(funcExportIndices.length());
+
+ LifoAlloc lifo(LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE);
+ TempAllocator alloc(&lifo);
+ JitContext jitContext;
+ WasmMacroAssembler masm(alloc);
+
+ if (funcExportIndices.length() == 1) {
+ PadCodeForSingleStub(masm);
+ }
+
+ const MetadataTier& metadataTier = codeTier.metadata();
+ const FuncExportVector& funcExports = metadataTier.funcExports;
+ uint8_t* moduleSegmentBase = codeTier.segment().base();
+
+ CodeRangeVector codeRanges;
+ DebugOnly<uint32_t> numExpectedRanges = 0;
+ for (uint32_t funcExportIndex : funcExportIndices) {
+ const FuncExport& fe = funcExports[funcExportIndex];
+ const FuncType& funcType = metadata.getFuncExportType(fe);
+ // Exports that don't support a jit entry get only the interp entry.
+ numExpectedRanges += (funcType.canHaveJitEntry() ? 2 : 1);
+ void* calleePtr =
+ moduleSegmentBase + metadataTier.codeRange(fe).funcUncheckedCallEntry();
+ Maybe<ImmPtr> callee;
+ callee.emplace(calleePtr, ImmPtr::NoCheckToken());
+ if (!GenerateEntryStubs(masm, funcExportIndex, fe, funcType, callee,
+ /* asmjs */ false, &codeRanges)) {
+ return false;
+ }
+ }
+ MOZ_ASSERT(codeRanges.length() == numExpectedRanges,
+ "incorrect number of entries per function");
+
+ masm.finish();
+
+ MOZ_ASSERT(masm.callSites().empty());
+ MOZ_ASSERT(masm.callSiteTargets().empty());
+ MOZ_ASSERT(masm.trapSites().empty());
+ MOZ_ASSERT(masm.tryNotes().empty());
+
+ if (masm.oom()) {
+ return false;
+ }
+
+ size_t codeLength = LazyStubSegment::AlignBytesNeeded(masm.bytesNeeded());
+
+ if (!stubSegments_.length() ||
+ !stubSegments_[lastStubSegmentIndex_]->hasSpace(codeLength)) {
+ size_t newSegmentSize = std::max(codeLength, ExecutableCodePageSize);
+ UniqueLazyStubSegment newSegment =
+ LazyStubSegment::create(codeTier, newSegmentSize);
+ if (!newSegment) {
+ return false;
+ }
+ lastStubSegmentIndex_ = stubSegments_.length();
+ if (!stubSegments_.emplaceBack(std::move(newSegment))) {
+ return false;
+ }
+ }
+
+ LazyStubSegment* segment = stubSegments_[lastStubSegmentIndex_].get();
+ *stubSegmentIndex = lastStubSegmentIndex_;
+
+ size_t interpRangeIndex;
+ uint8_t* codePtr = nullptr;
+ if (!segment->addStubs(metadata, codeLength, funcExportIndices, funcExports,
+ codeRanges, &codePtr, &interpRangeIndex)) {
+ return false;
+ }
+
+ masm.executableCopy(codePtr);
+ PatchDebugSymbolicAccesses(codePtr, masm);
+ memset(codePtr + masm.bytesNeeded(), 0, codeLength - masm.bytesNeeded());
+
+ for (const CodeLabel& label : masm.codeLabels()) {
+ Assembler::Bind(codePtr, label);
+ }
+
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(codePtr, codeLength)) {
+ return false;
+ }
+
+ // Create lazy function exports for funcIndex -> entry lookup.
+ if (!exports_.reserve(exports_.length() + funcExportIndices.length())) {
+ return false;
+ }
+
+ for (uint32_t funcExportIndex : funcExportIndices) {
+ const FuncExport& fe = funcExports[funcExportIndex];
+ const FuncType& funcType = metadata.getFuncExportType(fe);
+
+ DebugOnly<CodeRange> cr = segment->codeRanges()[interpRangeIndex];
+ MOZ_ASSERT(cr.value.isInterpEntry());
+ MOZ_ASSERT(cr.value.funcIndex() == fe.funcIndex());
+
+ LazyFuncExport lazyExport(fe.funcIndex(), *stubSegmentIndex,
+ interpRangeIndex);
+
+ size_t exportIndex;
+ const uint32_t targetFunctionIndex = fe.funcIndex();
+ MOZ_ALWAYS_FALSE(BinarySearchIf(
+ exports_, 0, exports_.length(),
+ [targetFunctionIndex](const LazyFuncExport& funcExport) {
+ return targetFunctionIndex - funcExport.funcIndex;
+ },
+ &exportIndex));
+ MOZ_ALWAYS_TRUE(
+ exports_.insert(exports_.begin() + exportIndex, std::move(lazyExport)));
+
+ // Exports that don't support a jit entry get only the interp entry.
+ interpRangeIndex += (funcType.canHaveJitEntry() ? 2 : 1);
+ }
+
+ return true;
+}
+
+bool LazyStubTier::createOneEntryStub(uint32_t funcExportIndex,
+ const Metadata& metadata,
+ const CodeTier& codeTier) {
+ Uint32Vector funcExportIndexes;
+ if (!funcExportIndexes.append(funcExportIndex)) {
+ return false;
+ }
+
+ size_t stubSegmentIndex;
+ if (!createManyEntryStubs(funcExportIndexes, metadata, codeTier,
+ &stubSegmentIndex)) {
+ return false;
+ }
+
+ const UniqueLazyStubSegment& segment = stubSegments_[stubSegmentIndex];
+ const CodeRangeVector& codeRanges = segment->codeRanges();
+
+ const FuncExport& fe = codeTier.metadata().funcExports[funcExportIndex];
+ const FuncType& funcType = metadata.getFuncExportType(fe);
+
+ // Exports that don't support a jit entry get only the interp entry.
+ if (!funcType.canHaveJitEntry()) {
+ MOZ_ASSERT(codeRanges.length() >= 1);
+ MOZ_ASSERT(codeRanges.back().isInterpEntry());
+ return true;
+ }
+
+ MOZ_ASSERT(codeRanges.length() >= 2);
+ MOZ_ASSERT(codeRanges[codeRanges.length() - 2].isInterpEntry());
+
+ const CodeRange& cr = codeRanges[codeRanges.length() - 1];
+ MOZ_ASSERT(cr.isJitEntry());
+
+ codeTier.code().setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
+ return true;
+}
+
+bool LazyStubTier::createTier2(const Uint32Vector& funcExportIndices,
+ const Metadata& metadata,
+ const CodeTier& codeTier,
+ Maybe<size_t>* outStubSegmentIndex) {
+ if (!funcExportIndices.length()) {
+ return true;
+ }
+
+ size_t stubSegmentIndex;
+ if (!createManyEntryStubs(funcExportIndices, metadata, codeTier,
+ &stubSegmentIndex)) {
+ return false;
+ }
+
+ outStubSegmentIndex->emplace(stubSegmentIndex);
+ return true;
+}
+
+void LazyStubTier::setJitEntries(const Maybe<size_t>& stubSegmentIndex,
+ const Code& code) {
+ if (!stubSegmentIndex) {
+ return;
+ }
+ const UniqueLazyStubSegment& segment = stubSegments_[*stubSegmentIndex];
+ for (const CodeRange& cr : segment->codeRanges()) {
+ if (!cr.isJitEntry()) {
+ continue;
+ }
+ code.setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
+ }
+}
+
+bool LazyStubTier::hasEntryStub(uint32_t funcIndex) const {
+ size_t match;
+ return BinarySearchIf(
+ exports_, 0, exports_.length(),
+ [funcIndex](const LazyFuncExport& funcExport) {
+ return funcIndex - funcExport.funcIndex;
+ },
+ &match);
+}
+
+void* LazyStubTier::lookupInterpEntry(uint32_t funcIndex) const {
+ size_t match;
+ if (!BinarySearchIf(
+ exports_, 0, exports_.length(),
+ [funcIndex](const LazyFuncExport& funcExport) {
+ return funcIndex - funcExport.funcIndex;
+ },
+ &match)) {
+ return nullptr;
+ }
+ const LazyFuncExport& fe = exports_[match];
+ const LazyStubSegment& stub = *stubSegments_[fe.lazyStubSegmentIndex];
+ return stub.base() + stub.codeRanges()[fe.funcCodeRangeIndex].begin();
+}
+
+void LazyStubTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const {
+ *data += sizeof(*this);
+ *data += exports_.sizeOfExcludingThis(mallocSizeOf);
+ for (const UniqueLazyStubSegment& stub : stubSegments_) {
+ stub->addSizeOfMisc(mallocSizeOf, code, data);
+ }
+}
+
+size_t Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return types->sizeOfExcludingThis(mallocSizeOf) +
+ globals.sizeOfExcludingThis(mallocSizeOf) +
+ tables.sizeOfExcludingThis(mallocSizeOf) +
+ tags.sizeOfExcludingThis(mallocSizeOf) +
+ funcNames.sizeOfExcludingThis(mallocSizeOf) +
+ filename.sizeOfExcludingThis(mallocSizeOf) +
+ sourceMapURL.sizeOfExcludingThis(mallocSizeOf);
+}
+
+struct ProjectFuncIndex {
+ const FuncExportVector& funcExports;
+ explicit ProjectFuncIndex(const FuncExportVector& funcExports)
+ : funcExports(funcExports) {}
+ uint32_t operator[](size_t index) const {
+ return funcExports[index].funcIndex();
+ }
+};
+
+FuncExport& MetadataTier::lookupFuncExport(
+ uint32_t funcIndex, size_t* funcExportIndex /* = nullptr */) {
+ size_t match;
+ if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(),
+ funcIndex, &match)) {
+ MOZ_CRASH("missing function export");
+ }
+ if (funcExportIndex) {
+ *funcExportIndex = match;
+ }
+ return funcExports[match];
+}
+
+const FuncExport& MetadataTier::lookupFuncExport(
+ uint32_t funcIndex, size_t* funcExportIndex) const {
+ return const_cast<MetadataTier*>(this)->lookupFuncExport(funcIndex,
+ funcExportIndex);
+}
+
+static bool AppendName(const Bytes& namePayload, const Name& name,
+ UTF8Bytes* bytes) {
+ MOZ_RELEASE_ASSERT(name.offsetInNamePayload <= namePayload.length());
+ MOZ_RELEASE_ASSERT(name.length <=
+ namePayload.length() - name.offsetInNamePayload);
+ return bytes->append(
+ (const char*)namePayload.begin() + name.offsetInNamePayload, name.length);
+}
+
+static bool AppendFunctionIndexName(uint32_t funcIndex, UTF8Bytes* bytes) {
+ const char beforeFuncIndex[] = "wasm-function[";
+ const char afterFuncIndex[] = "]";
+
+ Int32ToCStringBuf cbuf;
+ size_t funcIndexStrLen;
+ const char* funcIndexStr =
+ Uint32ToCString(&cbuf, funcIndex, &funcIndexStrLen);
+ MOZ_ASSERT(funcIndexStr);
+
+ return bytes->append(beforeFuncIndex, strlen(beforeFuncIndex)) &&
+ bytes->append(funcIndexStr, funcIndexStrLen) &&
+ bytes->append(afterFuncIndex, strlen(afterFuncIndex));
+}
+
+bool Metadata::getFuncName(NameContext ctx, uint32_t funcIndex,
+ UTF8Bytes* name) const {
+ if (moduleName && moduleName->length != 0) {
+ if (!AppendName(namePayload->bytes, *moduleName, name)) {
+ return false;
+ }
+ if (!name->append('.')) {
+ return false;
+ }
+ }
+
+ if (funcIndex < funcNames.length() && funcNames[funcIndex].length != 0) {
+ return AppendName(namePayload->bytes, funcNames[funcIndex], name);
+ }
+
+ if (ctx == NameContext::BeforeLocation) {
+ return true;
+ }
+
+ return AppendFunctionIndexName(funcIndex, name);
+}
+
+bool CodeTier::initialize(const Code& code, const LinkData& linkData,
+ const Metadata& metadata) {
+ MOZ_ASSERT(!initialized());
+ code_ = &code;
+
+ MOZ_ASSERT(lazyStubs_.readLock()->entryStubsEmpty());
+
+ // See comments in CodeSegment::initialize() for why this must be last.
+ if (!segment_->initialize(*this, linkData, metadata, *metadata_)) {
+ return false;
+ }
+
+ MOZ_ASSERT(initialized());
+ return true;
+}
+
+void CodeTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const {
+ segment_->addSizeOfMisc(mallocSizeOf, code, data);
+ lazyStubs_.readLock()->addSizeOfMisc(mallocSizeOf, code, data);
+ *data += metadata_->sizeOfExcludingThis(mallocSizeOf);
+}
+
+const CodeRange* CodeTier::lookupRange(const void* pc) const {
+ CodeRange::OffsetInCode target((uint8_t*)pc - segment_->base());
+ return LookupInSorted(metadata_->codeRanges, target);
+}
+
+const wasm::TryNote* CodeTier::lookupTryNote(const void* pc) const {
+ size_t target = (uint8_t*)pc - segment_->base();
+ const TryNoteVector& tryNotes = metadata_->tryNotes;
+
+ // We find the first hit (there may be multiple) to obtain the innermost
+ // handler, which is why we cannot binary search here.
+ for (const auto& tryNote : tryNotes) {
+ if (tryNote.offsetWithinTryBody(target)) {
+ return &tryNote;
+ }
+ }
+
+ return nullptr;
+}
+
+bool JumpTables::init(CompileMode mode, const ModuleSegment& ms,
+ const CodeRangeVector& codeRanges) {
+ static_assert(JSScript::offsetOfJitCodeRaw() == 0,
+ "wasm fast jit entry is at (void*) jit[funcIndex]");
+
+ mode_ = mode;
+
+ size_t numFuncs = 0;
+ for (const CodeRange& cr : codeRanges) {
+ if (cr.isFunction()) {
+ numFuncs++;
+ }
+ }
+
+ numFuncs_ = numFuncs;
+
+ if (mode_ == CompileMode::Tier1) {
+ tiering_ = TablePointer(js_pod_calloc<void*>(numFuncs));
+ if (!tiering_) {
+ return false;
+ }
+ }
+
+  // The number of jit entries is overestimated, but this keeps filling and
+  // looking up the jit entries simple, and it is safe (worst case, we'll
+  // crash because of a null deref when trying to call the jit entry of an
+  // unexported function).
+ jit_ = TablePointer(js_pod_calloc<void*>(numFuncs));
+ if (!jit_) {
+ return false;
+ }
+
+ uint8_t* codeBase = ms.base();
+ for (const CodeRange& cr : codeRanges) {
+ if (cr.isFunction()) {
+ setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
+ } else if (cr.isJitEntry()) {
+ setJitEntry(cr.funcIndex(), codeBase + cr.begin());
+ }
+ }
+ return true;
+}
+
+Code::Code(UniqueCodeTier tier1, const Metadata& metadata,
+ JumpTables&& maybeJumpTables)
+ : tier1_(std::move(tier1)),
+ metadata_(&metadata),
+ profilingLabels_(mutexid::WasmCodeProfilingLabels,
+ CacheableCharsVector()),
+ jumpTables_(std::move(maybeJumpTables)) {}
+
+bool Code::initialize(const LinkData& linkData) {
+ MOZ_ASSERT(!initialized());
+
+ if (!tier1_->initialize(*this, linkData, *metadata_)) {
+ return false;
+ }
+
+ MOZ_ASSERT(initialized());
+ return true;
+}
+
+bool Code::setAndBorrowTier2(UniqueCodeTier tier2, const LinkData& linkData,
+ const CodeTier** borrowedTier) const {
+ MOZ_RELEASE_ASSERT(!hasTier2());
+ MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Optimized &&
+ tier1_->tier() == Tier::Baseline);
+
+ if (!tier2->initialize(*this, linkData, *metadata_)) {
+ return false;
+ }
+
+ tier2_ = std::move(tier2);
+ *borrowedTier = &*tier2_;
+
+ return true;
+}
+
+void Code::commitTier2() const {
+ MOZ_RELEASE_ASSERT(!hasTier2());
+ hasTier2_ = true;
+ MOZ_ASSERT(hasTier2());
+
+ // To maintain the invariant that tier2_ is never read without the tier having
+ // been committed, this checks tier2_ here instead of before setting hasTier2_
+ // (as would be natural). See comment in WasmCode.h.
+ MOZ_RELEASE_ASSERT(tier2_.get());
+}
+
+uint32_t Code::getFuncIndex(JSFunction* fun) const {
+ MOZ_ASSERT(fun->isWasm() || fun->isAsmJSNative());
+ if (!fun->isWasmWithJitEntry()) {
+ return fun->wasmFuncIndex();
+ }
+ return jumpTables_.funcIndexFromJitEntry(fun->wasmJitEntry());
+}
+
+Tiers Code::tiers() const {
+ if (hasTier2()) {
+ return Tiers(tier1_->tier(), tier2_->tier());
+ }
+ return Tiers(tier1_->tier());
+}
+
+bool Code::hasTier(Tier t) const {
+ if (hasTier2() && tier2_->tier() == t) {
+ return true;
+ }
+ return tier1_->tier() == t;
+}
+
+Tier Code::stableTier() const { return tier1_->tier(); }
+
+Tier Code::bestTier() const {
+ if (hasTier2()) {
+ return tier2_->tier();
+ }
+ return tier1_->tier();
+}
+
+const CodeTier& Code::codeTier(Tier tier) const {
+ switch (tier) {
+ case Tier::Baseline:
+ if (tier1_->tier() == Tier::Baseline) {
+ MOZ_ASSERT(tier1_->initialized());
+ return *tier1_;
+ }
+ MOZ_CRASH("No code segment at this tier");
+ case Tier::Optimized:
+ if (tier1_->tier() == Tier::Optimized) {
+ MOZ_ASSERT(tier1_->initialized());
+ return *tier1_;
+ }
+ // It is incorrect to ask for the optimized tier without there being such
+ // a tier and the tier having been committed. The guard here could
+ // instead be `if (hasTier2()) ... ` but codeTier(t) should not be called
+ // in contexts where that test is necessary.
+ MOZ_RELEASE_ASSERT(hasTier2());
+ MOZ_ASSERT(tier2_->initialized());
+ return *tier2_;
+ }
+ MOZ_CRASH();
+}
+
+bool Code::containsCodePC(const void* pc) const {
+ for (Tier t : tiers()) {
+ const ModuleSegment& ms = segment(t);
+ if (ms.containsCodePC(pc)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+struct CallSiteRetAddrOffset {
+ const CallSiteVector& callSites;
+ explicit CallSiteRetAddrOffset(const CallSiteVector& callSites)
+ : callSites(callSites) {}
+ uint32_t operator[](size_t index) const {
+ return callSites[index].returnAddressOffset();
+ }
+};
+
+const CallSite* Code::lookupCallSite(void* returnAddress) const {
+ for (Tier t : tiers()) {
+ uint32_t target = ((uint8_t*)returnAddress) - segment(t).base();
+ size_t lowerBound = 0;
+ size_t upperBound = metadata(t).callSites.length();
+
+ size_t match;
+ if (BinarySearch(CallSiteRetAddrOffset(metadata(t).callSites), lowerBound,
+ upperBound, target, &match)) {
+ return &metadata(t).callSites[match];
+ }
+ }
+
+ return nullptr;
+}
+
+const CodeRange* Code::lookupFuncRange(void* pc) const {
+ for (Tier t : tiers()) {
+ const CodeRange* result = codeTier(t).lookupRange(pc);
+ if (result && result->isFunction()) {
+ return result;
+ }
+ }
+ return nullptr;
+}
+
+const StackMap* Code::lookupStackMap(uint8_t* nextPC) const {
+ for (Tier t : tiers()) {
+ const StackMap* result = metadata(t).stackMaps.findMap(nextPC);
+ if (result) {
+ return result;
+ }
+ }
+ return nullptr;
+}
+
+const wasm::TryNote* Code::lookupTryNote(void* pc, Tier* tier) const {
+ for (Tier t : tiers()) {
+ const TryNote* result = codeTier(t).lookupTryNote(pc);
+ if (result) {
+ *tier = t;
+ return result;
+ }
+ }
+ return nullptr;
+}
+
+struct TrapSitePCOffset {
+ const TrapSiteVector& trapSites;
+ explicit TrapSitePCOffset(const TrapSiteVector& trapSites)
+ : trapSites(trapSites) {}
+ uint32_t operator[](size_t index) const { return trapSites[index].pcOffset; }
+};
+
+bool Code::lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const {
+ for (Tier t : tiers()) {
+ uint32_t target = ((uint8_t*)pc) - segment(t).base();
+ const TrapSiteVectorArray& trapSitesArray = metadata(t).trapSites;
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ const TrapSiteVector& trapSites = trapSitesArray[trap];
+
+ size_t upperBound = trapSites.length();
+ size_t match;
+ if (BinarySearch(TrapSitePCOffset(trapSites), 0, upperBound, target,
+ &match)) {
+ MOZ_ASSERT(segment(t).containsCodePC(pc));
+ *trapOut = trap;
+ *bytecode = trapSites[match].bytecode;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+// When enabled, generate profiling labels for every name in funcNames_ that is
+// the name of some Function CodeRange. This involves malloc(), so do it now
+// since, once we start sampling, we'll be in a signal-handling context where we
+// cannot malloc.
+void Code::ensureProfilingLabels(bool profilingEnabled) const {
+ auto labels = profilingLabels_.lock();
+
+ if (!profilingEnabled) {
+ labels->clear();
+ return;
+ }
+
+ if (!labels->empty()) {
+ return;
+ }
+
+  // Any tier will do; we only need tier-invariant data that is incidentally
+  // stored with the code ranges.
+
+ for (const CodeRange& codeRange : metadata(stableTier()).codeRanges) {
+ if (!codeRange.isFunction()) {
+ continue;
+ }
+
+ Int32ToCStringBuf cbuf;
+ size_t bytecodeStrLen;
+ const char* bytecodeStr =
+ Uint32ToCString(&cbuf, codeRange.funcLineOrBytecode(), &bytecodeStrLen);
+ MOZ_ASSERT(bytecodeStr);
+
+ UTF8Bytes name;
+ if (!metadata().getFuncNameStandalone(codeRange.funcIndex(), &name)) {
+ return;
+ }
+ if (!name.append(" (", 2)) {
+ return;
+ }
+
+ if (const char* filename = metadata().filename.get()) {
+ if (!name.append(filename, strlen(filename))) {
+ return;
+ }
+ } else {
+ if (!name.append('?')) {
+ return;
+ }
+ }
+
+ if (!name.append(':') || !name.append(bytecodeStr, bytecodeStrLen) ||
+ !name.append(")\0", 2)) {
+ return;
+ }
+
+ UniqueChars label(name.extractOrCopyRawBuffer());
+ if (!label) {
+ return;
+ }
+
+ if (codeRange.funcIndex() >= labels->length()) {
+ if (!labels->resize(codeRange.funcIndex() + 1)) {
+ return;
+ }
+ }
+
+ ((CacheableCharsVector&)labels)[codeRange.funcIndex()] = std::move(label);
+ }
+}
+
+const char* Code::profilingLabel(uint32_t funcIndex) const {
+ auto labels = profilingLabels_.lock();
+
+ if (funcIndex >= labels->length() ||
+ !((CacheableCharsVector&)labels)[funcIndex]) {
+ return "?";
+ }
+ return ((CacheableCharsVector&)labels)[funcIndex].get();
+}
+
+void Code::addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const {
+ auto p = seenCode->lookupForAdd(this);
+ if (p) {
+ return;
+ }
+ bool ok = seenCode->add(p, this);
+ (void)ok; // oh well
+
+ *data += mallocSizeOf(this) +
+ metadata().sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata) +
+ profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf) +
+ jumpTables_.sizeOfMiscExcludingThis();
+
+ for (auto t : tiers()) {
+ codeTier(t).addSizeOfMisc(mallocSizeOf, code, data);
+ }
+}
+
+void Code::disassemble(JSContext* cx, Tier tier, int kindSelection,
+ PrintCallback printString) const {
+ const MetadataTier& metadataTier = metadata(tier);
+ const CodeTier& codeTier = this->codeTier(tier);
+ const ModuleSegment& segment = codeTier.segment();
+
+ for (const CodeRange& range : metadataTier.codeRanges) {
+ if (kindSelection & (1 << range.kind())) {
+ MOZ_ASSERT(range.begin() < segment.length());
+ MOZ_ASSERT(range.end() < segment.length());
+
+ const char* kind;
+ char kindbuf[128];
+ switch (range.kind()) {
+ case CodeRange::Function:
+ kind = "Function";
+ break;
+ case CodeRange::InterpEntry:
+ kind = "InterpEntry";
+ break;
+ case CodeRange::JitEntry:
+ kind = "JitEntry";
+ break;
+ case CodeRange::ImportInterpExit:
+ kind = "ImportInterpExit";
+ break;
+ case CodeRange::ImportJitExit:
+ kind = "ImportJitExit";
+ break;
+ default:
+ SprintfLiteral(kindbuf, "CodeRange::Kind(%d)", range.kind());
+ kind = kindbuf;
+ break;
+ }
+ const char* separator =
+ "\n--------------------------------------------------\n";
+      // The buffer is quite large in order to accommodate mangled C++ names;
+ // lengths over 3500 have been observed in the wild.
+ char buf[4096];
+ if (range.hasFuncIndex()) {
+ const char* funcName = "(unknown)";
+ UTF8Bytes namebuf;
+ if (metadata().getFuncNameStandalone(range.funcIndex(), &namebuf) &&
+ namebuf.append('\0')) {
+ funcName = namebuf.begin();
+ }
+ SprintfLiteral(buf, "%sKind = %s, index = %d, name = %s:\n", separator,
+ kind, range.funcIndex(), funcName);
+ } else {
+ SprintfLiteral(buf, "%sKind = %s\n", separator, kind);
+ }
+ printString(buf);
+
+ uint8_t* theCode = segment.base() + range.begin();
+ jit::Disassemble(theCode, range.end() - range.begin(), printString);
+ }
+ }
+}
+
+void wasm::PatchDebugSymbolicAccesses(uint8_t* codeBase, MacroAssembler& masm) {
+#ifdef WASM_CODEGEN_DEBUG
+ for (auto& access : masm.symbolicAccesses()) {
+ switch (access.target) {
+ case SymbolicAddress::PrintI32:
+ case SymbolicAddress::PrintPtr:
+ case SymbolicAddress::PrintF32:
+ case SymbolicAddress::PrintF64:
+ case SymbolicAddress::PrintText:
+ break;
+ default:
+ MOZ_CRASH("unexpected symbol in PatchDebugSymbolicAccesses");
+ }
+ ABIFunctionType abiType;
+ void* target = AddressOf(access.target, &abiType);
+ uint8_t* patchAt = codeBase + access.patchAt.offset();
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr(target),
+ PatchedImmPtr((void*)-1));
+ }
+#else
+ MOZ_ASSERT(masm.symbolicAccesses().empty());
+#endif
+}
diff --git a/js/src/wasm/WasmCode.h b/js/src/wasm/WasmCode.h
new file mode 100644
index 0000000000..518495199c
--- /dev/null
+++ b/js/src/wasm/WasmCode.h
@@ -0,0 +1,874 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_code_h
+#define wasm_code_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/UniquePtr.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <utility>
+
+#include "jstypes.h"
+
+#include "gc/Memory.h"
+#include "js/AllocPolicy.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+#include "threading/ExclusiveData.h"
+#include "util/Memory.h"
+#include "vm/MutexIDs.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmCompileArgs.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmExprType.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmLog.h"
+#include "wasm/WasmModuleTypes.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmShareable.h"
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValType.h"
+
+struct JS_PUBLIC_API JSContext;
+class JSFunction;
+
+namespace js {
+
+struct AsmJSMetadata;
+class ScriptSource;
+
+namespace jit {
+class MacroAssembler;
+};
+
+namespace wasm {
+
+struct MetadataTier;
+struct Metadata;
+
+// LinkData contains all the metadata necessary to patch all the locations
+// that depend on the absolute address of a ModuleSegment. This happens in a
+// "linking" step after compilation and after the module's code is serialized.
+// The LinkData is serialized along with the Module but does not (normally, see
+// Module::debugLinkData_ comment) persist after (de)serialization, which
+// distinguishes it from Metadata, which is stored in the Code object.
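+//
+// Illustrative flow (a sketch, not the exact call sites): after the module's
+// code bytes are copied into a ModuleSegment `ms`,
+//
+//   if (!StaticallyLink(ms, linkData)) { /* OOM */ }
+//
+// patches every recorded internal and symbolic link; StaticallyUnlink()
+// reverses the patching given the segment's base address (both functions are
+// declared later in this header and defined in WasmCode.cpp).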
+
+struct LinkDataCacheablePod {
+ uint32_t trapOffset = 0;
+
+ WASM_CHECK_CACHEABLE_POD(trapOffset);
+
+ LinkDataCacheablePod() = default;
+};
+
+WASM_DECLARE_CACHEABLE_POD(LinkDataCacheablePod);
+
+WASM_CHECK_CACHEABLE_POD_PADDING(LinkDataCacheablePod)
+
+struct LinkData : LinkDataCacheablePod {
+ explicit LinkData(Tier tier) : tier(tier) {}
+
+ LinkDataCacheablePod& pod() { return *this; }
+ const LinkDataCacheablePod& pod() const { return *this; }
+
+ struct InternalLink {
+ uint32_t patchAtOffset;
+ uint32_t targetOffset;
+#ifdef JS_CODELABEL_LINKMODE
+ uint32_t mode;
+#endif
+
+ WASM_CHECK_CACHEABLE_POD(patchAtOffset, targetOffset);
+#ifdef JS_CODELABEL_LINKMODE
+ WASM_CHECK_CACHEABLE_POD(mode)
+#endif
+ };
+ using InternalLinkVector = Vector<InternalLink, 0, SystemAllocPolicy>;
+
+ struct SymbolicLinkArray
+ : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ };
+
+ const Tier tier;
+ InternalLinkVector internalLinks;
+ SymbolicLinkArray symbolicLinks;
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+WASM_DECLARE_CACHEABLE_POD(LinkData::InternalLink);
+
+using UniqueLinkData = UniquePtr<LinkData>;
+
+// Executable code must be deallocated specially.
+
+struct FreeCode {
+ uint32_t codeLength;
+ FreeCode() : codeLength(0) {}
+ explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
+ void operator()(uint8_t* codeBytes);
+};
+
+using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;
+
+class Code;
+class CodeTier;
+class ModuleSegment;
+class LazyStubSegment;
+
+// CodeSegment contains common helpers for determining the base and length of a
+// code segment and whether a pc belongs to this segment. It is inherited by:
+// - ModuleSegment, i.e. the code segment of a Module, generated
+//   eagerly when a Module is instantiated.
+// - LazyStubSegment, i.e. the code segment of entry stubs that are lazily
+// generated.
+
+class CodeSegment {
+ protected:
+ enum class Kind { LazyStubs, Module };
+
+ CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind)
+ : bytes_(std::move(bytes)),
+ length_(length),
+ kind_(kind),
+ codeTier_(nullptr),
+ unregisterOnDestroy_(false) {}
+
+ bool initialize(const CodeTier& codeTier);
+
+ private:
+ const UniqueCodeBytes bytes_;
+ const uint32_t length_;
+ const Kind kind_;
+ const CodeTier* codeTier_;
+ bool unregisterOnDestroy_;
+
+ public:
+ bool initialized() const { return !!codeTier_; }
+ ~CodeSegment();
+
+ bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
+ bool isModule() const { return kind_ == Kind::Module; }
+ const ModuleSegment* asModule() const {
+ MOZ_ASSERT(isModule());
+ return (ModuleSegment*)this;
+ }
+ const LazyStubSegment* asLazyStub() const {
+ MOZ_ASSERT(isLazyStubs());
+ return (LazyStubSegment*)this;
+ }
+
+ uint8_t* base() const { return bytes_.get(); }
+ uint32_t length() const {
+ MOZ_ASSERT(length_ != UINT32_MAX);
+ return length_;
+ }
+
+ bool containsCodePC(const void* pc) const {
+ return pc >= base() && pc < (base() + length_);
+ }
+
+ const CodeTier& codeTier() const {
+ MOZ_ASSERT(initialized());
+ return *codeTier_;
+ }
+ const Code& code() const;
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const;
+};
+
+// A wasm ModuleSegment owns the allocated executable code for a wasm module.
+
+using UniqueModuleSegment = UniquePtr<ModuleSegment>;
+
+class ModuleSegment : public CodeSegment {
+ const Tier tier_;
+ uint8_t* const trapCode_;
+
+ public:
+ ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, uint32_t codeLength,
+ const LinkData& linkData);
+
+ static UniqueModuleSegment create(Tier tier, jit::MacroAssembler& masm,
+ const LinkData& linkData);
+ static UniqueModuleSegment create(Tier tier, const Bytes& unlinkedBytes,
+ const LinkData& linkData);
+
+ bool initialize(const CodeTier& codeTier, const LinkData& linkData,
+ const Metadata& metadata, const MetadataTier& metadataTier);
+
+ Tier tier() const { return tier_; }
+
+ // Pointers to stubs to which PC is redirected from the signal-handler.
+
+ uint8_t* trapCode() const { return trapCode_; }
+
+ const CodeRange* lookupRange(const void* pc) const;
+
+ void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+
+ WASM_DECLARE_FRIEND_SERIALIZE(ModuleSegment);
+};
+
+extern UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);
+extern bool StaticallyLink(const ModuleSegment& ms, const LinkData& linkData);
+extern void StaticallyUnlink(uint8_t* base, const LinkData& linkData);
+
+// A FuncExport represents a single function definition inside a wasm Module
+// that has been exported one or more times. A FuncExport represents an
+// internal entry point that can be called via function definition index by
+// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
+// function definition index, the FuncExportVector is stored sorted by
+// function definition index.
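+//
+// For example (illustrative only; `metadataTier` and `metadata` stand for the
+// owning tier's metadata objects declared later in this header):
+//
+//   const FuncExport& fe = metadataTier.lookupFuncExport(funcIndex);
+//   const FuncType& type = metadata.getFuncExportType(fe);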
+
+class FuncExport {
+ uint32_t typeIndex_;
+ uint32_t funcIndex_;
+ uint32_t eagerInterpEntryOffset_; // Machine code offset
+ bool hasEagerStubs_;
+
+ WASM_CHECK_CACHEABLE_POD(typeIndex_, funcIndex_, eagerInterpEntryOffset_,
+ hasEagerStubs_);
+
+ public:
+ FuncExport() = default;
+ explicit FuncExport(uint32_t typeIndex, uint32_t funcIndex,
+ bool hasEagerStubs) {
+ typeIndex_ = typeIndex;
+ funcIndex_ = funcIndex;
+ eagerInterpEntryOffset_ = UINT32_MAX;
+ hasEagerStubs_ = hasEagerStubs;
+ }
+ void initEagerInterpEntryOffset(uint32_t entryOffset) {
+ MOZ_ASSERT(eagerInterpEntryOffset_ == UINT32_MAX);
+ MOZ_ASSERT(hasEagerStubs());
+ eagerInterpEntryOffset_ = entryOffset;
+ }
+
+ bool hasEagerStubs() const { return hasEagerStubs_; }
+ uint32_t typeIndex() const { return typeIndex_; }
+ uint32_t funcIndex() const { return funcIndex_; }
+ uint32_t eagerInterpEntryOffset() const {
+ MOZ_ASSERT(eagerInterpEntryOffset_ != UINT32_MAX);
+ MOZ_ASSERT(hasEagerStubs());
+ return eagerInterpEntryOffset_;
+ }
+};
+
+WASM_DECLARE_CACHEABLE_POD(FuncExport);
+
+using FuncExportVector = Vector<FuncExport, 0, SystemAllocPolicy>;
+
+// A FuncImport contains the runtime metadata needed to implement a call to an
+// imported function. Each function import has two call stubs: an optimized path
+// into JIT code and a slow path into the generic C++ js::Invoke. The offsets of
+// these stubs are stored so that function-import callsites can be dynamically
+// patched at runtime.
+
+class FuncImport {
+ private:
+ uint32_t typeIndex_;
+ uint32_t instanceOffset_;
+ uint32_t interpExitCodeOffset_; // Machine code offset
+ uint32_t jitExitCodeOffset_; // Machine code offset
+
+ WASM_CHECK_CACHEABLE_POD(typeIndex_, instanceOffset_, interpExitCodeOffset_,
+ jitExitCodeOffset_);
+
+ public:
+ FuncImport()
+ : typeIndex_(0),
+ instanceOffset_(0),
+ interpExitCodeOffset_(0),
+ jitExitCodeOffset_(0) {}
+
+ FuncImport(uint32_t typeIndex, uint32_t instanceOffset) {
+ typeIndex_ = typeIndex;
+ instanceOffset_ = instanceOffset;
+ interpExitCodeOffset_ = 0;
+ jitExitCodeOffset_ = 0;
+ }
+
+ void initInterpExitOffset(uint32_t off) {
+ MOZ_ASSERT(!interpExitCodeOffset_);
+ interpExitCodeOffset_ = off;
+ }
+ void initJitExitOffset(uint32_t off) {
+ MOZ_ASSERT(!jitExitCodeOffset_);
+ jitExitCodeOffset_ = off;
+ }
+
+ uint32_t typeIndex() const { return typeIndex_; }
+ uint32_t instanceOffset() const { return instanceOffset_; }
+ uint32_t interpExitCodeOffset() const { return interpExitCodeOffset_; }
+ uint32_t jitExitCodeOffset() const { return jitExitCodeOffset_; }
+};
+
+WASM_DECLARE_CACHEABLE_POD(FuncImport)
+
+using FuncImportVector = Vector<FuncImport, 0, SystemAllocPolicy>;
+
+// Metadata holds all the data that is needed to describe compiled wasm code
+// at runtime (as opposed to data that is only used to statically link or
+// instantiate a module).
+//
+// Metadata is built incrementally by ModuleGenerator and then shared immutably
+// between modules.
+//
+// The Metadata structure is split into tier-invariant and tier-variant parts;
+// the former points to instances of the latter. Additionally, the asm.js
+// subsystem subclasses the Metadata, adding more tier-invariant data, some of
+// which is serialized. See AsmJS.cpp.
+
+struct MetadataCacheablePod {
+ ModuleKind kind;
+ Maybe<MemoryDesc> memory;
+ uint32_t instanceDataLength;
+ Maybe<uint32_t> startFuncIndex;
+ Maybe<uint32_t> nameCustomSectionIndex;
+ bool filenameIsURL;
+ bool omitsBoundsChecks;
+ uint32_t typeDefsOffsetStart;
+ uint32_t tablesOffsetStart;
+ uint32_t tagsOffsetStart;
+ uint32_t padding;
+
+ WASM_CHECK_CACHEABLE_POD(kind, memory, instanceDataLength, startFuncIndex,
+ nameCustomSectionIndex, filenameIsURL,
+ omitsBoundsChecks, typeDefsOffsetStart,
+ tablesOffsetStart, tagsOffsetStart)
+
+ explicit MetadataCacheablePod(ModuleKind kind)
+ : kind(kind),
+ instanceDataLength(0),
+ filenameIsURL(false),
+ omitsBoundsChecks(false),
+ typeDefsOffsetStart(UINT32_MAX),
+ tablesOffsetStart(UINT32_MAX),
+ tagsOffsetStart(UINT32_MAX),
+ padding(0) {}
+};
+
+WASM_DECLARE_CACHEABLE_POD(MetadataCacheablePod)
+
+WASM_CHECK_CACHEABLE_POD_PADDING(MetadataCacheablePod)
+
+using ModuleHash = uint8_t[8];
+
+struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
+ SharedTypeContext types;
+ GlobalDescVector globals;
+ TableDescVector tables;
+ TagDescVector tags;
+ CacheableChars filename;
+ CacheableChars sourceMapURL;
+
+  // namePayload points at the name section's CustomSection::payload so that
+  // the Names (which use payload-relative offsets) can be used independently
+  // of the Module without duplicating the name section.
+ SharedBytes namePayload;
+ Maybe<Name> moduleName;
+ NameVector funcNames;
+
+ // Debug-enabled code is not serialized.
+ bool debugEnabled;
+ Uint32Vector debugFuncTypeIndices;
+ ModuleHash debugHash;
+
+ explicit Metadata(ModuleKind kind = ModuleKind::Wasm)
+ : MetadataCacheablePod(kind), debugEnabled(false), debugHash() {}
+ virtual ~Metadata() = default;
+
+ MetadataCacheablePod& pod() { return *this; }
+ const MetadataCacheablePod& pod() const { return *this; }
+
+ bool usesMemory() const { return memory.isSome(); }
+ bool usesSharedMemory() const {
+ return memory.isSome() && memory->isShared();
+ }
+
+ const FuncType& getFuncImportType(const FuncImport& funcImport) const {
+ return types->type(funcImport.typeIndex()).funcType();
+ }
+ const FuncType& getFuncExportType(const FuncExport& funcExport) const {
+ return types->type(funcExport.typeIndex()).funcType();
+ }
+
+ size_t debugNumFuncs() const { return debugFuncTypeIndices.length(); }
+ const FuncType& debugFuncType(uint32_t funcIndex) const {
+ MOZ_ASSERT(debugEnabled);
+ return types->type(debugFuncTypeIndices[funcIndex]).funcType();
+ }
+
+ // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
+ // encapsulated within AsmJS.cpp, but the additional virtual functions allow
+ // asm.js to override wasm behavior in the handful of cases that can't be
+ // easily encapsulated by AsmJS.cpp.
+
+ bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
+ const AsmJSMetadata& asAsmJS() const {
+ MOZ_ASSERT(isAsmJS());
+ return *(const AsmJSMetadata*)this;
+ }
+ virtual bool mutedErrors() const { return false; }
+ virtual const char16_t* displayURL() const { return nullptr; }
+ virtual ScriptSource* maybeScriptSource() const { return nullptr; }
+
+ // The Developer-Facing Display Conventions section of the WebAssembly Web
+ // API spec defines two cases for displaying a wasm function name:
+ // 1. the function name stands alone
+ // 2. the function name precedes the location
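+  //
+  // For example (illustrative, given the default getFuncName in WasmCode.cpp):
+  // a function named "f" in a module named "m" is rendered "m.f"; an unnamed
+  // function at index 3 falls back to "wasm-function[3]" in the Standalone
+  // context, while BeforeLocation omits that fallback so the location that
+  // follows can stand on its own.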
+
+ enum NameContext { Standalone, BeforeLocation };
+
+ virtual bool getFuncName(NameContext ctx, uint32_t funcIndex,
+ UTF8Bytes* name) const;
+
+ bool getFuncNameStandalone(uint32_t funcIndex, UTF8Bytes* name) const {
+ return getFuncName(NameContext::Standalone, funcIndex, name);
+ }
+ bool getFuncNameBeforeLocation(uint32_t funcIndex, UTF8Bytes* name) const {
+ return getFuncName(NameContext::BeforeLocation, funcIndex, name);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(Metadata);
+};
+
+using MutableMetadata = RefPtr<Metadata>;
+using SharedMetadata = RefPtr<const Metadata>;
+
+struct MetadataTier {
+ explicit MetadataTier(Tier tier = Tier::Serialized)
+ : tier(tier), debugTrapOffset(0) {}
+
+ const Tier tier;
+
+ Uint32Vector funcToCodeRange;
+ CodeRangeVector codeRanges;
+ CallSiteVector callSites;
+ TrapSiteVectorArray trapSites;
+ FuncImportVector funcImports;
+ FuncExportVector funcExports;
+ StackMaps stackMaps;
+ TryNoteVector tryNotes;
+
+ // Debug information, not serialized.
+ uint32_t debugTrapOffset;
+
+ FuncExport& lookupFuncExport(uint32_t funcIndex,
+ size_t* funcExportIndex = nullptr);
+ const FuncExport& lookupFuncExport(uint32_t funcIndex,
+ size_t* funcExportIndex = nullptr) const;
+
+ const CodeRange& codeRange(const FuncExport& funcExport) const {
+ return codeRanges[funcToCodeRange[funcExport.funcIndex()]];
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+using UniqueMetadataTier = UniquePtr<MetadataTier>;
+
+// LazyStubSegment is a code segment lazily generated for function entry stubs
+// (both interpreter and jit ones).
+//
+// Because a stub is usually small (a few KiB) and an executable code segment
+// isn't (64KiB), a given stub segment can contain entry stubs of many
+// functions.
+
+using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
+using LazyStubSegmentVector =
+ Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;
+
+class LazyStubSegment : public CodeSegment {
+ CodeRangeVector codeRanges_;
+ size_t usedBytes_;
+
+ public:
+ LazyStubSegment(UniqueCodeBytes bytes, size_t length)
+ : CodeSegment(std::move(bytes), length, CodeSegment::Kind::LazyStubs),
+ usedBytes_(0) {}
+
+ static UniqueLazyStubSegment create(const CodeTier& codeTier,
+ size_t codeLength);
+
+ static size_t AlignBytesNeeded(size_t bytes) {
+ return AlignBytes(bytes, gc::SystemPageSize());
+ }
+
+ bool hasSpace(size_t bytes) const;
+ [[nodiscard]] bool addStubs(const Metadata& metadata, size_t codeLength,
+ const Uint32Vector& funcExportIndices,
+ const FuncExportVector& funcExports,
+ const CodeRangeVector& codeRanges,
+ uint8_t** codePtr,
+ size_t* indexFirstInsertedCodeRange);
+
+ const CodeRangeVector& codeRanges() const { return codeRanges_; }
+ [[nodiscard]] const CodeRange* lookupRange(const void* pc) const;
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+};
+
+// LazyFuncExport helps to efficiently look up a CodeRange from a given function
+// index. It is inserted in a vector sorted by function index so that it can be
+// binary-searched later.
+
+struct LazyFuncExport {
+ size_t funcIndex;
+ size_t lazyStubSegmentIndex;
+ size_t funcCodeRangeIndex;
+ LazyFuncExport(size_t funcIndex, size_t lazyStubSegmentIndex,
+ size_t funcCodeRangeIndex)
+ : funcIndex(funcIndex),
+ lazyStubSegmentIndex(lazyStubSegmentIndex),
+ funcCodeRangeIndex(funcCodeRangeIndex) {}
+};
+
+using LazyFuncExportVector = Vector<LazyFuncExport, 0, SystemAllocPolicy>;
+
+// LazyStubTier contains all the necessary information for lazy function entry
+// stubs that are generated at runtime. None of its data are ever serialized.
+//
+// It must be protected by a lock, because the main thread can both read and
+// write lazy stubs at any time, while a background thread can concurrently
+// regenerate lazy stubs for tier2.
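+//
+// Illustrative use (a sketch only; it assumes RWExclusiveData exposes a
+// writeLock() guard, and that `codeTier`, `metadata`, and the indices come
+// from the owning Code and are in range):
+//
+//   auto stubs = codeTier.lazyStubs().writeLock();
+//   if (!stubs->hasEntryStub(funcIndex) &&
+//       !stubs->createOneEntryStub(funcExportIndex, metadata, codeTier)) {
+//     return false;  // OOM
+//   }
+//   void* interpEntry = stubs->lookupInterpEntry(funcIndex);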
+
+class LazyStubTier {
+ LazyStubSegmentVector stubSegments_;
+ LazyFuncExportVector exports_;
+ size_t lastStubSegmentIndex_;
+
+ [[nodiscard]] bool createManyEntryStubs(const Uint32Vector& funcExportIndices,
+ const Metadata& metadata,
+ const CodeTier& codeTier,
+ size_t* stubSegmentIndex);
+
+ public:
+ LazyStubTier() : lastStubSegmentIndex_(0) {}
+
+ // Creates one lazy stub for the exported function, for which the jit entry
+ // will be set to the lazily-generated one.
+ [[nodiscard]] bool createOneEntryStub(uint32_t funcExportIndex,
+ const Metadata& metadata,
+ const CodeTier& codeTier);
+
+ bool entryStubsEmpty() const { return stubSegments_.empty(); }
+ bool hasEntryStub(uint32_t funcIndex) const;
+
+ // Returns a pointer to the raw interpreter entry of a given function for
+ // which stubs have been lazily generated.
+ [[nodiscard]] void* lookupInterpEntry(uint32_t funcIndex) const;
+
+  // Create one lazy stub for each of the functions in funcExportIndices,
+  // putting them all in a single stub segment. Jit entries won't be used until
+  // setJitEntries() is actually called, after the Code owner has committed
+  // tier2.
+ [[nodiscard]] bool createTier2(const Uint32Vector& funcExportIndices,
+ const Metadata& metadata,
+ const CodeTier& codeTier,
+ Maybe<size_t>* stubSegmentIndex);
+ void setJitEntries(const Maybe<size_t>& stubSegmentIndex, const Code& code);
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+};
+
+// CodeTier contains all the data related to a given compilation tier. It is
+// built during module generation and then immutably stored in a Code.
+
+using UniqueCodeTier = UniquePtr<CodeTier>;
+using UniqueConstCodeTier = UniquePtr<const CodeTier>;
+
+class CodeTier {
+ const Code* code_;
+
+ // Serialized information.
+ const UniqueMetadataTier metadata_;
+ const UniqueModuleSegment segment_;
+
+ // Lazy stubs, not serialized.
+ RWExclusiveData<LazyStubTier> lazyStubs_;
+
+ static const MutexId& mutexForTier(Tier tier) {
+ if (tier == Tier::Baseline) {
+ return mutexid::WasmLazyStubsTier1;
+ }
+ MOZ_ASSERT(tier == Tier::Optimized);
+ return mutexid::WasmLazyStubsTier2;
+ }
+
+ public:
+ CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment)
+ : code_(nullptr),
+ metadata_(std::move(metadata)),
+ segment_(std::move(segment)),
+ lazyStubs_(mutexForTier(segment_->tier())) {}
+
+ bool initialized() const { return !!code_ && segment_->initialized(); }
+ bool initialize(const Code& code, const LinkData& linkData,
+ const Metadata& metadata);
+
+ Tier tier() const { return segment_->tier(); }
+ const RWExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
+ const MetadataTier& metadata() const { return *metadata_.get(); }
+ const ModuleSegment& segment() const { return *segment_.get(); }
+ const Code& code() const {
+ MOZ_ASSERT(initialized());
+ return *code_;
+ }
+
+ const CodeRange* lookupRange(const void* pc) const;
+ const TryNote* lookupTryNote(const void* pc) const;
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+
+ WASM_DECLARE_FRIEND_SERIALIZE_ARGS(CodeTier, const wasm::LinkData& data);
+};
+
+// Jump tables that implement function tiering and fast js-to-wasm calls.
+//
+// There is one JumpTable object per Code object, holding two jump tables: the
+// tiering jump table and the jit-entry jump table. The JumpTable is not
+// serialized with its Code, but is a run-time entity only. At run-time it is
+// shared across threads with its owning Code (and the Module that owns the
+// Code). Values in the JumpTable /must/ /always/ be JSContext-agnostic and
+// Instance-agnostic, because of this sharing.
+//
+// Both jump tables have a number of entries equal to the number of functions in
+// their Module, including imports. In the tiering table, the elements
+// corresponding to the Module's imported functions are unused; in the jit-entry
+// table, the elements corresponding to the Module's non-exported functions are
+// unused. (Functions can be exported explicitly via the exports section or
+// implicitly via a mention of their indices outside function bodies.) See
+// comments at JumpTables::init() and WasmInstanceObject::getExportedFunction().
+// The entries are void*. Unused entries are null.
+//
+// The tiering jump table.
+//
+// This table holds code pointers that are used by baseline functions to enter
+// optimized code. See the large comment block in WasmCompile.cpp for
+// information about how tiering works.
+//
+// The jit-entry jump table.
+//
+// The jit-entry jump table entry for a function holds a stub that allows Jitted
+// JS code to call wasm using the JS JIT ABI. See large comment block at
+// WasmInstanceObject::getExportedFunction() for more about exported functions
+// and stubs and the lifecycle of the entries in the jit-entry table - there are
+// complex invariants.
+
+class JumpTables {
+ using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;
+
+ CompileMode mode_;
+ TablePointer tiering_;
+ TablePointer jit_;
+ size_t numFuncs_;
+
+ static_assert(
+ JumpTableJitEntryOffset == 0,
+ "Each jit entry in table must have compatible layout with BaseScript and"
+ "SelfHostedLazyScript");
+
+ public:
+ bool init(CompileMode mode, const ModuleSegment& ms,
+ const CodeRangeVector& codeRanges);
+
+ void setJitEntry(size_t i, void* target) const {
+ // Make sure that write is atomic; see comment in wasm::Module::finishTier2
+ // to that effect.
+ MOZ_ASSERT(i < numFuncs_);
+ jit_.get()[i] = target;
+ }
+ void setJitEntryIfNull(size_t i, void* target) const {
+ // Make sure that compare-and-write is atomic; see comment in
+ // wasm::Module::finishTier2 to that effect.
+ MOZ_ASSERT(i < numFuncs_);
+ void* expected = nullptr;
+ (void)__atomic_compare_exchange_n(&jit_.get()[i], &expected, target,
+ /*weak=*/false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+ }
+ void** getAddressOfJitEntry(size_t i) const {
+ MOZ_ASSERT(i < numFuncs_);
+ MOZ_ASSERT(jit_.get()[i]);
+ return &jit_.get()[i];
+ }
+ size_t funcIndexFromJitEntry(void** target) const {
+ MOZ_ASSERT(target >= &jit_.get()[0]);
+ MOZ_ASSERT(target <= &(jit_.get()[numFuncs_ - 1]));
+ return (intptr_t*)target - (intptr_t*)&jit_.get()[0];
+ }
+
+ void setTieringEntry(size_t i, void* target) const {
+ MOZ_ASSERT(i < numFuncs_);
+ // See comment in wasm::Module::finishTier2.
+ if (mode_ == CompileMode::Tier1) {
+ tiering_.get()[i] = target;
+ }
+ }
+ void** tiering() const { return tiering_.get(); }
+
+ size_t sizeOfMiscExcludingThis() const {
+ // 2 words per function for the jit entry table, plus maybe 1 per
+ // function if we're tiering.
+ return sizeof(void*) * (2 + (tiering_ ? 1 : 0)) * numFuncs_;
+ }
+};
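+
+// Illustrative sketch only (hypothetical function, not part of this patch):
+// tiering up a single function amounts to writing its tier-2 entry point into
+// the tiering table via the owning Code object.
+//
+//   void PatchTieringEntryForTier2(const Code& code, const CodeRange& range,
+//                                  uint8_t* tier2Base) {
+//     // Point the tier-1 prologue's indirect jump at the tier-2 body.
+//     code.setTieringEntry(range.funcIndex(),
+//                          tier2Base + range.funcTierEntry());
+//   }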
+
+// Code objects own executable code and the metadata that describe it. A single
+// Code object is normally shared between a module and all its instances.
+//
+// profilingLabels_ is lazily initialized, but behind a lock.
+
+using SharedCode = RefPtr<const Code>;
+using MutableCode = RefPtr<Code>;
+
+class Code : public ShareableBase<Code> {
+ UniqueCodeTier tier1_;
+
+ // [SMDOC] Tier-2 data
+ //
+ // hasTier2_ and tier2_ implement a three-state protocol for broadcasting
+ // tier-2 data; this also amounts to a single-writer/multiple-reader setup.
+ //
+ // Initially hasTier2_ is false and tier2_ is null.
+ //
+ // While hasTier2_ is false, *no* thread may read tier2_, but one thread may
+ // make tier2_ non-null (this will be the tier-2 compiler thread). That same
+ // thread must then later set hasTier2_ to true to broadcast the tier2_ value
+ // and its availability. Note that the writing thread may not itself read
+ // tier2_ before setting hasTier2_, in order to simplify reasoning about
+ // global invariants.
+ //
+ // Once hasTier2_ is true, *no* thread may write tier2_ and *no* thread may
+ // read tier2_ without having observed hasTier2_ as true first. Once
+ // hasTier2_ is true, it stays true.
+ mutable UniqueConstCodeTier tier2_;
+ mutable Atomic<bool> hasTier2_;
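+
+  // Illustrative pseudo-code for the tier-2 publication protocol above (not
+  // part of this patch):
+  //
+  //   // tier-2 compiler thread (the single writer)
+  //   tier2_ = std::move(newTier);
+  //   hasTier2_ = true;  // publish; the writer never reads tier2_ first
+  //
+  //   // any reader thread
+  //   if (hasTier2_) {
+  //     use(*tier2_);  // safe: tier2_ was written before hasTier2_
+  //   }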
+
+ SharedMetadata metadata_;
+ ExclusiveData<CacheableCharsVector> profilingLabels_;
+ JumpTables jumpTables_;
+
+ public:
+ Code(UniqueCodeTier tier1, const Metadata& metadata,
+ JumpTables&& maybeJumpTables);
+ bool initialized() const { return tier1_->initialized(); }
+
+ bool initialize(const LinkData& linkData);
+
+ void setTieringEntry(size_t i, void* target) const {
+ jumpTables_.setTieringEntry(i, target);
+ }
+ void** tieringJumpTable() const { return jumpTables_.tiering(); }
+
+ void setJitEntry(size_t i, void* target) const {
+ jumpTables_.setJitEntry(i, target);
+ }
+ void setJitEntryIfNull(size_t i, void* target) const {
+ jumpTables_.setJitEntryIfNull(i, target);
+ }
+ void** getAddressOfJitEntry(size_t i) const {
+ return jumpTables_.getAddressOfJitEntry(i);
+ }
+ uint32_t getFuncIndex(JSFunction* fun) const;
+
+ // Install the tier2 code without committing it. To maintain the invariant
+ // that tier2_ is never accessed without the tier having been committed, this
+ // returns a pointer to the installed tier that the caller can use for
+ // subsequent operations.
+ bool setAndBorrowTier2(UniqueCodeTier tier2, const LinkData& linkData,
+ const CodeTier** borrowedTier) const;
+ void commitTier2() const;
+
+ bool hasTier2() const { return hasTier2_; }
+ Tiers tiers() const;
+ bool hasTier(Tier t) const;
+
+ Tier stableTier() const; // This is stable during a run
+ Tier bestTier()
+ const; // This may transition from Baseline -> Ion at any time
+
+ const CodeTier& codeTier(Tier tier) const;
+ const Metadata& metadata() const { return *metadata_; }
+
+ const ModuleSegment& segment(Tier iter) const {
+ return codeTier(iter).segment();
+ }
+ const MetadataTier& metadata(Tier iter) const {
+ return codeTier(iter).metadata();
+ }
+
+ // Metadata lookup functions:
+
+ const CallSite* lookupCallSite(void* returnAddress) const;
+ const CodeRange* lookupFuncRange(void* pc) const;
+ const StackMap* lookupStackMap(uint8_t* nextPC) const;
+ const TryNote* lookupTryNote(void* pc, Tier* tier) const;
+ bool containsCodePC(const void* pc) const;
+ bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;
+
+ // To save memory, profilingLabels_ are generated lazily when profiling mode
+ // is enabled.
+
+ void ensureProfilingLabels(bool profilingEnabled) const;
+ const char* profilingLabel(uint32_t funcIndex) const;
+
+ // Wasm disassembly support
+
+ void disassemble(JSContext* cx, Tier tier, int kindSelection,
+ PrintCallback printString) const;
+
+ // about:memory reporting:
+
+ void addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const;
+
+ WASM_DECLARE_FRIEND_SERIALIZE_ARGS(SharedCode, const wasm::LinkData& data);
+};
+
+void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_code_h
diff --git a/js/src/wasm/WasmCodegenConstants.h b/js/src/wasm/WasmCodegenConstants.h
new file mode 100644
index 0000000000..8577c646ff
--- /dev/null
+++ b/js/src/wasm/WasmCodegenConstants.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_codegen_constants_h
+#define wasm_codegen_constants_h
+
+#include <stdint.h>
+
+namespace js {
+namespace wasm {
+
+static const unsigned MaxArgsForJitInlineCall = 8;
+static const unsigned MaxResultsForJitEntry = 1;
+static const unsigned MaxResultsForJitExit = 1;
+static const unsigned MaxResultsForJitInlineCall = MaxResultsForJitEntry;
+// The maximum number of results of a function call or block that may be
+// returned in registers.
+static const unsigned MaxRegisterResults = 1;
+
+// A magic value of the InstanceReg to indicate after a return to the entry
+// stub that an exception has been caught and that we should throw.
+
+static const unsigned FailInstanceReg = 0xbad;
+
+// The following thresholds were derived from a microbenchmark. If we begin to
+// ship this optimization for more platforms, we will need to extend this list.
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
+static const uint32_t MaxInlineMemoryCopyLength = 64;
+static const uint32_t MaxInlineMemoryFillLength = 64;
+#elif defined(JS_CODEGEN_X86)
+static const uint32_t MaxInlineMemoryCopyLength = 32;
+static const uint32_t MaxInlineMemoryFillLength = 32;
+#else
+static const uint32_t MaxInlineMemoryCopyLength = 0;
+static const uint32_t MaxInlineMemoryFillLength = 0;
+#endif
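+
+// Illustrative sketch only (hypothetical helper, not part of this patch): a
+// compiler backend consults these thresholds when lowering memory.copy or
+// memory.fill with a constant length, and otherwise calls the out-of-line
+// builtin.
+//
+//   static bool CanInlineMemoryCopy(uint32_t constLength) {
+//     return constLength <= MaxInlineMemoryCopyLength;
+//   }
+//
+// For example, a constant length of 48 bytes is expanded inline on x64 and
+// arm64 (limit 64) but goes through the builtin call on x86 (limit 32).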
+
+// The size we round all super type vectors to. All accesses below this length
+// can avoid bounds checks. The value of 8 was chosen after a bit of profiling
+// with the Dart Barista benchmark.
+//
+// Keep jit-tests/tests/wasm/gc/casting.js in sync with this constant.
+static const uint32_t MinSuperTypeVectorLength = 8;
+
+// An exported wasm function may have a 'jit entry' stub attached that can be
+// called using the JS JIT ABI. This relies on the pointer we store in the
+// `NativeJitInfoOrInterpretedScriptSlot` slot of JSFunction to have a
+// compatible representation with BaseScript/SelfHostedLazyScript so that
+// `masm.loadJitCodeRaw` works.
+//
+// We store jit entry pointers in an array (see wasm::JumpTable) and store the
+// pointer to a function's jit entry in the JSFunction slot. We rely on the
+// below offset of each entry in the jump table to be compatible with
+// BaseScript/SelfHostedLazyScript.
+static const uint32_t JumpTableJitEntryOffset = 0;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_codegen_constants_h
diff --git a/js/src/wasm/WasmCodegenTypes.cpp b/js/src/wasm/WasmCodegenTypes.cpp
new file mode 100644
index 0000000000..6460fd3322
--- /dev/null
+++ b/js/src/wasm/WasmCodegenTypes.cpp
@@ -0,0 +1,240 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCodegenTypes.h"
+
+#include "wasm/WasmExprType.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValidate.h"
+#include "wasm/WasmValue.h"
+
+using mozilla::MakeEnumeratedRange;
+using mozilla::PodZero;
+
+using namespace js;
+using namespace js::wasm;
+
+ArgTypeVector::ArgTypeVector(const FuncType& funcType)
+ : args_(funcType.args()),
+ hasStackResults_(ABIResultIter::HasStackResults(
+ ResultType::Vector(funcType.results()))) {}
+
+bool TrapSiteVectorArray::empty() const {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ if (!(*this)[trap].empty()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void TrapSiteVectorArray::clear() {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ (*this)[trap].clear();
+ }
+}
+
+void TrapSiteVectorArray::swap(TrapSiteVectorArray& rhs) {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ (*this)[trap].swap(rhs[trap]);
+ }
+}
+
+void TrapSiteVectorArray::shrinkStorageToFit() {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ (*this)[trap].shrinkStorageToFit();
+ }
+}
+
+size_t TrapSiteVectorArray::sizeOfExcludingThis(
+ MallocSizeOf mallocSizeOf) const {
+ size_t ret = 0;
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ ret += (*this)[trap].sizeOfExcludingThis(mallocSizeOf);
+ }
+ return ret;
+}
+
+CodeRange::CodeRange(Kind kind, Offsets offsets)
+ : begin_(offsets.begin), ret_(0), end_(offsets.end), kind_(kind) {
+ MOZ_ASSERT(begin_ <= end_);
+ PodZero(&u);
+#ifdef DEBUG
+ switch (kind_) {
+ case FarJumpIsland:
+ case TrapExit:
+ case Throw:
+ break;
+ default:
+ MOZ_CRASH("should use more specific constructor");
+ }
+#endif
+}
+
+CodeRange::CodeRange(Kind kind, uint32_t funcIndex, Offsets offsets)
+ : begin_(offsets.begin), ret_(0), end_(offsets.end), kind_(kind) {
+ u.funcIndex_ = funcIndex;
+ u.func.lineOrBytecode_ = 0;
+ u.func.beginToUncheckedCallEntry_ = 0;
+ u.func.beginToTierEntry_ = 0;
+ MOZ_ASSERT(isEntry());
+ MOZ_ASSERT(begin_ <= end_);
+}
+
+CodeRange::CodeRange(Kind kind, CallableOffsets offsets)
+ : begin_(offsets.begin), ret_(offsets.ret), end_(offsets.end), kind_(kind) {
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ PodZero(&u);
+#ifdef DEBUG
+ switch (kind_) {
+ case DebugTrap:
+ case BuiltinThunk:
+ break;
+ default:
+ MOZ_CRASH("should use more specific constructor");
+ }
+#endif
+}
+
+CodeRange::CodeRange(Kind kind, uint32_t funcIndex, CallableOffsets offsets)
+ : begin_(offsets.begin), ret_(offsets.ret), end_(offsets.end), kind_(kind) {
+ MOZ_ASSERT(isImportExit() || isJitEntry());
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ u.funcIndex_ = funcIndex;
+ u.func.lineOrBytecode_ = 0;
+ u.func.beginToUncheckedCallEntry_ = 0;
+ u.func.beginToTierEntry_ = 0;
+}
+
+CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode,
+ FuncOffsets offsets)
+ : begin_(offsets.begin),
+ ret_(offsets.ret),
+ end_(offsets.end),
+ kind_(Function) {
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ MOZ_ASSERT(offsets.uncheckedCallEntry - begin_ <= UINT16_MAX);
+ MOZ_ASSERT(offsets.tierEntry - begin_ <= UINT16_MAX);
+ u.funcIndex_ = funcIndex;
+ u.func.lineOrBytecode_ = funcLineOrBytecode;
+ u.func.beginToUncheckedCallEntry_ = offsets.uncheckedCallEntry - begin_;
+ u.func.beginToTierEntry_ = offsets.tierEntry - begin_;
+}
+
+const CodeRange* wasm::LookupInSorted(const CodeRangeVector& codeRanges,
+ CodeRange::OffsetInCode target) {
+ size_t lowerBound = 0;
+ size_t upperBound = codeRanges.length();
+
+ size_t match;
+ if (!BinarySearch(codeRanges, lowerBound, upperBound, target, &match)) {
+ return nullptr;
+ }
+
+ return &codeRanges[match];
+}
+
+CallIndirectId CallIndirectId::forAsmJSFunc() {
+ return CallIndirectId(CallIndirectIdKind::AsmJS, 0);
+}
+
+CallIndirectId CallIndirectId::forFunc(const ModuleEnvironment& moduleEnv,
+ uint32_t funcIndex) {
+  // asm.js tables are homogeneous and don't require a signature check
+ if (moduleEnv.isAsmJS()) {
+ return CallIndirectId::forAsmJSFunc();
+ }
+
+ FuncDesc func = moduleEnv.funcs[funcIndex];
+ if (!func.canRefFunc()) {
+ return CallIndirectId();
+ }
+ return CallIndirectId::forFuncType(moduleEnv,
+ moduleEnv.funcs[funcIndex].typeIndex);
+}
+
+CallIndirectId CallIndirectId::forFuncType(const ModuleEnvironment& moduleEnv,
+ uint32_t funcTypeIndex) {
+  // asm.js tables are homogeneous and don't require a signature check
+ if (moduleEnv.isAsmJS()) {
+ return CallIndirectId::forAsmJSFunc();
+ }
+
+ const FuncType& funcType = moduleEnv.types->type(funcTypeIndex).funcType();
+ if (funcType.hasImmediateTypeId()) {
+ return CallIndirectId(CallIndirectIdKind::Immediate,
+ funcType.immediateTypeId());
+ }
+ return CallIndirectId(CallIndirectIdKind::Global,
+ moduleEnv.offsetOfTypeDef(funcTypeIndex));
+}
+
+CalleeDesc CalleeDesc::function(uint32_t funcIndex) {
+ CalleeDesc c;
+ c.which_ = Func;
+ c.u.funcIndex_ = funcIndex;
+ return c;
+}
+CalleeDesc CalleeDesc::import(uint32_t instanceDataOffset) {
+ CalleeDesc c;
+ c.which_ = Import;
+ c.u.import.instanceDataOffset_ = instanceDataOffset;
+ return c;
+}
+CalleeDesc CalleeDesc::wasmTable(const ModuleEnvironment& moduleEnv,
+ const TableDesc& desc, uint32_t tableIndex,
+ CallIndirectId callIndirectId) {
+ CalleeDesc c;
+ c.which_ = WasmTable;
+ c.u.table.instanceDataOffset_ =
+ moduleEnv.offsetOfTableInstanceData(tableIndex);
+ c.u.table.minLength_ = desc.initialLength;
+ c.u.table.maxLength_ = desc.maximumLength;
+ c.u.table.callIndirectId_ = callIndirectId;
+ return c;
+}
+CalleeDesc CalleeDesc::asmJSTable(const ModuleEnvironment& moduleEnv,
+ uint32_t tableIndex) {
+ CalleeDesc c;
+ c.which_ = AsmJSTable;
+ c.u.table.instanceDataOffset_ =
+ moduleEnv.offsetOfTableInstanceData(tableIndex);
+ return c;
+}
+CalleeDesc CalleeDesc::builtin(SymbolicAddress callee) {
+ CalleeDesc c;
+ c.which_ = Builtin;
+ c.u.builtin_ = callee;
+ return c;
+}
+CalleeDesc CalleeDesc::builtinInstanceMethod(SymbolicAddress callee) {
+ CalleeDesc c;
+ c.which_ = BuiltinInstanceMethod;
+ c.u.builtin_ = callee;
+ return c;
+}
+CalleeDesc CalleeDesc::wasmFuncRef() {
+ CalleeDesc c;
+ c.which_ = FuncRef;
+ return c;
+}
diff --git a/js/src/wasm/WasmCodegenTypes.h b/js/src/wasm/WasmCodegenTypes.h
new file mode 100644
index 0000000000..162298d4a8
--- /dev/null
+++ b/js/src/wasm/WasmCodegenTypes.h
@@ -0,0 +1,765 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_codegen_types_h
+#define wasm_codegen_types_h
+
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/PodOperations.h"
+
+#include <stdint.h>
+
+#include "jit/IonTypes.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmInstanceData.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmUtility.h"
+
+namespace js {
+
+namespace jit {
+template <class VecT, class ABIArgGeneratorT>
+class ABIArgIterBase;
+} // namespace jit
+
+namespace wasm {
+
+using mozilla::EnumeratedArray;
+
+struct ModuleEnvironment;
+struct TableDesc;
+struct V128;
+
+// ArgTypeVector type.
+//
+// Functions usually receive one ABI argument per WebAssembly argument. However
+// if a function has multiple results and some of those results go to the stack,
+// then it additionally receives a synthetic ABI argument holding a pointer to
+// the stack result area.
+//
+// Given the presence of synthetic arguments, sometimes we need a name for
+// non-synthetic arguments. We call those "natural" arguments.
+
+enum class StackResults { HasStackResults, NoStackResults };
+
+class ArgTypeVector {
+ const ValTypeVector& args_;
+ bool hasStackResults_;
+
+ // To allow ABIArgIterBase<VecT, ABIArgGeneratorT>, we define a private
+ // length() method. To prevent accidental errors, other users need to be
+ // explicit and call lengthWithStackResults() or
+ // lengthWithoutStackResults().
+ size_t length() const { return args_.length() + size_t(hasStackResults_); }
+ template <class VecT, class ABIArgGeneratorT>
+ friend class jit::ABIArgIterBase;
+
+ public:
+ ArgTypeVector(const ValTypeVector& args, StackResults stackResults)
+ : args_(args),
+ hasStackResults_(stackResults == StackResults::HasStackResults) {}
+ explicit ArgTypeVector(const FuncType& funcType);
+
+ bool hasSyntheticStackResultPointerArg() const { return hasStackResults_; }
+ StackResults stackResults() const {
+ return hasSyntheticStackResultPointerArg() ? StackResults::HasStackResults
+ : StackResults::NoStackResults;
+ }
+ size_t lengthWithoutStackResults() const { return args_.length(); }
+ bool isSyntheticStackResultPointerArg(size_t idx) const {
+ // The pointer to stack results area, if present, is a synthetic argument
+ // tacked on at the end.
+ MOZ_ASSERT(idx < lengthWithStackResults());
+ return idx == args_.length();
+ }
+ bool isNaturalArg(size_t idx) const {
+ return !isSyntheticStackResultPointerArg(idx);
+ }
+ size_t naturalIndex(size_t idx) const {
+ MOZ_ASSERT(isNaturalArg(idx));
+ // Because the synthetic argument, if present, is tacked on the end, an
+ // argument index that isn't synthetic is natural.
+ return idx;
+ }
+
+ size_t lengthWithStackResults() const { return length(); }
+ jit::MIRType operator[](size_t i) const {
+ MOZ_ASSERT(i < lengthWithStackResults());
+ if (isSyntheticStackResultPointerArg(i)) {
+ return jit::MIRType::StackResults;
+ }
+ return args_[naturalIndex(i)].toMIRType();
+ }
+};
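+
+// Illustrative sketch only (not part of this patch): a backend walking the ABI
+// arguments of a signature treats the optional stack-results pointer as one
+// extra trailing argument.
+//
+//   ArgTypeVector args(funcType);
+//   for (size_t i = 0; i < args.lengthWithStackResults(); i++) {
+//     if (args.isSyntheticStackResultPointerArg(i)) {
+//       // Assign a location for the pointer to the stack-results area.
+//       continue;
+//     }
+//     // args[i] is the MIRType of natural argument args.naturalIndex(i).
+//     jit::MIRType type = args[i];
+//     // ...assign a register or stack slot according to the platform ABI...
+//   }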
+
+// A wrapper around the bytecode offset of a wasm instruction within a whole
+// module, used for trap offsets or call offsets. These offsets should refer to
+// the first byte of the instruction that triggered the trap / did the call and
+// should ultimately derive from OpIter::bytecodeOffset.
+
+class BytecodeOffset {
+ static const uint32_t INVALID = -1;
+ uint32_t offset_;
+
+ WASM_CHECK_CACHEABLE_POD(offset_);
+
+ public:
+ BytecodeOffset() : offset_(INVALID) {}
+ explicit BytecodeOffset(uint32_t offset) : offset_(offset) {}
+
+ bool isValid() const { return offset_ != INVALID; }
+ uint32_t offset() const {
+ MOZ_ASSERT(isValid());
+ return offset_;
+ }
+};
+
+WASM_DECLARE_CACHEABLE_POD(BytecodeOffset);
+
+// A TrapSite (in the TrapSiteVector for a given Trap code) represents a wasm
+// instruction at a given bytecode offset that can fault at the given pc offset.
+// When such a fault occurs, a signal/exception handler looks up the TrapSite to
+// confirm the fault is intended/safe and redirects pc to the trap stub.
+
+struct TrapSite {
+ uint32_t pcOffset;
+ BytecodeOffset bytecode;
+
+ WASM_CHECK_CACHEABLE_POD(pcOffset, bytecode);
+
+ TrapSite() : pcOffset(-1), bytecode() {}
+ TrapSite(uint32_t pcOffset, BytecodeOffset bytecode)
+ : pcOffset(pcOffset), bytecode(bytecode) {}
+
+ void offsetBy(uint32_t offset) { pcOffset += offset; }
+};
+
+WASM_DECLARE_CACHEABLE_POD(TrapSite);
+WASM_DECLARE_POD_VECTOR(TrapSite, TrapSiteVector)
+
+struct TrapSiteVectorArray
+ : EnumeratedArray<Trap, Trap::Limit, TrapSiteVector> {
+ bool empty() const;
+ void clear();
+ void swap(TrapSiteVectorArray& rhs);
+ void shrinkStorageToFit();
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+// On trap, the bytecode offset to be reported in callstacks is saved.
+
+struct TrapData {
+ // The resumePC indicates where, if the trap doesn't throw, the trap stub
+ // should jump to after restoring all register state.
+ void* resumePC;
+
+ // The unwoundPC is the PC after adjustment by wasm::StartUnwinding(), which
+  // basically unwinds partially-constructed wasm::Frames when pc is in the
+ // prologue/epilogue. Stack traces during a trap should use this PC since
+ // it corresponds to the JitActivation::wasmExitFP.
+ void* unwoundPC;
+
+ Trap trap;
+ uint32_t bytecodeOffset;
+};
+
+// The (,Callable,Func)Offsets classes are used to record the offsets of
+// different key points in a CodeRange during compilation.
+
+struct Offsets {
+ explicit Offsets(uint32_t begin = 0, uint32_t end = 0)
+ : begin(begin), end(end) {}
+
+ // These define a [begin, end) contiguous range of instructions compiled
+ // into a CodeRange.
+ uint32_t begin;
+ uint32_t end;
+
+ WASM_CHECK_CACHEABLE_POD(begin, end);
+};
+
+WASM_DECLARE_CACHEABLE_POD(Offsets);
+
+struct CallableOffsets : Offsets {
+ MOZ_IMPLICIT CallableOffsets(uint32_t ret = 0) : Offsets(), ret(ret) {}
+
+ // The offset of the return instruction precedes 'end' by a variable number
+ // of instructions due to out-of-line codegen.
+ uint32_t ret;
+
+ WASM_CHECK_CACHEABLE_POD_WITH_PARENT(Offsets, ret);
+};
+
+WASM_DECLARE_CACHEABLE_POD(CallableOffsets);
+
+struct FuncOffsets : CallableOffsets {
+ MOZ_IMPLICIT FuncOffsets()
+ : CallableOffsets(), uncheckedCallEntry(0), tierEntry(0) {}
+
+  // Function CodeRanges have a checked call entry that takes an extra
+  // signature argument, which is checked against the callee's signature before
+ // falling through to the normal prologue. The checked call entry is thus at
+ // the beginning of the CodeRange and the unchecked call entry is at some
+ // offset after the checked call entry.
+ //
+ // Note that there won't always be a checked call entry because not all
+ // functions require them. See GenerateFunctionPrologue.
+ uint32_t uncheckedCallEntry;
+
+ // The tierEntry is the point within a function to which the patching code
+ // within a Tier-1 function jumps. It could be the instruction following
+ // the jump in the Tier-1 function, or the point following the standard
+ // prologue within a Tier-2 function.
+ uint32_t tierEntry;
+
+ WASM_CHECK_CACHEABLE_POD_WITH_PARENT(CallableOffsets, uncheckedCallEntry,
+ tierEntry);
+};
+
+WASM_DECLARE_CACHEABLE_POD(FuncOffsets);
+
+using FuncOffsetsVector = Vector<FuncOffsets, 0, SystemAllocPolicy>;
+
+// A CodeRange describes a single contiguous range of code within a wasm
+// module's code segment. A CodeRange describes what the code does and, for
+// function bodies, the name and source coordinates of the function.
+
+class CodeRange {
+ public:
+ enum Kind {
+ Function, // function definition
+ InterpEntry, // calls into wasm from C++
+ JitEntry, // calls into wasm from jit code
+ ImportInterpExit, // slow-path calling from wasm into C++ interp
+ ImportJitExit, // fast-path calling from wasm into jit code
+ BuiltinThunk, // fast-path calling from wasm into a C++ native
+ TrapExit, // calls C++ to report and jumps to throw stub
+ DebugTrap, // calls C++ to handle debug event
+ FarJumpIsland, // inserted to connect otherwise out-of-range insns
+ Throw // special stack-unwinding stub jumped to by other stubs
+ };
+
+ private:
+ // All fields are treated as cacheable POD:
+ uint32_t begin_;
+ uint32_t ret_;
+ uint32_t end_;
+ union {
+ struct {
+ uint32_t funcIndex_;
+ union {
+ struct {
+ uint32_t lineOrBytecode_;
+ uint16_t beginToUncheckedCallEntry_;
+ uint16_t beginToTierEntry_;
+ } func;
+ };
+ };
+ Trap trap_;
+ } u;
+ Kind kind_ : 8;
+
+ WASM_CHECK_CACHEABLE_POD(begin_, ret_, end_, u.funcIndex_,
+ u.func.lineOrBytecode_,
+ u.func.beginToUncheckedCallEntry_,
+ u.func.beginToTierEntry_, u.trap_, kind_);
+
+ public:
+ CodeRange() = default;
+ CodeRange(Kind kind, Offsets offsets);
+ CodeRange(Kind kind, uint32_t funcIndex, Offsets offsets);
+ CodeRange(Kind kind, CallableOffsets offsets);
+ CodeRange(Kind kind, uint32_t funcIndex, CallableOffsets);
+ CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
+
+ void offsetBy(uint32_t offset) {
+ begin_ += offset;
+ end_ += offset;
+ if (hasReturn()) {
+ ret_ += offset;
+ }
+ }
+
+ // All CodeRanges have a begin and end.
+
+ uint32_t begin() const { return begin_; }
+ uint32_t end() const { return end_; }
+
+ // Other fields are only available for certain CodeRange::Kinds.
+
+ Kind kind() const { return kind_; }
+
+ bool isFunction() const { return kind() == Function; }
+ bool isImportExit() const {
+ return kind() == ImportJitExit || kind() == ImportInterpExit ||
+ kind() == BuiltinThunk;
+ }
+ bool isImportInterpExit() const { return kind() == ImportInterpExit; }
+ bool isImportJitExit() const { return kind() == ImportJitExit; }
+ bool isTrapExit() const { return kind() == TrapExit; }
+ bool isDebugTrap() const { return kind() == DebugTrap; }
+ bool isThunk() const { return kind() == FarJumpIsland; }
+
+ // Functions, import exits, trap exits and JitEntry stubs have standard
+ // callable prologues and epilogues. Asynchronous frame iteration needs to
+ // know the offset of the return instruction to calculate the frame pointer.
+
+ bool hasReturn() const {
+ return isFunction() || isImportExit() || isDebugTrap() || isJitEntry();
+ }
+ uint32_t ret() const {
+ MOZ_ASSERT(hasReturn());
+ return ret_;
+ }
+
+ // Functions, export stubs and import stubs all have an associated function
+ // index.
+
+ bool isJitEntry() const { return kind() == JitEntry; }
+ bool isInterpEntry() const { return kind() == InterpEntry; }
+ bool isEntry() const { return isInterpEntry() || isJitEntry(); }
+ bool hasFuncIndex() const {
+ return isFunction() || isImportExit() || isEntry();
+ }
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(hasFuncIndex());
+ return u.funcIndex_;
+ }
+
+ // TrapExit CodeRanges have a Trap field.
+
+ Trap trap() const {
+ MOZ_ASSERT(isTrapExit());
+ return u.trap_;
+ }
+
+ // Function CodeRanges have two entry points: one for normal calls (with a
+ // known signature) and one for table calls (which involves dynamic
+ // signature checking).
+
+ uint32_t funcCheckedCallEntry() const {
+ MOZ_ASSERT(isFunction());
+ // not all functions have the checked call prologue;
+ // see GenerateFunctionPrologue
+ MOZ_ASSERT(u.func.beginToUncheckedCallEntry_ != 0);
+ return begin_;
+ }
+ uint32_t funcUncheckedCallEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + u.func.beginToUncheckedCallEntry_;
+ }
+ uint32_t funcTierEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + u.func.beginToTierEntry_;
+ }
+ uint32_t funcLineOrBytecode() const {
+ MOZ_ASSERT(isFunction());
+ return u.func.lineOrBytecode_;
+ }
+
+ // A sorted array of CodeRanges can be looked up via BinarySearch and
+ // OffsetInCode.
+
+ struct OffsetInCode {
+ size_t offset;
+ explicit OffsetInCode(size_t offset) : offset(offset) {}
+ bool operator==(const CodeRange& rhs) const {
+ return offset >= rhs.begin() && offset < rhs.end();
+ }
+ bool operator<(const CodeRange& rhs) const { return offset < rhs.begin(); }
+ };
+};
+
+WASM_DECLARE_CACHEABLE_POD(CodeRange);
+WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
+
+extern const CodeRange* LookupInSorted(const CodeRangeVector& codeRanges,
+ CodeRange::OffsetInCode target);
+
+// While the frame-pointer chain allows the stack to be unwound without
+// metadata, Error.stack still needs to know the line/column of every call in
+// the chain. A CallSiteDesc describes a single callsite to which CallSite adds
+// the metadata necessary to walk up to the next frame. Lastly, CallSiteTarget
+// adds the function index of the callee.
+
+class CallSiteDesc {
+ static constexpr size_t LINE_OR_BYTECODE_BITS_SIZE = 28;
+ uint32_t lineOrBytecode_ : LINE_OR_BYTECODE_BITS_SIZE;
+ uint32_t kind_ : 4;
+
+ WASM_CHECK_CACHEABLE_POD(lineOrBytecode_, kind_);
+
+ public:
+ static constexpr uint32_t MAX_LINE_OR_BYTECODE_VALUE =
+ (1 << LINE_OR_BYTECODE_BITS_SIZE) - 1;
+
+ enum Kind {
+ Func, // pc-relative call to a specific function
+ Import, // wasm import call
+ Indirect, // dynamic callee called via register, context on stack
+ IndirectFast, // dynamically determined to be same-instance
+ FuncRef, // call using direct function reference
+ FuncRefFast, // call using direct function reference within same-instance
+ Symbolic, // call to a single symbolic callee
+    EnterFrame,    // call to an enter frame handler
+ LeaveFrame, // call to a leave frame handler
+ Breakpoint // call to instruction breakpoint
+ };
+ CallSiteDesc() : lineOrBytecode_(0), kind_(0) {}
+ explicit CallSiteDesc(Kind kind) : lineOrBytecode_(0), kind_(kind) {
+ MOZ_ASSERT(kind == Kind(kind_));
+ }
+ CallSiteDesc(uint32_t lineOrBytecode, Kind kind)
+ : lineOrBytecode_(lineOrBytecode), kind_(kind) {
+ MOZ_ASSERT(kind == Kind(kind_));
+ MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
+ }
+ CallSiteDesc(BytecodeOffset bytecodeOffset, Kind kind)
+ : lineOrBytecode_(bytecodeOffset.offset()), kind_(kind) {
+ MOZ_ASSERT(kind == Kind(kind_));
+ MOZ_ASSERT(bytecodeOffset.offset() == lineOrBytecode_);
+ }
+ uint32_t lineOrBytecode() const { return lineOrBytecode_; }
+ Kind kind() const { return Kind(kind_); }
+ bool isImportCall() const { return kind() == CallSiteDesc::Import; }
+ bool isIndirectCall() const { return kind() == CallSiteDesc::Indirect; }
+ bool isFuncRefCall() const { return kind() == CallSiteDesc::FuncRef; }
+ bool mightBeCrossInstance() const {
+ return isImportCall() || isIndirectCall() || isFuncRefCall();
+ }
+};
+
+static_assert(js::wasm::MaxFunctionBytes <=
+ CallSiteDesc::MAX_LINE_OR_BYTECODE_VALUE);
+
+WASM_DECLARE_CACHEABLE_POD(CallSiteDesc);
+
+class CallSite : public CallSiteDesc {
+ uint32_t returnAddressOffset_;
+
+ WASM_CHECK_CACHEABLE_POD_WITH_PARENT(CallSiteDesc, returnAddressOffset_);
+
+ public:
+ CallSite() : returnAddressOffset_(0) {}
+
+ CallSite(CallSiteDesc desc, uint32_t returnAddressOffset)
+ : CallSiteDesc(desc), returnAddressOffset_(returnAddressOffset) {}
+
+ void offsetBy(uint32_t delta) { returnAddressOffset_ += delta; }
+ uint32_t returnAddressOffset() const { return returnAddressOffset_; }
+};
+
+WASM_DECLARE_CACHEABLE_POD(CallSite);
+WASM_DECLARE_POD_VECTOR(CallSite, CallSiteVector)
+
+// A CallSiteTarget describes the callee of a CallSite, either a function or a
+// trap exit. Although checked in debug builds, a CallSiteTarget doesn't
+// officially know whether it targets a function or trap, relying on the Kind of
+// the CallSite to discriminate.
+
+class CallSiteTarget {
+ uint32_t packed_;
+
+ WASM_CHECK_CACHEABLE_POD(packed_);
+#ifdef DEBUG
+ enum Kind { None, FuncIndex, TrapExit } kind_;
+ WASM_CHECK_CACHEABLE_POD(kind_);
+#endif
+
+ public:
+ explicit CallSiteTarget()
+ : packed_(UINT32_MAX)
+#ifdef DEBUG
+ ,
+ kind_(None)
+#endif
+ {
+ }
+
+ explicit CallSiteTarget(uint32_t funcIndex)
+ : packed_(funcIndex)
+#ifdef DEBUG
+ ,
+ kind_(FuncIndex)
+#endif
+ {
+ }
+
+ explicit CallSiteTarget(Trap trap)
+ : packed_(uint32_t(trap))
+#ifdef DEBUG
+ ,
+ kind_(TrapExit)
+#endif
+ {
+ }
+
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(kind_ == FuncIndex);
+ return packed_;
+ }
+
+ Trap trap() const {
+ MOZ_ASSERT(kind_ == TrapExit);
+ MOZ_ASSERT(packed_ < uint32_t(Trap::Limit));
+ return Trap(packed_);
+ }
+};
+
+WASM_DECLARE_CACHEABLE_POD(CallSiteTarget);
+
+using CallSiteTargetVector = Vector<CallSiteTarget, 0, SystemAllocPolicy>;
+
+// TryNotes are stored in a vector that acts as an exception table for
+// wasm try-catch blocks. These represent the information needed to take
+// exception handling actions after a throw is executed.
+struct TryNote {
+ private:
+ // Sentinel value to detect a try note that has not been given a try body.
+ static const uint32_t BEGIN_NONE = UINT32_MAX;
+
+ // Begin code offset of the try body.
+ uint32_t begin_;
+ // Exclusive end code offset of the try body.
+ uint32_t end_;
+ // The code offset of the landing pad.
+ uint32_t entryPoint_;
+ // Track offset from frame of stack pointer.
+ uint32_t framePushed_;
+
+ WASM_CHECK_CACHEABLE_POD(begin_, end_, entryPoint_, framePushed_);
+
+ public:
+ explicit TryNote()
+ : begin_(BEGIN_NONE), end_(0), entryPoint_(0), framePushed_(0) {}
+
+ // Returns whether a try note has been assigned a range for the try body.
+ bool hasTryBody() const { return begin_ != BEGIN_NONE; }
+
+ // The code offset of the beginning of the try body.
+ uint32_t tryBodyBegin() const { return begin_; }
+
+ // The code offset of the exclusive end of the try body.
+ uint32_t tryBodyEnd() const { return end_; }
+
+ // Returns whether an offset is within this try note's body.
+ bool offsetWithinTryBody(uint32_t offset) const {
+ return offset > begin_ && offset <= end_;
+ }
+
+ // The code offset of the entry to the landing pad.
+ uint32_t landingPadEntryPoint() const { return entryPoint_; }
+
+ // The stack frame pushed amount at the entry to the landing pad.
+ uint32_t landingPadFramePushed() const { return framePushed_; }
+
+ // Set the beginning of the try body.
+ void setTryBodyBegin(uint32_t begin) {
+ // There must not be a begin to the try body yet
+ MOZ_ASSERT(begin_ == BEGIN_NONE);
+ begin_ = begin;
+ }
+
+ // Set the end of the try body.
+ void setTryBodyEnd(uint32_t end) {
+ // There must be a begin to the try body
+ MOZ_ASSERT(begin_ != BEGIN_NONE);
+ end_ = end;
+ // We do not allow empty try bodies
+ MOZ_ASSERT(end_ > begin_);
+ }
+
+ // Set the entry point and frame pushed of the landing pad.
+ void setLandingPad(uint32_t entryPoint, uint32_t framePushed) {
+ entryPoint_ = entryPoint;
+ framePushed_ = framePushed;
+ }
+
+ // Adjust all code offsets in this try note by a delta.
+ void offsetBy(uint32_t offset) {
+ begin_ += offset;
+ end_ += offset;
+ entryPoint_ += offset;
+ }
+
+ bool operator<(const TryNote& other) const {
+ // Special case comparison with self. This avoids triggering the assertion
+ // about non-intersection below. This case can arise in std::sort.
+ if (this == &other) {
+ return false;
+ }
+ // Try notes must be properly nested without touching at begin and end
+ MOZ_ASSERT(end_ <= other.begin_ || begin_ >= other.end_ ||
+ (begin_ > other.begin_ && end_ < other.end_) ||
+ (other.begin_ > begin_ && other.end_ < end_));
+ // A total order is therefore given solely by comparing end points. This
+ // order will be such that the first try note to intersect a point is the
+ // innermost try note for that point.
+ return end_ < other.end_;
+ }
+};
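+
+// Worked example (illustrative only): for properly nested try bodies
+// outer = [10, 80) and inner = [20, 40), operator< orders inner before outer
+// because 40 < 80. Scanning try notes in that order for code offset 30 thus
+// finds the innermost enclosing try note first, which is the landing pad that
+// must be entered first when an exception unwinds to that point.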
+
+WASM_DECLARE_CACHEABLE_POD(TryNote);
+WASM_DECLARE_POD_VECTOR(TryNote, TryNoteVector)
+
+// CallIndirectId describes how to compile a call_indirect and matching
+// signature check in the function prologue for a given function type.
+
+enum class CallIndirectIdKind { AsmJS, Immediate, Global, None };
+
+class CallIndirectId {
+ CallIndirectIdKind kind_;
+ size_t bits_;
+
+ CallIndirectId(CallIndirectIdKind kind, size_t bits)
+ : kind_(kind), bits_(bits) {}
+
+ public:
+ CallIndirectId() : kind_(CallIndirectIdKind::None), bits_(0) {}
+
+ // Get a CallIndirectId for an asm.js function which will generate a no-op
+ // checked call prologue.
+ static CallIndirectId forAsmJSFunc();
+
+ // Get the CallIndirectId for a function in a specific module.
+ static CallIndirectId forFunc(const ModuleEnvironment& moduleEnv,
+ uint32_t funcIndex);
+
+ // Get the CallIndirectId for a function type in a specific module.
+ static CallIndirectId forFuncType(const ModuleEnvironment& moduleEnv,
+ uint32_t funcTypeIndex);
+
+ CallIndirectIdKind kind() const { return kind_; }
+ bool isGlobal() const { return kind_ == CallIndirectIdKind::Global; }
+
+ uint32_t immediate() const {
+ MOZ_ASSERT(kind_ == CallIndirectIdKind::Immediate);
+ return bits_;
+ }
+ uint32_t instanceDataOffset() const {
+ MOZ_ASSERT(kind_ == CallIndirectIdKind::Global);
+ return bits_;
+ }
+};
+
+// CalleeDesc describes how to compile one of the various kinds of asm.js/wasm
+// calls. This is hoisted into WasmCodegenTypes.h for sharing between Ion and
+// Baseline.
+
+class CalleeDesc {
+ public:
+ enum Which {
+ // Calls a function defined in the same module by its index.
+ Func,
+
+ // Calls the import identified by the offset of its FuncImportInstanceData
+    // in thread-local data.
+ Import,
+
+ // Calls a WebAssembly table (heterogeneous, index must be bounds
+ // checked, callee instance depends on TableDesc).
+ WasmTable,
+
+ // Calls an asm.js table (homogeneous, masked index, same-instance).
+ AsmJSTable,
+
+ // Call a C++ function identified by SymbolicAddress.
+ Builtin,
+
+ // Like Builtin, but automatically passes Instance* as first argument.
+ BuiltinInstanceMethod,
+
+ // Calls a function reference.
+ FuncRef,
+ };
+
+ private:
+ // which_ shall be initialized in the static constructors
+ MOZ_INIT_OUTSIDE_CTOR Which which_;
+ union U {
+ U() : funcIndex_(0) {}
+ uint32_t funcIndex_;
+ struct {
+ uint32_t instanceDataOffset_;
+ } import;
+ struct {
+ uint32_t instanceDataOffset_;
+ uint32_t minLength_;
+ Maybe<uint32_t> maxLength_;
+ CallIndirectId callIndirectId_;
+ } table;
+ SymbolicAddress builtin_;
+ } u;
+
+ public:
+ CalleeDesc() = default;
+ static CalleeDesc function(uint32_t funcIndex);
+ static CalleeDesc import(uint32_t instanceDataOffset);
+ static CalleeDesc wasmTable(const ModuleEnvironment& moduleEnv,
+ const TableDesc& desc, uint32_t tableIndex,
+ CallIndirectId callIndirectId);
+ static CalleeDesc asmJSTable(const ModuleEnvironment& moduleEnv,
+ uint32_t tableIndex);
+ static CalleeDesc builtin(SymbolicAddress callee);
+ static CalleeDesc builtinInstanceMethod(SymbolicAddress callee);
+ static CalleeDesc wasmFuncRef();
+ Which which() const { return which_; }
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(which_ == Func);
+ return u.funcIndex_;
+ }
+ uint32_t importInstanceDataOffset() const {
+ MOZ_ASSERT(which_ == Import);
+ return u.import.instanceDataOffset_;
+ }
+ bool isTable() const { return which_ == WasmTable || which_ == AsmJSTable; }
+ uint32_t tableLengthInstanceDataOffset() const {
+ MOZ_ASSERT(isTable());
+ return u.table.instanceDataOffset_ + offsetof(TableInstanceData, length);
+ }
+ uint32_t tableFunctionBaseInstanceDataOffset() const {
+ MOZ_ASSERT(isTable());
+ return u.table.instanceDataOffset_ + offsetof(TableInstanceData, elements);
+ }
+ CallIndirectId wasmTableSigId() const {
+ MOZ_ASSERT(which_ == WasmTable);
+ return u.table.callIndirectId_;
+ }
+ uint32_t wasmTableMinLength() const {
+ MOZ_ASSERT(which_ == WasmTable);
+ return u.table.minLength_;
+ }
+ Maybe<uint32_t> wasmTableMaxLength() const {
+ MOZ_ASSERT(which_ == WasmTable);
+ return u.table.maxLength_;
+ }
+ SymbolicAddress builtin() const {
+ MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
+ return u.builtin_;
+ }
+ bool isFuncRef() const { return which_ == FuncRef; }
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_codegen_types_h
diff --git a/js/src/wasm/WasmCompile.cpp b/js/src/wasm/WasmCompile.cpp
new file mode 100644
index 0000000000..3471de1ad2
--- /dev/null
+++ b/js/src/wasm/WasmCompile.cpp
@@ -0,0 +1,919 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCompile.h"
+
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+
+#ifndef __wasi__
+# include "jit/ProcessExecutableMemory.h"
+#endif
+
+#include "jit/FlushICache.h"
+#include "util/Text.h"
+#include "vm/HelperThreads.h"
+#include "vm/Realm.h"
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmValidate.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+uint32_t wasm::ObservedCPUFeatures() {
+ enum Arch {
+ X86 = 0x1,
+ X64 = 0x2,
+ ARM = 0x3,
+ MIPS = 0x4,
+ MIPS64 = 0x5,
+ ARM64 = 0x6,
+ LOONG64 = 0x7,
+ RISCV64 = 0x8,
+ ARCH_BITS = 3
+ };
+
+#if defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetFingerprint()) <=
+ (UINT32_MAX >> ARCH_BITS));
+ return X86 | (uint32_t(jit::CPUInfo::GetFingerprint()) << ARCH_BITS);
+#elif defined(JS_CODEGEN_X64)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetFingerprint()) <=
+ (UINT32_MAX >> ARCH_BITS));
+ return X64 | (uint32_t(jit::CPUInfo::GetFingerprint()) << ARCH_BITS);
+#elif defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return ARM | (jit::GetARMFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(jit::GetARM64Flags() <= (UINT32_MAX >> ARCH_BITS));
+ return ARM64 | (jit::GetARM64Flags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_LOONG64)
+ MOZ_ASSERT(jit::GetLOONG64Flags() <= (UINT32_MAX >> ARCH_BITS));
+ return LOONG64 | (jit::GetLOONG64Flags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_RISCV64)
+ MOZ_ASSERT(jit::GetRISCV64Flags() <= (UINT32_MAX >> ARCH_BITS));
+ return RISCV64 | (jit::GetRISCV64Flags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+ return 0;
+#else
+# error "unknown architecture"
+#endif
+}
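+
+// Worked example (illustrative only): on x64 with a CPU fingerprint of 0x1f,
+// this returns X64 | (0x1f << ARCH_BITS) = 0x2 | 0xf8 = 0xfa. The architecture
+// can be recovered as (value & ((1 << ARCH_BITS) - 1)) and the feature bits as
+// (value >> ARCH_BITS).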
+
+FeatureArgs FeatureArgs::build(JSContext* cx, const FeatureOptions& options) {
+ FeatureArgs features;
+
+#define WASM_FEATURE(NAME, LOWER_NAME, ...) \
+ features.LOWER_NAME = wasm::NAME##Available(cx);
+ JS_FOR_WASM_FEATURES(WASM_FEATURE, WASM_FEATURE, WASM_FEATURE);
+#undef WASM_FEATURE
+
+ features.sharedMemory =
+ wasm::ThreadsAvailable(cx) ? Shareable::True : Shareable::False;
+
+ features.simd = jit::JitSupportsWasmSimd();
+ features.intrinsics = options.intrinsics;
+
+ return features;
+}
+
+SharedCompileArgs CompileArgs::build(JSContext* cx,
+ ScriptedCaller&& scriptedCaller,
+ const FeatureOptions& options,
+ CompileArgsError* error) {
+ bool baseline = BaselineAvailable(cx);
+ bool ion = IonAvailable(cx);
+
+ // Debug information such as source view or debug traps will require
+ // additional memory and permanently stay in baseline code, so we try to
+ // only enable it when a developer actually cares: when the debugger tab
+ // is open.
+ bool debug = cx->realm() && cx->realm()->debuggerObservesWasm();
+
+ bool forceTiering =
+ cx->options().testWasmAwaitTier2() || JitOptions.wasmDelayTier2;
+
+ // The <Compiler>Available() predicates should ensure no failure here, but
+ // when we're fuzzing we allow inconsistent switches and the check may thus
+ // fail. Let it go to a run-time error instead of crashing.
+ if (debug && ion) {
+ *error = CompileArgsError::NoCompiler;
+ return nullptr;
+ }
+
+ if (forceTiering && !(baseline && ion)) {
+ // This can happen only in testing, and in this case we don't have a
+ // proper way to signal the error, so just silently override the default,
+ // instead of adding a skip-if directive to every test using debug/gc.
+ forceTiering = false;
+ }
+
+ if (!(baseline || ion)) {
+ *error = CompileArgsError::NoCompiler;
+ return nullptr;
+ }
+
+ CompileArgs* target = cx->new_<CompileArgs>(std::move(scriptedCaller));
+ if (!target) {
+ *error = CompileArgsError::OutOfMemory;
+ return nullptr;
+ }
+
+ target->baselineEnabled = baseline;
+ target->ionEnabled = ion;
+ target->debugEnabled = debug;
+ target->forceTiering = forceTiering;
+ target->features = FeatureArgs::build(cx, options);
+
+ return target;
+}
+
+SharedCompileArgs CompileArgs::buildForAsmJS(ScriptedCaller&& scriptedCaller) {
+ CompileArgs* target = js_new<CompileArgs>(std::move(scriptedCaller));
+ if (!target) {
+ return nullptr;
+ }
+
+ // AsmJS is deprecated and doesn't have mechanisms for experimental features,
+ // so we don't need to initialize the FeatureArgs. It also only targets the
+ // Ion backend and does not need WASM debug support since it is de-optimized
+ // to JS in that case.
+ target->ionEnabled = true;
+ target->debugEnabled = false;
+
+ return target;
+}
+
+SharedCompileArgs CompileArgs::buildAndReport(JSContext* cx,
+ ScriptedCaller&& scriptedCaller,
+ const FeatureOptions& options,
+ bool reportOOM) {
+ CompileArgsError error;
+ SharedCompileArgs args =
+ CompileArgs::build(cx, std::move(scriptedCaller), options, &error);
+ if (args) {
+ Log(cx, "available wasm compilers: tier1=%s tier2=%s",
+ args->baselineEnabled ? "baseline" : "none",
+ args->ionEnabled ? "ion" : "none");
+ return args;
+ }
+
+ switch (error) {
+ case CompileArgsError::NoCompiler: {
+ JS_ReportErrorASCII(cx, "no WebAssembly compiler available");
+ break;
+ }
+ case CompileArgsError::OutOfMemory: {
+ // Most callers are required to return 'false' without reporting an OOM,
+ // so we make reporting it optional here.
+ if (reportOOM) {
+ ReportOutOfMemory(cx);
+ }
+ break;
+ }
+ }
+ return nullptr;
+}
+
+/*
+ * [SMDOC] Tiered wasm compilation.
+ *
+ * "Tiered compilation" refers to the mechanism where we first compile the code
+ * with a fast non-optimizing compiler so that we can start running the code
+ * quickly, while in the background recompiling the code with the slower
+ * optimizing compiler. Code created by baseline is called "tier-1"; code
+ * created by the optimizing compiler is called "tier-2". When the tier-2 code
+ * is ready, we "tier up" the code by creating paths from tier-1 code into their
+ * tier-2 counterparts; this patching is performed as the program is running.
+ *
+ * ## Selecting the compilation mode
+ *
+ * When wasm bytecode arrives, we choose the compilation strategy based on
+ * switches and on aspects of the code and the hardware. If switches allow
+ * tiered compilation to happen (the normal case), the following logic applies.
+ *
+ * If the code is sufficiently large that tiered compilation would be beneficial
+ * but not so large that it might blow our compiled code budget and make
+ * compilation fail, we choose tiered compilation. Otherwise we go straight to
+ * optimized code.
+ *
+ * The expected benefit of tiering is computed by TieringBeneficial(), below,
+ * based on various estimated parameters of the hardware: ratios of object code
+ * to byte code, speed of the system, number of cores.
+ *
+ * ## Mechanics of tiering up; patching
+ *
+ * Every time control enters a tier-1 function, the function prologue loads its
+ * tiering pointer from the tiering jump table (see JumpTable in WasmCode.h) and
+ * jumps to it.
+ *
+ * Initially, an entry in the tiering table points to the instruction inside the
+ * tier-1 function that follows the jump instruction (hence the jump is an
+ * expensive nop). When the tier-2 compiler is finished, the table is patched
+ * racily to point into the tier-2 function at the correct prologue location
+ * (see loop near the end of Module::finishTier2()). As tier-2 compilation is
+ * performed at most once per Module, there is at most one such racy overwrite
+ * per table element during the lifetime of the Module.
+ *
+ * The effect of the patching is to cause the tier-1 function to jump to its
+ * tier-2 counterpart whenever the tier-1 function is called subsequently. That
+ * is, tier-1 code performs standard frame setup on behalf of whatever code it
+ * jumps to, and the target code (tier-1 or tier-2) allocates its own frame in
+ * whatever way it wants.
+ *
+ * The racy writing means that it is often nondeterministic whether tier-1 or
+ * tier-2 code is reached by any call during the tiering-up process; if F calls
+ * A and B in that order, it may reach tier-2 code for A and tier-1 code for B.
+ * If F is running concurrently on threads T1 and T2, T1 and T2 may see code
+ * from different tiers for either function.
+ *
+ * Note, tiering up also requires upgrading the jit-entry stubs so that they
+ * reference tier-2 code. The mechanics of this upgrading are described at
+ * WasmInstanceObject::getExportedFunction().
+ *
+ * ## Current limitations of tiering
+ *
+ * Tiering is not always seamless. Partly, it is possible for a program to get
+ * stuck in tier-1 code. Partly, a function that has tiered up continues to
+ * force execution to go via tier-1 code to reach tier-2 code, paying for an
+ * additional jump and a slightly less optimized prologue than tier-2 code could
+ * have had on its own.
+ *
+ * Known tiering limitations:
+ *
+ * - We can tier up only at function boundaries. If a tier-1 function has a
+ * long-running loop it will not tier up until it returns to its caller. If
+ * this loop never exits (a runloop in a worker, for example) then the
+ * function will never tier up.
+ *
+ * To do better, we need OSR.
+ *
+ * - Wasm Table entries are never patched during tier-up. A Table of funcref
+ * holds not a JSFunction pointer, but a (code*,instance*) pair of pointers.
+ * When a table.set operation is performed, the JSFunction value is decomposed
+ * and its code and instance pointers are stored in the table; subsequently,
+ * when a table.get operation is performed, the JSFunction value is
+ * reconstituted from its code pointer using fairly elaborate machinery. (The
+ * mechanics are the same also for the reflected JS operations on a
+ * WebAssembly.Table. For everything, see WasmTable.{cpp,h}.) The code pointer
+ *   in the Table will always be the code pointer belonging to the best tier
+ *   that was active at the time when that function was stored in that Table
+ *   slot; in many cases, it will be tier-1 code. As a consequence, a call
+ *   through a table will first enter tier-1 code and then jump to tier-2 code.
+ *
+ *   To do better, we must update all the tables in the system when an
+ *   instance tiers up. This is expected to be very hard.
+ *
+ * - Imported Wasm functions are never patched during tier-up. Imports are held
+ * in FuncImportInstanceData values in the instance, and for a wasm
+ * callee, what's stored is the raw code pointer into the best tier of the
+ * callee that was active at the time the import was resolved. That could be
+ * baseline code, and if it is, the situation is as for Table entries: a call
+ * to an import will always go via that import's tier-1 code, which will tier
+ * up with an indirect jump.
+ *
+ * To do better, we must update all the import tables in the system that
+ * import functions from instances whose modules have tiered up. This is
+ * expected to be hard.
+ */
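+
+// Illustrative pseudo-code (not part of this patch) for the per-function
+// indirection described above, where tiering[] is the tiering jump table owned
+// by the Code object:
+//
+//   tier-1 prologue of function f:
+//       jump *tiering[f]    // initially lands right after this jump
+//
+//   Module::finishTier2(), racily but at most once per element:
+//       tiering[f] = tier2CodeBase + tier2CodeRange(f).funcTierEntry()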
+
+// Classify the current system as one of a set of recognizable classes. This
+// really needs to get our tier-1 systems right.
+//
+// TODO: We don't yet have a good measure of how fast a system is. We
+// distinguish between mobile and desktop because these are very different kinds
+// of systems, but we could further distinguish between low / medium / high end
+// within those major classes. If we do so, then constants below would be
+// provided for each (class, architecture, system-tier) combination, not just
+// (class, architecture) as now.
+//
+// CPU clock speed is not by itself a good predictor of system performance, as
+// there are high-performance systems with slow clocks (recent Intel) and
+// low-performance systems with fast clocks (older AMD). We can also use
+// physical memory, core configuration, OS details, CPU class and family, and
+// CPU manufacturer to disambiguate.
+
+enum class SystemClass {
+ DesktopX86,
+ DesktopX64,
+ DesktopUnknown32,
+ DesktopUnknown64,
+ MobileX86,
+ MobileArm32,
+ MobileArm64,
+ MobileUnknown32,
+ MobileUnknown64
+};
+
+static SystemClass ClassifySystem() {
+ bool isDesktop;
+
+#if defined(ANDROID) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ isDesktop = false;
+#else
+ isDesktop = true;
+#endif
+
+ if (isDesktop) {
+#if defined(JS_CODEGEN_X64)
+ return SystemClass::DesktopX64;
+#elif defined(JS_CODEGEN_X86)
+ return SystemClass::DesktopX86;
+#elif defined(JS_64BIT)
+ return SystemClass::DesktopUnknown64;
+#else
+ return SystemClass::DesktopUnknown32;
+#endif
+ } else {
+#if defined(JS_CODEGEN_X86)
+ return SystemClass::MobileX86;
+#elif defined(JS_CODEGEN_ARM)
+ return SystemClass::MobileArm32;
+#elif defined(JS_CODEGEN_ARM64)
+ return SystemClass::MobileArm64;
+#elif defined(JS_64BIT)
+ return SystemClass::MobileUnknown64;
+#else
+ return SystemClass::MobileUnknown32;
+#endif
+ }
+}
+
+// Code sizes in machine code bytes per bytecode byte, again empirical except
+// where marked.
+//
+// The Ion estimate for ARM64 is the measured Baseline value scaled by a
+// plausible factor for optimized code.
+
+static const double x64Tox86Inflation = 1.25;
+
+static const double x64IonBytesPerBytecode = 2.45;
+static const double x86IonBytesPerBytecode =
+ x64IonBytesPerBytecode * x64Tox86Inflation;
+static const double arm32IonBytesPerBytecode = 3.3;
+static const double arm64IonBytesPerBytecode = 3.0 / 1.4; // Estimate
+
+static const double x64BaselineBytesPerBytecode = x64IonBytesPerBytecode * 1.43;
+static const double x86BaselineBytesPerBytecode =
+ x64BaselineBytesPerBytecode * x64Tox86Inflation;
+static const double arm32BaselineBytesPerBytecode =
+ arm32IonBytesPerBytecode * 1.39;
+static const double arm64BaselineBytesPerBytecode = 3.0;
+
+static double OptimizedBytesPerBytecode(SystemClass cls) {
+ switch (cls) {
+ case SystemClass::DesktopX86:
+ case SystemClass::MobileX86:
+ case SystemClass::DesktopUnknown32:
+ return x86IonBytesPerBytecode;
+ case SystemClass::DesktopX64:
+ case SystemClass::DesktopUnknown64:
+ return x64IonBytesPerBytecode;
+ case SystemClass::MobileArm32:
+ case SystemClass::MobileUnknown32:
+ return arm32IonBytesPerBytecode;
+ case SystemClass::MobileArm64:
+ case SystemClass::MobileUnknown64:
+ return arm64IonBytesPerBytecode;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+static double BaselineBytesPerBytecode(SystemClass cls) {
+ switch (cls) {
+ case SystemClass::DesktopX86:
+ case SystemClass::MobileX86:
+ case SystemClass::DesktopUnknown32:
+ return x86BaselineBytesPerBytecode;
+ case SystemClass::DesktopX64:
+ case SystemClass::DesktopUnknown64:
+ return x64BaselineBytesPerBytecode;
+ case SystemClass::MobileArm32:
+ case SystemClass::MobileUnknown32:
+ return arm32BaselineBytesPerBytecode;
+ case SystemClass::MobileArm64:
+ case SystemClass::MobileUnknown64:
+ return arm64BaselineBytesPerBytecode;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+double wasm::EstimateCompiledCodeSize(Tier tier, size_t bytecodeSize) {
+ SystemClass cls = ClassifySystem();
+ switch (tier) {
+ case Tier::Baseline:
+ return double(bytecodeSize) * BaselineBytesPerBytecode(cls);
+ case Tier::Optimized:
+ return double(bytecodeSize) * OptimizedBytesPerBytecode(cls);
+ }
+ MOZ_CRASH("bad tier");
+}
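+
+// For a concrete sense of scale (a sketch only, not exercised by the code):
+// with the ratios above, compiling a 1,000,000-byte bytecode module on a
+// DesktopX64 system is estimated at
+//
+//   EstimateCompiledCodeSize(Tier::Optimized, 1000000) ~ 2,450,000 bytes
+//   EstimateCompiledCodeSize(Tier::Baseline, 1000000)  ~ 3,500,000 bytes
+//
+// since 2.45 and 2.45 * 1.43 ~ 3.50 machine code bytes are expected per
+// bytecode byte.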
+
+// If parallel Ion compilation is going to take longer than this, we should
+// tier.
+
+static const double tierCutoffMs = 10;
+
+// Compilation rate values are empirical except when noted; the reference
+// systems are:
+//
+// Late-2013 MacBook Pro (2.6GHz 4 x hyperthreaded Haswell, Mac OS X)
+// Late-2015 Nexus 5X (1.4GHz 4 x Cortex-A53 + 1.8GHz 2 x Cortex-A57, Android)
+// Ca-2016 SoftIron Overdrive 1000 (1.7GHz 4 x Cortex-A57, Fedora)
+//
+// The rates are always per core.
+//
+// The estimate for ARM64 is the Baseline compilation rate on the SoftIron
+// (because we have no Ion yet), divided by 5 to estimate Ion compile rate and
+// then divided by 2 to make it more reasonable for consumer ARM64 systems.
+
+static const double x64IonBytecodesPerMs = 2100;
+static const double x86IonBytecodesPerMs = 1500;
+static const double arm32IonBytecodesPerMs = 450;
+static const double arm64IonBytecodesPerMs = 750; // Estimate
+
+// Tiering cutoff values: if code section sizes are below these values (when
+// divided by the effective number of cores) we do not tier, because we guess
+// that parallel Ion compilation will be fast enough.
+
+static const double x64DesktopTierCutoff = x64IonBytecodesPerMs * tierCutoffMs;
+static const double x86DesktopTierCutoff = x86IonBytecodesPerMs * tierCutoffMs;
+static const double x86MobileTierCutoff = x86DesktopTierCutoff / 2; // Guess
+static const double arm32MobileTierCutoff =
+ arm32IonBytecodesPerMs * tierCutoffMs;
+static const double arm64MobileTierCutoff =
+ arm64IonBytecodesPerMs * tierCutoffMs;
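+
+// For example (arithmetic only, nothing new is computed here): with
+// tierCutoffMs = 10, the x64 desktop cutoff works out to 2100 * 10 = 21,000
+// bytecode bytes per effective core, and the ARM32 mobile cutoff to
+// 450 * 10 = 4,500 bytecode bytes per effective core.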
+
+static double CodesizeCutoff(SystemClass cls) {
+ switch (cls) {
+ case SystemClass::DesktopX86:
+ case SystemClass::DesktopUnknown32:
+ return x86DesktopTierCutoff;
+ case SystemClass::DesktopX64:
+ case SystemClass::DesktopUnknown64:
+ return x64DesktopTierCutoff;
+ case SystemClass::MobileX86:
+ return x86MobileTierCutoff;
+ case SystemClass::MobileArm32:
+ case SystemClass::MobileUnknown32:
+ return arm32MobileTierCutoff;
+ case SystemClass::MobileArm64:
+ case SystemClass::MobileUnknown64:
+ return arm64MobileTierCutoff;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// As the number of cores grows the effectiveness of each core dwindles (on the
+// systems we care about for SpiderMonkey).
+//
+// The data are empirical, computed from the observed compilation time of the
+// Tanks demo code on a variable number of cores.
+//
+// The heuristic may fail on NUMA systems where the core count is high but the
+// performance increase is nil or negative once the program moves beyond one
+// socket. However, few browser users have such systems.
+
+static double EffectiveCores(uint32_t cores) {
+ if (cores <= 3) {
+ return pow(cores, 0.9);
+ }
+ return pow(cores, 0.75);
+}
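+
+// Rough values of the curve above, for orientation only: EffectiveCores(2) is
+// about 1.87, EffectiveCores(4) about 2.83, and EffectiveCores(8) about 4.76,
+// so an 8-core machine is credited with a bit under five cores' worth of
+// parallel compilation throughput.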
+
+#ifndef JS_64BIT
+// Don't tier if tiering will fill code memory to more than this fraction.
+
+static const double spaceCutoffPct = 0.9;
+#endif
+
+// Figure out whether we should use tiered compilation or not.
+static bool TieringBeneficial(uint32_t codeSize) {
+ uint32_t cpuCount = GetHelperThreadCPUCount();
+ MOZ_ASSERT(cpuCount > 0);
+
+ // It's mostly sensible not to background compile when there's only one
+ // hardware thread as we want foreground computation to have access to that.
+ // However, if wasm background compilation helper threads can be given lower
+ // priority then background compilation on single-core systems still makes
+ // some kind of sense. That said, this is a non-issue: as of September 2017
+ // 1-core was down to 3.5% of our population and falling.
+
+ if (cpuCount == 1) {
+ return false;
+ }
+
+ // Compute the max number of threads available to do actual background
+ // compilation work.
+
+ uint32_t workers = GetMaxWasmCompilationThreads();
+
+  // The number of cores we will use is bounded by both the CPU count and the
+  // worker count; the worker count already takes the CPU count into account,
+  // so it is the effective bound.
+
+ uint32_t cores = workers;
+
+ SystemClass cls = ClassifySystem();
+
+ // Ion compilation on available cores must take long enough to be worth the
+ // bother.
+
+ double cutoffSize = CodesizeCutoff(cls);
+ double effectiveCores = EffectiveCores(cores);
+
+ if ((codeSize / effectiveCores) < cutoffSize) {
+ return false;
+ }
+
+ // Do not implement a size cutoff for 64-bit systems since the code size
+ // budget for 64 bit is so large that it will hardly ever be an issue.
+ // (Also the cutoff percentage might be different on 64-bit.)
+
+#ifndef JS_64BIT
+ // If the amount of executable code for baseline compilation jeopardizes the
+ // availability of executable memory for ion code then do not tier, for now.
+ //
+ // TODO: For now we consider this module in isolation. We should really
+ // worry about what else is going on in this process and might be filling up
+  // the code memory. It seems we need some kind of code memory reservation
+  // system or JIT compilation for large modules.
+
+ double ionRatio = OptimizedBytesPerBytecode(cls);
+ double baselineRatio = BaselineBytesPerBytecode(cls);
+ double needMemory = codeSize * (ionRatio + baselineRatio);
+ double availMemory = LikelyAvailableExecutableMemory();
+ double cutoff = spaceCutoffPct * MaxCodeBytesPerProcess;
+
+  // If the sum of baseline and ion code makes us exceed some set percentage
+  // of the executable memory then disable tiering.
+
+ if ((MaxCodeBytesPerProcess - availMemory) + needMemory > cutoff) {
+ return false;
+ }
+#endif
+
+ return true;
+}
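+
+// Back-of-the-envelope example (assuming the helper thread limit leaves all
+// eight workers available, which need not hold): on an 8-core x64 desktop
+// the effective core count is about 4.76, so a module whose code section is
+// under roughly 21,000 * 4.76 ~ 100,000 bytes is compiled once with Ion,
+// while a larger module is tiered.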
+
+// Ensure that we have the non-compiler requirements to tier safely.
+static bool PlatformCanTier() {
+ return CanUseExtraThreads() && jit::CanFlushExecutionContextForAllThreads();
+}
+
+CompilerEnvironment::CompilerEnvironment(const CompileArgs& args)
+ : state_(InitialWithArgs), args_(&args) {}
+
+CompilerEnvironment::CompilerEnvironment(CompileMode mode, Tier tier,
+ DebugEnabled debugEnabled)
+ : state_(InitialWithModeTierDebug),
+ mode_(mode),
+ tier_(tier),
+ debug_(debugEnabled) {}
+
+void CompilerEnvironment::computeParameters() {
+ MOZ_ASSERT(state_ == InitialWithModeTierDebug);
+
+ state_ = Computed;
+}
+
+void CompilerEnvironment::computeParameters(Decoder& d) {
+ MOZ_ASSERT(!isComputed());
+
+ if (state_ == InitialWithModeTierDebug) {
+ computeParameters();
+ return;
+ }
+
+ bool baselineEnabled = args_->baselineEnabled;
+ bool ionEnabled = args_->ionEnabled;
+ bool debugEnabled = args_->debugEnabled;
+ bool forceTiering = args_->forceTiering;
+
+ bool hasSecondTier = ionEnabled;
+ MOZ_ASSERT_IF(debugEnabled, baselineEnabled);
+ MOZ_ASSERT_IF(forceTiering, baselineEnabled && hasSecondTier);
+
+ // Various constraints in various places should prevent failure here.
+ MOZ_RELEASE_ASSERT(baselineEnabled || ionEnabled);
+
+ uint32_t codeSectionSize = 0;
+
+ SectionRange range;
+ if (StartsCodeSection(d.begin(), d.end(), &range)) {
+ codeSectionSize = range.size;
+ }
+
+ if (baselineEnabled && hasSecondTier &&
+ (TieringBeneficial(codeSectionSize) || forceTiering) &&
+ PlatformCanTier()) {
+ mode_ = CompileMode::Tier1;
+ tier_ = Tier::Baseline;
+ } else {
+ mode_ = CompileMode::Once;
+ tier_ = hasSecondTier ? Tier::Optimized : Tier::Baseline;
+ }
+
+ debug_ = debugEnabled ? DebugEnabled::True : DebugEnabled::False;
+
+ state_ = Computed;
+}
+
+template <class DecoderT>
+static bool DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg,
+ uint32_t funcIndex) {
+ uint32_t bodySize;
+ if (!d.readVarU32(&bodySize)) {
+ return d.fail("expected number of function body bytes");
+ }
+
+ if (bodySize > MaxFunctionBytes) {
+ return d.fail("function body too big");
+ }
+
+ const size_t offsetInModule = d.currentOffset();
+
+ // Skip over the function body; it will be validated by the compilation
+ // thread.
+ const uint8_t* bodyBegin;
+ if (!d.readBytes(bodySize, &bodyBegin)) {
+ return d.fail("function body length too big");
+ }
+
+ return mg.compileFuncDef(funcIndex, offsetInModule, bodyBegin,
+ bodyBegin + bodySize);
+}
+
+template <class DecoderT>
+static bool DecodeCodeSection(const ModuleEnvironment& env, DecoderT& d,
+ ModuleGenerator& mg) {
+ if (!env.codeSection) {
+ if (env.numFuncDefs() != 0) {
+ return d.fail("expected code section");
+ }
+
+ return mg.finishFuncDefs();
+ }
+
+ uint32_t numFuncDefs;
+ if (!d.readVarU32(&numFuncDefs)) {
+ return d.fail("expected function body count");
+ }
+
+ if (numFuncDefs != env.numFuncDefs()) {
+ return d.fail(
+ "function body count does not match function signature count");
+ }
+
+ for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
+ if (!DecodeFunctionBody(d, mg, env.numFuncImports + funcDefIndex)) {
+ return false;
+ }
+ }
+
+ if (!d.finishSection(*env.codeSection, "code")) {
+ return false;
+ }
+
+ return mg.finishFuncDefs();
+}
+
+SharedModule wasm::CompileBuffer(const CompileArgs& args,
+ const ShareableBytes& bytecode,
+ UniqueChars* error,
+ UniqueCharsVector* warnings,
+ JS::OptimizedEncodingListener* listener) {
+ Decoder d(bytecode.bytes, 0, error, warnings);
+
+ ModuleEnvironment moduleEnv(args.features);
+ if (!moduleEnv.init() || !DecodeModuleEnvironment(d, &moduleEnv)) {
+ return nullptr;
+ }
+ CompilerEnvironment compilerEnv(args);
+ compilerEnv.computeParameters(d);
+
+ ModuleGenerator mg(args, &moduleEnv, &compilerEnv, nullptr, error, warnings);
+ if (!mg.init(nullptr)) {
+ return nullptr;
+ }
+
+ if (!DecodeCodeSection(moduleEnv, d, mg)) {
+ return nullptr;
+ }
+
+ if (!DecodeModuleTail(d, &moduleEnv)) {
+ return nullptr;
+ }
+
+ return mg.finishModule(bytecode, listener);
+}
+
+bool wasm::CompileTier2(const CompileArgs& args, const Bytes& bytecode,
+ const Module& module, UniqueChars* error,
+ UniqueCharsVector* warnings, Atomic<bool>* cancelled) {
+ Decoder d(bytecode, 0, error);
+
+ ModuleEnvironment moduleEnv(args.features);
+ if (!moduleEnv.init() || !DecodeModuleEnvironment(d, &moduleEnv)) {
+ return false;
+ }
+ CompilerEnvironment compilerEnv(CompileMode::Tier2, Tier::Optimized,
+ DebugEnabled::False);
+ compilerEnv.computeParameters(d);
+
+ ModuleGenerator mg(args, &moduleEnv, &compilerEnv, cancelled, error,
+ warnings);
+ if (!mg.init(nullptr)) {
+ return false;
+ }
+
+ if (!DecodeCodeSection(moduleEnv, d, mg)) {
+ return false;
+ }
+
+ if (!DecodeModuleTail(d, &moduleEnv)) {
+ return false;
+ }
+
+ return mg.finishTier2(module);
+}
+
+class StreamingDecoder {
+ Decoder d_;
+ const ExclusiveBytesPtr& codeBytesEnd_;
+ const Atomic<bool>& cancelled_;
+
+ public:
+ StreamingDecoder(const ModuleEnvironment& env, const Bytes& begin,
+ const ExclusiveBytesPtr& codeBytesEnd,
+ const Atomic<bool>& cancelled, UniqueChars* error,
+ UniqueCharsVector* warnings)
+ : d_(begin, env.codeSection->start, error, warnings),
+ codeBytesEnd_(codeBytesEnd),
+ cancelled_(cancelled) {}
+
+ bool fail(const char* msg) { return d_.fail(msg); }
+
+ bool done() const { return d_.done(); }
+
+ size_t currentOffset() const { return d_.currentOffset(); }
+
+ bool waitForBytes(size_t numBytes) {
+ numBytes = std::min(numBytes, d_.bytesRemain());
+ const uint8_t* requiredEnd = d_.currentPosition() + numBytes;
+ auto codeBytesEnd = codeBytesEnd_.lock();
+ while (codeBytesEnd < requiredEnd) {
+ if (cancelled_) {
+ return false;
+ }
+ codeBytesEnd.wait();
+ }
+ return true;
+ }
+
+ bool readVarU32(uint32_t* u32) {
+ return waitForBytes(MaxVarU32DecodedBytes) && d_.readVarU32(u32);
+ }
+
+ bool readBytes(size_t size, const uint8_t** begin) {
+ return waitForBytes(size) && d_.readBytes(size, begin);
+ }
+
+ bool finishSection(const SectionRange& range, const char* name) {
+ return d_.finishSection(range, name);
+ }
+};
+
+static SharedBytes CreateBytecode(const Bytes& env, const Bytes& code,
+ const Bytes& tail, UniqueChars* error) {
+ size_t size = env.length() + code.length() + tail.length();
+ if (size > MaxModuleBytes) {
+ *error = DuplicateString("module too big");
+ return nullptr;
+ }
+
+ MutableBytes bytecode = js_new<ShareableBytes>();
+ if (!bytecode || !bytecode->bytes.resize(size)) {
+ return nullptr;
+ }
+
+ uint8_t* p = bytecode->bytes.begin();
+
+ memcpy(p, env.begin(), env.length());
+ p += env.length();
+
+ memcpy(p, code.begin(), code.length());
+ p += code.length();
+
+ memcpy(p, tail.begin(), tail.length());
+ p += tail.length();
+
+ MOZ_ASSERT(p == bytecode->end());
+
+ return bytecode;
+}
+
+SharedModule wasm::CompileStreaming(
+ const CompileArgs& args, const Bytes& envBytes, const Bytes& codeBytes,
+ const ExclusiveBytesPtr& codeBytesEnd,
+ const ExclusiveStreamEndData& exclusiveStreamEnd,
+ const Atomic<bool>& cancelled, UniqueChars* error,
+ UniqueCharsVector* warnings) {
+ CompilerEnvironment compilerEnv(args);
+ ModuleEnvironment moduleEnv(args.features);
+ if (!moduleEnv.init()) {
+ return nullptr;
+ }
+
+ {
+ Decoder d(envBytes, 0, error, warnings);
+
+ if (!DecodeModuleEnvironment(d, &moduleEnv)) {
+ return nullptr;
+ }
+ compilerEnv.computeParameters(d);
+
+ if (!moduleEnv.codeSection) {
+ d.fail("unknown section before code section");
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(moduleEnv.codeSection->size == codeBytes.length());
+ MOZ_RELEASE_ASSERT(d.done());
+ }
+
+ ModuleGenerator mg(args, &moduleEnv, &compilerEnv, &cancelled, error,
+ warnings);
+ if (!mg.init(nullptr)) {
+ return nullptr;
+ }
+
+ {
+ StreamingDecoder d(moduleEnv, codeBytes, codeBytesEnd, cancelled, error,
+ warnings);
+
+ if (!DecodeCodeSection(moduleEnv, d, mg)) {
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(d.done());
+ }
+
+ {
+ auto streamEnd = exclusiveStreamEnd.lock();
+ while (!streamEnd->reached) {
+ if (cancelled) {
+ return nullptr;
+ }
+ streamEnd.wait();
+ }
+ }
+
+ const StreamEndData& streamEnd = exclusiveStreamEnd.lock();
+ const Bytes& tailBytes = *streamEnd.tailBytes;
+
+ {
+ Decoder d(tailBytes, moduleEnv.codeSection->end(), error, warnings);
+
+ if (!DecodeModuleTail(d, &moduleEnv)) {
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(d.done());
+ }
+
+ SharedBytes bytecode = CreateBytecode(envBytes, codeBytes, tailBytes, error);
+ if (!bytecode) {
+ return nullptr;
+ }
+
+ return mg.finishModule(*bytecode, streamEnd.tier2Listener);
+}
diff --git a/js/src/wasm/WasmCompile.h b/js/src/wasm/WasmCompile.h
new file mode 100644
index 0000000000..2b07881eea
--- /dev/null
+++ b/js/src/wasm/WasmCompile.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_compile_h
+#define wasm_compile_h
+
+#include "vm/Runtime.h"
+#include "wasm/WasmModule.h"
+
+namespace JS {
+class OptimizedEncodingListener;
+}
+
+namespace js {
+namespace wasm {
+
+// Return a uint32_t which captures the observed properties of the CPU that
+// affect compilation. If code compiled now is to be serialized and executed
+// later, ObservedCPUFeatures() must return the same value at both times.
+
+uint32_t ObservedCPUFeatures();
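+
+// A minimal sketch of the intended use when serializing compiled code; the
+// storage of savedFeatures alongside the serialized code is hypothetical:
+//
+//   uint32_t savedFeatures = ObservedCPUFeatures();  // stored with the code
+//   ...
+//   if (ObservedCPUFeatures() != savedFeatures) {
+//     // The CPU differs from the one the code was compiled for, so fall back
+//     // to recompiling from bytecode instead of using the serialized code.
+//   }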
+
+// Return the estimated compiled (machine) code size for the given bytecode size
+// compiled at the given tier.
+
+double EstimateCompiledCodeSize(Tier tier, size_t bytecodeSize);
+
+// Compile the given WebAssembly bytecode with the given arguments into a
+// wasm::Module. On success, the Module is returned. On failure, the returned
+// SharedModule pointer is null and either:
+// - *error points to a string description of the error
+// - *error is null and the caller should report out-of-memory.
+
+SharedModule CompileBuffer(const CompileArgs& args,
+ const ShareableBytes& bytecode, UniqueChars* error,
+ UniqueCharsVector* warnings,
+ JS::OptimizedEncodingListener* listener = nullptr);
+
+// Attempt to compile the second tier of the given wasm::Module.
+
+bool CompileTier2(const CompileArgs& args, const Bytes& bytecode,
+ const Module& module, UniqueChars* error,
+ UniqueCharsVector* warnings, Atomic<bool>* cancelled);
+
+// Compile the given WebAssembly module which has been broken into three
+// partitions:
+// - envBytes contains a complete ModuleEnvironment that has already been
+// copied in from the stream.
+// - codeBytes is pre-sized to hold the complete code section when the stream
+// completes.
+// - The range [codeBytes.begin(), codeBytesEnd) contains the bytes currently
+// read from the stream and codeBytesEnd will advance until either
+// the stream is cancelled or codeBytesEnd == codeBytes.end().
+// - streamEnd contains the final information received after the code section:
+// the remaining module bytecodes and maybe a JS::OptimizedEncodingListener.
+// When the stream is successfully closed, streamEnd.reached is set.
+// The ExclusiveWaitableData are notified when CompileStreaming() can make
+// progress (i.e., codeBytesEnd advances or streamEnd.reached is set).
+// If cancelled is set to true, compilation aborts and returns null. After
+// cancellation is set, both ExclusiveWaitableData will be notified and so every
+// wait() loop must check cancelled.
+
+using ExclusiveBytesPtr = ExclusiveWaitableData<const uint8_t*>;
+
+struct StreamEndData {
+ bool reached;
+ const Bytes* tailBytes;
+ Tier2Listener tier2Listener;
+
+ StreamEndData() : reached(false) {}
+};
+using ExclusiveStreamEndData = ExclusiveWaitableData<StreamEndData>;
+
+SharedModule CompileStreaming(const CompileArgs& args, const Bytes& envBytes,
+ const Bytes& codeBytes,
+ const ExclusiveBytesPtr& codeBytesEnd,
+ const ExclusiveStreamEndData& streamEnd,
+ const Atomic<bool>& cancelled, UniqueChars* error,
+ UniqueCharsVector* warnings);
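+
+// For reference, the cancellation-aware wait discipline required above looks
+// like this (a sketch mirroring the loops in WasmCompile.cpp, not extra API):
+//
+//   auto end = streamEnd.lock();
+//   while (!end->reached) {
+//     if (cancelled) {
+//       return nullptr;  // cancellation must win over further waiting
+//     }
+//     end.wait();
+//   }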
+
+} // namespace wasm
+} // namespace js
+
+#endif  // wasm_compile_h
diff --git a/js/src/wasm/WasmCompileArgs.h b/js/src/wasm/WasmCompileArgs.h
new file mode 100644
index 0000000000..1cf45cb020
--- /dev/null
+++ b/js/src/wasm/WasmCompileArgs.h
@@ -0,0 +1,238 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_compile_args_h
+#define wasm_compile_args_h
+
+#include "mozilla/RefPtr.h"
+
+#include "js/Utility.h"
+#include "js/WasmFeatures.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmShareable.h"
+
+namespace js {
+namespace wasm {
+
+enum class Shareable { False, True };
+
+// Code can be compiled either with the Baseline compiler or the Ion compiler,
+// and tier-variant data are tagged with the Tier value.
+//
+// A tier value is used to request tier-variant aspects of code, metadata, or
+// linkdata. The tiers are normally explicit (Baseline and Ion); implicit tiers
+// can be obtained through accessors on Code objects (e.g., stableTier).
+
+enum class Tier {
+ Baseline,
+ Debug = Baseline,
+ Optimized,
+ Serialized = Optimized
+};
+
+// Iterator over tiers present in a tiered data structure.
+
+class Tiers {
+ Tier t_[2];
+ uint32_t n_;
+
+ public:
+ explicit Tiers() { n_ = 0; }
+ explicit Tiers(Tier t) {
+ t_[0] = t;
+ n_ = 1;
+ }
+ explicit Tiers(Tier t, Tier u) {
+ MOZ_ASSERT(t != u);
+ t_[0] = t;
+ t_[1] = u;
+ n_ = 2;
+ }
+
+ Tier* begin() { return t_; }
+ Tier* end() { return t_ + n_; }
+};
+
+// Describes per-compilation settings that are controlled by an options bag
+// passed to compilation and validation functions. (Nonstandard extension
+// available under prefs.)
+
+struct FeatureOptions {
+ FeatureOptions() : intrinsics(false) {}
+
+ // Enables intrinsic opcodes, only set in WasmIntrinsic.cpp.
+ bool intrinsics;
+};
+
+// Describes the features that control wasm compilation.
+
+struct FeatureArgs {
+ FeatureArgs()
+ :
+#define WASM_FEATURE(NAME, LOWER_NAME, ...) LOWER_NAME(false),
+ JS_FOR_WASM_FEATURES(WASM_FEATURE, WASM_FEATURE, WASM_FEATURE)
+#undef WASM_FEATURE
+ sharedMemory(Shareable::False),
+ simd(false),
+ intrinsics(false) {
+ }
+ FeatureArgs(const FeatureArgs&) = default;
+ FeatureArgs& operator=(const FeatureArgs&) = default;
+ FeatureArgs(FeatureArgs&&) = default;
+
+ static FeatureArgs build(JSContext* cx, const FeatureOptions& options);
+
+#define WASM_FEATURE(NAME, LOWER_NAME, ...) bool LOWER_NAME;
+ JS_FOR_WASM_FEATURES(WASM_FEATURE, WASM_FEATURE, WASM_FEATURE)
+#undef WASM_FEATURE
+
+ Shareable sharedMemory;
+ bool simd;
+ bool intrinsics;
+};
+
+// Describes the JS scripted caller of a request to compile a wasm module.
+
+struct ScriptedCaller {
+ UniqueChars filename; // UTF-8 encoded
+ bool filenameIsURL;
+ unsigned line;
+
+ ScriptedCaller() : filenameIsURL(false), line(0) {}
+};
+
+// Describes the reasons we cannot compute compile args.
+
+enum class CompileArgsError {
+ OutOfMemory,
+ NoCompiler,
+};
+
+// Describes all the parameters that control wasm compilation.
+
+struct CompileArgs;
+using MutableCompileArgs = RefPtr<CompileArgs>;
+using SharedCompileArgs = RefPtr<const CompileArgs>;
+
+struct CompileArgs : ShareableBase<CompileArgs> {
+ ScriptedCaller scriptedCaller;
+ UniqueChars sourceMapURL;
+
+ bool baselineEnabled;
+ bool ionEnabled;
+ bool debugEnabled;
+ bool forceTiering;
+
+ FeatureArgs features;
+
+ // CompileArgs has several constructors:
+ //
+  // - two through the factory functions `build`/`buildAndReport`, which check
+  //   that flags are consistent with each other and optionally report any
+  //   errors.
+ // - the 'buildForAsmJS' one, which uses the appropriate configuration for
+ // legacy asm.js code.
+ // - one that gives complete access to underlying fields.
+ //
+ // You should use the factory functions in general, unless you have a very
+ // good reason (i.e. no JSContext around and you know which flags have been
+ // used).
+
+ static SharedCompileArgs build(JSContext* cx, ScriptedCaller&& scriptedCaller,
+ const FeatureOptions& options,
+ CompileArgsError* error);
+ static SharedCompileArgs buildForAsmJS(ScriptedCaller&& scriptedCaller);
+ static SharedCompileArgs buildAndReport(JSContext* cx,
+ ScriptedCaller&& scriptedCaller,
+ const FeatureOptions& options,
+ bool reportOOM = false);
+
+ explicit CompileArgs(ScriptedCaller&& scriptedCaller)
+ : scriptedCaller(std::move(scriptedCaller)),
+ baselineEnabled(false),
+ ionEnabled(false),
+ debugEnabled(false),
+ forceTiering(false) {}
+};
+
+// CompilerEnvironment holds any values that will be needed to compute
+// compilation parameters once the module's feature opt-in sections have been
+// parsed.
+//
+// Subsequent to construction a computeParameters() call will compute the final
+// compilation parameters, and the object can then be queried for their values.
+
+struct CompileArgs;
+class Decoder;
+
+struct CompilerEnvironment {
+ // The object starts in one of two "initial" states; computeParameters moves
+ // it into the "computed" state.
+ enum State { InitialWithArgs, InitialWithModeTierDebug, Computed };
+
+ State state_;
+ union {
+ // Value if the state_ == InitialWithArgs.
+ const CompileArgs* args_;
+
+ // Value in the other two states.
+ struct {
+ CompileMode mode_;
+ Tier tier_;
+ DebugEnabled debug_;
+ };
+ };
+
+ public:
+ // Retain a reference to the CompileArgs. A subsequent computeParameters()
+ // will compute all parameters from the CompileArgs and additional values.
+ explicit CompilerEnvironment(const CompileArgs& args);
+
+ // Save the provided values for mode, tier, and debug, and the initial value
+ // for gc/refTypes. A subsequent computeParameters() will compute the
+ // final value of gc/refTypes.
+ CompilerEnvironment(CompileMode mode, Tier tier, DebugEnabled debugEnabled);
+
+ // Compute any remaining compilation parameters.
+ void computeParameters(Decoder& d);
+
+ // Compute any remaining compilation parameters. Only use this method if
+ // the CompilerEnvironment was created with values for mode, tier, and
+ // debug.
+ void computeParameters();
+
+ bool isComputed() const { return state_ == Computed; }
+ CompileMode mode() const {
+ MOZ_ASSERT(isComputed());
+ return mode_;
+ }
+ Tier tier() const {
+ MOZ_ASSERT(isComputed());
+ return tier_;
+ }
+ DebugEnabled debug() const {
+ MOZ_ASSERT(isComputed());
+ return debug_;
+ }
+ bool debugEnabled() const { return debug() == DebugEnabled::True; }
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_compile_args_h
diff --git a/js/src/wasm/WasmConstants.h b/js/src/wasm/WasmConstants.h
new file mode 100644
index 0000000000..11bbc46dd2
--- /dev/null
+++ b/js/src/wasm/WasmConstants.h
@@ -0,0 +1,1144 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_constants_h
+#define wasm_constants_h
+
+#include <stdint.h>
+
+#include "wasm/WasmIntrinsicGenerated.h"
+
+namespace js {
+namespace wasm {
+
+static const uint32_t MagicNumber = 0x6d736100; // "\0asm"
+static const uint32_t EncodingVersion = 0x01;
+
+enum class SectionId {
+ Custom = 0,
+ Type = 1,
+ Import = 2,
+ Function = 3,
+ Table = 4,
+ Memory = 5,
+ Global = 6,
+ Export = 7,
+ Start = 8,
+ Elem = 9,
+ Code = 10,
+ Data = 11,
+ DataCount = 12,
+ Tag = 13,
+};
+
+// WebAssembly type encodings are all single-byte negative SLEB128s, hence:
+// forall tc:TypeCode. ((tc & SLEB128SignMask) == SLEB128SignBit)
+static const uint8_t SLEB128SignMask = 0xc0;
+static const uint8_t SLEB128SignBit = 0x40;
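+
+// Worked example (a restatement of the invariant above, nothing new):
+// TypeCode::I32 is 0x7f, the SLEB128 encoding of -1, and
+// 0x7f & SLEB128SignMask == 0x40 == SLEB128SignBit, whereas a non-negative
+// single-byte SLEB128 such as 0x3f gives 0x3f & 0xc0 == 0x00.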
+
+enum class TypeCode {
+
+ // If more "simple primitive" (non-reference, non-constructor,
+ // non-special-purpose) types are added here then you MUST update
+ // LowestPrimitiveTypeCode, below.
+
+ I32 = 0x7f, // SLEB128(-0x01)
+ I64 = 0x7e, // SLEB128(-0x02)
+ F32 = 0x7d, // SLEB128(-0x03)
+ F64 = 0x7c, // SLEB128(-0x04)
+ V128 = 0x7b, // SLEB128(-0x05)
+
+ I8 = 0x7a, // SLEB128(-0x06)
+ I16 = 0x79, // SLEB128(-0x07)
+
+ // A function pointer with any signature
+ FuncRef = 0x70, // SLEB128(-0x10)
+
+ // A reference to any host value.
+ ExternRef = 0x6f, // SLEB128(-0x11)
+
+ // A reference to any wasm gc value.
+ AnyRef = 0x6e, // SLEB128(-0x12)
+
+ // A reference to a struct/array value.
+ EqRef = 0x6d, // SLEB128(-0x13)
+
+ // Type constructor for nullable reference types.
+ NullableRef = 0x6c, // SLEB128(-0x14)
+
+ // Type constructor for non-nullable reference types.
+ Ref = 0x6b, // SLEB128(-0x15)
+
+ // A null reference in the extern hierarchy.
+ NullExternRef = 0x69, // SLEB128(-0x17)
+
+ // A null reference in the func hierarchy.
+ NullFuncRef = 0x68, // SLEB128(-0x18)
+
+ // A reference to any struct value.
+ StructRef = 0x67, // SLEB128(-0x19)
+
+ // A reference to any array value.
+ ArrayRef = 0x66, // SLEB128(-0x1A)
+
+ // A null reference in the any hierarchy.
+ NullAnyRef = 0x65, // SLEB128(-0x1B)
+
+ // Type constructor for function types
+ Func = 0x60, // SLEB128(-0x20)
+
+ // Type constructor for structure types - gc proposal
+ Struct = 0x5f, // SLEB128(-0x21)
+
+ // Type constructor for array types - gc proposal
+ Array = 0x5e, // SLEB128(-0x22)
+
+ // Value for non-nullable type present.
+ TableHasInitExpr = 0x40,
+
+ // The 'empty' case of blocktype.
+ BlockVoid = 0x40, // SLEB128(-0x40)
+
+ // Type constructor for recursion groups - gc proposal
+ RecGroup = 0x4f,
+
+ // TODO: update wasm-tools to use the correct prefix
+ RecGroupOld = 0x45,
+
+ // Type prefix for parent types - gc proposal
+ SubType = 0x50,
+
+ Limit = 0x80
+};
+
+// This is the lowest-valued TypeCode that is a primitive type, used in
+// UnpackTypeCodeTypeAbstracted(). If primitive typecodes are added below any
+// reference typecode then the logic in that function MUST change.
+
+static constexpr TypeCode LowestPrimitiveTypeCode = TypeCode::I16;
+
+// An arbitrary reference type used as the result of
+// UnpackTypeCodeTypeAbstracted() when a value type is a reference.
+
+static constexpr TypeCode AbstractReferenceTypeCode = TypeCode::ExternRef;
+
+// A type code used to represent (ref null? T) whether or not the type
+// is encoded with 'Ref' or 'NullableRef'.
+
+static constexpr TypeCode AbstractTypeRefCode = TypeCode::Ref;
+
+// A wasm::Trap represents a wasm-defined trap that can occur during execution
+// which triggers a WebAssembly.RuntimeError. Generated code may jump to a Trap
+// symbolically, passing the bytecode offset to report as the trap offset. The
+// generated jump will be bound to a tiny stub which fills the offset and
+// then jumps to a per-Trap shared stub at the end of the module.
+
+enum class Trap {
+ // The Unreachable opcode has been executed.
+ Unreachable,
+ // An integer arithmetic operation led to an overflow.
+ IntegerOverflow,
+ // Trying to coerce NaN to an integer.
+ InvalidConversionToInteger,
+ // Integer division by zero.
+ IntegerDivideByZero,
+ // Out of bounds on wasm memory accesses.
+ OutOfBounds,
+ // Unaligned on wasm atomic accesses; also used for non-standard ARM
+ // unaligned access faults.
+ UnalignedAccess,
+ // call_indirect to null.
+ IndirectCallToNull,
+ // call_indirect signature mismatch.
+ IndirectCallBadSig,
+ // Dereference null pointer in operation on (Ref T)
+ NullPointerDereference,
+ // Failed to cast a (Ref T) in a ref.cast instruction
+ BadCast,
+
+ // The internal stack space was exhausted. For compatibility, this throws
+ // the same over-recursed error as JS.
+ StackOverflow,
+
+ // The wasm execution has potentially run too long and the engine must call
+ // CheckForInterrupt(). This trap is resumable.
+ CheckInterrupt,
+
+ // Signal an error that was reported in C++ code.
+ ThrowReported,
+
+ Limit
+};
+
+// The representation of a null reference value throughout the compiler.
+
+static const intptr_t NULLREF_VALUE = intptr_t((void*)nullptr);
+
+enum class DefinitionKind {
+ Function = 0x00,
+ Table = 0x01,
+ Memory = 0x02,
+ Global = 0x03,
+ Tag = 0x04,
+};
+
+enum class GlobalTypeImmediate { IsMutable = 0x1, AllowedMask = 0x1 };
+
+enum class LimitsFlags {
+ Default = 0x0,
+ HasMaximum = 0x1,
+ IsShared = 0x2,
+ IsI64 = 0x4,
+};
+
+enum class LimitsMask {
+ Table = uint8_t(LimitsFlags::HasMaximum),
+#ifdef ENABLE_WASM_MEMORY64
+ Memory = uint8_t(LimitsFlags::HasMaximum) | uint8_t(LimitsFlags::IsShared) |
+ uint8_t(LimitsFlags::IsI64),
+#else
+ Memory = uint8_t(LimitsFlags::HasMaximum) | uint8_t(LimitsFlags::IsShared),
+#endif
+};
+
+enum class DataSegmentKind {
+ Active = 0x00,
+ Passive = 0x01,
+ ActiveWithMemoryIndex = 0x02
+};
+
+enum class ElemSegmentKind : uint32_t {
+ Active = 0x0,
+ Passive = 0x1,
+ ActiveWithTableIndex = 0x2,
+ Declared = 0x3,
+};
+
+enum class ElemSegmentPayload : uint32_t {
+ ExternIndex = 0x0,
+ ElemExpression = 0x4,
+};
+
+enum class TagKind {
+ Exception = 0x0,
+};
+
+enum class Op {
+ // Control flow operators
+ Unreachable = 0x00,
+ Nop = 0x01,
+ Block = 0x02,
+ Loop = 0x03,
+ If = 0x04,
+ Else = 0x05,
+ Try = 0x06,
+ Catch = 0x07,
+ Throw = 0x08,
+ Rethrow = 0x09,
+ End = 0x0b,
+ Br = 0x0c,
+ BrIf = 0x0d,
+ BrTable = 0x0e,
+ Return = 0x0f,
+
+ // Call operators
+ Call = 0x10,
+ CallIndirect = 0x11,
+ CallRef = 0x14,
+
+ // Additional exception operators
+ Delegate = 0x18,
+ CatchAll = 0x19,
+
+ // Parametric operators
+ Drop = 0x1a,
+ SelectNumeric = 0x1b,
+ SelectTyped = 0x1c,
+
+ // Variable access
+ LocalGet = 0x20,
+ LocalSet = 0x21,
+ LocalTee = 0x22,
+ GlobalGet = 0x23,
+ GlobalSet = 0x24,
+ TableGet = 0x25, // Reftypes,
+ TableSet = 0x26, // per proposal as of February 2019
+
+ // Memory-related operators
+ I32Load = 0x28,
+ I64Load = 0x29,
+ F32Load = 0x2a,
+ F64Load = 0x2b,
+ I32Load8S = 0x2c,
+ I32Load8U = 0x2d,
+ I32Load16S = 0x2e,
+ I32Load16U = 0x2f,
+ I64Load8S = 0x30,
+ I64Load8U = 0x31,
+ I64Load16S = 0x32,
+ I64Load16U = 0x33,
+ I64Load32S = 0x34,
+ I64Load32U = 0x35,
+ I32Store = 0x36,
+ I64Store = 0x37,
+ F32Store = 0x38,
+ F64Store = 0x39,
+ I32Store8 = 0x3a,
+ I32Store16 = 0x3b,
+ I64Store8 = 0x3c,
+ I64Store16 = 0x3d,
+ I64Store32 = 0x3e,
+ MemorySize = 0x3f,
+ MemoryGrow = 0x40,
+
+ // Constants
+ I32Const = 0x41,
+ I64Const = 0x42,
+ F32Const = 0x43,
+ F64Const = 0x44,
+
+ // Comparison operators
+ I32Eqz = 0x45,
+ I32Eq = 0x46,
+ I32Ne = 0x47,
+ I32LtS = 0x48,
+ I32LtU = 0x49,
+ I32GtS = 0x4a,
+ I32GtU = 0x4b,
+ I32LeS = 0x4c,
+ I32LeU = 0x4d,
+ I32GeS = 0x4e,
+ I32GeU = 0x4f,
+ I64Eqz = 0x50,
+ I64Eq = 0x51,
+ I64Ne = 0x52,
+ I64LtS = 0x53,
+ I64LtU = 0x54,
+ I64GtS = 0x55,
+ I64GtU = 0x56,
+ I64LeS = 0x57,
+ I64LeU = 0x58,
+ I64GeS = 0x59,
+ I64GeU = 0x5a,
+ F32Eq = 0x5b,
+ F32Ne = 0x5c,
+ F32Lt = 0x5d,
+ F32Gt = 0x5e,
+ F32Le = 0x5f,
+ F32Ge = 0x60,
+ F64Eq = 0x61,
+ F64Ne = 0x62,
+ F64Lt = 0x63,
+ F64Gt = 0x64,
+ F64Le = 0x65,
+ F64Ge = 0x66,
+
+ // Numeric operators
+ I32Clz = 0x67,
+ I32Ctz = 0x68,
+ I32Popcnt = 0x69,
+ I32Add = 0x6a,
+ I32Sub = 0x6b,
+ I32Mul = 0x6c,
+ I32DivS = 0x6d,
+ I32DivU = 0x6e,
+ I32RemS = 0x6f,
+ I32RemU = 0x70,
+ I32And = 0x71,
+ I32Or = 0x72,
+ I32Xor = 0x73,
+ I32Shl = 0x74,
+ I32ShrS = 0x75,
+ I32ShrU = 0x76,
+ I32Rotl = 0x77,
+ I32Rotr = 0x78,
+ I64Clz = 0x79,
+ I64Ctz = 0x7a,
+ I64Popcnt = 0x7b,
+ I64Add = 0x7c,
+ I64Sub = 0x7d,
+ I64Mul = 0x7e,
+ I64DivS = 0x7f,
+ I64DivU = 0x80,
+ I64RemS = 0x81,
+ I64RemU = 0x82,
+ I64And = 0x83,
+ I64Or = 0x84,
+ I64Xor = 0x85,
+ I64Shl = 0x86,
+ I64ShrS = 0x87,
+ I64ShrU = 0x88,
+ I64Rotl = 0x89,
+ I64Rotr = 0x8a,
+ F32Abs = 0x8b,
+ F32Neg = 0x8c,
+ F32Ceil = 0x8d,
+ F32Floor = 0x8e,
+ F32Trunc = 0x8f,
+ F32Nearest = 0x90,
+ F32Sqrt = 0x91,
+ F32Add = 0x92,
+ F32Sub = 0x93,
+ F32Mul = 0x94,
+ F32Div = 0x95,
+ F32Min = 0x96,
+ F32Max = 0x97,
+ F32CopySign = 0x98,
+ F64Abs = 0x99,
+ F64Neg = 0x9a,
+ F64Ceil = 0x9b,
+ F64Floor = 0x9c,
+ F64Trunc = 0x9d,
+ F64Nearest = 0x9e,
+ F64Sqrt = 0x9f,
+ F64Add = 0xa0,
+ F64Sub = 0xa1,
+ F64Mul = 0xa2,
+ F64Div = 0xa3,
+ F64Min = 0xa4,
+ F64Max = 0xa5,
+ F64CopySign = 0xa6,
+
+ // Conversions
+ I32WrapI64 = 0xa7,
+ I32TruncF32S = 0xa8,
+ I32TruncF32U = 0xa9,
+ I32TruncF64S = 0xaa,
+ I32TruncF64U = 0xab,
+ I64ExtendI32S = 0xac,
+ I64ExtendI32U = 0xad,
+ I64TruncF32S = 0xae,
+ I64TruncF32U = 0xaf,
+ I64TruncF64S = 0xb0,
+ I64TruncF64U = 0xb1,
+ F32ConvertI32S = 0xb2,
+ F32ConvertI32U = 0xb3,
+ F32ConvertI64S = 0xb4,
+ F32ConvertI64U = 0xb5,
+ F32DemoteF64 = 0xb6,
+ F64ConvertI32S = 0xb7,
+ F64ConvertI32U = 0xb8,
+ F64ConvertI64S = 0xb9,
+ F64ConvertI64U = 0xba,
+ F64PromoteF32 = 0xbb,
+
+ // Reinterpretations
+ I32ReinterpretF32 = 0xbc,
+ I64ReinterpretF64 = 0xbd,
+ F32ReinterpretI32 = 0xbe,
+ F64ReinterpretI64 = 0xbf,
+
+ // Sign extension
+ I32Extend8S = 0xc0,
+ I32Extend16S = 0xc1,
+ I64Extend8S = 0xc2,
+ I64Extend16S = 0xc3,
+ I64Extend32S = 0xc4,
+
+ // Reference types
+ RefNull = 0xd0,
+ RefIsNull = 0xd1,
+ RefFunc = 0xd2,
+
+ // Function references
+ RefAsNonNull = 0xd3,
+ BrOnNull = 0xd4,
+
+ // GC (experimental)
+ RefEq = 0xd5,
+
+ // Function references
+ BrOnNonNull = 0xd6,
+
+ FirstPrefix = 0xfa,
+ GcPrefix = 0xfb,
+ MiscPrefix = 0xfc,
+ SimdPrefix = 0xfd,
+ ThreadPrefix = 0xfe,
+ MozPrefix = 0xff,
+
+ Limit = 0x100
+};
+
+inline bool IsPrefixByte(uint8_t b) { return b >= uint8_t(Op::FirstPrefix); }
+
+// Opcodes in the GC opcode space.
+enum class GcOp {
+ // Structure operations
+ StructNew = 0x7,
+ StructNewDefault = 0x8,
+ StructGet = 0x03,
+ StructGetS = 0x04,
+ StructGetU = 0x05,
+ StructSet = 0x06,
+
+ // Array operations
+ ArrayNew = 0x1b,
+ ArrayNewFixed = 0x1a,
+ ArrayNewDefault = 0x1c,
+ ArrayNewData = 0x1d,
+ // array.init_from_elem_static in V5 became array.new_elem in V6, changing
+ // opcodes in the process
+ ArrayInitFromElemStaticV5 = 0x10,
+ ArrayNewElem = 0x1f,
+ ArrayGet = 0x13,
+ ArrayGetS = 0x14,
+ ArrayGetU = 0x15,
+ ArraySet = 0x16,
+ ArrayLenWithTypeIndex = 0x17,
+ ArrayCopy = 0x18,
+ ArrayLen = 0x19,
+
+ // Ref operations
+ RefTestV5 = 0x44,
+ RefCastV5 = 0x45,
+ BrOnCastV5 = 0x46,
+ BrOnCastHeapV5 = 0x42,
+ BrOnCastHeapNullV5 = 0x4a,
+ BrOnCastFailV5 = 0x47,
+ BrOnCastFailHeapV5 = 0x43,
+ BrOnCastFailHeapNullV5 = 0x4b,
+ RefTest = 0x40,
+ RefCast = 0x41,
+ RefTestNull = 0x48,
+ RefCastNull = 0x49,
+ BrOnCast = 0x4f,
+
+ // Dart compatibility instruction
+ RefAsStructV5 = 0x59,
+ BrOnNonStructV5 = 0x64,
+
+ // Extern/any coercion operations
+ ExternInternalize = 0x70,
+ ExternExternalize = 0x71,
+
+ Limit
+};
+
+// Opcode list from the SIMD proposal post-renumbering in May, 2020.
+
+// Opcodes with suffix 'Experimental' are proposed but not standardized, and are
+// compatible with those same opcodes in V8. No opcode labeled 'Experimental'
+// will ship in a Release build where SIMD is enabled by default.
+
+enum class SimdOp {
+ V128Load = 0x00,
+ V128Load8x8S = 0x01,
+ V128Load8x8U = 0x02,
+ V128Load16x4S = 0x03,
+ V128Load16x4U = 0x04,
+ V128Load32x2S = 0x05,
+ V128Load32x2U = 0x06,
+ V128Load8Splat = 0x07,
+ V128Load16Splat = 0x08,
+ V128Load32Splat = 0x09,
+ V128Load64Splat = 0x0a,
+ V128Store = 0x0b,
+ V128Const = 0x0c,
+ I8x16Shuffle = 0x0d,
+ I8x16Swizzle = 0x0e,
+ I8x16Splat = 0x0f,
+ I16x8Splat = 0x10,
+ I32x4Splat = 0x11,
+ I64x2Splat = 0x12,
+ F32x4Splat = 0x13,
+ F64x2Splat = 0x14,
+ I8x16ExtractLaneS = 0x15,
+ I8x16ExtractLaneU = 0x16,
+ I8x16ReplaceLane = 0x17,
+ I16x8ExtractLaneS = 0x18,
+ I16x8ExtractLaneU = 0x19,
+ I16x8ReplaceLane = 0x1a,
+ I32x4ExtractLane = 0x1b,
+ I32x4ReplaceLane = 0x1c,
+ I64x2ExtractLane = 0x1d,
+ I64x2ReplaceLane = 0x1e,
+ F32x4ExtractLane = 0x1f,
+ F32x4ReplaceLane = 0x20,
+ F64x2ExtractLane = 0x21,
+ F64x2ReplaceLane = 0x22,
+ I8x16Eq = 0x23,
+ I8x16Ne = 0x24,
+ I8x16LtS = 0x25,
+ I8x16LtU = 0x26,
+ I8x16GtS = 0x27,
+ I8x16GtU = 0x28,
+ I8x16LeS = 0x29,
+ I8x16LeU = 0x2a,
+ I8x16GeS = 0x2b,
+ I8x16GeU = 0x2c,
+ I16x8Eq = 0x2d,
+ I16x8Ne = 0x2e,
+ I16x8LtS = 0x2f,
+ I16x8LtU = 0x30,
+ I16x8GtS = 0x31,
+ I16x8GtU = 0x32,
+ I16x8LeS = 0x33,
+ I16x8LeU = 0x34,
+ I16x8GeS = 0x35,
+ I16x8GeU = 0x36,
+ I32x4Eq = 0x37,
+ I32x4Ne = 0x38,
+ I32x4LtS = 0x39,
+ I32x4LtU = 0x3a,
+ I32x4GtS = 0x3b,
+ I32x4GtU = 0x3c,
+ I32x4LeS = 0x3d,
+ I32x4LeU = 0x3e,
+ I32x4GeS = 0x3f,
+ I32x4GeU = 0x40,
+ F32x4Eq = 0x41,
+ F32x4Ne = 0x42,
+ F32x4Lt = 0x43,
+ F32x4Gt = 0x44,
+ F32x4Le = 0x45,
+ F32x4Ge = 0x46,
+ F64x2Eq = 0x47,
+ F64x2Ne = 0x48,
+ F64x2Lt = 0x49,
+ F64x2Gt = 0x4a,
+ F64x2Le = 0x4b,
+ F64x2Ge = 0x4c,
+ V128Not = 0x4d,
+ V128And = 0x4e,
+ V128AndNot = 0x4f,
+ V128Or = 0x50,
+ V128Xor = 0x51,
+ V128Bitselect = 0x52,
+ V128AnyTrue = 0x53,
+ V128Load8Lane = 0x54,
+ V128Load16Lane = 0x55,
+ V128Load32Lane = 0x56,
+ V128Load64Lane = 0x57,
+ V128Store8Lane = 0x58,
+ V128Store16Lane = 0x59,
+ V128Store32Lane = 0x5a,
+ V128Store64Lane = 0x5b,
+ V128Load32Zero = 0x5c,
+ V128Load64Zero = 0x5d,
+ F32x4DemoteF64x2Zero = 0x5e,
+ F64x2PromoteLowF32x4 = 0x5f,
+ I8x16Abs = 0x60,
+ I8x16Neg = 0x61,
+ I8x16Popcnt = 0x62,
+ I8x16AllTrue = 0x63,
+ I8x16Bitmask = 0x64,
+ I8x16NarrowI16x8S = 0x65,
+ I8x16NarrowI16x8U = 0x66,
+ F32x4Ceil = 0x67,
+ F32x4Floor = 0x68,
+ F32x4Trunc = 0x69,
+ F32x4Nearest = 0x6a,
+ I8x16Shl = 0x6b,
+ I8x16ShrS = 0x6c,
+ I8x16ShrU = 0x6d,
+ I8x16Add = 0x6e,
+ I8x16AddSatS = 0x6f,
+ I8x16AddSatU = 0x70,
+ I8x16Sub = 0x71,
+ I8x16SubSatS = 0x72,
+ I8x16SubSatU = 0x73,
+ F64x2Ceil = 0x74,
+ F64x2Floor = 0x75,
+ I8x16MinS = 0x76,
+ I8x16MinU = 0x77,
+ I8x16MaxS = 0x78,
+ I8x16MaxU = 0x79,
+ F64x2Trunc = 0x7a,
+ I8x16AvgrU = 0x7b,
+ I16x8ExtaddPairwiseI8x16S = 0x7c,
+ I16x8ExtaddPairwiseI8x16U = 0x7d,
+ I32x4ExtaddPairwiseI16x8S = 0x7e,
+ I32x4ExtaddPairwiseI16x8U = 0x7f,
+ I16x8Abs = 0x80,
+ I16x8Neg = 0x81,
+ I16x8Q15MulrSatS = 0x82,
+ I16x8AllTrue = 0x83,
+ I16x8Bitmask = 0x84,
+ I16x8NarrowI32x4S = 0x85,
+ I16x8NarrowI32x4U = 0x86,
+ I16x8ExtendLowI8x16S = 0x87,
+ I16x8ExtendHighI8x16S = 0x88,
+ I16x8ExtendLowI8x16U = 0x89,
+ I16x8ExtendHighI8x16U = 0x8a,
+ I16x8Shl = 0x8b,
+ I16x8ShrS = 0x8c,
+ I16x8ShrU = 0x8d,
+ I16x8Add = 0x8e,
+ I16x8AddSatS = 0x8f,
+ I16x8AddSatU = 0x90,
+ I16x8Sub = 0x91,
+ I16x8SubSatS = 0x92,
+ I16x8SubSatU = 0x93,
+ F64x2Nearest = 0x94,
+ I16x8Mul = 0x95,
+ I16x8MinS = 0x96,
+ I16x8MinU = 0x97,
+ I16x8MaxS = 0x98,
+ I16x8MaxU = 0x99,
+ // Unused = 0x9a
+ I16x8AvgrU = 0x9b,
+ I16x8ExtmulLowI8x16S = 0x9c,
+ I16x8ExtmulHighI8x16S = 0x9d,
+ I16x8ExtmulLowI8x16U = 0x9e,
+ I16x8ExtmulHighI8x16U = 0x9f,
+ I32x4Abs = 0xa0,
+ I32x4Neg = 0xa1,
+ // Unused = 0xa2
+ I32x4AllTrue = 0xa3,
+ I32x4Bitmask = 0xa4,
+ // Unused = 0xa5
+ // Unused = 0xa6
+ I32x4ExtendLowI16x8S = 0xa7,
+ I32x4ExtendHighI16x8S = 0xa8,
+ I32x4ExtendLowI16x8U = 0xa9,
+ I32x4ExtendHighI16x8U = 0xaa,
+ I32x4Shl = 0xab,
+ I32x4ShrS = 0xac,
+ I32x4ShrU = 0xad,
+ I32x4Add = 0xae,
+ // Unused = 0xaf
+ // Unused = 0xb0
+ I32x4Sub = 0xb1,
+ // Unused = 0xb2
+ // Unused = 0xb3
+ // Unused = 0xb4
+ I32x4Mul = 0xb5,
+ I32x4MinS = 0xb6,
+ I32x4MinU = 0xb7,
+ I32x4MaxS = 0xb8,
+ I32x4MaxU = 0xb9,
+ I32x4DotI16x8S = 0xba,
+ // Unused = 0xbb
+ I32x4ExtmulLowI16x8S = 0xbc,
+ I32x4ExtmulHighI16x8S = 0xbd,
+ I32x4ExtmulLowI16x8U = 0xbe,
+ I32x4ExtmulHighI16x8U = 0xbf,
+ I64x2Abs = 0xc0,
+ I64x2Neg = 0xc1,
+ // AnyTrue = 0xc2
+ I64x2AllTrue = 0xc3,
+ I64x2Bitmask = 0xc4,
+ // Unused = 0xc5
+ // Unused = 0xc6
+ I64x2ExtendLowI32x4S = 0xc7,
+ I64x2ExtendHighI32x4S = 0xc8,
+ I64x2ExtendLowI32x4U = 0xc9,
+ I64x2ExtendHighI32x4U = 0xca,
+ I64x2Shl = 0xcb,
+ I64x2ShrS = 0xcc,
+ I64x2ShrU = 0xcd,
+ I64x2Add = 0xce,
+ // Unused = 0xcf
+ // Unused = 0xd0
+ I64x2Sub = 0xd1,
+ // Unused = 0xd2
+ // Unused = 0xd3
+ // Unused = 0xd4
+ I64x2Mul = 0xd5,
+ I64x2Eq = 0xd6,
+ I64x2Ne = 0xd7,
+ I64x2LtS = 0xd8,
+ I64x2GtS = 0xd9,
+ I64x2LeS = 0xda,
+ I64x2GeS = 0xdb,
+ I64x2ExtmulLowI32x4S = 0xdc,
+ I64x2ExtmulHighI32x4S = 0xdd,
+ I64x2ExtmulLowI32x4U = 0xde,
+ I64x2ExtmulHighI32x4U = 0xdf,
+ F32x4Abs = 0xe0,
+ F32x4Neg = 0xe1,
+ // Unused = 0xe2
+ F32x4Sqrt = 0xe3,
+ F32x4Add = 0xe4,
+ F32x4Sub = 0xe5,
+ F32x4Mul = 0xe6,
+ F32x4Div = 0xe7,
+ F32x4Min = 0xe8,
+ F32x4Max = 0xe9,
+ F32x4PMin = 0xea,
+ F32x4PMax = 0xeb,
+ F64x2Abs = 0xec,
+ F64x2Neg = 0xed,
+ // Unused = 0xee
+ F64x2Sqrt = 0xef,
+ F64x2Add = 0xf0,
+ F64x2Sub = 0xf1,
+ F64x2Mul = 0xf2,
+ F64x2Div = 0xf3,
+ F64x2Min = 0xf4,
+ F64x2Max = 0xf5,
+ F64x2PMin = 0xf6,
+ F64x2PMax = 0xf7,
+ I32x4TruncSatF32x4S = 0xf8,
+ I32x4TruncSatF32x4U = 0xf9,
+ F32x4ConvertI32x4S = 0xfa,
+ F32x4ConvertI32x4U = 0xfb,
+ I32x4TruncSatF64x2SZero = 0xfc,
+ I32x4TruncSatF64x2UZero = 0xfd,
+ F64x2ConvertLowI32x4S = 0xfe,
+ F64x2ConvertLowI32x4U = 0xff,
+ I8x16RelaxedSwizzle = 0x100,
+ I32x4RelaxedTruncF32x4S = 0x101,
+ I32x4RelaxedTruncF32x4U = 0x102,
+ I32x4RelaxedTruncF64x2SZero = 0x103,
+ I32x4RelaxedTruncF64x2UZero = 0x104,
+ F32x4RelaxedFma = 0x105,
+ F32x4RelaxedFnma = 0x106,
+ F64x2RelaxedFma = 0x107,
+ F64x2RelaxedFnma = 0x108,
+ I8x16RelaxedLaneSelect = 0x109,
+ I16x8RelaxedLaneSelect = 0x10a,
+ I32x4RelaxedLaneSelect = 0x10b,
+ I64x2RelaxedLaneSelect = 0x10c,
+ F32x4RelaxedMin = 0x10d,
+ F32x4RelaxedMax = 0x10e,
+ F64x2RelaxedMin = 0x10f,
+ F64x2RelaxedMax = 0x110,
+ I16x8RelaxedQ15MulrS = 0x111,
+ I16x8DotI8x16I7x16S = 0x112,
+ I32x4DotI8x16I7x16AddS = 0x113,
+
+ // Reserved for Relaxed SIMD = 0x114-0x12f
+
+ // Unused = 0x130 and up
+
+ // Mozilla extensions
+ MozPMADDUBSW = 0x201,
+
+ Limit
+};
+
+// Opcodes in the "miscellaneous" opcode space.
+enum class MiscOp {
+ // Saturating float-to-int conversions
+ I32TruncSatF32S = 0x00,
+ I32TruncSatF32U = 0x01,
+ I32TruncSatF64S = 0x02,
+ I32TruncSatF64U = 0x03,
+ I64TruncSatF32S = 0x04,
+ I64TruncSatF32U = 0x05,
+ I64TruncSatF64S = 0x06,
+ I64TruncSatF64U = 0x07,
+
+ // Bulk memory operations, per proposal as of February 2019.
+ MemoryInit = 0x08,
+ DataDrop = 0x09,
+ MemoryCopy = 0x0a,
+ MemoryFill = 0x0b,
+ TableInit = 0x0c,
+ ElemDrop = 0x0d,
+ TableCopy = 0x0e,
+
+ // Reftypes, per proposal as of February 2019.
+ TableGrow = 0x0f,
+ TableSize = 0x10,
+ TableFill = 0x11,
+
+ MemoryDiscard = 0x12,
+
+ Limit
+};
+
+// Opcodes from threads proposal as of June 30, 2017
+enum class ThreadOp {
+ // Wait and wake
+ Wake = 0x00,
+ I32Wait = 0x01,
+ I64Wait = 0x02,
+ Fence = 0x03,
+
+ // Load and store
+ I32AtomicLoad = 0x10,
+ I64AtomicLoad = 0x11,
+ I32AtomicLoad8U = 0x12,
+ I32AtomicLoad16U = 0x13,
+ I64AtomicLoad8U = 0x14,
+ I64AtomicLoad16U = 0x15,
+ I64AtomicLoad32U = 0x16,
+ I32AtomicStore = 0x17,
+ I64AtomicStore = 0x18,
+ I32AtomicStore8U = 0x19,
+ I32AtomicStore16U = 0x1a,
+ I64AtomicStore8U = 0x1b,
+ I64AtomicStore16U = 0x1c,
+ I64AtomicStore32U = 0x1d,
+
+ // Read-modify-write operations
+ I32AtomicAdd = 0x1e,
+ I64AtomicAdd = 0x1f,
+ I32AtomicAdd8U = 0x20,
+ I32AtomicAdd16U = 0x21,
+ I64AtomicAdd8U = 0x22,
+ I64AtomicAdd16U = 0x23,
+ I64AtomicAdd32U = 0x24,
+
+ I32AtomicSub = 0x25,
+ I64AtomicSub = 0x26,
+ I32AtomicSub8U = 0x27,
+ I32AtomicSub16U = 0x28,
+ I64AtomicSub8U = 0x29,
+ I64AtomicSub16U = 0x2a,
+ I64AtomicSub32U = 0x2b,
+
+ I32AtomicAnd = 0x2c,
+ I64AtomicAnd = 0x2d,
+ I32AtomicAnd8U = 0x2e,
+ I32AtomicAnd16U = 0x2f,
+ I64AtomicAnd8U = 0x30,
+ I64AtomicAnd16U = 0x31,
+ I64AtomicAnd32U = 0x32,
+
+ I32AtomicOr = 0x33,
+ I64AtomicOr = 0x34,
+ I32AtomicOr8U = 0x35,
+ I32AtomicOr16U = 0x36,
+ I64AtomicOr8U = 0x37,
+ I64AtomicOr16U = 0x38,
+ I64AtomicOr32U = 0x39,
+
+ I32AtomicXor = 0x3a,
+ I64AtomicXor = 0x3b,
+ I32AtomicXor8U = 0x3c,
+ I32AtomicXor16U = 0x3d,
+ I64AtomicXor8U = 0x3e,
+ I64AtomicXor16U = 0x3f,
+ I64AtomicXor32U = 0x40,
+
+ I32AtomicXchg = 0x41,
+ I64AtomicXchg = 0x42,
+ I32AtomicXchg8U = 0x43,
+ I32AtomicXchg16U = 0x44,
+ I64AtomicXchg8U = 0x45,
+ I64AtomicXchg16U = 0x46,
+ I64AtomicXchg32U = 0x47,
+
+ // CompareExchange
+ I32AtomicCmpXchg = 0x48,
+ I64AtomicCmpXchg = 0x49,
+ I32AtomicCmpXchg8U = 0x4a,
+ I32AtomicCmpXchg16U = 0x4b,
+ I64AtomicCmpXchg8U = 0x4c,
+ I64AtomicCmpXchg16U = 0x4d,
+ I64AtomicCmpXchg32U = 0x4e,
+
+ Limit
+};
+
+enum class IntrinsicId {
+// ------------------------------------------------------------------------
+// These are part/suffix of the MozOp::Intrinsic operators that are emitted
+// internally when compiling intrinsic modules and are rejected by wasm
+// validation.
+// See wasm/WasmIntrinsic.yaml for the list.
+#define DECL_INTRINSIC_OP(op, export, sa_name, abitype, entry, idx) \
+ op = idx, // NOLINT
+ FOR_EACH_INTRINSIC(DECL_INTRINSIC_OP)
+#undef DECL_INTRINSIC_OP
+
+ // Op limit.
+ Limit
+};
+
+enum class MozOp {
+ // ------------------------------------------------------------------------
+ // These operators are emitted internally when compiling asm.js and are
+ // rejected by wasm validation. They are prefixed by MozPrefix.
+
+ // asm.js-specific operators. They start at 1 so as to check for
+ // uninitialized (zeroed) storage.
+ TeeGlobal = 0x01,
+ I32Min,
+ I32Max,
+ I32Neg,
+ I32BitNot,
+ I32Abs,
+ F32TeeStoreF64,
+ F64TeeStoreF32,
+ I32TeeStore8,
+ I32TeeStore16,
+ I64TeeStore8,
+ I64TeeStore16,
+ I64TeeStore32,
+ I32TeeStore,
+ I64TeeStore,
+ F32TeeStore,
+ F64TeeStore,
+ F64Mod,
+ F64SinNative,
+ F64SinFdlibm,
+ F64CosNative,
+ F64CosFdlibm,
+ F64TanNative,
+ F64TanFdlibm,
+ F64Asin,
+ F64Acos,
+ F64Atan,
+ F64Exp,
+ F64Log,
+ F64Pow,
+ F64Atan2,
+
+ // asm.js-style call_indirect with the callee evaluated first.
+ OldCallDirect,
+ OldCallIndirect,
+
+  // Intrinsic module operations. The operator takes a LEB u32 argument that
+  // specifies the particular operation id. See IntrinsicId above.
+ Intrinsic,
+
+ Limit
+};
+
+struct OpBytes {
+ // b0 is a byte value but has a 16-bit representation to allow for a full
+ // 256-value range plus a sentinel Limit value.
+ uint16_t b0;
+ // b1 is a LEB128 value but 32 bits is enough for now.
+ uint32_t b1;
+
+ explicit OpBytes(Op x) {
+ b0 = uint16_t(x);
+ b1 = 0;
+ }
+ OpBytes() = default;
+
+ // Whether this opcode should have a breakpoint site inserted directly before
+ // the opcode in baseline when debugging. We use this as a heuristic to
+ // reduce the number of breakpoint sites.
+ bool shouldHaveBreakpoint() const {
+ switch (Op(b0)) {
+      // Block-like instructions don't get their own breakpoint site; a
+      // breakpoint can be used on the instructions in the block.
+ case Op::Block:
+ case Op::Loop:
+ case Op::If:
+ case Op::Else:
+ case Op::Try:
+ case Op::Delegate:
+ case Op::Catch:
+ case Op::CatchAll:
+ case Op::End:
+      // Effect-free instructions without inputs are leaf nodes in
+      // expressions; a breakpoint can be used on the instructions that
+      // consume their values.
+ case Op::LocalGet:
+ case Op::GlobalGet:
+ case Op::I32Const:
+ case Op::I64Const:
+ case Op::F32Const:
+ case Op::F64Const:
+ case Op::RefNull:
+ case Op::Drop:
+ return false;
+ default:
+ return true;
+ }
+ }
+};
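+
+// For example (an illustration of the prefix scheme assumed here, not a
+// specification of the decoder): a plain opcode and a prefixed one could be
+// represented as
+//
+//   OpBytes add(Op::I32Add);                 // b0 = 0x6a, b1 = 0
+//   OpBytes copy;
+//   copy.b0 = uint16_t(Op::MiscPrefix);      // 0xfc
+//   copy.b1 = uint32_t(MiscOp::MemoryCopy);  // 0x0a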
+
+static const char NameSectionName[] = "name";
+static const char SourceMappingURLSectionName[] = "sourceMappingURL";
+
+enum class NameType { Module = 0, Function = 1, Local = 2 };
+
+enum class FieldFlags { Mutable = 0x01, AllowedMask = 0x01 };
+
+enum class FieldWideningOp { None, Signed, Unsigned };
+
+// The WebAssembly spec hard-codes the virtual page size to be 64KiB and
+// requires the size of linear memory to always be a multiple of 64KiB.
+
+static const unsigned PageSize = 64 * 1024;
+static const unsigned PageBits = 16;
+static_assert(PageSize == (1u << PageBits));
+
+static const unsigned PageMask = ((1u << PageBits) - 1);
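+
+// For example (illustration only): a byte offset n = 3 * PageSize + 12 has
+// n >> PageBits == 3 (its page index) and n & PageMask == 12 (its offset
+// within that page).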
+
+// These limits are agreed upon with other engines for consistency.
+
+static const unsigned MaxTypes = 1000000;
+static const unsigned MaxFuncs = 1000000;
+static const unsigned MaxTables = 100000;
+static const unsigned MaxImports = 100000;
+static const unsigned MaxExports = 100000;
+static const unsigned MaxGlobals = 1000000;
+static const unsigned MaxDataSegments = 100000;
+static const unsigned MaxDataSegmentLengthPages = 16384;
+static const unsigned MaxElemSegments = 10000000;
+static const unsigned MaxElemSegmentLength = 10000000;
+static const unsigned MaxTableLimitField = UINT32_MAX;
+static const unsigned MaxTableLength = 10000000;
+static const unsigned MaxLocals = 50000;
+static const unsigned MaxParams = 1000;
+static const unsigned MaxResults = 1000;
+static const unsigned MaxStructFields = 2000;
+static const uint64_t MaxMemory32LimitField = uint64_t(1) << 16;
+static const uint64_t MaxMemory64LimitField = uint64_t(1) << 48;
+static const unsigned MaxStringBytes = 100000;
+static const unsigned MaxModuleBytes = 1024 * 1024 * 1024;
+static const unsigned MaxFunctionBytes = 7654321;
+
+// These limits pertain to our WebAssembly implementation only, but may make
+// sense to get into the shared limits spec eventually.
+
+static const unsigned MaxRecGroups = 1000000;
+static const unsigned MaxSubTypingDepth = 31;
+static const unsigned MaxTags = 1000000;
+
+// Maximum payload size, in bytes, of a gc-proposal Array. This puts it fairly
+// close to 2^31 without exposing us to potential danger at the signed-i32
+// wraparound boundary. Note that gc-proposal Struct sizes are limited by
+// MaxStructFields above. Some code assumes that the payload size will fit in
+// a uint32_t, hence the static assert.
+static const unsigned MaxArrayPayloadBytes = 1987654321;
+static_assert(uint64_t(MaxArrayPayloadBytes) <
+ (uint64_t(1) << (8 * sizeof(uint32_t))));
+
+// These limits pertain to our WebAssembly implementation only.
+
+static const unsigned MaxBrTableElems = 1000000;
+static const unsigned MaxCodeSectionBytes = MaxModuleBytes;
+
+// 512KiB should be enough, considering how Rabaldr uses the stack and
+// what the standard limits are:
+//
+// - 1,000 parameters
+// - 50,000 locals
+// - 10,000 values on the eval stack (not an official limit)
+//
+// At sizeof(int64) bytes per slot this works out to about 480KiB.
+
+static const unsigned MaxFrameSize = 512 * 1024;
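+
+// (Arithmetic behind the estimate above: (1,000 + 50,000 + 10,000) slots at
+// 8 bytes per slot is 488,000 bytes, comfortably under the 524,288-byte
+// limit.)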
+
+// Asserted by Decoder::readVarU32.
+
+static const unsigned MaxVarU32DecodedBytes = 5;
+
+// The CompileMode controls how compilation of a module is performed (notably,
+// how many times we compile it).
+
+enum class CompileMode { Once, Tier1, Tier2 };
+
+// Typed enum for whether debugging is enabled.
+
+enum class DebugEnabled { False, True };
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_constants_h
diff --git a/js/src/wasm/WasmContext.h b/js/src/wasm/WasmContext.h
new file mode 100644
index 0000000000..5064401a81
--- /dev/null
+++ b/js/src/wasm/WasmContext.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2020 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_context_h
+#define wasm_context_h
+
+namespace js {
+namespace wasm {
+
+// wasm::Context lives in JSContext and contains the wasm-related per-context
+// state.
+
+class Context {
+ public:
+ Context() : triedToInstallSignalHandlers(false), haveSignalHandlers(false) {}
+
+ // Used by wasm::EnsureThreadSignalHandlers(cx) to install thread signal
+ // handlers once per JSContext/thread.
+ bool triedToInstallSignalHandlers;
+ bool haveSignalHandlers;
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_context_h
diff --git a/js/src/wasm/WasmDebug.cpp b/js/src/wasm/WasmDebug.cpp
new file mode 100644
index 0000000000..66a59e826e
--- /dev/null
+++ b/js/src/wasm/WasmDebug.cpp
@@ -0,0 +1,528 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmDebug.h"
+
+#include "mozilla/BinarySearch.h"
+
+#include "debugger/Debugger.h"
+#include "ds/Sort.h"
+#include "jit/MacroAssembler.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+#include "gc/GCContext-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::BinarySearchIf;
+
+DebugState::DebugState(const Code& code, const Module& module)
+ : code_(&code),
+ module_(&module),
+ enterFrameTrapsEnabled_(false),
+ enterAndLeaveFrameTrapsCounter_(0) {
+ MOZ_RELEASE_ASSERT(code.metadata().debugEnabled);
+ MOZ_RELEASE_ASSERT(code.hasTier(Tier::Debug));
+}
+
+void DebugState::trace(JSTracer* trc) {
+ for (auto iter = breakpointSites_.iter(); !iter.done(); iter.next()) {
+ WasmBreakpointSite* site = iter.get().value();
+ site->trace(trc);
+ }
+}
+
+void DebugState::finalize(JS::GCContext* gcx) {
+ for (auto iter = breakpointSites_.iter(); !iter.done(); iter.next()) {
+ WasmBreakpointSite* site = iter.get().value();
+ site->delete_(gcx);
+ }
+}
+
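+// The debugger presents wasm bytecode offsets as "line numbers"; every such
+// location reports this fixed column number.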
+static const uint32_t DefaultBinarySourceColumnNumber = 1;
+
+static const CallSite* SlowCallSiteSearchByOffset(const MetadataTier& metadata,
+ uint32_t offset) {
+ for (const CallSite& callSite : metadata.callSites) {
+ if (callSite.lineOrBytecode() == offset &&
+ callSite.kind() == CallSiteDesc::Breakpoint) {
+ return &callSite;
+ }
+ }
+ return nullptr;
+}
+
+bool DebugState::getLineOffsets(size_t lineno, Vector<uint32_t>* offsets) {
+ const CallSite* callsite =
+ SlowCallSiteSearchByOffset(metadata(Tier::Debug), lineno);
+ return !(callsite && !offsets->append(lineno));
+}
+
+bool DebugState::getAllColumnOffsets(Vector<ExprLoc>* offsets) {
+ for (const CallSite& callSite : metadata(Tier::Debug).callSites) {
+ if (callSite.kind() != CallSite::Breakpoint) {
+ continue;
+ }
+ uint32_t offset = callSite.lineOrBytecode();
+ if (!offsets->emplaceBack(offset, DefaultBinarySourceColumnNumber,
+ offset)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool DebugState::getOffsetLocation(uint32_t offset, size_t* lineno,
+ size_t* column) {
+ if (!SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset)) {
+ return false;
+ }
+ *lineno = offset;
+ *column = DefaultBinarySourceColumnNumber;
+ return true;
+}
+
+bool DebugState::stepModeEnabled(uint32_t funcIndex) const {
+ return stepperCounters_.lookup(funcIndex).found();
+}
+
+bool DebugState::incrementStepperCount(JSContext* cx, Instance* instance,
+ uint32_t funcIndex) {
+ StepperCounters::AddPtr p = stepperCounters_.lookupForAdd(funcIndex);
+ if (p) {
+ MOZ_ASSERT(p->value() > 0);
+ p->value()++;
+ return true;
+ }
+
+ if (!stepperCounters_.add(p, funcIndex, 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ enableDebuggingForFunction(instance, funcIndex);
+ enableDebugTrap(instance);
+
+ return true;
+}
+
+void DebugState::decrementStepperCount(JS::GCContext* gcx, Instance* instance,
+ uint32_t funcIndex) {
+ const CodeRange& codeRange =
+ codeRanges(Tier::Debug)[funcToCodeRangeIndex(funcIndex)];
+ MOZ_ASSERT(codeRange.isFunction());
+
+ MOZ_ASSERT(!stepperCounters_.empty());
+ StepperCounters::Ptr p = stepperCounters_.lookup(funcIndex);
+ MOZ_ASSERT(p);
+ if (--p->value()) {
+ return;
+ }
+
+ stepperCounters_.remove(p);
+
+ bool anyStepping = !stepperCounters_.empty();
+ bool anyBreakpoints = !breakpointSites_.empty();
+ bool anyEnterAndLeave = enterAndLeaveFrameTrapsCounter_ > 0;
+
+ bool keepDebugging = false;
+ for (const CallSite& callSite : callSites(Tier::Debug)) {
+ if (callSite.kind() != CallSite::Breakpoint) {
+ continue;
+ }
+ uint32_t offset = callSite.returnAddressOffset();
+ if (codeRange.begin() <= offset && offset <= codeRange.end()) {
+ keepDebugging = keepDebugging || breakpointSites_.has(offset);
+ }
+ }
+
+ if (!keepDebugging && !anyEnterAndLeave) {
+ disableDebuggingForFunction(instance, funcIndex);
+ if (!anyStepping && !anyBreakpoints) {
+ disableDebugTrap(instance);
+ }
+ }
+}
+
+bool DebugState::hasBreakpointTrapAtOffset(uint32_t offset) {
+ return SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset);
+}
+
+void DebugState::toggleBreakpointTrap(JSRuntime* rt, Instance* instance,
+ uint32_t offset, bool enabled) {
+ const CallSite* callSite =
+ SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset);
+ if (!callSite) {
+ return;
+ }
+ size_t debugTrapOffset = callSite->returnAddressOffset();
+
+ const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
+ const CodeRange* codeRange =
+ code_->lookupFuncRange(codeSegment.base() + debugTrapOffset);
+ MOZ_ASSERT(codeRange);
+
+ uint32_t funcIndex = codeRange->funcIndex();
+ if (stepperCounters_.lookup(funcIndex)) {
+ return; // no need to toggle when step mode is enabled
+ }
+
+ bool anyEnterAndLeave = enterAndLeaveFrameTrapsCounter_ > 0;
+ bool anyStepping = !stepperCounters_.empty();
+ bool anyBreakpoints = !breakpointSites_.empty();
+
+ if (enabled) {
+ enableDebuggingForFunction(instance, funcIndex);
+ enableDebugTrap(instance);
+ } else if (!anyEnterAndLeave) {
+ disableDebuggingForFunction(instance, funcIndex);
+ if (!anyStepping && !anyBreakpoints) {
+ disableDebugTrap(instance);
+ }
+ }
+}
+
+WasmBreakpointSite* DebugState::getBreakpointSite(uint32_t offset) const {
+ WasmBreakpointSiteMap::Ptr p = breakpointSites_.lookup(offset);
+ if (!p) {
+ return nullptr;
+ }
+
+ return p->value();
+}
+
+WasmBreakpointSite* DebugState::getOrCreateBreakpointSite(JSContext* cx,
+ Instance* instance,
+ uint32_t offset) {
+ WasmBreakpointSite* site;
+
+ WasmBreakpointSiteMap::AddPtr p = breakpointSites_.lookupForAdd(offset);
+ if (!p) {
+ site = cx->new_<WasmBreakpointSite>(instance->object(), offset);
+ if (!site) {
+ return nullptr;
+ }
+
+ if (!breakpointSites_.add(p, offset, site)) {
+ js_delete(site);
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ AddCellMemory(instance->object(), sizeof(WasmBreakpointSite),
+ MemoryUse::BreakpointSite);
+
+ toggleBreakpointTrap(cx->runtime(), instance, offset, true);
+ } else {
+ site = p->value();
+ }
+ return site;
+}
+
+bool DebugState::hasBreakpointSite(uint32_t offset) {
+ return breakpointSites_.has(offset);
+}
+
+void DebugState::destroyBreakpointSite(JS::GCContext* gcx, Instance* instance,
+ uint32_t offset) {
+ WasmBreakpointSiteMap::Ptr p = breakpointSites_.lookup(offset);
+ MOZ_ASSERT(p);
+ gcx->delete_(instance->objectUnbarriered(), p->value(),
+ MemoryUse::BreakpointSite);
+ breakpointSites_.remove(p);
+ toggleBreakpointTrap(gcx->runtime(), instance, offset, false);
+}
+
+void DebugState::clearBreakpointsIn(JS::GCContext* gcx,
+ WasmInstanceObject* instance,
+ js::Debugger* dbg, JSObject* handler) {
+ MOZ_ASSERT(instance);
+
+ // Breakpoints hold wrappers in the instance's compartment for the handler.
+ // Make sure we don't try to search for the unwrapped handler.
+ MOZ_ASSERT_IF(handler, instance->compartment() == handler->compartment());
+
+ if (breakpointSites_.empty()) {
+ return;
+ }
+ for (WasmBreakpointSiteMap::Enum e(breakpointSites_); !e.empty();
+ e.popFront()) {
+ WasmBreakpointSite* site = e.front().value();
+ MOZ_ASSERT(site->instanceObject == instance);
+
+ Breakpoint* nextbp;
+ for (Breakpoint* bp = site->firstBreakpoint(); bp; bp = nextbp) {
+ nextbp = bp->nextInSite();
+ MOZ_ASSERT(bp->site == site);
+ if ((!dbg || bp->debugger == dbg) &&
+ (!handler || bp->getHandler() == handler)) {
+ bp->delete_(gcx);
+ }
+ }
+ if (site->isEmpty()) {
+ gcx->delete_(instance, site, MemoryUse::BreakpointSite);
+ e.removeFront();
+ }
+ }
+}
+
+void DebugState::enableDebuggingForFunction(Instance* instance,
+ uint32_t funcIndex) {
+ instance->setDebugFilter(funcIndex, true);
+}
+
+void DebugState::disableDebuggingForFunction(Instance* instance,
+ uint32_t funcIndex) {
+ instance->setDebugFilter(funcIndex, false);
+}
+
+void DebugState::enableDebugTrap(Instance* instance) {
+ instance->setDebugTrapHandler(code_->segment(Tier::Debug).base() +
+ metadata(Tier::Debug).debugTrapOffset);
+}
+
+void DebugState::disableDebugTrap(Instance* instance) {
+ instance->setDebugTrapHandler(nullptr);
+}
+
+void DebugState::adjustEnterAndLeaveFrameTrapsState(JSContext* cx,
+ Instance* instance,
+ bool enabled) {
+ MOZ_ASSERT_IF(!enabled, enterAndLeaveFrameTrapsCounter_ > 0);
+
+ bool wasEnabled = enterAndLeaveFrameTrapsCounter_ > 0;
+ enterAndLeaveFrameTrapsCounter_ += enabled ? 1 : -1;
+ bool stillEnabled = enterAndLeaveFrameTrapsCounter_ > 0;
+ if (wasEnabled == stillEnabled) {
+ return;
+ }
+
+ MOZ_RELEASE_ASSERT(&instance->metadata() == &metadata());
+ uint32_t numFuncs = metadata().debugNumFuncs();
+ if (enabled) {
+ MOZ_ASSERT(enterAndLeaveFrameTrapsCounter_ > 0);
+ for (uint32_t funcIdx = 0; funcIdx < numFuncs; funcIdx++) {
+ enableDebuggingForFunction(instance, funcIdx);
+ }
+ enableDebugTrap(instance);
+ } else {
+ MOZ_ASSERT(enterAndLeaveFrameTrapsCounter_ == 0);
+ bool anyEnabled = false;
+ for (uint32_t funcIdx = 0; funcIdx < numFuncs; funcIdx++) {
+ // For each function, disable the bit if nothing else is going on. This
+ // means determining if there's stepping or breakpoints.
+ bool mustLeaveEnabled = stepperCounters_.lookup(funcIdx).found();
+ for (auto iter = breakpointSites_.iter();
+ !iter.done() && !mustLeaveEnabled; iter.next()) {
+ WasmBreakpointSite* site = iter.get().value();
+ const CallSite* callSite =
+ SlowCallSiteSearchByOffset(metadata(Tier::Debug), site->offset);
+ if (callSite) {
+ size_t debugTrapOffset = callSite->returnAddressOffset();
+ const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
+ const CodeRange* codeRange =
+ code_->lookupFuncRange(codeSegment.base() + debugTrapOffset);
+ MOZ_ASSERT(codeRange);
+ mustLeaveEnabled = codeRange->funcIndex() == funcIdx;
+ }
+ }
+ if (mustLeaveEnabled) {
+ anyEnabled = true;
+ } else {
+ disableDebuggingForFunction(instance, funcIdx);
+ }
+ }
+ if (!anyEnabled) {
+ disableDebugTrap(instance);
+ }
+ }
+}
+
+void DebugState::ensureEnterFrameTrapsState(JSContext* cx, Instance* instance,
+ bool enabled) {
+ if (enterFrameTrapsEnabled_ == enabled) {
+ return;
+ }
+
+ adjustEnterAndLeaveFrameTrapsState(cx, instance, enabled);
+
+ enterFrameTrapsEnabled_ = enabled;
+}
+
+bool DebugState::debugGetLocalTypes(uint32_t funcIndex, ValTypeVector* locals,
+ size_t* argsLength,
+ StackResults* stackResults) {
+ const TypeContext& types = *metadata().types;
+ const FuncType& funcType = metadata().debugFuncType(funcIndex);
+ const ValTypeVector& args = funcType.args();
+ const ValTypeVector& results = funcType.results();
+ ResultType resultType(ResultType::Vector(results));
+ *argsLength = args.length();
+ *stackResults = ABIResultIter::HasStackResults(resultType)
+ ? StackResults::HasStackResults
+ : StackResults::NoStackResults;
+ if (!locals->appendAll(args)) {
+ return false;
+ }
+
+ // Decode local var types from wasm binary function body.
+ const CodeRange& range =
+ codeRanges(Tier::Debug)[funcToCodeRangeIndex(funcIndex)];
+ // In wasm, the Code points to the function start via funcLineOrBytecode.
+ size_t offsetInModule = range.funcLineOrBytecode();
+ Decoder d(bytecode().begin() + offsetInModule, bytecode().end(),
+ offsetInModule,
+ /* error = */ nullptr);
+ return DecodeValidatedLocalEntries(types, d, locals);
+}
+
+bool DebugState::getGlobal(Instance& instance, uint32_t globalIndex,
+ MutableHandleValue vp) {
+ const GlobalDesc& global = metadata().globals[globalIndex];
+
+ if (global.isConstant()) {
+ LitVal value = global.constantValue();
+ switch (value.type().kind()) {
+ case ValType::I32:
+ vp.set(Int32Value(value.i32()));
+ break;
+ case ValType::I64:
+ // Just display as a Number; it's ok if we lose some precision
+ vp.set(NumberValue((double)value.i64()));
+ break;
+ case ValType::F32:
+ vp.set(NumberValue(JS::CanonicalizeNaN(value.f32())));
+ break;
+ case ValType::F64:
+ vp.set(NumberValue(JS::CanonicalizeNaN(value.f64())));
+ break;
+ case ValType::Ref:
+ // It's possible to do better. We could try some kind of hashing
+ // scheme, to make the pointer recognizable without revealing it.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ case ValType::V128:
+ // Debugger must be updated to handle this, and should be updated to
+ // handle i64 in any case.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ default:
+ MOZ_CRASH("Global constant type");
+ }
+ return true;
+ }
+
+ void* dataPtr = instance.data() + global.offset();
+ if (global.isIndirect()) {
+ dataPtr = *static_cast<void**>(dataPtr);
+ }
+ switch (global.type().kind()) {
+ case ValType::I32: {
+ vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
+ break;
+ }
+ case ValType::I64: {
+ // Just display as a Number; it's ok if we lose some precision
+ vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
+ break;
+ }
+ case ValType::F32: {
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
+ break;
+ }
+ case ValType::F64: {
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
+ break;
+ }
+ case ValType::Ref: {
+ // Just hide it. See above.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ }
+ case ValType::V128: {
+ // Just hide it. See above.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ }
+ default: {
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ }
+ return true;
+}
+
+bool DebugState::getSourceMappingURL(JSContext* cx,
+ MutableHandleString result) const {
+ result.set(nullptr);
+
+ for (const CustomSection& customSection : module_->customSections()) {
+ const Bytes& sectionName = customSection.name;
+ if (strlen(SourceMappingURLSectionName) != sectionName.length() ||
+ memcmp(SourceMappingURLSectionName, sectionName.begin(),
+ sectionName.length()) != 0) {
+ continue;
+ }
+
+    // Parse the "sourceMappingURL" custom section we just found.
+ Decoder d(customSection.payload->begin(), customSection.payload->end(), 0,
+ /* error = */ nullptr);
+ uint32_t nchars;
+ if (!d.readVarU32(&nchars)) {
+ return true; // ignoring invalid section data
+ }
+ const uint8_t* chars;
+ if (!d.readBytes(nchars, &chars) || d.currentPosition() != d.end()) {
+ return true; // ignoring invalid section data
+ }
+
+ JS::UTF8Chars utf8Chars(reinterpret_cast<const char*>(chars), nchars);
+ JSString* str = JS_NewStringCopyUTF8N(cx, utf8Chars);
+ if (!str) {
+ return false;
+ }
+ result.set(str);
+ return true;
+ }
+
+ // Check presence of "SourceMap:" HTTP response header.
+ char* sourceMapURL = metadata().sourceMapURL.get();
+ if (sourceMapURL && strlen(sourceMapURL)) {
+ JS::UTF8Chars utf8Chars(sourceMapURL, strlen(sourceMapURL));
+ JSString* str = JS_NewStringCopyUTF8N(cx, utf8Chars);
+ if (!str) {
+ return false;
+ }
+ result.set(str);
+ }
+ return true;
+}
+
+void DebugState::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const {
+ code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+ module_->addSizeOfMisc(mallocSizeOf, seenMetadata, seenCode, code, data);
+}
diff --git a/js/src/wasm/WasmDebug.h b/js/src/wasm/WasmDebug.h
new file mode 100644
index 0000000000..35c8a838ed
--- /dev/null
+++ b/js/src/wasm/WasmDebug.h
@@ -0,0 +1,187 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_debug_h
+#define wasm_debug_h
+
+#include "js/HashTable.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmExprType.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmValType.h"
+
+namespace js {
+
+class Debugger;
+class WasmBreakpointSite;
+class WasmInstanceObject;
+
+namespace wasm {
+
+struct MetadataTier;
+
+// The generated source location for the AST node/expression. The offset field
+// refers to an offset in the binary format file.
+
+struct ExprLoc {
+ uint32_t lineno;
+ uint32_t column;
+ uint32_t offset;
+ ExprLoc() : lineno(0), column(0), offset(0) {}
+ ExprLoc(uint32_t lineno_, uint32_t column_, uint32_t offset_)
+ : lineno(lineno_), column(column_), offset(offset_) {}
+};
+
+using StepperCounters =
+ HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
+using WasmBreakpointSiteMap =
+ HashMap<uint32_t, WasmBreakpointSite*, DefaultHasher<uint32_t>,
+ SystemAllocPolicy>;
+
+/*
+ * [SMDOC] Wasm debug traps
+ *
+ * There is a single debug trap handler for the process, WasmHandleDebugTrap in
+ * WasmBuiltins.cpp. That function is invoked through the Debug Trap Stub,
+ * generated by GenerateDebugTrapStub in WasmStubs.cpp. When any function in an
+ * instance needs to trap for any reason (enter frame, leave frame, breakpoint,
+ * or single-stepping), a pointer to the Debug Trap Stub is installed in the
+ * Instance. Debug-enabled code will look for this pointer and call it if it is
+ * not null.
+ *
+ * WasmHandleDebugTrap may therefore be called very frequently when any function
+ * in the instance is being debugged, and must filter the trap against the
+ * tables in the DebugState. It can make use of the return address for the
+ * call, which identifies the site uniquely.
+ *
+ * In order to greatly reduce the frequency of calls to the Debug Trap Stub, an
+ * array of flag bits, one per function, is attached to the instance. The code
+ * at the breakable point calls a shared stub within the function containing the
+ * breakable point to check whether the bit is set for the function. If it is
+ * not set, the stub can return to its caller immediately; if the bit is set,
+ * the stub will jump to the installed Debug Trap Stub.
+ */
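+
+// A rough sketch of the flow described above (the names are illustrative, not
+// the actual emitted code): each breakable point calls a small per-function
+// stub that does, in effect,
+//
+//   if (!debugFilterBitIsSet(instance, funcIndex)) {
+//     return;                         // common case: nothing to do
+//   }
+//   jumpToInstalledDebugTrapStub();   // ends up in WasmHandleDebugTrap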
+
+class DebugState {
+ const SharedCode code_;
+ const SharedModule module_;
+
+ // State maintained when debugging is enabled.
+
+ bool enterFrameTrapsEnabled_;
+ uint32_t enterAndLeaveFrameTrapsCounter_;
+ WasmBreakpointSiteMap breakpointSites_;
+ StepperCounters stepperCounters_;
+
+ void enableDebuggingForFunction(Instance* instance, uint32_t funcIndex);
+ void disableDebuggingForFunction(Instance* instance, uint32_t funcIndex);
+ void enableDebugTrap(Instance* instance);
+ void disableDebugTrap(Instance* instance);
+
+ public:
+ DebugState(const Code& code, const Module& module);
+
+ void trace(JSTracer* trc);
+ void finalize(JS::GCContext* gcx);
+
+ const Bytes& bytecode() const { return module_->debugBytecode(); }
+
+ [[nodiscard]] bool getLineOffsets(size_t lineno, Vector<uint32_t>* offsets);
+ [[nodiscard]] bool getAllColumnOffsets(Vector<ExprLoc>* offsets);
+ [[nodiscard]] bool getOffsetLocation(uint32_t offset, size_t* lineno,
+ size_t* column);
+
+  // The Code can track enter/leave frame events. Any such event triggers a
+  // debug trap. The enter/leave frame events are enabled or disabled across
+  // all functions at once.
+
+ void adjustEnterAndLeaveFrameTrapsState(JSContext* cx, Instance* instance,
+ bool enabled);
+ void ensureEnterFrameTrapsState(JSContext* cx, Instance* instance,
+ bool enabled);
+ bool enterFrameTrapsEnabled() const { return enterFrameTrapsEnabled_; }
+
+ // When the Code is debugEnabled, individual breakpoints can be enabled or
+ // disabled at instruction offsets.
+
+ bool hasBreakpointTrapAtOffset(uint32_t offset);
+ void toggleBreakpointTrap(JSRuntime* rt, Instance* instance, uint32_t offset,
+ bool enabled);
+ WasmBreakpointSite* getBreakpointSite(uint32_t offset) const;
+ WasmBreakpointSite* getOrCreateBreakpointSite(JSContext* cx,
+ Instance* instance,
+ uint32_t offset);
+ bool hasBreakpointSite(uint32_t offset);
+ void destroyBreakpointSite(JS::GCContext* gcx, Instance* instance,
+ uint32_t offset);
+ void clearBreakpointsIn(JS::GCContext* gcx, WasmInstanceObject* instance,
+ js::Debugger* dbg, JSObject* handler);
+
+  // When the Code is debug-enabled, single-stepping mode can be toggled at
+  // the granularity of individual functions.
+
+ bool stepModeEnabled(uint32_t funcIndex) const;
+ [[nodiscard]] bool incrementStepperCount(JSContext* cx, Instance* instance,
+ uint32_t funcIndex);
+ void decrementStepperCount(JS::GCContext* gcx, Instance* instance,
+ uint32_t funcIndex);
+
+ // Stack inspection helpers.
+
+ [[nodiscard]] bool debugGetLocalTypes(uint32_t funcIndex,
+ ValTypeVector* locals,
+ size_t* argsLength,
+ StackResults* stackResults);
+ [[nodiscard]] bool getGlobal(Instance& instance, uint32_t globalIndex,
+ MutableHandleValue vp);
+
+ // Debug URL helpers.
+
+ [[nodiscard]] bool getSourceMappingURL(JSContext* cx,
+ MutableHandleString result) const;
+
+ // Accessors for commonly used elements of linked structures.
+
+ const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
+ const Metadata& metadata() const { return code_->metadata(); }
+ const CodeRangeVector& codeRanges(Tier t) const {
+ return metadata(t).codeRanges;
+ }
+ const CallSiteVector& callSites(Tier t) const {
+ return metadata(t).callSites;
+ }
+
+ uint32_t funcToCodeRangeIndex(uint32_t funcIndex) const {
+ return metadata(Tier::Debug).funcToCodeRange[funcIndex];
+ }
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code, size_t* data) const;
+};
+
+using UniqueDebugState = UniquePtr<DebugState>;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_debug_h
diff --git a/js/src/wasm/WasmDebugFrame.cpp b/js/src/wasm/WasmDebugFrame.cpp
new file mode 100644
index 0000000000..2c93e750bc
--- /dev/null
+++ b/js/src/wasm/WasmDebugFrame.cpp
@@ -0,0 +1,176 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmDebugFrame.h"
+
+#include "vm/EnvironmentObject.h"
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmInstanceData.h"
+#include "wasm/WasmStubs.h"
+
+#include "vm/NativeObject-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+/* static */
+DebugFrame* DebugFrame::from(Frame* fp) {
+ MOZ_ASSERT(GetNearestEffectiveInstance(fp)->code().metadata().debugEnabled);
+ auto* df =
+ reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
+ MOZ_ASSERT(GetNearestEffectiveInstance(fp) == df->instance());
+ return df;
+}
+
+void DebugFrame::alignmentStaticAsserts() {
+ // VS2017 doesn't consider offsetOfFrame() to be a constexpr, so we have
+ // to use offsetof directly. These asserts can't be at class-level
+ // because the type is incomplete.
+
+ static_assert(WasmStackAlignment >= Alignment,
+ "Aligned by ABI before pushing DebugFrame");
+#ifndef JS_CODEGEN_NONE
+ static_assert((offsetof(DebugFrame, frame_) + sizeof(Frame)) % Alignment == 0,
+ "Aligned after pushing DebugFrame");
+#endif
+#ifdef JS_CODEGEN_ARM64
+ // This constraint may or may not be necessary. If you hit this because
+ // you've changed the frame size then feel free to remove it, but be extra
+ // aware of possible problems.
+ static_assert(sizeof(DebugFrame) % 16 == 0, "ARM64 SP alignment");
+#endif
+}
+
+Instance* DebugFrame::instance() {
+ return GetNearestEffectiveInstance(&frame_);
+}
+
+const Instance* DebugFrame::instance() const {
+ return GetNearestEffectiveInstance(&frame_);
+}
+
+GlobalObject* DebugFrame::global() { return &instance()->object()->global(); }
+
+bool DebugFrame::hasGlobal(const GlobalObject* global) const {
+ return global == &instance()->objectUnbarriered()->global();
+}
+
+JSObject* DebugFrame::environmentChain() {
+ return &global()->lexicalEnvironment();
+}
+
+bool DebugFrame::getLocal(uint32_t localIndex, MutableHandleValue vp) {
+ ValTypeVector locals;
+ size_t argsLength;
+ StackResults stackResults;
+ if (!instance()->debug().debugGetLocalTypes(funcIndex(), &locals, &argsLength,
+ &stackResults)) {
+ return false;
+ }
+
+ ValTypeVector args;
+ MOZ_ASSERT(argsLength <= locals.length());
+ if (!args.append(locals.begin(), argsLength)) {
+ return false;
+ }
+ ArgTypeVector abiArgs(args, stackResults);
+
+ BaseLocalIter iter(locals, abiArgs, /* debugEnabled = */ true);
+ while (!iter.done() && iter.index() < localIndex) {
+ iter++;
+ }
+ MOZ_ALWAYS_TRUE(!iter.done());
+
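+  // Locals live at negative offsets from the Frame (the stack grows down), so
+  // start from the Frame's address and subtract the iterator's frame offset.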
+ uint8_t* frame = static_cast<uint8_t*>((void*)this) + offsetOfFrame();
+ void* dataPtr = frame - iter.frameOffset();
+ switch (iter.mirType()) {
+ case jit::MIRType::Int32:
+ vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
+ break;
+ case jit::MIRType::Int64:
+ // Just display as a Number; it's ok if we lose some precision
+ vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
+ break;
+ case jit::MIRType::Float32:
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
+ break;
+ case jit::MIRType::Double:
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
+ break;
+ case jit::MIRType::RefOrNull:
+ vp.set(ObjectOrNullValue(*(JSObject**)dataPtr));
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case jit::MIRType::Simd128:
+ vp.set(NumberValue(0));
+ break;
+#endif
+ default:
+ MOZ_CRASH("local type");
+ }
+ return true;
+}
+
+bool DebugFrame::updateReturnJSValue(JSContext* cx) {
+ MutableHandleValue rval =
+ MutableHandleValue::fromMarkedLocation(&cachedReturnJSValue_);
+ rval.setUndefined();
+ flags_.hasCachedReturnJSValue = true;
+ ResultType resultType = ResultType::Vector(
+ instance()->metadata().debugFuncType(funcIndex()).results());
+ Maybe<char*> stackResultsLoc;
+ if (ABIResultIter::HasStackResults(resultType)) {
+ stackResultsLoc = Some(static_cast<char*>(stackResultsPointer_));
+ }
+ DebugCodegen(DebugChannel::Function,
+ "wasm-function[%d] updateReturnJSValue [", funcIndex());
+ bool ok =
+ ResultsToJSValue(cx, resultType, registerResults_, stackResultsLoc, rval);
+ DebugCodegen(DebugChannel::Function, "]\n");
+ return ok;
+}
+
+HandleValue DebugFrame::returnValue() const {
+ MOZ_ASSERT(flags_.hasCachedReturnJSValue);
+ return HandleValue::fromMarkedLocation(&cachedReturnJSValue_);
+}
+
+void DebugFrame::clearReturnJSValue() {
+ flags_.hasCachedReturnJSValue = true;
+ cachedReturnJSValue_.setUndefined();
+}
+
+void DebugFrame::observe(JSContext* cx) {
+ if (!flags_.observing) {
+ instance()->debug().adjustEnterAndLeaveFrameTrapsState(
+ cx, instance(), /* enabled = */ true);
+ flags_.observing = true;
+ }
+}
+
+void DebugFrame::leave(JSContext* cx) {
+ if (flags_.observing) {
+ instance()->debug().adjustEnterAndLeaveFrameTrapsState(
+ cx, instance(), /* enabled = */ false);
+ flags_.observing = false;
+ }
+}
diff --git a/js/src/wasm/WasmDebugFrame.h b/js/src/wasm/WasmDebugFrame.h
new file mode 100644
index 0000000000..5a9ec6c783
--- /dev/null
+++ b/js/src/wasm/WasmDebugFrame.h
@@ -0,0 +1,217 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_debugframe_h
+#define wasm_debugframe_h
+
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmFrame.h"
+#include "wasm/WasmValType.h"
+#include "wasm/WasmValue.h"
+
+namespace js {
+
+class GlobalObject;
+
+namespace wasm {
+
+class Instance;
+
+// A DebugFrame is a Frame with additional fields that are added after the
+// normal function prologue by the baseline compiler. If a Module is compiled
+// with debugging enabled, then all its code creates DebugFrames on the stack
+// instead of just Frames. These extra fields are used by the Debugger API.
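+//
+// Because the stack grows down, the extra fields live at lower addresses than
+// the Frame itself; DebugFrame::from() recovers the DebugFrame from a Frame*
+// by subtracting offsetOfFrame().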
+
+class DebugFrame {
+ // The register results field. Initialized only during the baseline
+ // compiler's return sequence to allow the debugger to inspect and
+ // modify the return values of a frame being debugged.
+ union SpilledRegisterResult {
+ private:
+ int32_t i32_;
+ int64_t i64_;
+ float f32_;
+ double f64_;
+#ifdef ENABLE_WASM_SIMD
+ V128 v128_;
+#endif
+ AnyRef anyref_;
+
+#ifdef DEBUG
+ // Should we add a new value representation, this will remind us to update
+ // SpilledRegisterResult.
+ static inline void assertAllValueTypesHandled(ValType type) {
+ switch (type.kind()) {
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+ case ValType::V128:
+ case ValType::Ref:
+ return;
+ }
+ }
+#endif
+ };
+ SpilledRegisterResult registerResults_[MaxRegisterResults];
+
+ // The returnValue() method returns a HandleValue pointing to this field.
+ JS::Value cachedReturnJSValue_;
+
+ // If the function returns multiple results, this field is initialized
+ // to a pointer to the stack results.
+ void* stackResultsPointer_;
+
+ // The function index of this frame. Technically, this could be derived
+  // given a PC into this frame (which could look up the CodeRange that has
+ // the function index), but this isn't always readily available.
+ uint32_t funcIndex_;
+
+  // Flags whose meanings are described below.
+ union Flags {
+ struct {
+ uint32_t observing : 1;
+ uint32_t isDebuggee : 1;
+ uint32_t prevUpToDate : 1;
+ uint32_t hasCachedSavedFrame : 1;
+ uint32_t hasCachedReturnJSValue : 1;
+ uint32_t hasSpilledRefRegisterResult : MaxRegisterResults;
+ };
+ uint32_t allFlags;
+ } flags_;
+
+ // Avoid -Wunused-private-field warnings.
+ protected:
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_X86) || defined(__wasi__)
+ // See alignmentStaticAsserts(). For ARM32 and X86 DebugFrame is only
+ // 4-byte aligned, so we add another word to get up to 8-byte
+ // alignment.
+ uint32_t padding_;
+#endif
+#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
+ uint64_t padding_;
+#endif
+
+ private:
+ // The Frame goes at the end since the stack grows down.
+ Frame frame_;
+
+ public:
+ static DebugFrame* from(Frame* fp);
+ Frame& frame() { return frame_; }
+ uint32_t funcIndex() const { return funcIndex_; }
+ Instance* instance();
+ const Instance* instance() const;
+ GlobalObject* global();
+ bool hasGlobal(const GlobalObject* global) const;
+ JSObject* environmentChain();
+ bool getLocal(uint32_t localIndex, JS::MutableHandleValue vp);
+
+ // The return value must be written from the unboxed representation in the
+ // results union into cachedReturnJSValue_ by updateReturnJSValue() before
+ // returnValue() can return a Handle to it.
+
+ bool hasCachedReturnJSValue() const { return flags_.hasCachedReturnJSValue; }
+ [[nodiscard]] bool updateReturnJSValue(JSContext* cx);
+ JS::HandleValue returnValue() const;
+ void clearReturnJSValue();
+
+ // Once the debugger observes a frame, it must be notified via
+ // onLeaveFrame() before the frame is popped. Calling observe() ensures the
+ // leave frame traps are enabled. Both methods are idempotent so the caller
+ // doesn't have to worry about calling them more than once.
+
+ void observe(JSContext* cx);
+ void leave(JSContext* cx);
+
+  // The 'isDebuggee' bit is initialized to false and set by the WebAssembly
+  // runtime right before a frame is exposed to the debugger, as required by
+  // the Debugger API. The bit is then used for Debugger-internal purposes.
+
+ bool isDebuggee() const { return flags_.isDebuggee; }
+ void setIsDebuggee() { flags_.isDebuggee = true; }
+ void unsetIsDebuggee() { flags_.isDebuggee = false; }
+
+ // These are opaque boolean flags used by the debugger to implement
+ // AbstractFramePtr. They are initialized to false and not otherwise read or
+ // written by wasm code or runtime.
+
+ bool prevUpToDate() const { return flags_.prevUpToDate; }
+ void setPrevUpToDate() { flags_.prevUpToDate = true; }
+ void unsetPrevUpToDate() { flags_.prevUpToDate = false; }
+
+ bool hasCachedSavedFrame() const { return flags_.hasCachedSavedFrame; }
+ void setHasCachedSavedFrame() { flags_.hasCachedSavedFrame = true; }
+ void clearHasCachedSavedFrame() { flags_.hasCachedSavedFrame = false; }
+
+ bool hasSpilledRegisterRefResult(size_t n) const {
+ uint32_t mask = hasSpilledRegisterRefResultBitMask(n);
+ return (flags_.allFlags & mask) != 0;
+ }
+
+ // DebugFrame is accessed directly by JIT code.
+
+ static constexpr size_t offsetOfRegisterResults() {
+ return offsetof(DebugFrame, registerResults_);
+ }
+ static constexpr size_t offsetOfRegisterResult(size_t n) {
+ MOZ_ASSERT(n < MaxRegisterResults);
+ return offsetOfRegisterResults() + n * sizeof(SpilledRegisterResult);
+ }
+ static constexpr size_t offsetOfCachedReturnJSValue() {
+ return offsetof(DebugFrame, cachedReturnJSValue_);
+ }
+ static constexpr size_t offsetOfStackResultsPointer() {
+ return offsetof(DebugFrame, stackResultsPointer_);
+ }
+ static constexpr size_t offsetOfFlags() {
+ return offsetof(DebugFrame, flags_);
+ }
+ static constexpr uint32_t hasSpilledRegisterRefResultBitMask(size_t n) {
+ MOZ_ASSERT(n < MaxRegisterResults);
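+    // Set the bitfield member in a zeroed union and read back allFlags; this
+    // yields the mask without hard-coding the bitfield's bit position.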
+ union Flags flags = {.allFlags = 0};
+ flags.hasSpilledRefRegisterResult = 1 << n;
+ MOZ_ASSERT(flags.allFlags != 0);
+ return flags.allFlags;
+ }
+ static constexpr size_t offsetOfFuncIndex() {
+ return offsetof(DebugFrame, funcIndex_);
+ }
+ static constexpr size_t offsetOfFrame() {
+ return offsetof(DebugFrame, frame_);
+ }
+
+  // DebugFrames are 8-byte aligned, allowing them to be placed in
+ // an AbstractFramePtr.
+
+ static const unsigned Alignment = 8;
+ static void alignmentStaticAsserts();
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_debugframe_h
diff --git a/js/src/wasm/WasmException.h b/js/src/wasm/WasmException.h
new file mode 100644
index 0000000000..d9cfcfb8b0
--- /dev/null
+++ b/js/src/wasm/WasmException.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_exception_h
+#define wasm_exception_h
+
+namespace js {
+namespace wasm {
+
+static const uint32_t CatchAllIndex = UINT32_MAX;
+static_assert(CatchAllIndex > MaxTags);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_exception_h
diff --git a/js/src/wasm/WasmExprType.h b/js/src/wasm/WasmExprType.h
new file mode 100644
index 0000000000..4b02cd5e60
--- /dev/null
+++ b/js/src/wasm/WasmExprType.h
@@ -0,0 +1,330 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_expr_type_h
+#define wasm_expr_type_h
+
+#include <stdint.h>
+
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValType.h"
+
+namespace js {
+namespace wasm {
+
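+// TaggedValue packs either a small immediate or a (suitably aligned) pointer
+// together with a 2-bit kind tag in the low bits of a single 64-bit word: an
+// immediate payload is shifted up past the tag, while a pointer's low (zero)
+// bits are reused for the tag.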
+template <typename PointerType>
+class TaggedValue {
+ public:
+ enum Kind {
+ ImmediateKind1 = 0,
+ ImmediateKind2 = 1,
+ PointerKind1 = 2,
+ PointerKind2 = 3
+ };
+ using PackedRepr = uint64_t;
+ static_assert(std::is_same<PackedTypeCode::PackedRepr, uint64_t>(),
+ "can use pointer tagging with PackedTypeCode");
+
+ private:
+ PackedRepr bits_;
+
+ static constexpr PackedRepr PayloadShift = 2;
+ static constexpr PackedRepr KindMask = 0x3;
+ static constexpr PackedRepr PointerKindBit = 0x2;
+
+ constexpr static bool IsPointerKind(Kind kind) {
+ return PackedRepr(kind) & PointerKindBit;
+ }
+ constexpr static bool IsImmediateKind(Kind kind) {
+ return !IsPointerKind(kind);
+ }
+
+ static_assert(IsImmediateKind(ImmediateKind1), "immediate kind 1");
+ static_assert(IsImmediateKind(ImmediateKind2), "immediate kind 2");
+ static_assert(IsPointerKind(PointerKind1), "pointer kind 1");
+ static_assert(IsPointerKind(PointerKind2), "pointer kind 2");
+
+ static PackedRepr PackImmediate(Kind kind, PackedRepr imm) {
+ MOZ_ASSERT(IsImmediateKind(kind));
+ MOZ_ASSERT((PackedRepr(kind) & KindMask) == kind);
+ MOZ_ASSERT((imm & (PackedRepr(KindMask)
+ << ((sizeof(PackedRepr) * 8) - PayloadShift))) == 0);
+ return PackedRepr(kind) | (PackedRepr(imm) << PayloadShift);
+ }
+
+ static PackedRepr PackPointer(Kind kind, PointerType* ptr) {
+ PackedRepr ptrBits = reinterpret_cast<PackedRepr>(ptr);
+ MOZ_ASSERT(IsPointerKind(kind));
+ MOZ_ASSERT((PackedRepr(kind) & KindMask) == kind);
+ MOZ_ASSERT((ptrBits & KindMask) == 0);
+ return PackedRepr(kind) | ptrBits;
+ }
+
+ public:
+ TaggedValue(Kind kind, PackedRepr imm) : bits_(PackImmediate(kind, imm)) {}
+ TaggedValue(Kind kind, PointerType* ptr) : bits_(PackPointer(kind, ptr)) {}
+
+ PackedRepr bits() const { return bits_; }
+ Kind kind() const { return Kind(bits() & KindMask); }
+ PackedRepr immediate() const {
+ MOZ_ASSERT(IsImmediateKind(kind()));
+ return mozilla::AssertedCast<PackedRepr>(bits() >> PayloadShift);
+ }
+ PointerType* pointer() const {
+ MOZ_ASSERT(IsPointerKind(kind()));
+ return reinterpret_cast<PointerType*>(bits() & ~KindMask);
+ }
+};
+
+// ResultType represents the WebAssembly spec's `resulttype`. Semantically, a
+// result type is just a vec(valtype). For efficiency, though, the ResultType
+// value is packed into a word, with separate encodings for these 3 cases:
+// []
+// [valtype]
+// pointer to ValTypeVector
+//
+// Additionally there is an encoding indicating uninitialized ResultType
+// values.
+//
+// Generally in the latter case the ValTypeVector is the args() or results() of
+// a FuncType in the compilation unit, so as long as the ResultType value does
+// not outlive the OpIter, we can just borrow the pointer without taking
+// ownership or copying.
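+//
+// For example (illustrative only): ResultType::Vector(funcType.results())
+// yields the [] encoding for zero results and the single-valtype encoding for
+// one result; only with two or more results does it borrow a pointer to the
+// vector, which must then outlive the ResultType.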
+class ResultType {
+ using Tagged = TaggedValue<const ValTypeVector>;
+ Tagged tagged_;
+
+ enum Kind {
+ EmptyKind = Tagged::ImmediateKind1,
+ SingleKind = Tagged::ImmediateKind2,
+ VectorKind = Tagged::PointerKind1,
+ InvalidKind = Tagged::PointerKind2,
+ };
+
+ ResultType(Kind kind, Tagged::PackedRepr imm)
+ : tagged_(Tagged::Kind(kind), imm) {}
+ explicit ResultType(const ValTypeVector* ptr)
+ : tagged_(Tagged::Kind(VectorKind), ptr) {}
+
+ Kind kind() const { return Kind(tagged_.kind()); }
+
+ ValType singleValType() const {
+ MOZ_ASSERT(kind() == SingleKind);
+ return ValType(PackedTypeCode::fromBits(tagged_.immediate()));
+ }
+
+ const ValTypeVector& values() const {
+ MOZ_ASSERT(kind() == VectorKind);
+ return *tagged_.pointer();
+ }
+
+ public:
+ ResultType() : tagged_(Tagged::Kind(InvalidKind), nullptr) {}
+
+ static ResultType Empty() {
+ return ResultType(EmptyKind, Tagged::PackedRepr(0));
+ }
+ static ResultType Single(ValType vt) {
+ return ResultType(SingleKind, vt.bitsUnsafe());
+ }
+ static ResultType Vector(const ValTypeVector& vals) {
+ switch (vals.length()) {
+ case 0:
+ return Empty();
+ case 1:
+ return Single(vals[0]);
+ default:
+ return ResultType(&vals);
+ }
+ }
+
+ [[nodiscard]] bool cloneToVector(ValTypeVector* out) {
+ MOZ_ASSERT(out->empty());
+ switch (kind()) {
+ case EmptyKind:
+ return true;
+ case SingleKind:
+ return out->append(singleValType());
+ case VectorKind:
+ return out->appendAll(values());
+ default:
+ MOZ_CRASH("bad resulttype");
+ }
+ }
+
+ bool valid() const { return kind() != InvalidKind; }
+ bool empty() const { return kind() == EmptyKind; }
+
+ size_t length() const {
+ switch (kind()) {
+ case EmptyKind:
+ return 0;
+ case SingleKind:
+ return 1;
+ case VectorKind:
+ return values().length();
+ default:
+ MOZ_CRASH("bad resulttype");
+ }
+ }
+
+ // Polyfill the Span API, which is polyfilling the std library
+ size_t size() const { return length(); }
+
+ ValType operator[](size_t i) const {
+ switch (kind()) {
+ case SingleKind:
+ MOZ_ASSERT(i == 0);
+ return singleValType();
+ case VectorKind:
+ return values()[i];
+ default:
+ MOZ_CRASH("bad resulttype");
+ }
+ }
+
+ bool operator==(ResultType rhs) const {
+ switch (kind()) {
+ case EmptyKind:
+ case SingleKind:
+ case InvalidKind:
+ return tagged_.bits() == rhs.tagged_.bits();
+ case VectorKind: {
+ if (rhs.kind() != VectorKind) {
+ return false;
+ }
+ return EqualContainers(values(), rhs.values());
+ }
+ default:
+ MOZ_CRASH("bad resulttype");
+ }
+ }
+ bool operator!=(ResultType rhs) const { return !(*this == rhs); }
+};
+
+// BlockType represents the WebAssembly spec's `blocktype`. Semantically, a
+// block type is just a (vec(valtype) -> vec(valtype)) with four special
+// encodings which are represented explicitly in BlockType:
+// [] -> []
+// [] -> [valtype]
+// [params] -> [results] via pointer to FuncType
+// [] -> [results] via pointer to FuncType (ignoring [params])
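+//
+// For example (illustrative only): for a FuncType (i32) -> (i64),
+// BlockType::Func keeps a pointer to the FuncType (FuncKind), whereas
+// BlockType::FuncResults for the same type collapses to the immediate
+// [] -> [i64] encoding, since only the results matter there.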
+
+class BlockType {
+ using Tagged = TaggedValue<const FuncType>;
+ Tagged tagged_;
+
+ enum Kind {
+ VoidToVoidKind = Tagged::ImmediateKind1,
+ VoidToSingleKind = Tagged::ImmediateKind2,
+ FuncKind = Tagged::PointerKind1,
+ FuncResultsKind = Tagged::PointerKind2
+ };
+
+ BlockType(Kind kind, Tagged::PackedRepr imm)
+ : tagged_(Tagged::Kind(kind), imm) {}
+ BlockType(Kind kind, const FuncType& type)
+ : tagged_(Tagged::Kind(kind), &type) {}
+
+ Kind kind() const { return Kind(tagged_.kind()); }
+ ValType singleValType() const {
+ MOZ_ASSERT(kind() == VoidToSingleKind);
+ return ValType(PackedTypeCode::fromBits(tagged_.immediate()));
+ }
+
+ const FuncType& funcType() const { return *tagged_.pointer(); }
+
+ public:
+ BlockType()
+ : tagged_(Tagged::Kind(VoidToVoidKind),
+ PackedTypeCode::invalid().bits()) {}
+
+ static BlockType VoidToVoid() {
+ return BlockType(VoidToVoidKind, Tagged::PackedRepr(0));
+ }
+ static BlockType VoidToSingle(ValType vt) {
+ return BlockType(VoidToSingleKind, vt.bitsUnsafe());
+ }
+ static BlockType Func(const FuncType& type) {
+ if (type.args().length() == 0) {
+ return FuncResults(type);
+ }
+ return BlockType(FuncKind, type);
+ }
+ static BlockType FuncResults(const FuncType& type) {
+ switch (type.results().length()) {
+ case 0:
+ return VoidToVoid();
+ case 1:
+ return VoidToSingle(type.results()[0]);
+ default:
+ return BlockType(FuncResultsKind, type);
+ }
+ }
+
+ ResultType params() const {
+ switch (kind()) {
+ case VoidToVoidKind:
+ case VoidToSingleKind:
+ case FuncResultsKind:
+ return ResultType::Empty();
+ case FuncKind:
+ return ResultType::Vector(funcType().args());
+ default:
+ MOZ_CRASH("unexpected kind");
+ }
+ }
+
+ ResultType results() const {
+ switch (kind()) {
+ case VoidToVoidKind:
+ return ResultType::Empty();
+ case VoidToSingleKind:
+ return ResultType::Single(singleValType());
+ case FuncKind:
+ case FuncResultsKind:
+ return ResultType::Vector(funcType().results());
+ default:
+ MOZ_CRASH("unexpected kind");
+ }
+ }
+
+ bool operator==(BlockType rhs) const {
+ if (kind() != rhs.kind()) {
+ return false;
+ }
+ switch (kind()) {
+ case VoidToVoidKind:
+ case VoidToSingleKind:
+ return tagged_.bits() == rhs.tagged_.bits();
+ case FuncKind:
+ return FuncType::strictlyEquals(funcType(), rhs.funcType());
+ case FuncResultsKind:
+ return EqualContainers(funcType().results(), rhs.funcType().results());
+ default:
+ MOZ_CRASH("unexpected kind");
+ }
+ }
+
+ bool operator!=(BlockType rhs) const { return !(*this == rhs); }
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_expr_type_h
diff --git a/js/src/wasm/WasmFrame.h b/js/src/wasm/WasmFrame.h
new file mode 100644
index 0000000000..19bebae9ca
--- /dev/null
+++ b/js/src/wasm/WasmFrame.h
@@ -0,0 +1,409 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* [SMDOC] The WASM ABIs
+ *
+ * Wasm-internal ABI.
+ *
+ * The *Wasm-internal ABI* is the ABI a wasm function assumes when it is
+ * entered, and the one it assumes when it is making a call to what it believes
+ * is another wasm function.
+ *
+ * We pass the first function arguments in registers (GPR and FPU both) and the
+ * rest on the stack, generally according to platform ABI conventions (which can
+ * be hairy). On x86-32 there are no register arguments.
+ *
+ * We have no callee-saves registers in the wasm-internal ABI, regardless of the
+ * platform ABI conventions, though see below about InstanceReg or HeapReg.
+ *
+ * We return the last return value in the first return register, according to
+ * platform ABI conventions. If there is more than one return value, an area is
+ * allocated in the caller's frame to receive the other return values, and the
+ * address of this area is passed to the callee as the last argument. Return
+ * values except the last are stored in ascending order within this area. Also
+ * see below about alignment of this area and the values in it.
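+ *
+ * For example (illustrative only): for a result type (i32, i64, f64), the f64
+ * comes back in the first FP return register, while the i32 and the i64 are
+ * stored, in that order, into the caller-allocated area whose address was
+ * passed as the trailing argument.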
+ *
+ * When a function is entered, there are two incoming register values in
+ * addition to the function's declared parameters: InstanceReg must have the
+ * correct instance pointer, and HeapReg the correct memoryBase, for the
+ * function. (On x86-32 there is no HeapReg.) From the instance we can get to
+ * the JSContext, the instance, the MemoryBase, and many other things. The
+ * instance maps one-to-one with an instance.
+ *
+ * HeapReg and InstanceReg are not parameters in the usual sense, nor are they
+ * callee-saves registers. Instead they constitute global register state, the
+ * purpose of which is to bias the call ABI in favor of intra-instance calls,
+ * the predominant case where the caller and the callee have the same
+ * InstanceReg and HeapReg values.
+ *
+ * With this global register state, literally no work needs to take place to
+ * save and restore the instance and MemoryBase values across intra-instance
+ * call boundaries.
+ *
+ * For inter-instance calls, in contrast, there must be an instance switch at
+ * the call boundary: Before the call, the callee's instance must be loaded
+ * (from a closure or from the import table), and from the instance we load the
+ * callee's MemoryBase, the realm, and the JSContext. The caller's and callee's
+ * instance values must be stored into the frame (to aid unwinding), the
+ * callee's realm must be stored into the JSContext, and the callee's instance
+ * and MemoryBase values must be moved to appropriate registers. After the
+ * call, the caller's instance must be loaded, and from it the caller's
+ * MemoryBase and realm, and the JSContext. The realm must be stored into the
+ * JSContext and the caller's instance and MemoryBase values must be moved to
+ * appropriate registers.
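+ *
+ * Roughly, in pseudo-code (all names here are illustrative only):
+ *
+ *   frame.callerInstanceSlot = callerInstance
+ *   frame.calleeInstanceSlot = calleeInstance
+ *   InstanceReg = calleeInstance; HeapReg = calleeInstance->memoryBase
+ *   cx->realm = calleeInstance->realm
+ *   call callee
+ *   InstanceReg = callerInstance; HeapReg = callerInstance->memoryBase
+ *   cx->realm = callerInstance->realm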
+ *
+ * Direct calls to functions within the same module are always intra-instance,
+ * while direct calls to imported functions are always inter-instance. Indirect
+ * calls -- call_indirect in the MVP, future call_ref and call_funcref -- may or
+ * may not be intra-instance.
+ *
+ * call_indirect, and future call_funcref, also pass a signature value in a
+ * register (even on x86-32); this is a small integer or a pointer value
+ * denoting the caller's expected function signature. The callee must compare
+ * it to the value or pointer that denotes its actual signature, and trap on
+ * mismatch.
+ *
+ * This is what the stack looks like during a call, after the callee has
+ * completed the prologue:
+ *
+ * | |
+ * +-----------------------------------+ <-+
+ * | ... | |
+ * | Caller's private frame | |
+ * +-----------------------------------+ |
+ * | Multi-value return (optional) | |
+ * | ... | |
+ * +-----------------------------------+ |
+ * | Stack args (optional) | |
+ * | ... | |
+ * +-----------------------------------+ -+|
+ * | Caller instance slot | \
+ * | Callee instance slot | | \
+ * +-----------------------------------+ | \
+ * | Shadowstack area (Win64) | | wasm::FrameWithInstances
+ * | (32 bytes) | | /
+ * +-----------------------------------+ | / <= SP "Before call"
+ * | Return address | // <= SP "After call"
+ * | Saved FP ----|--+/
+ * +-----------------------------------+ -+ <= FP (a wasm::Frame*)
+ * | DebugFrame, Locals, spills, etc |
+ * | (i.e., callee's private frame) |
+ * | .... |
+ * +-----------------------------------+ <= SP
+ *
+ * The FrameWithInstances is a struct with four fields: the saved FP, the return
+ * address, and the two instance slots; the shadow stack area is there only on
+ * Win64 and is unused by wasm but is part of the native ABI, with which the
+ * wasm ABI is mostly compatible. The slots for caller and callee instance are
+ * only populated by the instance switching code in inter-instance calls so that
+ * stack unwinding can keep track of the correct instance value for each frame,
+ * the instance not being obtainable from anywhere else. Nothing in the frame
+ * itself indicates directly whether the instance slots are valid - for that,
+ * the return address must be used to look up a CallSite structure that carries
+ * that information.
+ *
+ * The stack area above the return address is owned by the caller, which may
+ * deallocate the area on return or choose to reuse it for subsequent calls.
+ * (The baseline compiler allocates and frees the stack args area and the
+ * multi-value result area per call. Ion reuses the areas and allocates them as
+ * part of the overall activation frame when the procedure is entered; indeed,
+ * the multi-value return area can be anywhere within the caller's private
+ * frame, not necessarily directly above the stack args.)
+ *
+ * If the stack args area contains references, it is up to the callee's stack
+ * to name the locations where those references exist, and the caller's stack
+ * map must not (redundantly) name those locations. (The callee's ownership of
+ * this area will be crucial for making tail calls work, as the types of the
+ * locations can change if the callee makes a tail call.) If pointer values are
+ * spilled by anyone into the Shadowstack area, they will not be traced.
+ *
+ * References in the multi-return area are covered by the caller's map, as these
+ * slots outlive the call.
+ *
+ * The address "Before call", ie the part of the FrameWithInstances above the
+ * Frame, must be aligned to WasmStackAlignment, and everything follows from
+ * that, with padding inserted for alignment as required for stack arguments. In
+ * turn WasmStackAlignment is at least as large as the largest parameter type.
+ *
+ * The address of the multiple-results area is currently 8-byte aligned by Ion,
+ * and its alignment in baseline is uncertain; see bug 1747787. Result values
+ * are stored packed within the area in fields whose size is given by
+ * ResultStackSize(ValType), which breaks alignment too. This all seems
+ * underdeveloped.
+ *
+ * In the wasm-internal ABI, the ARM64 PseudoStackPointer (PSP) is garbage on
+ * entry but must be synced with the real SP at the point the function returns.
+ *
+ *
+ * The Wasm Builtin ABIs.
+ *
+ * Also see `[SMDOC] Process-wide builtin thunk set` in WasmBuiltins.cpp.
+ *
+ * The *Wasm-builtin ABIs* comprise the ABIs used when wasm makes calls directly
+ * to the C++ runtime (but not to the JS interpreter), including instance
+ * methods, helpers for operations such as 64-bit division on 32-bit systems,
+ * allocation and writer barriers, conversions to/from JS values, special
+ * fast-path JS imports, and trap handling.
+ *
+ * The callee of a builtin call will always assume the C/C++ ABI. Therefore
+ * every volatile (caller-saves) register that wasm uses must be saved across
+ * the call, the stack must be aligned as for a C/C++-ABI call before the call,
+ * and any ABI registers the callee expects to have specific values must be set
+ * up (e.g. the frame pointer, if the C/C++ ABI assumes it is set).
+ *
+ * Most builtin calls are straightforward: the wasm caller knows that it is
+ * performing a call, and so it saves live registers, moves arguments into ABI
+ * locations, etc, before calling. Abstractions in the masm make sure to pass
+ * the instance pointer to an instance "method" call and to restore the
+ * InstanceReg and HeapReg after the call. In these straightforward cases,
+ * calling the builtin additionally amounts to:
+ *
+ * - exiting the wasm activation
+ * - adjusting parameter values to account for platform weirdness (FP arguments
+ * are handled differently in the C/C++ ABIs on ARM and x86-32 than in the
+ * Wasm ABI)
+ * - copying stack arguments into place for the C/C++ ABIs
+ * - making the call
+ * - adjusting the return values on return
+ * - re-entering the wasm activation and returning to the wasm caller
+ *
+ * The steps above are performed by the *builtin thunk* for the builtin and the
+ * builtin itself is said to be *thunked*. Going via the thunk is simple and,
+ * except for always having to copy stack arguments on x86-32 and the extra call
+ * in the thunk, close to as fast as we can make it without heroics. Except for
+ * the arithmetic helpers on 32-bit systems, most builtins are rarely used, are
+ * asm.js-specific, or are expensive anyway, and the overhead of the extra call
+ * doesn't matter.
+ *
+ * A few builtins for special purposes are *unthunked* and fall into two
+ * classes: those that would normally be thunked but are used in circumstances
+ * where the VM is in an unusual state, and those that do their work within
+ * the activation.
+ *
+ * In the former class, we find the debug trap handler, which must preserve all
+ * live registers because it is called in contexts where live registers have not
+ * been saved; argument coercion functions, which are called while a call frame
+ * is being built for a JS->Wasm or Wasm->JS call; and other routines that have
+ * special needs for constructing the call. These all exit the activation, but
+ * handle the exit specially.
+ *
+ * In the latter class, we find two functions that abandon the VM state and
+ * unwind the activation, HandleThrow and HandleTrap; and some debug print
+ * functions that do not affect the VM state at all.
+ *
+ * To summarize, when wasm calls a builtin thunk the stack will end up looking
+ * like this from within the C++ code:
+ *
+ * | |
+ * +-------------------------+
+ * | Wasm frame |
+ * +-------------------------+
+ * | Thunk frame (exit) |
+ * +-------------------------+
+ * | Builtin frame (C++) |
+ * +-------------------------+ <= SP
+ *
+ * There is an assumption in the profiler (in initFromExitFP) that an exit has
+ * left precisely one frame on the stack for the thunk itself. There may be
+ * additional assumptions elsewhere, not yet found.
+ *
+ * Very occasionally, Wasm will call C++ without going through the builtin
+ * thunks, and this can be a source of problems. The one case I know about
+ * right now is that the JS pre-barrier filtering code is called directly from
+ * Wasm, see bug 1464157.
+ *
+ *
+ * Wasm stub ABIs.
+ *
+ * Also see `[SMDOC] Exported wasm functions and the jit-entry stubs` in
+ * WasmJS.cpp.
+ *
+ * The "stub ABIs" are not properly speaking ABIs themselves, but ABI
+ * converters. An "entry" stub calls in to wasm and an "exit" stub calls out
+ * from wasm. The entry stubs must convert from whatever data formats the
+ * caller has to wasm formats (and in the future must provide some kind of type
+ * checking for pointer types); the exit stubs convert from wasm formats to the
+ * callee's expected format.
+ *
+ * There are different entry paths from the JS interpreter (using the C++ ABI
+ * and data formats) and from jitted JS code (using the JIT ABI and data
+ * formats); indeed there is a "normal" JitEntry path ("JitEntry") that will
+ * perform argument and return value conversion, and the "fast" JitEntry path
+ * ("DirectCallFromJit") that is only used when it is known that the JIT will
+ * only pass and receive wasm-compatible data and no conversion is needed.
+ *
+ * Similarly, there are different exit paths to the interpreter (using the C++
+ * ABI and data formats) and to JS JIT code (using the JIT ABI and data
+ * formats). Also, builtin calls described above are themselves a type of exit,
+ * and builtin thunks are properly a type of exit stub.
+ *
+ * Data conversions are difficult because the VM is in an intermediate state
+ * when they happen, we want them to be fast when possible, and some conversions
+ * can re-enter both JS code and wasm code.
+ */
+
+#ifndef wasm_frame_h
+#define wasm_frame_h
+
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <type_traits>
+
+#include "jit/Registers.h" // For js::jit::ShadowStackSpace
+
+namespace js {
+namespace wasm {
+
+class Instance;
+
+// Bit tag set when exiting wasm code in JitActivation's exitFP.
+constexpr uintptr_t ExitFPTag = 0x1;
+
+// wasm::Frame represents the bytes pushed by the call instruction and the
+// fixed prologue generated by wasm::GenerateCallablePrologue.
+//
+// Across all architectures it is assumed that, before the call instruction, the
+// stack pointer is WasmStackAlignment-aligned. Thus after the prologue, and
+// before the function has made its stack reservation, the stack alignment is
+// sizeof(Frame) % WasmStackAlignment.
+//
+// During MacroAssembler code generation, the bytes pushed after the wasm::Frame
+// are counted by masm.framePushed. Thus, the stack alignment at any point in
+// time is (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment.
+
+class Frame {
+ // See GenerateCallableEpilogue for why this must be
+ // the first field of wasm::Frame (in a downward-growing stack).
+ // It's either the caller's Frame*, for wasm callers, or the JIT caller frame
+ // plus a tag otherwise.
+ uint8_t* callerFP_;
+
+ // The return address pushed by the call (in the case of ARM/MIPS the return
+ // address is pushed by the first instruction of the prologue).
+ void* returnAddress_;
+
+ public:
+ static constexpr uint32_t callerFPOffset() {
+ return offsetof(Frame, callerFP_);
+ }
+ static constexpr uint32_t returnAddressOffset() {
+ return offsetof(Frame, returnAddress_);
+ }
+
+ uint8_t* returnAddress() const {
+ return reinterpret_cast<uint8_t*>(returnAddress_);
+ }
+
+ void** addressOfReturnAddress() {
+ return reinterpret_cast<void**>(&returnAddress_);
+ }
+
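+  // The accessors below all return callerFP_ (possibly cast), under different
+  // interpretations: rawCaller() returns the stored value as-is, wasmCaller()
+  // applies when the caller is another wasm frame, and jitEntryCaller()
+  // applies when JIT code called into wasm directly (see callerFP_ above).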
+ uint8_t* rawCaller() const { return callerFP_; }
+
+ Frame* wasmCaller() const { return reinterpret_cast<Frame*>(callerFP_); }
+
+ uint8_t* jitEntryCaller() const { return callerFP_; }
+
+ static const Frame* fromUntaggedWasmExitFP(const void* savedFP) {
+ MOZ_ASSERT(!isExitFP(savedFP));
+ return reinterpret_cast<const Frame*>(savedFP);
+ }
+
+ static bool isExitFP(const void* fp) {
+ return reinterpret_cast<uintptr_t>(fp) & ExitFPTag;
+ }
+
+ static uint8_t* untagExitFP(const void* fp) {
+ MOZ_ASSERT(isExitFP(fp));
+ return reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(fp) &
+ ~ExitFPTag);
+ }
+
+ static uint8_t* addExitFPTag(const Frame* fp) {
+ MOZ_ASSERT(!isExitFP(fp));
+ return reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(fp) |
+ ExitFPTag);
+ }
+};
+
+static_assert(!std::is_polymorphic_v<Frame>, "Frame doesn't need a vtable.");
+static_assert(sizeof(Frame) == 2 * sizeof(void*),
+ "Frame is a two pointer structure");
+
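+// As a rough sketch, a run of pure wasm frames can be walked with just the
+// accessors above ("innermost" here stands for some wasm Frame*); the real
+// iterator, WasmFrameIter::popFrame in WasmFrameIter.cpp, additionally stops
+// at interpreter/JIT entry code ranges and at direct JIT-to-wasm calls:
+//
+//   const Frame* f = innermost;
+//   while (LookupCode(f->returnAddress()) != nullptr) {  // caller is wasm
+//     f = f->wasmCaller();
+//   }
+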
+// Note that sizeof(FrameWithInstances) does not account for ShadowStackSpace.
+// Use FrameWithInstances::sizeOf() if you are not incorporating
+// ShadowStackSpace through other means (eg the ABIArgIter).
+
+class FrameWithInstances : public Frame {
+ // `ShadowStackSpace` bytes will be allocated here on Win64, at higher
+ // addresses than Frame and at lower addresses than the instance fields.
+
+ // The instance area MUST be two pointers exactly.
+ Instance* calleeInstance_;
+ Instance* callerInstance_;
+
+ public:
+ Instance* calleeInstance() { return calleeInstance_; }
+ Instance* callerInstance() { return callerInstance_; }
+
+ constexpr static uint32_t sizeOf() {
+ return sizeof(wasm::FrameWithInstances) + js::jit::ShadowStackSpace;
+ }
+
+ constexpr static uint32_t sizeOfInstanceFields() {
+ return sizeof(wasm::FrameWithInstances) - sizeof(wasm::Frame);
+ }
+
+ constexpr static uint32_t calleeInstanceOffset() {
+ return offsetof(FrameWithInstances, calleeInstance_) +
+ js::jit::ShadowStackSpace;
+ }
+
+ constexpr static uint32_t calleeInstanceOffsetWithoutFrame() {
+ return calleeInstanceOffset() - sizeof(wasm::Frame);
+ }
+
+ constexpr static uint32_t callerInstanceOffset() {
+ return offsetof(FrameWithInstances, callerInstance_) +
+ js::jit::ShadowStackSpace;
+ }
+
+ constexpr static uint32_t callerInstanceOffsetWithoutFrame() {
+ return callerInstanceOffset() - sizeof(wasm::Frame);
+ }
+};
+
+static_assert(FrameWithInstances::calleeInstanceOffsetWithoutFrame() ==
+ js::jit::ShadowStackSpace,
+ "Callee instance stored right above the return address.");
+static_assert(FrameWithInstances::callerInstanceOffsetWithoutFrame() ==
+ js::jit::ShadowStackSpace + sizeof(void*),
+ "Caller instance stored right above the callee instance.");
+
+static_assert(FrameWithInstances::sizeOfInstanceFields() == 2 * sizeof(void*),
+ "There are only two additional slots");
+
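+// Equivalently: sizeOf() == sizeof(Frame) + js::jit::ShadowStackSpace +
+// 2 * sizeof(void*), that is, four pointers plus the shadow stack area.
+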
+#if defined(JS_CODEGEN_ARM64)
+static_assert(sizeof(Frame) % 16 == 0, "frame is aligned");
+#endif
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_frame_h
diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp
new file mode 100644
index 0000000000..8b57fd42fa
--- /dev/null
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -0,0 +1,1764 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmFrameIter.h"
+
+#include "jit/JitFrames.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "wasm/WasmDebugFrame.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmInstanceData.h"
+#include "wasm/WasmIntrinsicGenerated.h"
+#include "wasm/WasmStubs.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+
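+// These helpers read the caller/callee instance slots of a FrameWithInstances.
+// As documented in WasmFrame.h, those slots are only meaningful when the call
+// site for the frame's return address might be cross-instance, or when the
+// caller is an entry stub or a direct JIT call (see GetNearestEffectiveInstance
+// below).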
+static Instance* ExtractCallerInstanceFromFrameWithInstances(Frame* fp) {
+ return *reinterpret_cast<Instance**>(
+ reinterpret_cast<uint8_t*>(fp) +
+ FrameWithInstances::callerInstanceOffset());
+}
+
+static const Instance* ExtractCalleeInstanceFromFrameWithInstances(
+ const Frame* fp) {
+ return *reinterpret_cast<Instance* const*>(
+ reinterpret_cast<const uint8_t*>(fp) +
+ FrameWithInstances::calleeInstanceOffset());
+}
+
+/*****************************************************************************/
+// WasmFrameIter implementation
+
+WasmFrameIter::WasmFrameIter(JitActivation* activation, wasm::Frame* fp)
+ : activation_(activation),
+ code_(nullptr),
+ codeRange_(nullptr),
+ lineOrBytecode_(0),
+ fp_(fp ? fp : activation->wasmExitFP()),
+ instance_(nullptr),
+ unwoundCallerFP_(nullptr),
+ unwoundJitFrameType_(),
+ unwind_(Unwind::False),
+ unwoundAddressOfReturnAddress_(nullptr),
+ resumePCinCurrentFrame_(nullptr) {
+ MOZ_ASSERT(fp_);
+ instance_ = GetNearestEffectiveInstance(fp_);
+
+ // When the stack is captured during a trap (viz., to create the .stack
+ // for an Error object), use the pc/bytecode information captured by the
+ // signal handler in the runtime. Take care not to use this trap unwind
+ // state for wasm frames in the middle of a JitActivation, i.e., wasm frames
+ // that called into JIT frames before the trap.
+
+ if (activation->isWasmTrapping() && fp_ == activation->wasmExitFP()) {
+ const TrapData& trapData = activation->wasmTrapData();
+ void* unwoundPC = trapData.unwoundPC;
+
+ code_ = &instance_->code();
+ MOZ_ASSERT(code_ == LookupCode(unwoundPC));
+
+ codeRange_ = code_->lookupFuncRange(unwoundPC);
+ MOZ_ASSERT(codeRange_);
+
+ lineOrBytecode_ = trapData.bytecodeOffset;
+
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ // Otherwise, execution exits wasm code via an exit stub which sets exitFP
+ // to the exit stub's frame. Thus, in this case, we want to start iteration
+ // at the caller of the exit frame, whose Code, CodeRange and CallSite are
+ // indicated by the returnAddress of the exit stub's frame. If the caller
+ // was Ion, we can just skip the wasm frames.
+
+ popFrame();
+ MOZ_ASSERT(!done() || unwoundCallerFP_);
+}
+
+bool WasmFrameIter::done() const {
+ MOZ_ASSERT(!!fp_ == !!code_);
+ MOZ_ASSERT(!!fp_ == !!codeRange_);
+ return !fp_;
+}
+
+void WasmFrameIter::operator++() {
+ MOZ_ASSERT(!done());
+
+ // When the iterator is set to unwind, each time the iterator pops a frame,
+ // the JitActivation is updated so that the just-popped frame is no longer
+ // visible. This is necessary since Debugger::onLeaveFrame is called before
+ // popping each frame and, once onLeaveFrame is called for a given frame,
+ // that frame must not be visible to subsequent stack iteration (or it
+ // could be added as a "new" frame just as it becomes garbage). When the
+ // frame is trapping, then exitFP is included in the callstack (otherwise,
+ // it is skipped, as explained above). So to unwind the innermost frame, we
+ // just clear the trapping state.
+
+ if (unwind_ == Unwind::True) {
+ if (activation_->isWasmTrapping()) {
+ activation_->finishWasmTrap();
+ }
+ activation_->setWasmExitFP(fp_);
+ }
+
+ popFrame();
+}
+
+static inline void AssertDirectJitCall(const void* fp) {
+ // Called via an inlined fast JIT to wasm call: in this case, FP is
+ // pointing in the middle of the exit frame, right before the exit
+ // footer; ensure the exit frame type is the expected one.
+#ifdef DEBUG
+ auto* jitCaller = (ExitFrameLayout*)fp;
+ MOZ_ASSERT(jitCaller->footer()->type() ==
+ jit::ExitFrameType::DirectWasmJitCall);
+#endif
+}
+
+void WasmFrameIter::popFrame() {
+ uint8_t* returnAddress = fp_->returnAddress();
+ code_ = LookupCode(returnAddress, &codeRange_);
+
+ if (!code_) {
+ // This is a direct call from the jit into the wasm function's body. The
+ // call stack resembles this at this point:
+ //
+ // |---------------------|
+ // | JIT FRAME |
+ // | JIT FAKE EXIT FRAME | <-- fp_->callerFP_
+ // | WASM FRAME | <-- fp_
+ // |---------------------|
+ //
+    // fp_->callerFP_ points to the fake exit frame set up by the jit caller,
+    // and the return address stored in fp_ points into JIT code, so it
+    // doesn't belong to any wasm instance's code (in particular, there's no
+    // associated CodeRange). Mark the frame as such.
+ AssertDirectJitCall(fp_->jitEntryCaller());
+
+ unwoundCallerFP_ = fp_->jitEntryCaller();
+ unwoundJitFrameType_.emplace(FrameType::Exit);
+
+ if (unwind_ == Unwind::True) {
+ activation_->setJSExitFP(unwoundCallerFP());
+ unwoundAddressOfReturnAddress_ = fp_->addressOfReturnAddress();
+ }
+
+ fp_ = nullptr;
+ code_ = nullptr;
+ codeRange_ = nullptr;
+
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(codeRange_);
+
+ Frame* prevFP = fp_;
+ fp_ = fp_->wasmCaller();
+ resumePCinCurrentFrame_ = returnAddress;
+
+ if (codeRange_->isInterpEntry()) {
+    // Interpreter entry has a simple frame; record FP from it.
+ unwoundCallerFP_ = reinterpret_cast<uint8_t*>(fp_);
+
+ fp_ = nullptr;
+ code_ = nullptr;
+ codeRange_ = nullptr;
+
+ if (unwind_ == Unwind::True) {
+ // We're exiting via the interpreter entry; we can safely reset
+ // exitFP.
+ activation_->setWasmExitFP(nullptr);
+ unwoundAddressOfReturnAddress_ = prevFP->addressOfReturnAddress();
+ }
+
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ if (codeRange_->isJitEntry()) {
+ // This wasm function has been called through the generic JIT entry by
+ // a JIT caller, so the call stack resembles this:
+ //
+ // |---------------------|
+ // | JIT FRAME |
+ // | JSJIT TO WASM EXIT | <-- fp_
+ // | WASM JIT ENTRY | <-- prevFP (already unwound)
+ // | WASM FRAME | (already unwound)
+ // |---------------------|
+ //
+ // The next value of FP is just a regular jit frame used as a marker to
+ // know that we should transition to a JSJit frame iterator.
+ unwoundCallerFP_ = reinterpret_cast<uint8_t*>(fp_);
+ unwoundJitFrameType_.emplace(FrameType::JSJitToWasm);
+
+ fp_ = nullptr;
+ code_ = nullptr;
+ codeRange_ = nullptr;
+
+ if (unwind_ == Unwind::True) {
+ activation_->setJSExitFP(unwoundCallerFP());
+ unwoundAddressOfReturnAddress_ = prevFP->addressOfReturnAddress();
+ }
+
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
+
+ const CallSite* callsite = code_->lookupCallSite(returnAddress);
+ MOZ_ASSERT(callsite);
+
+ if (callsite->mightBeCrossInstance()) {
+ instance_ = ExtractCallerInstanceFromFrameWithInstances(prevFP);
+ }
+
+ MOZ_ASSERT(code_ == &instance()->code());
+ lineOrBytecode_ = callsite->lineOrBytecode();
+
+ MOZ_ASSERT(!done());
+}
+
+const char* WasmFrameIter::filename() const {
+ MOZ_ASSERT(!done());
+ return code_->metadata().filename.get();
+}
+
+const char16_t* WasmFrameIter::displayURL() const {
+ MOZ_ASSERT(!done());
+ return code_->metadata().displayURL();
+}
+
+bool WasmFrameIter::mutedErrors() const {
+ MOZ_ASSERT(!done());
+ return code_->metadata().mutedErrors();
+}
+
+JSAtom* WasmFrameIter::functionDisplayAtom() const {
+ MOZ_ASSERT(!done());
+
+ JSContext* cx = activation_->cx();
+ JSAtom* atom = instance()->getFuncDisplayAtom(cx, codeRange_->funcIndex());
+ if (!atom) {
+ cx->clearPendingException();
+ return cx->names().empty;
+ }
+
+ return atom;
+}
+
+unsigned WasmFrameIter::lineOrBytecode() const {
+ MOZ_ASSERT(!done());
+ return lineOrBytecode_;
+}
+
+uint32_t WasmFrameIter::funcIndex() const {
+ MOZ_ASSERT(!done());
+ return codeRange_->funcIndex();
+}
+
+unsigned WasmFrameIter::computeLine(uint32_t* column) const {
+ if (instance()->isAsmJS()) {
+ if (column) {
+ *column = 1;
+ }
+ return lineOrBytecode_;
+ }
+
+ // As a terrible hack to avoid changing the tons of places that pass around
+  // (url, line, column) tuples to instead pass around a Variant that
+ // stores a (url, func-index, bytecode-offset) tuple for wasm frames,
+ // wasm stuffs its tuple into the existing (url, line, column) tuple,
+ // tagging the high bit of the column to indicate "this is a wasm frame".
+ // When knowing clients see this bit, they shall render the tuple
+ // (url, line, column|bit) as "url:wasm-function[column]:0xline" according
+ // to the WebAssembly Web API's Developer-Facing Display Conventions.
+ // https://webassembly.github.io/spec/web-api/index.html#conventions
+ // The wasm bytecode offset continues to be passed as the JS line to avoid
+ // breaking existing devtools code written when this used to be the case.
+
+ MOZ_ASSERT(!(codeRange_->funcIndex() & ColumnBit));
+ if (column) {
+ *column = codeRange_->funcIndex() | ColumnBit;
+ }
+ return lineOrBytecode_;
+}
+
+void** WasmFrameIter::unwoundAddressOfReturnAddress() const {
+ MOZ_ASSERT(done());
+ MOZ_ASSERT(unwind_ == Unwind::True);
+ MOZ_ASSERT(unwoundAddressOfReturnAddress_);
+ return unwoundAddressOfReturnAddress_;
+}
+
+bool WasmFrameIter::debugEnabled() const {
+ MOZ_ASSERT(!done());
+
+ // Only non-imported functions can have debug frames.
+ //
+ // Metadata::debugEnabled is only set if debugging is actually enabled (both
+ // requested, and available via baseline compilation), and Tier::Debug code
+ // will be available.
+ return code_->metadata().debugEnabled &&
+ codeRange_->funcIndex() >=
+ code_->metadata(Tier::Debug).funcImports.length();
+}
+
+DebugFrame* WasmFrameIter::debugFrame() const {
+ MOZ_ASSERT(!done());
+ return DebugFrame::from(fp_);
+}
+
+bool WasmFrameIter::hasUnwoundJitFrame() const {
+ return unwoundCallerFP_ && unwoundJitFrameType_.isSome();
+}
+
+jit::FrameType WasmFrameIter::unwoundJitFrameType() const {
+ MOZ_ASSERT(unwoundCallerFP_);
+ MOZ_ASSERT(unwoundJitFrameType_.isSome());
+ return *unwoundJitFrameType_;
+}
+
+uint8_t* WasmFrameIter::resumePCinCurrentFrame() const {
+ if (resumePCinCurrentFrame_) {
+ return resumePCinCurrentFrame_;
+ }
+ MOZ_ASSERT(activation_->isWasmTrapping());
+ // The next instruction is the instruction following the trap instruction.
+ return (uint8_t*)activation_->wasmTrapData().resumePC;
+}
+
+/*****************************************************************************/
+// Prologue/epilogue code generation
+
+// These constants reflect statically-determined offsets in the
+// prologue/epilogue. The offsets are dynamically asserted during code
+// generation.
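+// StartUnwinding (later in this file) compares a pc's offset from its code
+// range's entry and return points against these constants to decide how much
+// of the wasm::Frame has been constructed or torn down at that pc.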
+#if defined(JS_CODEGEN_X64)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 1;
+static const unsigned SetFP = 4;
+static const unsigned PoppedFP = 0;
+static const unsigned PoppedFPJitEntry = 0;
+#elif defined(JS_CODEGEN_X86)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 1;
+static const unsigned SetFP = 3;
+static const unsigned PoppedFP = 0;
+static const unsigned PoppedFPJitEntry = 0;
+#elif defined(JS_CODEGEN_ARM)
+static const unsigned BeforePushRetAddr = 0;
+static const unsigned PushedRetAddr = 4;
+static const unsigned PushedFP = 8;
+static const unsigned SetFP = 12;
+static const unsigned PoppedFP = 0;
+static const unsigned PoppedFPJitEntry = 0;
+#elif defined(JS_CODEGEN_ARM64)
+// On ARM64 we do not use push or pop; the prologues and epilogues are
+// structured differently due to restrictions on SP alignment. Even so,
+// PushedRetAddr and PushedFP are used in some restricted contexts
+// and must be superficially meaningful.
+static const unsigned BeforePushRetAddr = 0;
+static const unsigned PushedRetAddr = 8;
+static const unsigned PushedFP = 12;
+static const unsigned SetFP = 16;
+static const unsigned PoppedFP = 4;
+static const unsigned PoppedFPJitEntry = 8;
+static_assert(BeforePushRetAddr == 0, "Required by StartUnwinding");
+static_assert(PushedFP > PushedRetAddr, "Required by StartUnwinding");
+#elif defined(JS_CODEGEN_MIPS64)
+static const unsigned PushedRetAddr = 8;
+static const unsigned PushedFP = 16;
+static const unsigned SetFP = 20;
+static const unsigned PoppedFP = 4;
+static const unsigned PoppedFPJitEntry = 0;
+#elif defined(JS_CODEGEN_LOONG64)
+static const unsigned PushedRetAddr = 8;
+static const unsigned PushedFP = 16;
+static const unsigned SetFP = 20;
+static const unsigned PoppedFP = 4;
+static const unsigned PoppedFPJitEntry = 0;
+#elif defined(JS_CODEGEN_RISCV64)
+static const unsigned PushedRetAddr = 8;
+static const unsigned PushedFP = 16;
+static const unsigned SetFP = 20;
+static const unsigned PoppedFP = 4;
+static const unsigned PoppedFPJitEntry = 0;
+#elif defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_WASM32)
+// Synthetic values to satisfy asserts and avoid compiler warnings.
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 1;
+static const unsigned SetFP = 2;
+static const unsigned PoppedFP = 3;
+static const unsigned PoppedFPJitEntry = 4;
+#else
+# error "Unknown architecture!"
+#endif
+
+static void LoadActivation(MacroAssembler& masm, const Register& dest) {
+ // WasmCall pushes a JitActivation.
+ masm.loadPtr(Address(InstanceReg, wasm::Instance::offsetOfCx()), dest);
+ masm.loadPtr(Address(dest, JSContext::offsetOfActivation()), dest);
+}
+
+void wasm::SetExitFP(MacroAssembler& masm, ExitReason reason,
+ Register scratch) {
+ MOZ_ASSERT(!reason.isNone());
+
+ LoadActivation(masm, scratch);
+
+ masm.store32(
+ Imm32(reason.encode()),
+ Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));
+
+ masm.orPtr(Imm32(ExitFPTag), FramePointer);
+ masm.storePtr(FramePointer,
+ Address(scratch, JitActivation::offsetOfPackedExitFP()));
+ masm.andPtr(Imm32(int32_t(~ExitFPTag)), FramePointer);
+}
+
+void wasm::ClearExitFP(MacroAssembler& masm, Register scratch) {
+ LoadActivation(masm, scratch);
+ masm.storePtr(ImmWord(0x0),
+ Address(scratch, JitActivation::offsetOfPackedExitFP()));
+ masm.store32(
+ Imm32(0x0),
+ Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));
+}
+
+static void GenerateCallablePrologue(MacroAssembler& masm, uint32_t* entry) {
+ AutoCreatedBy acb(masm, "GenerateCallablePrologue");
+ masm.setFramePushed(0);
+
+ // ProfilingFrameIterator needs to know the offsets of several key
+ // instructions from entry. To save space, we make these offsets static
+ // constants and assert that they match the actual codegen below. On ARM,
+ // this requires AutoForbidPoolsAndNops to prevent a constant pool from being
+ // randomly inserted between two instructions.
+
+#if defined(JS_CODEGEN_MIPS64)
+ {
+ *entry = masm.currentOffset();
+
+ masm.ma_push(ra);
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.ma_push(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+ }
+#elif defined(JS_CODEGEN_LOONG64)
+ {
+ *entry = masm.currentOffset();
+
+ masm.ma_push(ra);
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.ma_push(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+ }
+#elif defined(JS_CODEGEN_RISCV64)
+ {
+ *entry = masm.currentOffset();
+ BlockTrampolinePoolScope block_trampoline_pool(&masm, 5);
+ masm.ma_push(ra);
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.ma_push(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ {
+ // We do not use the PseudoStackPointer. However, we may be called in a
+ // context -- compilation using Ion -- in which the PseudoStackPointer is
+ // in use. Rather than risk confusion in the uses of `masm` here, let's
+ // just switch in the real SP, do what we need to do, and restore the
+ // existing setting afterwards.
+ const vixl::Register stashedSPreg = masm.GetStackPointer64();
+ masm.SetStackPointer64(vixl::sp);
+
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 4);
+
+ *entry = masm.currentOffset();
+
+ masm.Sub(sp, sp, sizeof(Frame));
+ masm.Str(ARMRegister(lr, 64), MemOperand(sp, Frame::returnAddressOffset()));
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.Str(ARMRegister(FramePointer, 64),
+ MemOperand(sp, Frame::callerFPOffset()));
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.Mov(ARMRegister(FramePointer, 64), sp);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+
+ // And restore the SP-reg setting, per comment above.
+ masm.SetStackPointer64(stashedSPreg);
+ }
+#else
+ {
+# if defined(JS_CODEGEN_ARM)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 3);
+
+ *entry = masm.currentOffset();
+
+ static_assert(BeforePushRetAddr == 0);
+ masm.push(lr);
+# else
+ *entry = masm.currentOffset();
+ // The x86/x64 call instruction pushes the return address.
+# endif
+
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.push(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+ }
+#endif
+}
+
+static void GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, uint32_t* ret) {
+ AutoCreatedBy acb(masm, "GenerateCallableEpilogue");
+
+ if (framePushed) {
+ masm.freeStack(framePushed);
+ }
+
+ if (!reason.isNone()) {
+ ClearExitFP(masm, ABINonArgReturnVolatileReg);
+ }
+
+ DebugOnly<uint32_t> poppedFP{};
+
+#if defined(JS_CODEGEN_MIPS64)
+
+ masm.loadPtr(Address(StackPointer, Frame::callerFPOffset()), FramePointer);
+ poppedFP = masm.currentOffset();
+ masm.loadPtr(Address(StackPointer, Frame::returnAddressOffset()), ra);
+
+ *ret = masm.currentOffset();
+ masm.as_jr(ra);
+ masm.addToStackPtr(Imm32(sizeof(Frame)));
+
+#elif defined(JS_CODEGEN_LOONG64)
+
+ masm.loadPtr(Address(StackPointer, Frame::callerFPOffset()), FramePointer);
+ poppedFP = masm.currentOffset();
+ masm.loadPtr(Address(StackPointer, Frame::returnAddressOffset()), ra);
+
+ *ret = masm.currentOffset();
+ masm.addToStackPtr(Imm32(sizeof(Frame)));
+ masm.as_jirl(zero, ra, BOffImm16(0));
+
+#elif defined(JS_CODEGEN_RISCV64)
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(&masm, 20);
+ masm.loadPtr(Address(StackPointer, Frame::callerFPOffset()), FramePointer);
+ poppedFP = masm.currentOffset();
+ masm.loadPtr(Address(StackPointer, Frame::returnAddressOffset()), ra);
+
+ *ret = masm.currentOffset();
+ masm.addToStackPtr(Imm32(sizeof(Frame)));
+ masm.jalr(zero, ra, 0);
+ masm.nop();
+ }
+#elif defined(JS_CODEGEN_ARM64)
+
+ // See comment at equivalent place in |GenerateCallablePrologue| above.
+ const vixl::Register stashedSPreg = masm.GetStackPointer64();
+ masm.SetStackPointer64(vixl::sp);
+
+ AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 5);
+
+ masm.Ldr(ARMRegister(FramePointer, 64),
+ MemOperand(sp, Frame::callerFPOffset()));
+ poppedFP = masm.currentOffset();
+
+ masm.Ldr(ARMRegister(lr, 64), MemOperand(sp, Frame::returnAddressOffset()));
+ *ret = masm.currentOffset();
+
+ masm.Add(sp, sp, sizeof(Frame));
+
+ // Reinitialise PSP from SP. This is less than elegant because the prologue
+ // operates on the raw stack pointer SP and does not keep the PSP in sync.
+ // We can't use initPseudoStackPtr here because we just set up masm to not
+ // use it. Hence we have to do it "by hand".
+ masm.Mov(PseudoStackPointer64, vixl::sp);
+
+ masm.Ret(ARMRegister(lr, 64));
+
+ // See comment at equivalent place in |GenerateCallablePrologue| above.
+ masm.SetStackPointer64(stashedSPreg);
+
+#else
+ // Forbid pools for the same reason as described in GenerateCallablePrologue.
+# if defined(JS_CODEGEN_ARM)
+ AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 6);
+# endif
+
+ // There is an important ordering constraint here: fp must be repointed to
+ // the caller's frame before any field of the frame currently pointed to by
+ // fp is popped: asynchronous signal handlers (which use stack space
+ // starting at sp) could otherwise clobber these fields while they are still
+ // accessible via fp (fp fields are read during frame iteration which is
+ // *also* done asynchronously).
+
+ masm.pop(FramePointer);
+ poppedFP = masm.currentOffset();
+
+ *ret = masm.currentOffset();
+ masm.ret();
+
+#endif
+
+ MOZ_ASSERT_IF(!masm.oom(), PoppedFP == *ret - poppedFP);
+}
+
+void wasm::GenerateFunctionPrologue(MacroAssembler& masm,
+ const CallIndirectId& callIndirectId,
+ const Maybe<uint32_t>& tier1FuncIndex,
+ FuncOffsets* offsets) {
+ AutoCreatedBy acb(masm, "wasm::GenerateFunctionPrologue");
+
+ // We are going to generate this code layout:
+ // ---------------------------------------------
+ // checked call entry: callable prologue
+ // check signature
+ // jump functionBody ──┐
+ // unchecked call entry: callable prologue │
+ // functionBody <─────┘
+ // -----------------------------------------------
+ // checked call entry - used for call_indirect when we have to check the
+ // signature.
+ //
+ // unchecked call entry - used for regular direct same-instance calls.
+
+ // The checked call entry is a call target, so must have CodeAlignment.
+ // Its offset is normally zero.
+ static_assert(WasmCheckedCallEntryOffset % CodeAlignment == 0,
+ "code aligned");
+
+ // Flush pending pools so they do not get dumped between the 'begin' and
+ // 'uncheckedCallEntry' offsets since the difference must be less than
+ // UINT8_MAX to be stored in CodeRange::funcbeginToUncheckedCallEntry_.
+ // (Pending pools can be large.)
+ masm.flushBuffer();
+ masm.haltingAlign(CodeAlignment);
+
+ Label functionBody;
+
+ offsets->begin = masm.currentOffset();
+
+ // Only first-class functions (those that can be referenced in a table) need
+ // the checked call prologue w/ signature check. It is impossible to perform
+ // a checked call otherwise.
+ //
+ // asm.js function tables are homogeneous and don't need a signature check.
+ // However, they can be put in tables which expect a checked call entry point,
+ // so we generate a no-op entry point for consistency. If asm.js performance
+  // were important, we could refine this in the future.
+ if (callIndirectId.kind() != CallIndirectIdKind::None) {
+ // Generate checked call entry. The BytecodeOffset of the trap is fixed up
+ // to be the bytecode offset of the callsite by
+ // JitActivation::startWasmTrap.
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() - offsets->begin ==
+ WasmCheckedCallEntryOffset);
+ uint32_t dummy;
+ GenerateCallablePrologue(masm, &dummy);
+
+ switch (callIndirectId.kind()) {
+ case CallIndirectIdKind::Global: {
+ Register scratch = WasmTableCallScratchReg0;
+ masm.loadPtr(
+ Address(InstanceReg, Instance::offsetInData(
+ callIndirectId.instanceDataOffset())),
+ scratch);
+ masm.branchPtr(Assembler::Condition::Equal, WasmTableCallSigReg,
+ scratch, &functionBody);
+ masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
+ break;
+ }
+ case CallIndirectIdKind::Immediate: {
+ masm.branch32(Assembler::Condition::Equal, WasmTableCallSigReg,
+ Imm32(callIndirectId.immediate()), &functionBody);
+ masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
+ break;
+ }
+ case CallIndirectIdKind::AsmJS:
+ masm.jump(&functionBody);
+ break;
+ case CallIndirectIdKind::None:
+ break;
+ }
+
+ // The preceding code may have generated a small constant pool to support
+ // the comparison in the signature check. But if we flush the pool here we
+ // will also force the creation of an unused branch veneer in the pool for
+ // the jump to functionBody from the signature check on some platforms, thus
+ // needlessly inflating the size of the prologue.
+ //
+ // On no supported platform that uses a pool (arm, arm64) is there any risk
+ // at present of that branch or other elements in the pool going out of
+ // range while we're generating the following padding and prologue,
+ // therefore no pool elements will be emitted in the prologue, therefore it
+ // is safe not to flush here.
+ //
+ // We assert that this holds at runtime by comparing the expected entry
+    // offset to the recorded one; if they are not the same then
+ // GenerateCallablePrologue flushed a pool before the prologue code,
+ // contrary to assumption.
+
+ masm.nopAlign(CodeAlignment);
+ }
+
+ // Generate unchecked call entry:
+ DebugOnly<uint32_t> expectedEntry = masm.currentOffset();
+ GenerateCallablePrologue(masm, &offsets->uncheckedCallEntry);
+ MOZ_ASSERT(expectedEntry == offsets->uncheckedCallEntry);
+ masm.bind(&functionBody);
+#ifdef JS_CODEGEN_ARM64
+ // GenerateCallablePrologue creates a prologue which operates on the raw
+ // stack pointer and does not keep the PSP in sync. So we have to resync it
+ // here. But we can't use initPseudoStackPtr here because masm may not be
+ // set up to use it, depending on which compiler is in use. Hence do it
+ // "manually".
+ masm.Mov(PseudoStackPointer64, vixl::sp);
+#endif
+
+  // See the comment block in WasmCompile.cpp for an explanation of tiering.
+ if (tier1FuncIndex) {
+ Register scratch = ABINonArgReg0;
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfJumpTable()), scratch);
+ masm.jump(Address(scratch, *tier1FuncIndex * sizeof(uintptr_t)));
+ }
+
+ offsets->tierEntry = masm.currentOffset();
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
+
+void wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
+ FuncOffsets* offsets) {
+ // Inverse of GenerateFunctionPrologue:
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+ GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
+ &offsets->ret);
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
+
+void wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets) {
+ masm.haltingAlign(CodeAlignment);
+
+ GenerateCallablePrologue(masm, &offsets->begin);
+
+ // This frame will be exiting compiled code to C++ so record the fp and
+ // reason in the JitActivation so the frame iterators can unwind.
+ SetExitFP(masm, reason, ABINonArgReturnVolatileReg);
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ masm.reserveStack(framePushed);
+}
+
+void wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets) {
+ // Inverse of GenerateExitPrologue:
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+ GenerateCallableEpilogue(masm, framePushed, reason, &offsets->ret);
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
+
+static void AssertNoWasmExitFPInJitExit(MacroAssembler& masm) {
+ // As a general stack invariant, if Activation::packedExitFP is tagged as
+ // wasm, it must point to a valid wasm::Frame. The JIT exit stub calls into
+ // JIT code and thus does not really exit, thus, when entering/leaving the
+ // JIT exit stub from/to normal wasm code, packedExitFP is not tagged wasm.
+#ifdef DEBUG
+ Register scratch = ABINonArgReturnReg0;
+ LoadActivation(masm, scratch);
+
+ Label ok;
+ masm.branchTestPtr(Assembler::Zero,
+ Address(scratch, JitActivation::offsetOfPackedExitFP()),
+ Imm32(ExitFPTag), &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+}
+
+void wasm::GenerateJitExitPrologue(MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets) {
+ masm.haltingAlign(CodeAlignment);
+
+ GenerateCallablePrologue(masm, &offsets->begin);
+ AssertNoWasmExitFPInJitExit(masm);
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ masm.reserveStack(framePushed);
+}
+
+void wasm::GenerateJitExitEpilogue(MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets) {
+ // Inverse of GenerateJitExitPrologue:
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+ AssertNoWasmExitFPInJitExit(masm);
+ GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
+ &offsets->ret);
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
+
+void wasm::GenerateJitEntryPrologue(MacroAssembler& masm,
+ CallableOffsets* offsets) {
+ masm.haltingAlign(CodeAlignment);
+
+ {
+ // Push the return address.
+#if defined(JS_CODEGEN_ARM)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 3);
+ offsets->begin = masm.currentOffset();
+ static_assert(BeforePushRetAddr == 0);
+ masm.push(lr);
+#elif defined(JS_CODEGEN_MIPS64)
+ offsets->begin = masm.currentOffset();
+ masm.push(ra);
+#elif defined(JS_CODEGEN_LOONG64)
+ offsets->begin = masm.currentOffset();
+ masm.push(ra);
+#elif defined(JS_CODEGEN_RISCV64)
+ BlockTrampolinePoolScope block_trampoline_pool(&masm, 10);
+ offsets->begin = masm.currentOffset();
+ masm.push(ra);
+#elif defined(JS_CODEGEN_ARM64)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 4);
+ offsets->begin = masm.currentOffset();
+ static_assert(BeforePushRetAddr == 0);
+ // Subtract from SP first as SP must be aligned before offsetting.
+ masm.Sub(sp, sp, 16);
+ static_assert(JitFrameLayout::offsetOfReturnAddress() == 8);
+ masm.Str(ARMRegister(lr, 64), MemOperand(sp, 8));
+#else
+ // The x86/x64 call instruction pushes the return address.
+ offsets->begin = masm.currentOffset();
+#endif
+ MOZ_ASSERT_IF(!masm.oom(),
+ PushedRetAddr == masm.currentOffset() - offsets->begin);
+ // Save jit frame pointer, so unwinding from wasm to jit frames is trivial.
+#if defined(JS_CODEGEN_ARM64)
+ static_assert(JitFrameLayout::offsetOfCallerFramePtr() == 0);
+ masm.Str(ARMRegister(FramePointer, 64), MemOperand(sp, 0));
+#else
+ masm.Push(FramePointer);
+#endif
+ MOZ_ASSERT_IF(!masm.oom(),
+ PushedFP == masm.currentOffset() - offsets->begin);
+
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - offsets->begin);
+ }
+
+ masm.setFramePushed(0);
+}
+
+void wasm::GenerateJitEntryEpilogue(MacroAssembler& masm,
+ CallableOffsets* offsets) {
+ DebugOnly<uint32_t> poppedFP{};
+#ifdef JS_CODEGEN_ARM64
+ RegisterOrSP sp = masm.getStackPointer();
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 5);
+ masm.loadPtr(Address(sp, 8), lr);
+ masm.loadPtr(Address(sp, 0), FramePointer);
+ poppedFP = masm.currentOffset();
+
+ masm.addToStackPtr(Imm32(2 * sizeof(void*)));
+ // Copy SP into PSP to enforce return-point invariants (SP == PSP).
+ // `addToStackPtr` won't sync them because SP is the active pointer here.
+ // For the same reason, we can't use initPseudoStackPtr to do the sync, so
+ // we have to do it "by hand". Omitting this causes many tests to segfault.
+ masm.moveStackPtrTo(PseudoStackPointer);
+
+ offsets->ret = masm.currentOffset();
+ masm.Ret(ARMRegister(lr, 64));
+ masm.setFramePushed(0);
+#else
+ // Forbid pools for the same reason as described in GenerateCallablePrologue.
+# if defined(JS_CODEGEN_ARM)
+ AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 2);
+# endif
+
+ masm.pop(FramePointer);
+ poppedFP = masm.currentOffset();
+
+ offsets->ret = masm.currentOffset();
+ masm.ret();
+#endif
+ MOZ_ASSERT_IF(!masm.oom(), PoppedFPJitEntry == offsets->ret - poppedFP);
+}
+
+/*****************************************************************************/
+// ProfilingFrameIterator
+
+ProfilingFrameIterator::ProfilingFrameIterator()
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundJitCallerFP_(nullptr),
+ exitReason_(ExitReason::Fixed::None) {
+ MOZ_ASSERT(done());
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation)
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundJitCallerFP_(nullptr),
+ exitReason_(activation.wasmExitReason()) {
+ initFromExitFP(activation.wasmExitFP());
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const Frame* fp)
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundJitCallerFP_(nullptr),
+ exitReason_(ExitReason::Fixed::ImportJit) {
+ MOZ_ASSERT(fp);
+ initFromExitFP(fp);
+}
+
+static inline void AssertMatchesCallSite(void* callerPC, uint8_t* callerFP) {
+#ifdef DEBUG
+ const CodeRange* callerCodeRange;
+ const Code* code = LookupCode(callerPC, &callerCodeRange);
+
+ if (!code) {
+ AssertDirectJitCall(callerFP);
+ return;
+ }
+
+ MOZ_ASSERT(callerCodeRange);
+
+ if (callerCodeRange->isInterpEntry()) {
+ // callerFP is the value of the frame pointer register when we were called
+ // from C++.
+ return;
+ }
+
+ if (callerCodeRange->isJitEntry()) {
+ MOZ_ASSERT(callerFP != nullptr);
+ return;
+ }
+
+ const CallSite* callsite = code->lookupCallSite(callerPC);
+ MOZ_ASSERT(callsite);
+#endif
+}
+
+void ProfilingFrameIterator::initFromExitFP(const Frame* fp) {
+ MOZ_ASSERT(fp);
+ stackAddress_ = (void*)fp;
+ endStackAddress_ = stackAddress_;
+ code_ = LookupCode(fp->returnAddress(), &codeRange_);
+
+ if (!code_) {
+ // This is a direct call from the JIT, the caller FP is pointing to the JIT
+ // caller's frame.
+ AssertDirectJitCall(fp->jitEntryCaller());
+
+ unwoundJitCallerFP_ = fp->jitEntryCaller();
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(codeRange_);
+
+ // Since we don't have the pc for fp, start unwinding at the caller of fp.
+ // This means that the innermost frame is skipped. This is fine because:
+ // - for import exit calls, the innermost frame is a thunk, so the first
+ // frame that shows up is the function calling the import;
+ // - for Math and other builtin calls, we note the absence of an exit
+  //   reason and inject a fake "builtin" frame.
+ switch (codeRange_->kind()) {
+ case CodeRange::InterpEntry:
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ break;
+ case CodeRange::JitEntry:
+ callerPC_ = nullptr;
+ callerFP_ = fp->rawCaller();
+ break;
+ case CodeRange::Function:
+ fp = fp->wasmCaller();
+ callerPC_ = fp->returnAddress();
+ callerFP_ = fp->rawCaller();
+ AssertMatchesCallSite(callerPC_, callerFP_);
+ break;
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::BuiltinThunk:
+ case CodeRange::TrapExit:
+ case CodeRange::DebugTrap:
+ case CodeRange::Throw:
+ case CodeRange::FarJumpIsland:
+ MOZ_CRASH("Unexpected CodeRange kind");
+ }
+
+ MOZ_ASSERT(!done());
+}
+
+static bool isSignatureCheckFail(uint32_t offsetInCode,
+ const CodeRange* codeRange) {
+ if (!codeRange->isFunction()) {
+ return false;
+ }
+ // checked call entry: 1. push Frame
+ // 2. set FP
+ // 3. signature check <--- check if we are here.
+ // 4. jump 7
+ // unchecked call entry: 5. push Frame
+ // 6. set FP
+ // 7. function's code
+ return offsetInCode < codeRange->funcUncheckedCallEntry() &&
+ (offsetInCode - codeRange->funcCheckedCallEntry()) > SetFP;
+}
+
+const Instance* js::wasm::GetNearestEffectiveInstance(const Frame* fp) {
+ while (true) {
+ uint8_t* returnAddress = fp->returnAddress();
+ const CodeRange* codeRange = nullptr;
+ const Code* code = LookupCode(returnAddress, &codeRange);
+
+ if (!code) {
+ // It is a direct call from JIT.
+ AssertDirectJitCall(fp->jitEntryCaller());
+ return ExtractCalleeInstanceFromFrameWithInstances(fp);
+ }
+
+ MOZ_ASSERT(codeRange);
+
+ if (codeRange->isEntry()) {
+ return ExtractCalleeInstanceFromFrameWithInstances(fp);
+ }
+
+ MOZ_ASSERT(codeRange->kind() == CodeRange::Function);
+ MOZ_ASSERT(code);
+ const CallSite* callsite = code->lookupCallSite(returnAddress);
+ if (callsite->mightBeCrossInstance()) {
+ return ExtractCalleeInstanceFromFrameWithInstances(fp);
+ }
+
+ fp = fp->wasmCaller();
+ }
+}
+
+Instance* js::wasm::GetNearestEffectiveInstance(Frame* fp) {
+ return const_cast<Instance*>(
+ GetNearestEffectiveInstance(const_cast<const Frame*>(fp)));
+}
+
+bool js::wasm::StartUnwinding(const RegisterState& registers,
+ UnwindState* unwindState, bool* unwoundCaller) {
+ // Shorthands.
+ uint8_t* const pc = (uint8_t*)registers.pc;
+ void** const sp = (void**)registers.sp;
+
+ // The frame pointer might be:
+ // - in the process of tagging/untagging when calling into C++ code (this
+ // happens in wasm::SetExitFP); make sure it's untagged.
+ // - unreliable if it's not been set yet, in prologues.
+ uint8_t* fp = Frame::isExitFP(registers.fp)
+ ? Frame::untagExitFP(registers.fp)
+ : reinterpret_cast<uint8_t*>(registers.fp);
+
+ // Get the CodeRange describing pc and the base address to which the
+ // CodeRange is relative. If the pc is not in a wasm module or a builtin
+ // thunk, then execution must be entering from or leaving to the C++ caller
+ // that pushed the JitActivation.
+ const CodeRange* codeRange;
+ uint8_t* codeBase;
+ const Code* code = nullptr;
+
+ const CodeSegment* codeSegment = LookupCodeSegment(pc, &codeRange);
+ if (codeSegment) {
+ code = &codeSegment->code();
+ codeBase = codeSegment->base();
+ MOZ_ASSERT(codeRange);
+ } else if (!LookupBuiltinThunk(pc, &codeRange, &codeBase)) {
+ return false;
+ }
+
+ // When the pc is inside the prologue/epilogue, the innermost call's Frame
+ // is not complete and thus fp points to the second-to-innermost call's
+ // Frame. Since fp can only tell you about its caller, naively unwinding
+ // while pc is in the prologue/epilogue would skip the second-to-innermost
+ // call. To avoid this problem, we use the static structure of the code in
+ // the prologue and epilogue to do the Right Thing.
+ uint32_t offsetInCode = pc - codeBase;
+ MOZ_ASSERT(offsetInCode >= codeRange->begin());
+ MOZ_ASSERT(offsetInCode < codeRange->end());
+
+ // Compute the offset of the pc from the (unchecked call) entry of the code
+  // range. The checked call entry and the unchecked call entry have a common
+  // prefix, so a pc before the signature check in the checked call entry is
+  // equivalent to the corresponding pc in the unchecked call entry. Thus, we
+  // can simplify the case analysis below by redirecting all
+  // pc-in-checked-call-entry-before-signature-check cases to the
+  // pc-at-unchecked-call-entry case.
+ uint32_t offsetFromEntry;
+ if (codeRange->isFunction()) {
+ if (offsetInCode < codeRange->funcUncheckedCallEntry()) {
+ offsetFromEntry = offsetInCode - codeRange->funcCheckedCallEntry();
+ } else {
+ offsetFromEntry = offsetInCode - codeRange->funcUncheckedCallEntry();
+ }
+ } else {
+ offsetFromEntry = offsetInCode - codeRange->begin();
+ }
+
+ // Most cases end up unwinding to the caller state; not unwinding is the
+ // exception here.
+ *unwoundCaller = true;
+
+ uint8_t* fixedFP = nullptr;
+ void* fixedPC = nullptr;
+ switch (codeRange->kind()) {
+ case CodeRange::Function:
+ case CodeRange::FarJumpIsland:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::BuiltinThunk:
+ case CodeRange::DebugTrap:
+#if defined(JS_CODEGEN_MIPS64)
+ if (codeRange->isThunk()) {
+        // The FarJumpIsland sequence temporarily scrambles ra.
+ // Don't unwind to caller.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(
+ Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ } else if (offsetFromEntry < PushedFP) {
+ // On MIPS we rely on register state instead of state saved on
+ // stack until the wasm::Frame is completely built.
+ // On entry the return address is in ra (registers.lr) and
+ // fp holds the caller's fp.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#elif defined(JS_CODEGEN_LOONG64)
+ if (codeRange->isThunk()) {
+        // The FarJumpIsland sequence temporarily scrambles ra.
+ // Don't unwind to caller.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(
+ Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ } else if (offsetFromEntry < PushedFP) {
+ // On LoongArch we rely on register state instead of state saved on
+ // stack until the wasm::Frame is completely built.
+ // On entry the return address is in ra (registers.lr) and
+ // fp holds the caller's fp.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#elif defined(JS_CODEGEN_RISCV64)
+ if (codeRange->isThunk()) {
+        // The FarJumpIsland sequence temporarily scrambles ra.
+ // Don't unwind to caller.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(
+ Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ } else if (offsetFromEntry < PushedFP) {
+ // On Riscv64 we rely on register state instead of state saved on
+ // stack until the wasm::Frame is completely built.
+ // On entry the return address is in ra (registers.lr) and
+ // fp holds the caller's fp.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#elif defined(JS_CODEGEN_ARM64)
+ if (offsetFromEntry < PushedFP || codeRange->isThunk()) {
+ // Constraints above ensure that this covers BeforePushRetAddr and
+ // PushedRetAddr.
+ //
+ // On ARM64 we subtract the size of the Frame from SP and then store
+ // values into the stack. Execution can be interrupted at various
+ // places in that sequence. We rely on the register state for our
+ // values.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#elif defined(JS_CODEGEN_ARM)
+ if (offsetFromEntry == BeforePushRetAddr || codeRange->isThunk()) {
+ // The return address is still in lr and fp holds the caller's fp.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#endif
+ if (offsetFromEntry == PushedRetAddr || codeRange->isThunk()) {
+ // The return address has been pushed on the stack but fp still
+ // points to the caller's fp.
+ fixedPC = sp[0];
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else if (offsetFromEntry == PushedFP) {
+ // The full Frame has been pushed; fp is still the caller's fp.
+ const auto* frame = Frame::fromUntaggedWasmExitFP(sp);
+ MOZ_ASSERT(frame->rawCaller() == fp);
+ fixedPC = frame->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#if defined(JS_CODEGEN_MIPS64)
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode <= codeRange->ret()) {
+ // The fixedFP field of the Frame has been loaded into fp.
+ // The ra and instance might also be loaded, but the Frame structure is
+      // still on the stack, so we can access the ra from there.
+ MOZ_ASSERT(*sp == fp);
+ fixedPC = Frame::fromUntaggedWasmExitFP(sp)->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#elif defined(JS_CODEGEN_LOONG64)
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode <= codeRange->ret()) {
+ // The fixedFP field of the Frame has been loaded into fp.
+ // The ra might also be loaded, but the Frame structure is still on
+      // stack, so we can access the ra from there.
+ MOZ_ASSERT(*sp == fp);
+ fixedPC = Frame::fromUntaggedWasmExitFP(sp)->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#elif defined(JS_CODEGEN_RISCV64)
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode <= codeRange->ret()) {
+ // The fixedFP field of the Frame has been loaded into fp.
+ // The ra might also be loaded, but the Frame structure is still on
+      // stack, so we can access the ra from there.
+ MOZ_ASSERT(*sp == fp);
+ fixedPC = Frame::fromUntaggedWasmExitFP(sp)->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#elif defined(JS_CODEGEN_ARM64)
+ // The stack pointer does not move until all values have
+ // been restored so several cases can be coalesced here.
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode <= codeRange->ret()) {
+ fixedPC = Frame::fromUntaggedWasmExitFP(sp)->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#else
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode < codeRange->ret()) {
+ // The fixedFP field of the Frame has been popped into fp.
+ fixedPC = sp[1];
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else if (offsetInCode == codeRange->ret()) {
+ // Both the instance and fixedFP fields have been popped and fp now
+ // points to the caller's frame.
+ fixedPC = sp[0];
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#endif
+ } else {
+ if (isSignatureCheckFail(offsetInCode, codeRange)) {
+ // Frame has been pushed and FP has been set.
+ const auto* frame = Frame::fromUntaggedWasmExitFP(fp);
+ fixedFP = frame->rawCaller();
+ fixedPC = frame->returnAddress();
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ break;
+ }
+
+ // Not in the prologue/epilogue.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(
+ Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ break;
+ }
+ break;
+ case CodeRange::TrapExit:
+ // These code stubs execute after the prologue/epilogue have completed
+ // so pc/fp contains the right values here.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ break;
+ case CodeRange::InterpEntry:
+      // The entry trampoline is the final frame in a wasm JitActivation. The
+ // entry trampoline also doesn't GeneratePrologue/Epilogue so we can't
+ // use the general unwinding logic above.
+ break;
+ case CodeRange::JitEntry:
+ // There's a jit frame above the current one; we don't care about pc
+ // since the Jit entry frame is a jit frame which can be considered as
+ // an exit frame.
+ if (offsetFromEntry < PushedFP) {
+ // We haven't pushed the jit caller's frame pointer yet, thus the jit
+ // frame is incomplete. During profiling frame iteration, it means that
+ // the jit profiling frame iterator won't be able to unwind this frame;
+ // drop it.
+ return false;
+ }
+ if (offsetInCode >= codeRange->ret() - PoppedFPJitEntry &&
+ offsetInCode <= codeRange->ret()) {
+ // We've popped FP but still have to return. Similar to the
+ // |offsetFromEntry < PushedFP| case above, the JIT frame is now
+ // incomplete and we can't unwind.
+ return false;
+ }
+ // Set fixedFP to the address of the JitFrameLayout on the stack.
+ if (offsetFromEntry < SetFP) {
+ fixedFP = reinterpret_cast<uint8_t*>(sp);
+ } else {
+ fixedFP = fp;
+ }
+ fixedPC = nullptr;
+ break;
+ case CodeRange::Throw:
+ // The throw stub executes a small number of instructions before popping
+ // the entire activation. To simplify testing, we simply pretend throw
+ // stubs have already popped the entire stack.
+ return false;
+ }
+
+ unwindState->code = code;
+ unwindState->codeRange = codeRange;
+ unwindState->fp = fixedFP;
+ unwindState->pc = fixedPC;
+ return true;
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation,
+ const RegisterState& state)
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundJitCallerFP_(nullptr),
+ exitReason_(ExitReason::Fixed::None) {
+  // Let wasmExitFP take precedence over StartUnwinding when it is set, since
+  // during the body of an exit stub the register state may not be valid,
+  // causing StartUnwinding() to abandon unwinding this activation.
+ if (activation.hasWasmExitFP()) {
+ exitReason_ = activation.wasmExitReason();
+ initFromExitFP(activation.wasmExitFP());
+ return;
+ }
+
+ bool unwoundCaller;
+ UnwindState unwindState;
+ if (!StartUnwinding(state, &unwindState, &unwoundCaller)) {
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(unwindState.codeRange);
+
+ if (unwoundCaller) {
+ callerFP_ = unwindState.fp;
+ callerPC_ = unwindState.pc;
+ } else {
+ callerFP_ = Frame::fromUntaggedWasmExitFP(unwindState.fp)->rawCaller();
+ callerPC_ = Frame::fromUntaggedWasmExitFP(unwindState.fp)->returnAddress();
+ }
+
+ code_ = unwindState.code;
+ codeRange_ = unwindState.codeRange;
+ stackAddress_ = state.sp;
+ endStackAddress_ = state.sp;
+ MOZ_ASSERT(!done());
+}
+
+void ProfilingFrameIterator::operator++() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(!unwoundJitCallerFP_);
+
+ if (!exitReason_.isNone()) {
+ exitReason_ = ExitReason::None();
+ MOZ_ASSERT(codeRange_);
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ if (codeRange_->isInterpEntry()) {
+ codeRange_ = nullptr;
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ if (codeRange_->isJitEntry()) {
+ MOZ_ASSERT(callerFP_);
+ unwoundJitCallerFP_ = callerFP_;
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ codeRange_ = nullptr;
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_RELEASE_ASSERT(callerPC_);
+
+ code_ = LookupCode(callerPC_, &codeRange_);
+
+ if (!code_) {
+    // The parent frame is an inlined wasm call; callerFP_ points to the fake
+    // exit frame.
+ MOZ_ASSERT(!codeRange_);
+ AssertDirectJitCall(callerFP_);
+ unwoundJitCallerFP_ = callerFP_;
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(codeRange_);
+
+ if (codeRange_->isInterpEntry()) {
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ if (codeRange_->isJitEntry()) {
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ MOZ_ASSERT(code_ == &GetNearestEffectiveInstance(
+ Frame::fromUntaggedWasmExitFP(callerFP_))
+ ->code());
+
+ switch (codeRange_->kind()) {
+ case CodeRange::Function:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::BuiltinThunk:
+ case CodeRange::TrapExit:
+ case CodeRange::DebugTrap:
+ case CodeRange::FarJumpIsland: {
+ stackAddress_ = callerFP_;
+ const auto* frame = Frame::fromUntaggedWasmExitFP(callerFP_);
+ callerPC_ = frame->returnAddress();
+ AssertMatchesCallSite(callerPC_, frame->rawCaller());
+ callerFP_ = frame->rawCaller();
+ break;
+ }
+ case CodeRange::InterpEntry:
+ case CodeRange::JitEntry:
+ MOZ_CRASH("should have been guarded above");
+ case CodeRange::Throw:
+ MOZ_CRASH("code range doesn't have frame");
+ }
+
+ MOZ_ASSERT(!done());
+}
+
+static const char* ThunkedNativeToDescription(SymbolicAddress func) {
+ MOZ_ASSERT(NeedsBuiltinThunk(func));
+ switch (func) {
+ case SymbolicAddress::HandleDebugTrap:
+ case SymbolicAddress::HandleThrow:
+ case SymbolicAddress::HandleTrap:
+ case SymbolicAddress::CallImport_General:
+ case SymbolicAddress::CoerceInPlace_ToInt32:
+ case SymbolicAddress::CoerceInPlace_ToNumber:
+ case SymbolicAddress::CoerceInPlace_ToBigInt:
+ case SymbolicAddress::BoxValue_Anyref:
+ MOZ_ASSERT(!NeedsBuiltinThunk(func),
+ "not in sync with NeedsBuiltinThunk");
+ break;
+ case SymbolicAddress::ToInt32:
+ return "call to asm.js native ToInt32 coercion (in wasm)";
+ case SymbolicAddress::DivI64:
+ return "call to native i64.div_s (in wasm)";
+ case SymbolicAddress::UDivI64:
+ return "call to native i64.div_u (in wasm)";
+ case SymbolicAddress::ModI64:
+ return "call to native i64.rem_s (in wasm)";
+ case SymbolicAddress::UModI64:
+ return "call to native i64.rem_u (in wasm)";
+ case SymbolicAddress::TruncateDoubleToUint64:
+ return "call to native i64.trunc_u/f64 (in wasm)";
+ case SymbolicAddress::TruncateDoubleToInt64:
+ return "call to native i64.trunc_s/f64 (in wasm)";
+ case SymbolicAddress::SaturatingTruncateDoubleToUint64:
+ return "call to native i64.trunc_u:sat/f64 (in wasm)";
+ case SymbolicAddress::SaturatingTruncateDoubleToInt64:
+ return "call to native i64.trunc_s:sat/f64 (in wasm)";
+ case SymbolicAddress::Uint64ToDouble:
+ return "call to native f64.convert_u/i64 (in wasm)";
+ case SymbolicAddress::Uint64ToFloat32:
+ return "call to native f32.convert_u/i64 (in wasm)";
+ case SymbolicAddress::Int64ToDouble:
+ return "call to native f64.convert_s/i64 (in wasm)";
+ case SymbolicAddress::Int64ToFloat32:
+ return "call to native f32.convert_s/i64 (in wasm)";
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ return "call to native i32.div_s (in wasm)";
+ case SymbolicAddress::aeabi_uidivmod:
+ return "call to native i32.div_u (in wasm)";
+#endif
+ case SymbolicAddress::AllocateBigInt:
+ return "call to native newCell<BigInt, NoGC> (in wasm)";
+ case SymbolicAddress::ModD:
+ return "call to asm.js native f64 % (mod)";
+ case SymbolicAddress::SinNativeD:
+ return "call to asm.js native f64 Math.sin";
+ case SymbolicAddress::SinFdlibmD:
+ return "call to asm.js fdlibm f64 Math.sin";
+ case SymbolicAddress::CosNativeD:
+ return "call to asm.js native f64 Math.cos";
+ case SymbolicAddress::CosFdlibmD:
+ return "call to asm.js fdlibm f64 Math.cos";
+ case SymbolicAddress::TanNativeD:
+ return "call to asm.js native f64 Math.tan";
+ case SymbolicAddress::TanFdlibmD:
+ return "call to asm.js fdlibm f64 Math.tan";
+ case SymbolicAddress::ASinD:
+ return "call to asm.js native f64 Math.asin";
+ case SymbolicAddress::ACosD:
+ return "call to asm.js native f64 Math.acos";
+ case SymbolicAddress::ATanD:
+ return "call to asm.js native f64 Math.atan";
+ case SymbolicAddress::CeilD:
+ return "call to native f64.ceil (in wasm)";
+ case SymbolicAddress::CeilF:
+ return "call to native f32.ceil (in wasm)";
+ case SymbolicAddress::FloorD:
+ return "call to native f64.floor (in wasm)";
+ case SymbolicAddress::FloorF:
+ return "call to native f32.floor (in wasm)";
+ case SymbolicAddress::TruncD:
+ return "call to native f64.trunc (in wasm)";
+ case SymbolicAddress::TruncF:
+ return "call to native f32.trunc (in wasm)";
+ case SymbolicAddress::NearbyIntD:
+ return "call to native f64.nearest (in wasm)";
+ case SymbolicAddress::NearbyIntF:
+ return "call to native f32.nearest (in wasm)";
+ case SymbolicAddress::ExpD:
+ return "call to asm.js native f64 Math.exp";
+ case SymbolicAddress::LogD:
+ return "call to asm.js native f64 Math.log";
+ case SymbolicAddress::PowD:
+ return "call to asm.js native f64 Math.pow";
+ case SymbolicAddress::ATan2D:
+ return "call to asm.js native f64 Math.atan2";
+ case SymbolicAddress::MemoryGrowM32:
+ return "call to native memory.grow m32 (in wasm)";
+ case SymbolicAddress::MemoryGrowM64:
+ return "call to native memory.grow m64 (in wasm)";
+ case SymbolicAddress::MemorySizeM32:
+ return "call to native memory.size m32 (in wasm)";
+ case SymbolicAddress::MemorySizeM64:
+ return "call to native memory.size m64 (in wasm)";
+ case SymbolicAddress::WaitI32M32:
+ return "call to native i32.wait m32 (in wasm)";
+ case SymbolicAddress::WaitI32M64:
+ return "call to native i32.wait m64 (in wasm)";
+ case SymbolicAddress::WaitI64M32:
+ return "call to native i64.wait m32 (in wasm)";
+ case SymbolicAddress::WaitI64M64:
+ return "call to native i64.wait m64 (in wasm)";
+ case SymbolicAddress::WakeM32:
+ return "call to native wake m32 (in wasm)";
+ case SymbolicAddress::WakeM64:
+ return "call to native wake m64 (in wasm)";
+ case SymbolicAddress::CoerceInPlace_JitEntry:
+ return "out-of-line coercion for jit entry arguments (in wasm)";
+ case SymbolicAddress::ReportV128JSCall:
+ return "jit call to v128 wasm function";
+ case SymbolicAddress::MemCopyM32:
+ case SymbolicAddress::MemCopySharedM32:
+ return "call to native memory.copy m32 function";
+ case SymbolicAddress::MemCopyM64:
+ case SymbolicAddress::MemCopySharedM64:
+ return "call to native memory.copy m64 function";
+ case SymbolicAddress::DataDrop:
+ return "call to native data.drop function";
+ case SymbolicAddress::MemFillM32:
+ case SymbolicAddress::MemFillSharedM32:
+ return "call to native memory.fill m32 function";
+ case SymbolicAddress::MemFillM64:
+ case SymbolicAddress::MemFillSharedM64:
+ return "call to native memory.fill m64 function";
+ case SymbolicAddress::MemInitM32:
+ return "call to native memory.init m32 function";
+ case SymbolicAddress::MemInitM64:
+ return "call to native memory.init m64 function";
+ case SymbolicAddress::TableCopy:
+ return "call to native table.copy function";
+ case SymbolicAddress::TableFill:
+ return "call to native table.fill function";
+ case SymbolicAddress::MemDiscardM32:
+ case SymbolicAddress::MemDiscardSharedM32:
+ return "call to native memory.discard m32 function";
+ case SymbolicAddress::MemDiscardM64:
+ case SymbolicAddress::MemDiscardSharedM64:
+ return "call to native memory.discard m64 function";
+ case SymbolicAddress::ElemDrop:
+ return "call to native elem.drop function";
+ case SymbolicAddress::TableGet:
+ return "call to native table.get function";
+ case SymbolicAddress::TableGrow:
+ return "call to native table.grow function";
+ case SymbolicAddress::TableInit:
+ return "call to native table.init function";
+ case SymbolicAddress::TableSet:
+ return "call to native table.set function";
+ case SymbolicAddress::TableSize:
+ return "call to native table.size function";
+ case SymbolicAddress::RefFunc:
+ return "call to native ref.func function";
+ case SymbolicAddress::PostBarrier:
+ case SymbolicAddress::PostBarrierPrecise:
+ case SymbolicAddress::PostBarrierPreciseWithOffset:
+ return "call to native GC postbarrier (in wasm)";
+ case SymbolicAddress::ExceptionNew:
+ return "call to native exception new (in wasm)";
+ case SymbolicAddress::ThrowException:
+ return "call to native throw exception (in wasm)";
+ case SymbolicAddress::StructNew:
+ case SymbolicAddress::StructNewUninit:
+ return "call to native struct.new (in wasm)";
+ case SymbolicAddress::ArrayNew:
+ case SymbolicAddress::ArrayNewUninit:
+ return "call to native array.new (in wasm)";
+ case SymbolicAddress::ArrayNewData:
+ return "call to native array.new_data function";
+ case SymbolicAddress::ArrayNewElem:
+ return "call to native array.new_elem function";
+ case SymbolicAddress::ArrayCopy:
+ return "call to native array.copy function";
+#define OP(op, export, sa_name, abitype, entry, idx) \
+ case SymbolicAddress::sa_name: \
+ return "call to native " #op " intrinsic (in wasm)";
+ FOR_EACH_INTRINSIC(OP)
+#undef OP
+#ifdef WASM_CODEGEN_DEBUG
+ case SymbolicAddress::PrintI32:
+ case SymbolicAddress::PrintPtr:
+ case SymbolicAddress::PrintF32:
+ case SymbolicAddress::PrintF64:
+ case SymbolicAddress::PrintText:
+#endif
+ case SymbolicAddress::Limit:
+ break;
+ }
+ return "?";
+}
+
+const char* ProfilingFrameIterator::label() const {
+ MOZ_ASSERT(!done());
+
+ // Use the same string for both time inside and under so that the two
+ // entries will be coalesced by the profiler.
+ // Must be kept in sync with /tools/profiler/tests/test_asm.js
+ static const char importJitDescription[] = "fast exit trampoline (in wasm)";
+ static const char importInterpDescription[] =
+ "slow exit trampoline (in wasm)";
+ static const char builtinNativeDescription[] =
+ "fast exit trampoline to native (in wasm)";
+ static const char trapDescription[] = "trap handling (in wasm)";
+ static const char debugTrapDescription[] = "debug trap handling (in wasm)";
+
+ if (!exitReason_.isFixed()) {
+ return ThunkedNativeToDescription(exitReason_.symbolic());
+ }
+
+ switch (exitReason_.fixed()) {
+ case ExitReason::Fixed::None:
+ break;
+ case ExitReason::Fixed::ImportJit:
+ return importJitDescription;
+ case ExitReason::Fixed::ImportInterp:
+ return importInterpDescription;
+ case ExitReason::Fixed::BuiltinNative:
+ return builtinNativeDescription;
+ case ExitReason::Fixed::Trap:
+ return trapDescription;
+ case ExitReason::Fixed::DebugTrap:
+ return debugTrapDescription;
+ }
+
+ switch (codeRange_->kind()) {
+ case CodeRange::Function:
+ return code_->profilingLabel(codeRange_->funcIndex());
+ case CodeRange::InterpEntry:
+ return "slow entry trampoline (in wasm)";
+ case CodeRange::JitEntry:
+ return "fast entry trampoline (in wasm)";
+ case CodeRange::ImportJitExit:
+ return importJitDescription;
+ case CodeRange::BuiltinThunk:
+ return builtinNativeDescription;
+ case CodeRange::ImportInterpExit:
+ return importInterpDescription;
+ case CodeRange::TrapExit:
+ return trapDescription;
+ case CodeRange::DebugTrap:
+ return debugTrapDescription;
+ case CodeRange::FarJumpIsland:
+ return "interstitial (in wasm)";
+ case CodeRange::Throw:
+ MOZ_CRASH("does not have a frame");
+ }
+
+ MOZ_CRASH("bad code range kind");
+}
diff --git a/js/src/wasm/WasmFrameIter.h b/js/src/wasm/WasmFrameIter.h
new file mode 100644
index 0000000000..cec227a43f
--- /dev/null
+++ b/js/src/wasm/WasmFrameIter.h
@@ -0,0 +1,277 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_frame_iter_h
+#define wasm_frame_iter_h
+
+#include "js/ProfilingFrameIterator.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+
+namespace jit {
+class JitActivation;
+class MacroAssembler;
+struct Register;
+enum class FrameType;
+} // namespace jit
+
+namespace wasm {
+
+class CallIndirectId;
+class Code;
+class CodeRange;
+class DebugFrame;
+class Instance;
+
+struct CallableOffsets;
+struct FuncOffsets;
+struct Offsets;
+class Frame;
+
+using RegisterState = JS::ProfilingFrameIterator::RegisterState;
+
+// Iterates over a linear group of wasm frames of a single wasm JitActivation,
+// called synchronously from C++ in the wasm thread. It will stop at the first
+// frame that is not of the same kind, or at the end of an activation.
+//
+// If you want to handle every kind of frame (including JS jit frames), use
+// JitFrameIter.
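+//
+// For illustration only -- a minimal sketch of a synchronous walk over the
+// wasm frames of an activation, assuming |activation| is a
+// jit::JitActivation* obtained by the caller:
+//
+//   for (WasmFrameIter iter(activation); !iter.done(); ++iter) {
+//     const char* file = iter.filename();
+//     unsigned line = iter.lineOrBytecode();
+//     // ... record (file, line) for the current wasm frame ...
+//   }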
+
+class WasmFrameIter {
+ public:
+ enum class Unwind { True, False };
+ static constexpr uint32_t ColumnBit = 1u << 31;
+
+ private:
+ jit::JitActivation* activation_;
+ const Code* code_;
+ const CodeRange* codeRange_;
+ unsigned lineOrBytecode_;
+ Frame* fp_;
+ Instance* instance_;
+ uint8_t* unwoundCallerFP_;
+ mozilla::Maybe<jit::FrameType> unwoundJitFrameType_;
+ Unwind unwind_;
+ void** unwoundAddressOfReturnAddress_;
+ uint8_t* resumePCinCurrentFrame_;
+
+ void popFrame();
+
+ public:
+ // See comment above this class definition.
+ explicit WasmFrameIter(jit::JitActivation* activation, Frame* fp = nullptr);
+ const jit::JitActivation* activation() const { return activation_; }
+ void setUnwind(Unwind unwind) { unwind_ = unwind; }
+ void operator++();
+ bool done() const;
+ const char* filename() const;
+ const char16_t* displayURL() const;
+ bool mutedErrors() const;
+ JSAtom* functionDisplayAtom() const;
+ unsigned lineOrBytecode() const;
+ uint32_t funcIndex() const;
+ unsigned computeLine(uint32_t* column) const;
+ const CodeRange* codeRange() const { return codeRange_; }
+ void** unwoundAddressOfReturnAddress() const;
+ bool debugEnabled() const;
+ DebugFrame* debugFrame() const;
+ jit::FrameType unwoundJitFrameType() const;
+ bool hasUnwoundJitFrame() const;
+ uint8_t* unwoundCallerFP() const { return unwoundCallerFP_; }
+ Frame* frame() const { return fp_; }
+ Instance* instance() const { return instance_; }
+
+ // Returns the address of the next instruction that will execute in this
+ // frame, once control returns to this frame.
+ uint8_t* resumePCinCurrentFrame() const;
+};
+
+enum class SymbolicAddress;
+
+// An ExitReason describes the possible reasons for leaving compiled wasm
+// code, or the state of not having left compiled wasm code
+// (ExitReason::None). It is either a known fixed reason, or a SymbolicAddress
+// identifying a native function, which allows for better display in the
+// profiler.
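+//
+// For illustration only -- the payload packs a one-bit tag (0 = fixed,
+// 1 = symbolic) in bit 0 and the enumerator value in the remaining bits:
+//
+//   ExitReason(ExitReason::Fixed::Trap).encode()
+//       == (uint32_t(ExitReason::Fixed::Trap) << 1) | 0x0
+//   ExitReason(SymbolicAddress::ModD).encode()
+//       == (uint32_t(SymbolicAddress::ModD) << 1) | 0x1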
+class ExitReason {
+ public:
+ enum class Fixed : uint32_t {
+ None, // default state, the pc is in wasm code
+ ImportJit, // fast-path call directly into JIT code
+ ImportInterp, // slow-path call into C++ Invoke()
+ BuiltinNative, // fast-path call directly into native C++ code
+ Trap, // call to trap handler
+ DebugTrap // call to debug trap handler
+ };
+
+ private:
+ uint32_t payload_;
+
+ ExitReason() : ExitReason(Fixed::None) {}
+
+ public:
+ MOZ_IMPLICIT ExitReason(Fixed exitReason)
+ : payload_(0x0 | (uint32_t(exitReason) << 1)) {
+ MOZ_ASSERT(isFixed());
+ MOZ_ASSERT_IF(isNone(), payload_ == 0);
+ }
+
+ explicit ExitReason(SymbolicAddress sym)
+ : payload_(0x1 | (uint32_t(sym) << 1)) {
+ MOZ_ASSERT(uint32_t(sym) <= (UINT32_MAX << 1), "packing constraints");
+ MOZ_ASSERT(!isFixed());
+ }
+
+ static ExitReason Decode(uint32_t payload) {
+ ExitReason reason;
+ reason.payload_ = payload;
+ return reason;
+ }
+
+ static ExitReason None() { return ExitReason(ExitReason::Fixed::None); }
+
+ bool isFixed() const { return (payload_ & 0x1) == 0; }
+ bool isNone() const { return isFixed() && fixed() == Fixed::None; }
+ bool isNative() const {
+ return !isFixed() || fixed() == Fixed::BuiltinNative;
+ }
+
+ uint32_t encode() const { return payload_; }
+ Fixed fixed() const {
+ MOZ_ASSERT(isFixed());
+ return Fixed(payload_ >> 1);
+ }
+ SymbolicAddress symbolic() const {
+ MOZ_ASSERT(!isFixed());
+ return SymbolicAddress(payload_ >> 1);
+ }
+};
+
+// Iterates over the frames of a single wasm JitActivation, given an
+// asynchronously-profiled thread's state.
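+//
+// For illustration only -- a sketch of the sampler-side loop, assuming
+// |activation| and |state| were captured when the thread was suspended:
+//
+//   for (ProfilingFrameIterator iter(activation, state); !iter.done();
+//        ++iter) {
+//     const char* frameLabel = iter.label();
+//     // ... append |frameLabel| and iter.stackAddress() to the profile ...
+//   }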
+class ProfilingFrameIterator {
+ const Code* code_;
+ const CodeRange* codeRange_;
+ uint8_t* callerFP_;
+ void* callerPC_;
+ void* stackAddress_;
+ // See JS::ProfilingFrameIterator::endStackAddress_ comment.
+ void* endStackAddress_ = nullptr;
+ uint8_t* unwoundJitCallerFP_;
+ ExitReason exitReason_;
+
+ void initFromExitFP(const Frame* fp);
+
+ public:
+ ProfilingFrameIterator();
+
+ // Start unwinding at a non-innermost activation that has necessarily been
+ // exited from wasm code (and thus activation.hasWasmExitFP).
+ explicit ProfilingFrameIterator(const jit::JitActivation& activation);
+
+ // Start unwinding at a group of wasm frames after unwinding an inner group
+ // of JSJit frames.
+ explicit ProfilingFrameIterator(const Frame* fp);
+
+ // Start unwinding at the innermost activation given the register state when
+ // the thread was suspended.
+ ProfilingFrameIterator(const jit::JitActivation& activation,
+ const RegisterState& state);
+
+ void operator++();
+
+ bool done() const {
+ MOZ_ASSERT_IF(!exitReason_.isNone(), codeRange_);
+ return !codeRange_;
+ }
+
+ void* stackAddress() const {
+ MOZ_ASSERT(!done());
+ return stackAddress_;
+ }
+ uint8_t* unwoundJitCallerFP() const {
+ MOZ_ASSERT(done());
+ return unwoundJitCallerFP_;
+ }
+ const char* label() const;
+
+ void* endStackAddress() const { return endStackAddress_; }
+};
+
+// Prologue/epilogue code generation
+
+void SetExitFP(jit::MacroAssembler& masm, ExitReason reason,
+ jit::Register scratch);
+void ClearExitFP(jit::MacroAssembler& masm, jit::Register scratch);
+
+void GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets);
+void GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets);
+
+void GenerateJitExitPrologue(jit::MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets);
+void GenerateJitExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets);
+
+void GenerateJitEntryPrologue(jit::MacroAssembler& masm,
+ CallableOffsets* offsets);
+void GenerateJitEntryEpilogue(jit::MacroAssembler& masm,
+ CallableOffsets* offsets);
+
+void GenerateFunctionPrologue(jit::MacroAssembler& masm,
+ const CallIndirectId& callIndirectId,
+ const mozilla::Maybe<uint32_t>& tier1FuncIndex,
+ FuncOffsets* offsets);
+void GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
+ FuncOffsets* offsets);
+
+// Iterates through frames of either a possible cross-instance call or an
+// entry stub to obtain the instance that corresponds to the passed fp.
+const Instance* GetNearestEffectiveInstance(const Frame* fp);
+Instance* GetNearestEffectiveInstance(Frame* fp);
+
+// Describes register state and associated code at a given call frame.
+
+struct UnwindState {
+ uint8_t* fp;
+ void* pc;
+ const Code* code;
+ const CodeRange* codeRange;
+ UnwindState() : fp(nullptr), pc(nullptr), code(nullptr), codeRange(nullptr) {}
+};
+
+// Ensures the register state at a call site is consistent: pc must be in the
+// code range of the code described by fp. This prevents issues when using
+// the values of pc/fp, especially at call site boundaries, where the state
+// hasn't fully transitioned from the caller's to the callee's.
+//
+// unwoundCaller is set to true if we were in a transitional state and had to
+// rewind to the caller's frame instead of the current frame.
+//
+// Returns true if it was possible to get to a clear state, or false if the
+// frame should be ignored.
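+//
+// For illustration only -- a typical caller, assuming |registers| was
+// captured from a suspended thread:
+//
+//   UnwindState unwind;
+//   bool unwoundCaller;
+//   if (!StartUnwinding(registers, &unwind, &unwoundCaller)) {
+//     return;  // this frame should be ignored
+//   }
+//   // unwind.pc / unwind.fp now describe a consistent frame in unwind.code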
+
+bool StartUnwinding(const RegisterState& registers, UnwindState* unwindState,
+ bool* unwoundCaller);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_frame_iter_h
diff --git a/js/src/wasm/WasmGC.cpp b/js/src/wasm/WasmGC.cpp
new file mode 100644
index 0000000000..60ca38ebc1
--- /dev/null
+++ b/js/src/wasm/WasmGC.cpp
@@ -0,0 +1,314 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2019 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmGC.h"
+#include "wasm/WasmInstance.h"
+#include "jit/MacroAssembler-inl.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+wasm::StackMap* wasm::ConvertStackMapBoolVectorToStackMap(
+ const StackMapBoolVector& vec, bool hasRefs) {
+ wasm::StackMap* stackMap = wasm::StackMap::create(vec.length());
+ if (!stackMap) {
+ return nullptr;
+ }
+
+ bool hasRefsObserved = false;
+ size_t i = 0;
+ for (bool b : vec) {
+ if (b) {
+ stackMap->setBit(i);
+ hasRefsObserved = true;
+ }
+ i++;
+ }
+ MOZ_RELEASE_ASSERT(hasRefs == hasRefsObserved);
+
+ return stackMap;
+}
+
+// Generate a stackmap for a function's stack-overflow-at-entry trap, with
+// the structure:
+//
+// <reg dump area>
+// | ++ <space reserved before trap, if any>
+// | ++ <space for Frame>
+// | ++ <inbound arg area>
+// | |
+// Lowest Addr Highest Addr
+//
+// The caller owns the resulting stackmap. This assumes a grow-down stack.
+//
+// For non-debug builds, if the stackmap would contain no pointers, no
+// stackmap is created, and nullptr is returned. For a debug build, a
+// stackmap is always created and returned.
+//
+// The "space reserved before trap" is the space reserved by
+// MacroAssembler::wasmReserveStackChecked, in the case where the frame is
+// "small", as determined by that function.
+bool wasm::CreateStackMapForFunctionEntryTrap(
+ const wasm::ArgTypeVector& argTypes, const RegisterOffsets& trapExitLayout,
+ size_t trapExitLayoutWords, size_t nBytesReservedBeforeTrap,
+ size_t nInboundStackArgBytes, wasm::StackMap** result) {
+ // Ensure this is defined on all return paths.
+ *result = nullptr;
+
+ // The size of the wasm::Frame itself.
+ const size_t nFrameBytes = sizeof(wasm::Frame);
+
+ // The size of the register dump (trap) area.
+ const size_t trapExitLayoutBytes = trapExitLayoutWords * sizeof(void*);
+
+ // This is the total number of bytes covered by the map.
+ const DebugOnly<size_t> nTotalBytes = trapExitLayoutBytes +
+ nBytesReservedBeforeTrap + nFrameBytes +
+ nInboundStackArgBytes;
+
+ // Create the stackmap initially in this vector. Since most frames will
+ // contain 128 or fewer words, heap allocation is avoided in the majority of
+ // cases. vec[0] is for the lowest address in the map, vec[N-1] is for the
+ // highest address in the map.
+ StackMapBoolVector vec;
+
+ // Keep track of whether we've actually seen any refs.
+ bool hasRefs = false;
+
+ // REG DUMP AREA
+ wasm::ExitStubMapVector trapExitExtras;
+ if (!GenerateStackmapEntriesForTrapExit(
+ argTypes, trapExitLayout, trapExitLayoutWords, &trapExitExtras)) {
+ return false;
+ }
+ MOZ_ASSERT(trapExitExtras.length() == trapExitLayoutWords);
+
+ if (!vec.appendN(false, trapExitLayoutWords)) {
+ return false;
+ }
+ for (size_t i = 0; i < trapExitLayoutWords; i++) {
+ vec[i] = trapExitExtras[i];
+ hasRefs |= vec[i];
+ }
+
+ // SPACE RESERVED BEFORE TRAP
+ MOZ_ASSERT(nBytesReservedBeforeTrap % sizeof(void*) == 0);
+ if (!vec.appendN(false, nBytesReservedBeforeTrap / sizeof(void*))) {
+ return false;
+ }
+
+ // SPACE FOR FRAME
+ if (!vec.appendN(false, nFrameBytes / sizeof(void*))) {
+ return false;
+ }
+
+ // INBOUND ARG AREA
+ MOZ_ASSERT(nInboundStackArgBytes % sizeof(void*) == 0);
+ const size_t numStackArgWords = nInboundStackArgBytes / sizeof(void*);
+
+ const size_t wordsSoFar = vec.length();
+ if (!vec.appendN(false, numStackArgWords)) {
+ return false;
+ }
+
+ for (WasmABIArgIter i(argTypes); !i.done(); i++) {
+ ABIArg argLoc = *i;
+ if (argLoc.kind() == ABIArg::Stack &&
+ argTypes[i.index()] == MIRType::RefOrNull) {
+ uint32_t offset = argLoc.offsetFromArgBase();
+ MOZ_ASSERT(offset < nInboundStackArgBytes);
+ MOZ_ASSERT(offset % sizeof(void*) == 0);
+ vec[wordsSoFar + offset / sizeof(void*)] = true;
+ hasRefs = true;
+ }
+ }
+
+#ifndef DEBUG
+ // We saw no references, and this is a non-debug build, so don't bother
+ // building the stackmap.
+ if (!hasRefs) {
+ return true;
+ }
+#endif
+
+ // Convert vec into a wasm::StackMap.
+ MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
+ wasm::StackMap* stackMap = ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
+ if (!stackMap) {
+ return false;
+ }
+ stackMap->setExitStubWords(trapExitLayoutWords);
+
+ stackMap->setFrameOffsetFromTop(nFrameBytes / sizeof(void*) +
+ numStackArgWords);
+#ifdef DEBUG
+ for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
+ MOZ_ASSERT(stackMap->getBit(stackMap->header.numMappedWords -
+ stackMap->header.frameOffsetFromTop + i) == 0);
+ }
+#endif
+
+ *result = stackMap;
+ return true;
+}
+
+bool wasm::GenerateStackmapEntriesForTrapExit(
+ const ArgTypeVector& args, const RegisterOffsets& trapExitLayout,
+ const size_t trapExitLayoutNumWords, ExitStubMapVector* extras) {
+ MOZ_ASSERT(extras->empty());
+
+ if (!extras->appendN(false, trapExitLayoutNumWords)) {
+ return false;
+ }
+
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ if (!i->argInRegister() || i.mirType() != MIRType::RefOrNull) {
+ continue;
+ }
+
+ size_t offsetFromTop = trapExitLayout.getOffset(i->gpr());
+
+ // If this doesn't hold, the associated register wasn't saved by
+ // the trap exit stub. Better to crash now than much later, in
+ // some obscure place, and possibly with security consequences.
+ MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
+
+ // offsetFromTop is an offset in words down from the highest
+ // address in the exit stub save area. Switch it around to be an
+ // offset up from the bottom of the (integer register) save area.
+ size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
+
+ (*extras)[offsetFromBottom] = true;
+ }
+
+ return true;
+}
+
+void wasm::EmitWasmPreBarrierGuard(MacroAssembler& masm, Register instance,
+ Register scratch, Register valueAddr,
+ size_t valueOffset, Label* skipBarrier,
+ BytecodeOffset* trapOffset) {
+ // If no incremental GC has started, we don't need the barrier.
+ masm.loadPtr(
+ Address(instance, Instance::offsetOfAddressOfNeedsIncrementalBarrier()),
+ scratch);
+ masm.branchTest32(Assembler::Zero, Address(scratch, 0), Imm32(0x1),
+ skipBarrier);
+
+ // Emit metadata for a potential null access when reading the previous value.
+ if (trapOffset) {
+ masm.append(wasm::Trap::NullPointerDereference,
+ wasm::TrapSite(masm.currentOffset(), *trapOffset));
+ }
+
+ // If the previous value is null, we don't need the barrier.
+ masm.loadPtr(Address(valueAddr, valueOffset), scratch);
+ masm.branchTestPtr(Assembler::Zero, scratch, scratch, skipBarrier);
+}
+
+void wasm::EmitWasmPreBarrierCall(MacroAssembler& masm, Register instance,
+ Register scratch, Register valueAddr,
+ size_t valueOffset) {
+ MOZ_ASSERT(valueAddr == PreBarrierReg);
+
+ // Add the offset to the PreBarrierReg, if any.
+ if (valueOffset != 0) {
+ masm.addPtr(Imm32(valueOffset), valueAddr);
+ }
+
+#if defined(DEBUG) && defined(JS_CODEGEN_ARM64)
+ // The prebarrier assumes that x28 == sp.
+ Label ok;
+ masm.Cmp(sp, vixl::Operand(x28));
+ masm.B(&ok, Assembler::Equal);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+
+ // Load and call the pre-write barrier code. It will preserve all volatile
+ // registers.
+ masm.loadPtr(Address(instance, Instance::offsetOfPreBarrierCode()), scratch);
+ masm.call(scratch);
+
+ // Remove the offset we folded into PreBarrierReg, if any.
+ if (valueOffset != 0) {
+ masm.subPtr(Imm32(valueOffset), valueAddr);
+ }
+}
+
+void wasm::EmitWasmPostBarrierGuard(MacroAssembler& masm,
+ const Maybe<Register>& object,
+ Register otherScratch, Register setValue,
+ Label* skipBarrier) {
+ // If the pointer being stored is null, no barrier.
+ masm.branchTestPtr(Assembler::Zero, setValue, setValue, skipBarrier);
+
+ // If there is a containing object and it is in the nursery, no barrier.
+ if (object) {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, *object, otherScratch,
+ skipBarrier);
+ }
+
+ // If the pointer being stored is to a tenured object, no barrier.
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, setValue, otherScratch,
+ skipBarrier);
+}
+
+#ifdef DEBUG
+bool wasm::IsValidStackMapKey(bool debugEnabled, const uint8_t* nextPC) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ const uint8_t* insn = nextPC;
+ return (insn[-2] == 0x0F && insn[-1] == 0x0B) || // ud2
+ (insn[-2] == 0xFF && (insn[-1] & 0xF8) == 0xD0) || // call *%r_
+ insn[-5] == 0xE8; // call simm32
+
+# elif defined(JS_CODEGEN_ARM)
+ const uint32_t* insn = (const uint32_t*)nextPC;
+ return ((uintptr_t(insn) & 3) == 0) && // must be ARM, not Thumb
+ (insn[-1] == 0xe7f000f0 || // udf
+ (insn[-1] & 0xfffffff0) == 0xe12fff30 || // blx reg (ARM, enc A1)
+ (insn[-1] & 0x0f000000) == 0x0b000000); // bl.cc simm24 (ARM, enc A1)
+
+# elif defined(JS_CODEGEN_ARM64)
+ const uint32_t hltInsn = 0xd4a00000;
+ const uint32_t* insn = (const uint32_t*)nextPC;
+ return ((uintptr_t(insn) & 3) == 0) &&
+ (insn[-1] == hltInsn || // hlt
+ (insn[-1] & 0xfffffc1f) == 0xd63f0000 || // blr reg
+ (insn[-1] & 0xfc000000) == 0x94000000); // bl simm26
+
+# elif defined(JS_CODEGEN_MIPS64)
+ // TODO (bug 1699696): Implement this. As for the platforms above, we need to
+ // enumerate all code sequences that can precede the stackmap location.
+ return true;
+# elif defined(JS_CODEGEN_LOONG64)
+ // TODO(loong64): Implement IsValidStackMapKey.
+ return true;
+# elif defined(JS_CODEGEN_RISCV64)
+ const uint32_t* insn = (const uint32_t*)nextPC;
+ return (((uintptr_t(insn) & 3) == 0) &&
+ (insn[-1] == 0x00006037 && insn[-2] == 0x00100073) || // break;
+ ((insn[-1] & kBaseOpcodeMask) == JALR));
+# else
+ MOZ_CRASH("IsValidStackMapKey: requires implementation on this platform");
+# endif
+}
+#endif
diff --git a/js/src/wasm/WasmGC.h b/js/src/wasm/WasmGC.h
new file mode 100644
index 0000000000..7a42a17988
--- /dev/null
+++ b/js/src/wasm/WasmGC.h
@@ -0,0 +1,495 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2019 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_gc_h
+#define wasm_gc_h
+
+#include "mozilla/BinarySearch.h"
+
+#include "jit/ABIArgGenerator.h" // For ABIArgIter
+#include "js/AllocPolicy.h"
+#include "js/Vector.h"
+#include "util/Memory.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmFrame.h"
+#include "wasm/WasmSerialize.h"
+
+namespace js {
+
+namespace jit {
+class Label;
+class MacroAssembler;
+} // namespace jit
+
+namespace wasm {
+
+class ArgTypeVector;
+class BytecodeOffset;
+
+using jit::Label;
+using jit::MIRType;
+using jit::Register;
+
+// Definitions for stackmaps.
+
+using ExitStubMapVector = Vector<bool, 32, SystemAllocPolicy>;
+
+struct StackMapHeader {
+ explicit StackMapHeader(uint32_t numMappedWords = 0)
+ : numMappedWords(numMappedWords),
+ numExitStubWords(0),
+ frameOffsetFromTop(0),
+ hasDebugFrameWithLiveRefs(0) {}
+
+ // The total number of stack words covered by the map ..
+ static constexpr size_t MappedWordsBits = 30;
+ uint32_t numMappedWords : MappedWordsBits;
+
+ // .. of which this many are "exit stub" extras
+ static constexpr size_t ExitStubWordsBits = 6;
+ uint32_t numExitStubWords : ExitStubWordsBits;
+
+ // Where is Frame* relative to the top? This is an offset in words. On every
+ // platform, FrameOffsetBits needs to be at least
+ // ceil(log2(MaxParams*sizeof-biggest-param-type-in-words)). The most
+ // constraining platforms are 32-bit with SIMD support, currently x86-32.
+ static constexpr size_t FrameOffsetBits = 12;
+ uint32_t frameOffsetFromTop : FrameOffsetBits;
+
+ // Notes the presence of a DebugFrame with possibly-live references. A
+ // DebugFrame may or may not contain GC-managed data; in situations when it is
+ // possible that any pointers in the DebugFrame are non-null, the DebugFrame
+ // gets a stackmap.
+ uint32_t hasDebugFrameWithLiveRefs : 1;
+
+ WASM_CHECK_CACHEABLE_POD(numMappedWords, numExitStubWords, frameOffsetFromTop,
+ hasDebugFrameWithLiveRefs);
+
+ static constexpr uint32_t maxMappedWords = (1 << MappedWordsBits) - 1;
+ static constexpr uint32_t maxExitStubWords = (1 << ExitStubWordsBits) - 1;
+ static constexpr uint32_t maxFrameOffsetFromTop = (1 << FrameOffsetBits) - 1;
+
+ static constexpr size_t MaxParamSize =
+ std::max(sizeof(jit::FloatRegisters::RegisterContent),
+ sizeof(jit::Registers::RegisterContent));
+
+ // Add 16 words to account for the size of FrameWithInstances including any
+ // shadow stack (at worst 8 words total), and then a little headroom in case
+ // the argument area had to be aligned.
+ static_assert(FrameWithInstances::sizeOf() / sizeof(void*) <= 8);
+ static_assert(maxFrameOffsetFromTop >=
+ (MaxParams * MaxParamSize / sizeof(void*)) + 16,
+ "limited size of the offset field");
+};
+
+WASM_DECLARE_CACHEABLE_POD(StackMapHeader);
+
+// This is the expected size for the header
+static_assert(sizeof(StackMapHeader) == 8,
+ "wasm::StackMapHeader has unexpected size");
+
+// A StackMap is a bit-array containing numMappedWords bits, one bit per
+// word of stack. Bit index zero is for the lowest addressed word in the
+// range.
+//
+// This is a variable-length structure whose size must be known at creation
+// time.
+//
+// Users of the map will know the address of the wasm::Frame that is covered
+// by this map. In order that they can calculate the exact address range
+// covered by the map, the map also stores the offset, from the highest
+// addressed word of the map, of the embedded wasm::Frame. This is an offset
+// down from the highest address, rather than up from the lowest, so as to
+// limit its range to FrameOffsetBits bits.
+//
+// The stackmap may also cover a DebugFrame (all DebugFrames which may
+// potentially contain live pointers into the JS heap get a map). If so that
+// can be noted, since users of the map need to trace pointers in a
+// DebugFrame.
+//
+// Finally, for sanity checking only, for stackmaps associated with a wasm
+// trap exit stub, the number of words used by the trap exit stub save area
+// is also noted. This is used in Instance::traceFrame to check that the
+// TrapExitDummyValue is in the expected place in the frame.
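+//
+// For illustration only -- basic creation and use, with a made-up word count
+// and bit index:
+//
+//   StackMap* map = StackMap::create(/* numMappedWords = */ 8);
+//   if (!map) {
+//     return false;  // OOM
+//   }
+//   map->setBit(3);  // stack word 3 (counting up from the lowest address)
+//                    // holds a GC pointer
+//   MOZ_ASSERT(map->getBit(3) == 1);
+//   map->destroy();  // StackMaps are freed explicitly, not via delete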
+struct StackMap final {
+ // The header contains the constant-sized fields before the variable-sized
+ // bitmap that follows.
+ StackMapHeader header;
+
+ private:
+ // The variable-sized bitmap.
+ uint32_t bitmap[1];
+
+ explicit StackMap(uint32_t numMappedWords) : header(numMappedWords) {
+ const uint32_t nBitmap = calcNBitmap(header.numMappedWords);
+ memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
+ }
+ explicit StackMap(const StackMapHeader& header) : header(header) {
+ const uint32_t nBitmap = calcNBitmap(header.numMappedWords);
+ memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
+ }
+
+ public:
+ static StackMap* create(uint32_t numMappedWords) {
+ size_t size = allocationSizeInBytes(numMappedWords);
+ char* buf = (char*)js_malloc(size);
+ if (!buf) {
+ return nullptr;
+ }
+ return ::new (buf) StackMap(numMappedWords);
+ }
+ static StackMap* create(const StackMapHeader& header) {
+ size_t size = allocationSizeInBytes(header.numMappedWords);
+ char* buf = (char*)js_malloc(size);
+ if (!buf) {
+ return nullptr;
+ }
+ return ::new (buf) StackMap(header);
+ }
+
+ void destroy() { js_free((char*)this); }
+
+ // Returns the size of a `StackMap` allocated with `numMappedWords`.
+ static size_t allocationSizeInBytes(uint32_t numMappedWords) {
+ uint32_t nBitmap = calcNBitmap(numMappedWords);
+ return sizeof(StackMap) + (nBitmap - 1) * sizeof(bitmap[0]);
+ }
+
+ // Returns the allocated size of this `StackMap`.
+ size_t allocationSizeInBytes() const {
+ return allocationSizeInBytes(header.numMappedWords);
+ }
+
+ // Record the number of words in the map used as a wasm trap exit stub
+ // save area. See comment above.
+ void setExitStubWords(uint32_t nWords) {
+ MOZ_ASSERT(header.numExitStubWords == 0);
+ MOZ_RELEASE_ASSERT(nWords <= header.maxExitStubWords);
+ MOZ_ASSERT(nWords <= header.numMappedWords);
+ header.numExitStubWords = nWords;
+ }
+
+ // Record the offset from the highest-addressed word of the map, that the
+ // wasm::Frame lives at. See comment above.
+ void setFrameOffsetFromTop(uint32_t nWords) {
+ MOZ_ASSERT(header.frameOffsetFromTop == 0);
+ MOZ_RELEASE_ASSERT(nWords <= StackMapHeader::maxFrameOffsetFromTop);
+ MOZ_ASSERT(header.frameOffsetFromTop < header.numMappedWords);
+ header.frameOffsetFromTop = nWords;
+ }
+
+ // If the frame described by this StackMap includes a DebugFrame, call here to
+ // record that fact.
+ void setHasDebugFrameWithLiveRefs() {
+ MOZ_ASSERT(header.hasDebugFrameWithLiveRefs == 0);
+ header.hasDebugFrameWithLiveRefs = 1;
+ }
+
+ inline void setBit(uint32_t bitIndex) {
+ MOZ_ASSERT(bitIndex < header.numMappedWords);
+ uint32_t wordIndex = bitIndex / wordsPerBitmapElem;
+ uint32_t wordOffset = bitIndex % wordsPerBitmapElem;
+ bitmap[wordIndex] |= (1 << wordOffset);
+ }
+
+ inline uint32_t getBit(uint32_t bitIndex) const {
+ MOZ_ASSERT(bitIndex < header.numMappedWords);
+ uint32_t wordIndex = bitIndex / wordsPerBitmapElem;
+ uint32_t wordOffset = bitIndex % wordsPerBitmapElem;
+ return (bitmap[wordIndex] >> wordOffset) & 1;
+ }
+
+ inline uint8_t* rawBitmap() { return (uint8_t*)&bitmap; }
+ inline const uint8_t* rawBitmap() const { return (const uint8_t*)&bitmap; }
+ inline size_t rawBitmapLengthInBytes() const {
+ return calcNBitmap(header.numMappedWords) * sizeof(uint32_t);
+ }
+
+ private:
+ static constexpr uint32_t wordsPerBitmapElem = sizeof(bitmap[0]) * 8;
+
+ static uint32_t calcNBitmap(uint32_t numMappedWords) {
+ MOZ_RELEASE_ASSERT(numMappedWords <= StackMapHeader::maxMappedWords);
+ uint32_t nBitmap =
+ (numMappedWords + wordsPerBitmapElem - 1) / wordsPerBitmapElem;
+ return nBitmap == 0 ? 1 : nBitmap;
+ }
+};
+
+// This is the expected size for a map that covers 32 or fewer words.
+static_assert(sizeof(StackMap) == 12, "wasm::StackMap has unexpected size");
+
+class StackMaps {
+ public:
+ // A Maplet holds a single code-address-to-map binding. Note that the
+ // code address is the lowest address of the instruction immediately
+ // following the instruction of interest, not of the instruction of
+ // interest itself. In practice (at least for the Wasm Baseline compiler)
+ // this means that |nextInsnAddr| points either immediately after a call
+ // instruction, after a trap instruction or after a no-op.
+ struct Maplet {
+ const uint8_t* nextInsnAddr;
+ StackMap* map;
+ Maplet(const uint8_t* nextInsnAddr, StackMap* map)
+ : nextInsnAddr(nextInsnAddr), map(map) {}
+ void offsetBy(uintptr_t delta) { nextInsnAddr += delta; }
+ bool operator<(const Maplet& other) const {
+ return uintptr_t(nextInsnAddr) < uintptr_t(other.nextInsnAddr);
+ }
+ };
+
+ private:
+ bool sorted_;
+ Vector<Maplet, 0, SystemAllocPolicy> mapping_;
+
+ public:
+ StackMaps() : sorted_(false) {}
+ ~StackMaps() {
+ for (auto& maplet : mapping_) {
+ maplet.map->destroy();
+ maplet.map = nullptr;
+ }
+ }
+ [[nodiscard]] bool add(const uint8_t* nextInsnAddr, StackMap* map) {
+ MOZ_ASSERT(!sorted_);
+ return mapping_.append(Maplet(nextInsnAddr, map));
+ }
+ [[nodiscard]] bool add(const Maplet& maplet) {
+ return add(maplet.nextInsnAddr, maplet.map);
+ }
+ void clear() {
+ for (auto& maplet : mapping_) {
+ maplet.nextInsnAddr = nullptr;
+ maplet.map = nullptr;
+ }
+ mapping_.clear();
+ }
+ bool empty() const { return mapping_.empty(); }
+ size_t length() const { return mapping_.length(); }
+ Maplet* getRef(size_t i) { return &mapping_[i]; }
+ Maplet get(size_t i) const { return mapping_[i]; }
+ Maplet move(size_t i) {
+ Maplet m = mapping_[i];
+ mapping_[i].map = nullptr;
+ return m;
+ }
+ void offsetBy(uintptr_t delta) {
+ for (auto& maplet : mapping_) maplet.offsetBy(delta);
+ }
+ void finishAndSort() {
+ MOZ_ASSERT(!sorted_);
+ std::sort(mapping_.begin(), mapping_.end());
+ sorted_ = true;
+ }
+ void finishAlreadySorted() {
+ MOZ_ASSERT(!sorted_);
+ MOZ_ASSERT(std::is_sorted(mapping_.begin(), mapping_.end()));
+ sorted_ = true;
+ }
+ const StackMap* findMap(const uint8_t* nextInsnAddr) const {
+ struct Comparator {
+ int operator()(Maplet aVal) const {
+ if (uintptr_t(mTarget) < uintptr_t(aVal.nextInsnAddr)) {
+ return -1;
+ }
+ if (uintptr_t(mTarget) > uintptr_t(aVal.nextInsnAddr)) {
+ return 1;
+ }
+ return 0;
+ }
+ explicit Comparator(const uint8_t* aTarget) : mTarget(aTarget) {}
+ const uint8_t* mTarget;
+ };
+
+ size_t result;
+ if (mozilla::BinarySearchIf(mapping_, 0, mapping_.length(),
+ Comparator(nextInsnAddr), &result)) {
+ return mapping_[result].map;
+ }
+
+ return nullptr;
+ }
+};
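+
+// For illustration only -- the intended add -> sort -> lookup flow for
+// StackMaps, where |nextInsnAddr| and |map| are assumed to come from the
+// compiler:
+//
+//   StackMaps maps;
+//   if (!maps.add(nextInsnAddr, map)) {
+//     return false;  // OOM
+//   }
+//   maps.finishAndSort();  // required before any lookup
+//   const StackMap* found = maps.findMap(nextInsnAddr);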
+
+// Supporting code for creation of stackmaps.
+
+// StackArgAreaSizeUnaligned returns the size, in bytes, of the stack arg area
+// needed to pass |argTypes|, excluding any alignment padding beyond the size
+// of the area as a whole. The size is determined by the platform's native
+// ABI.
+//
+// AlignStackArgAreaSize rounds such an unaligned size up to the required wasm
+// stack alignment (jit::WasmStackAlignment).
+//
+// Note, StackArgAreaSizeUnaligned() must process all the arguments in order
+// to take into account all necessary alignment constraints. The signature
+// must include any receiver argument -- in other words, it must be the
+// complete native-ABI-level call signature.
+template <class T>
+static inline size_t StackArgAreaSizeUnaligned(const T& argTypes) {
+ jit::WasmABIArgIter<const T> i(argTypes);
+ while (!i.done()) {
+ i++;
+ }
+ return i.stackBytesConsumedSoFar();
+}
+
+static inline size_t StackArgAreaSizeUnaligned(
+ const SymbolicAddressSignature& saSig) {
+ // WasmABIArgIter::ABIArgIter wants the items to be iterated over to be
+ // presented in some type that has methods length() and operator[]. So we
+ // have to wrap up |saSig|'s array of types in this API-matching class.
+ class MOZ_STACK_CLASS ItemsAndLength {
+ const MIRType* items_;
+ size_t length_;
+
+ public:
+ ItemsAndLength(const MIRType* items, size_t length)
+ : items_(items), length_(length) {}
+ size_t length() const { return length_; }
+ MIRType operator[](size_t i) const { return items_[i]; }
+ };
+
+ // Assert, at least crudely, that we're not accidentally going to run off
+ // the end of the array of types, nor into undefined parts of it, while
+ // iterating.
+ MOZ_ASSERT(saSig.numArgs <
+ sizeof(saSig.argTypes) / sizeof(saSig.argTypes[0]));
+ MOZ_ASSERT(saSig.argTypes[saSig.numArgs] == MIRType::None /*the end marker*/);
+
+ ItemsAndLength itemsAndLength(saSig.argTypes, saSig.numArgs);
+ return StackArgAreaSizeUnaligned(itemsAndLength);
+}
+
+static inline size_t AlignStackArgAreaSize(size_t unalignedSize) {
+ return AlignBytes(unalignedSize, jit::WasmStackAlignment);
+}
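+
+// For illustration only -- how the two helpers above are meant to compose,
+// assuming |argTypes| describes the complete native-ABI-level signature:
+//
+//   size_t unaligned = StackArgAreaSizeUnaligned(argTypes);
+//   size_t aligned = AlignStackArgAreaSize(unaligned);
+//   // |aligned| is the outgoing stack-arg area size, rounded up to
+//   // jit::WasmStackAlignment.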
+
+// A stackmap creation helper. Create a stackmap from a vector of booleans.
+// The caller owns the resulting stackmap.
+
+using StackMapBoolVector = Vector<bool, 128, SystemAllocPolicy>;
+
+wasm::StackMap* ConvertStackMapBoolVectorToStackMap(
+ const StackMapBoolVector& vec, bool hasRefs);
+
+// Generate a stackmap for a function's stack-overflow-at-entry trap, with
+// the structure:
+//
+// <reg dump area>
+// | ++ <space reserved before trap, if any>
+// | ++ <space for Frame>
+// | ++ <inbound arg area>
+// | |
+// Lowest Addr Highest Addr
+//
+// The caller owns the resulting stackmap. This assumes a grow-down stack.
+//
+// For non-debug builds, if the stackmap would contain no pointers, no
+// stackmap is created, and nullptr is returned. For a debug build, a
+// stackmap is always created and returned.
+//
+// The "space reserved before trap" is the space reserved by
+// MacroAssembler::wasmReserveStackChecked, in the case where the frame is
+// "small", as determined by that function.
+[[nodiscard]] bool CreateStackMapForFunctionEntryTrap(
+ const ArgTypeVector& argTypes, const jit::RegisterOffsets& trapExitLayout,
+ size_t trapExitLayoutWords, size_t nBytesReservedBeforeTrap,
+ size_t nInboundStackArgBytes, wasm::StackMap** result);
+
+// At a resumable wasm trap, the machine's registers are saved on the stack by
+// (code generated by) GenerateTrapExit(). This function writes into |args| a
+// vector of booleans describing the ref-ness of the saved integer registers.
+// |args[0]| corresponds to the low addressed end of the described section of
+// the save area.
+[[nodiscard]] bool GenerateStackmapEntriesForTrapExit(
+ const ArgTypeVector& args, const jit::RegisterOffsets& trapExitLayout,
+ const size_t trapExitLayoutNumWords, ExitStubMapVector* extras);
+
+// Shared write barrier code.
+//
+// A barriered store looks like this:
+//
+// Label skipPreBarrier;
+// EmitWasmPreBarrierGuard(..., &skipPreBarrier);
+// <COMPILER-SPECIFIC ACTIONS HERE>
+// EmitWasmPreBarrierCall(...);
+// bind(&skipPreBarrier);
+//
+// <STORE THE VALUE IN MEMORY HERE>
+//
+// Label skipPostBarrier;
+// <COMPILER-SPECIFIC ACTIONS HERE>
+// EmitWasmPostBarrierGuard(..., &skipPostBarrier);
+// <CALL POST-BARRIER HERE IN A COMPILER-SPECIFIC WAY>
+// bind(&skipPostBarrier);
+//
+// The actions are divided up to allow other actions to be placed between them,
+// such as saving and restoring live registers. The postbarrier call invokes
+// C++ and will kill all live registers.
+
+// Before storing a GC pointer value in memory, skip to `skipBarrier` if the
+// prebarrier is not needed. Will clobber `scratch`.
+//
+// It is OK for `instance` and `scratch` to be the same register.
+//
+// If `trapOffset` is non-null, then metadata to catch a null access and emit
+// a null pointer exception will be emitted. This will only catch a null
+// access due to an incremental GC being in progress; the write that follows
+// this pre-barrier guard must also be guarded against null.
+
+void EmitWasmPreBarrierGuard(jit::MacroAssembler& masm, Register instance,
+ Register scratch, Register valueAddr,
+ size_t valueOffset, Label* skipBarrier,
+ BytecodeOffset* trapOffset);
+
+// Before storing a GC pointer value in memory, call out-of-line prebarrier
+// code. This assumes `PreBarrierReg` contains the address that will be updated.
+// On ARM64 it also assumes that x28 (the PseudoStackPointer) has the same
+// value as SP. `PreBarrierReg` is preserved by the barrier function. Will
+// clobber `scratch`.
+//
+// It is OK for `instance` and `scratch` to be the same register.
+
+void EmitWasmPreBarrierCall(jit::MacroAssembler& masm, Register instance,
+ Register scratch, Register valueAddr,
+ size_t valueOffset);
+
+// After storing a GC pointer value in memory, skip to `skipBarrier` if a
+// postbarrier is not needed. If the location being set is in a heap-allocated
+// object, then `object` must reference that object; otherwise it should be
+// None.
+// The value that was stored is `setValue`. Will clobber `otherScratch` and
+// will use other available scratch registers.
+//
+// `otherScratch` cannot be a designated scratch register.
+
+void EmitWasmPostBarrierGuard(jit::MacroAssembler& masm,
+ const mozilla::Maybe<Register>& object,
+ Register otherScratch, Register setValue,
+ Label* skipBarrier);
+
+#ifdef DEBUG
+// Check whether |nextPC| is a valid code address for a stackmap created by
+// this compiler.
+bool IsValidStackMapKey(bool debugEnabled, const uint8_t* nextPC);
+#endif
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_gc_h
diff --git a/js/src/wasm/WasmGcObject.cpp b/js/src/wasm/WasmGcObject.cpp
new file mode 100644
index 0000000000..6b2e5d40cf
--- /dev/null
+++ b/js/src/wasm/WasmGcObject.cpp
@@ -0,0 +1,872 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "wasm/WasmGcObject.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Casting.h"
+#include "mozilla/CheckedInt.h"
+
+#include <algorithm>
+
+#include "gc/Marking.h"
+#include "js/CharacterEncoding.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertySpec.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/Vector.h"
+#include "util/StringBuffer.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/Realm.h"
+#include "vm/SelfHosting.h"
+#include "vm/StringType.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/Uint8Clamped.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/Marking-inl.h"
+#include "gc/Nursery-inl.h"
+#include "gc/StoreBuffer-inl.h"
+#include "vm/JSAtom-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/Shape-inl.h"
+
+using mozilla::AssertedCast;
+using mozilla::CheckedUint32;
+using mozilla::IsPowerOfTwo;
+using mozilla::PodCopy;
+using mozilla::PointerRangeSize;
+
+using namespace js;
+using namespace wasm;
+
+// [SMDOC] Management of OOL storage areas for Wasm{Array,Struct}Object.
+//
+// WasmArrayObject always has its payload data stored in a block in the C++
+// heap, which is pointed to from the WasmArrayObject. The same is true for
+// WasmStructObject in the case where the fields cannot fit in the object
+// itself. These C++ blocks are in some places referred to as "trailer blocks".
+//
+// The presence of trailer blocks complicates the use of generational GC (that
+// is, Nursery allocation) of Wasm{Array,Struct}Object. In particular:
+//
+// (1) For objects which do not get tenured at minor collection, there must be
+// a way to free the associated trailer, but there is no way to visit
+// non-tenured blocks during minor collection.
+//
+// (2) Even if (1) were solved, calling js_malloc/js_free for every object
+// creation-death cycle is expensive, possibly around 400 machine
+// instructions, and we expressly want to avoid that in a generational GC
+// scenario.
+//
+// The following scheme is therefore employed.
+//
+// (a) gc::Nursery maintains a pool of available C++-heap-allocated blocks --
+// a js::MallocedBlockCache -- and the intention is that trailers are
+// allocated from this pool and freed back into it whenever possible.
+//
+// (b) WasmArrayObject::createArray and WasmStructObject::createStructOOL
+// always request trailer allocation from the nursery's cache (a). If the
+// cache cannot honour the request directly it will allocate directly from
+// js_malloc; we hope this happens only infrequently.
+//
+// (c) The allocated block is returned as a js::PointerAndUint7, a pair that
+// holds the trailer block pointer and an auxiliary tag that the
+// js::MallocedBlockCache needs to see when the block is freed.
+//
+// The raw trailer block pointer (a `void*`) is stored in the
+// Wasm{Array,Struct}Object OOL data field. These objects are not aware
+//     of and do not interact with js::PointerAndUint7, nor does any
+//     JIT-generated code.
+//
+// (d) Still in WasmArrayObject::createArray and
+// WasmStructObject::createStructOOL, if the object was allocated in the
+// nursery, then the resulting js::PointerAndUint7 is "registered" with
+// the nursery by handing it to Nursery::registerTrailer.
+//
+// (e) When a minor collection happens (Nursery::doCollection), we are
+// notified of objects that are moved by calls to the ::obj_moved methods
+// in this file. For those objects that have been tenured, the raw
+// trailer pointer is "deregistered" with the nursery by handing it to
+// Nursery::deregisterTrailer.
+//
+// (f) Still during minor collection: The nursery now knows both the set of
+// trailer blocks added, and those removed because the corresponding
+// object has been tenured. The difference between these two sets (that
+// is, `added - removed`) is the set of trailer blocks corresponding to
+// blocks that didn't get tenured. That set is computed and freed (back
+//     to the nursery's js::MallocedBlockCache) by
+//     Nursery::freeTrailerBlocks.
+//
+// (g) At the end of minor collection, the added and removed sets are made
+// empty, and the cycle begins again.
+//
+// (h) Also at the end of minor collection, a call to
+// `mallocedBlockCache_.preen` hands a few blocks in the cache back to
+// js_free. This mechanism exists so as to ensure that unused blocks do
+// not remain in the cache indefinitely.
+//
+// (i) For objects that got tenured, we are eventually notified of their death
+// by a call to the ::obj_finalize methods below. At that point we hand
+// their block pointers to js_free.
+//
+// (j) When the nursery is eventually destroyed, all blocks in its block cache
+// are handed to js_free. Hence, at process exit, provided all nurseries
+// are first collected and then their destructors run, no C++ heap blocks
+// are leaked.
+//
+// As a result of this scheme, trailer blocks associated with what we hope is
+// the frequent case -- objects that are allocated but never make it out of
+// the nursery -- are cycled through the nursery's block cache.
+//
+// Trailers associated with tenured objects cannot participate, though; they are
+// always returned to js_free. It would be possible to enable them to
+// participate by changing their owning object's OOL data pointer to be a
+// js::PointerAndUint7 rather than a raw `void*`, so that then the blocks
+// could be released to the cache in the ::obj_finalize methods. This would
+// however require changes in the generated code for array element and OOL
+// struct element accesses.
+//
+// Here's a short summary of the trailer block life cycle:
+//
+// * allocated:
+//
+// - in WasmArrayObject::createArray / WasmStructObject::createStructOOL
+//
+//   - by calling the nursery's MallocedBlockCache alloc method
+//
+// * deallocated:
+//
+// - for non-tenured objects, in the collector itself,
+// in Nursery::doCollection calling Nursery::freeTrailerBlocks,
+// releasing to the nursery's block cache
+//
+// - for tenured objects, in the ::obj_finalize methods, releasing directly
+// to js_free
+//
+// If this seems confusing ("why is it ok to allocate from the cache but
+// release to js_free?"), remember that the cache holds blocks previously
+// obtained from js_malloc but which are *not* currently in use. Hence it is
+// fine to give them back to js_free; that just makes the cache a bit emptier
+// but has no effect on correctness.
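+//
+// As a concrete (and deliberately simplified) model of steps (a)-(g), the
+// following self-contained sketch shows the register / deregister /
+// free-the-difference cycle.  It is illustrative only and not part of the
+// engine: BlockPool, ToyNursery, allocTrailer, release and minorCollection
+// are hypothetical stand-ins for js::MallocedBlockCache and the real Nursery
+// methods, error handling is omitted, and the real cache buckets blocks by a
+// rounded-up size class (hence the Uint7 tag) rather than by exact size.
+//
+//   #include <cstdlib>
+//   #include <map>
+//   #include <unordered_set>
+//   #include <vector>
+//
+//   struct BlockPool {                       // cf. (a): pool of malloc'd blocks
+//     std::map<size_t, std::vector<void*>> free_;
+//     void* alloc(size_t n) {                // cf. (b): reuse a pooled block,
+//       auto& bucket = free_[n];             //          else fall back to malloc
+//       if (!bucket.empty()) {
+//         void* p = bucket.back();
+//         bucket.pop_back();
+//         return p;
+//       }
+//       return std::malloc(n);
+//     }
+//     void release(void* p, size_t n) { free_[n].push_back(p); }
+//   };
+//
+//   struct ToyNursery {
+//     BlockPool pool;
+//     std::map<void*, size_t> added;         // trailers registered this cycle
+//     std::unordered_set<void*> removed;     // trailers whose owner was tenured
+//
+//     void* allocTrailer(size_t n) {         // cf. (b) + (d)
+//       void* p = pool.alloc(n);
+//       added[p] = n;
+//       return p;
+//     }
+//     void deregisterTrailer(void* p) {      // cf. (e): the owner got tenured
+//       removed.insert(p);
+//     }
+//     void minorCollection() {               // cf. (f) + (g)
+//       for (const auto& [p, n] : added) {
+//         if (removed.count(p) == 0) {
+//           pool.release(p, n);              // trailer of a dead nursery object
+//         }
+//       }
+//       added.clear();
+//       removed.clear();
+//     }
+//   };
+//
+//   // Trailers of tenured objects are later handed to free() by the owner's
+//   // finalizer, as in step (i) (not modelled here).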
+
+//=========================================================================
+// WasmGcObject
+
+bool WasmGcObject::lookupProperty(JSContext* cx, Handle<WasmGcObject*> object,
+ jsid id, PropOffset* offset,
+ FieldType* type) {
+ switch (kind()) {
+ case wasm::TypeDefKind::Struct: {
+ const auto& structType = typeDef().structType();
+ uint32_t index;
+ if (!IdIsIndex(id, &index)) {
+ return false;
+ }
+ if (index >= structType.fields_.length()) {
+ return false;
+ }
+ const StructField& field = structType.fields_[index];
+ offset->set(field.offset);
+ *type = field.type;
+ return true;
+ }
+ case wasm::TypeDefKind::Array: {
+ const auto& arrayType = typeDef().arrayType();
+
+ // Special case for property 'length' that loads the length field at the
+ // beginning of the data buffer
+ if (id.isString() &&
+ id.toString() == cx->runtime()->commonNames->length) {
+ STATIC_ASSERT_WASMARRAYELEMENTS_NUMELEMENTS_IS_U32;
+ *type = FieldType::I32;
+ offset->set(UINT32_MAX);
+ return true;
+ }
+
+ // Normal case of indexed properties for loading array elements
+ uint32_t index;
+ if (!IdIsIndex(id, &index)) {
+ return false;
+ }
+ uint32_t numElements = object->as<WasmArrayObject>().numElements_;
+ if (index >= numElements) {
+ return false;
+ }
+ uint64_t scaledIndex =
+ uint64_t(index) * uint64_t(arrayType.elementType_.size());
+ if (scaledIndex >= uint64_t(UINT32_MAX)) {
+      // It's unrepresentable as a WasmGcObject::PropOffset.  Give up.
+ return false;
+ }
+ offset->set(uint32_t(scaledIndex));
+ *type = arrayType.elementType_;
+ return true;
+ }
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ return false;
+ }
+}
+
+const ObjectOps WasmGcObject::objectOps_ = {
+ WasmGcObject::obj_lookupProperty, // lookupProperty
+ WasmGcObject::obj_defineProperty, // defineProperty
+ WasmGcObject::obj_hasProperty, // hasProperty
+ WasmGcObject::obj_getProperty, // getProperty
+ WasmGcObject::obj_setProperty, // setProperty
+ WasmGcObject::obj_getOwnPropertyDescriptor, // getOwnPropertyDescriptor
+ WasmGcObject::obj_deleteProperty, // deleteProperty
+ nullptr, // getElements
+ nullptr, // funToString
+};
+
+/* static */
+bool WasmGcObject::obj_lookupProperty(JSContext* cx, HandleObject obj,
+ HandleId id, MutableHandleObject objp,
+ PropertyResult* propp) {
+ Rooted<WasmGcObject*> typedObj(cx, &obj->as<WasmGcObject>());
+ if (typedObj->hasProperty(cx, typedObj, id)) {
+ propp->setWasmGcProperty();
+ objp.set(obj);
+ return true;
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ objp.set(nullptr);
+ propp->setNotFound();
+ return true;
+ }
+
+ return LookupProperty(cx, proto, id, objp, propp);
+}
+
+bool WasmGcObject::obj_defineProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_OBJECT_NOT_EXTENSIBLE, "WasmGcObject");
+ return false;
+}
+
+bool WasmGcObject::obj_hasProperty(JSContext* cx, HandleObject obj, HandleId id,
+ bool* foundp) {
+ Rooted<WasmGcObject*> typedObj(cx, &obj->as<WasmGcObject>());
+ if (typedObj->hasProperty(cx, typedObj, id)) {
+ *foundp = true;
+ return true;
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ *foundp = false;
+ return true;
+ }
+
+ return HasProperty(cx, proto, id, foundp);
+}
+
+bool WasmGcObject::obj_getProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp) {
+ Rooted<WasmGcObject*> typedObj(cx, &obj->as<WasmGcObject>());
+
+ WasmGcObject::PropOffset offset;
+ FieldType type;
+ if (typedObj->lookupProperty(cx, typedObj, id, &offset, &type)) {
+ return typedObj->loadValue(cx, offset, type, vp);
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ vp.setUndefined();
+ return true;
+ }
+
+ return GetProperty(cx, proto, receiver, id, vp);
+}
+
+bool WasmGcObject::obj_setProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, HandleValue receiver,
+ ObjectOpResult& result) {
+ Rooted<WasmGcObject*> typedObj(cx, &obj->as<WasmGcObject>());
+
+ if (typedObj->hasProperty(cx, typedObj, id)) {
+ if (!receiver.isObject() || obj != &receiver.toObject()) {
+ return SetPropertyByDefining(cx, id, v, receiver, result);
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPEDOBJECT_SETTING_IMMUTABLE);
+ return false;
+ }
+
+ return SetPropertyOnProto(cx, obj, id, v, receiver, result);
+}
+
+bool WasmGcObject::obj_getOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc) {
+ Rooted<WasmGcObject*> typedObj(cx, &obj->as<WasmGcObject>());
+
+ WasmGcObject::PropOffset offset;
+ FieldType type;
+ if (typedObj->lookupProperty(cx, typedObj, id, &offset, &type)) {
+ RootedValue value(cx);
+ if (!typedObj->loadValue(cx, offset, type, &value)) {
+ return false;
+ }
+ desc.set(mozilla::Some(PropertyDescriptor::Data(
+ value,
+ {JS::PropertyAttribute::Enumerable, JS::PropertyAttribute::Writable})));
+ return true;
+ }
+
+ desc.reset();
+ return true;
+}
+
+bool WasmGcObject::obj_deleteProperty(JSContext* cx, HandleObject obj,
+ HandleId id, ObjectOpResult& result) {
+ Rooted<WasmGcObject*> typedObj(cx, &obj->as<WasmGcObject>());
+ if (typedObj->hasProperty(cx, typedObj, id)) {
+ return Throw(cx, id, JSMSG_CANT_DELETE);
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ return result.succeed();
+ }
+
+ return DeleteProperty(cx, proto, id, result);
+}
+
+/* static */
+WasmGcObject* WasmGcObject::create(JSContext* cx,
+ wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap) {
+ MOZ_ASSERT(IsWasmGcObjectClass(typeDefData->clasp));
+ MOZ_ASSERT(!typeDefData->clasp->isNativeObject());
+
+ debugCheckNewObject(typeDefData->shape, typeDefData->allocKind, initialHeap);
+
+ WasmGcObject* obj =
+ cx->newCell<WasmGcObject>(typeDefData->allocKind, initialHeap,
+ typeDefData->clasp, &typeDefData->allocSite);
+ if (!obj) {
+ return nullptr;
+ }
+
+ obj->initShape(typeDefData->shape);
+ obj->superTypeVector_ = typeDefData->superTypeVector;
+
+ js::gc::gcprobes::CreateObject(obj);
+ probes::CreateObject(cx, obj);
+
+ return obj;
+}
+
+bool WasmGcObject::loadValue(JSContext* cx,
+ const WasmGcObject::PropOffset& offset,
+ FieldType type, MutableHandleValue vp) {
+ // Temporary hack, (ref T) is not exposable to JS yet but some tests would
+ // like to access it so we erase (ref T) with eqref when loading. This is
+ // safe as (ref T) <: eqref and we're not in the writing case where we
+ // would need to perform a type check.
+ if (type.isTypeRef()) {
+ type = RefType::fromTypeCode(TypeCode::EqRef, true);
+ }
+
+ if (!type.isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+
+ if (is<WasmStructObject>()) {
+ // `offset` is the field offset, without regard to the in/out-line split.
+ // That is handled by the call to `fieldOffsetToAddress`.
+ WasmStructObject& structObj = as<WasmStructObject>();
+ // Ensure no out-of-range access possible
+ MOZ_RELEASE_ASSERT(structObj.kind() == TypeDefKind::Struct);
+ MOZ_RELEASE_ASSERT(offset.get() + type.size() <=
+ structObj.typeDef().structType().size_);
+ return ToJSValue(cx, structObj.fieldOffsetToAddress(type, offset.get()),
+ type, vp);
+ }
+
+ MOZ_ASSERT(is<WasmArrayObject>());
+ WasmArrayObject& arrayObj = as<WasmArrayObject>();
+ if (offset.get() == UINT32_MAX) {
+ // This denotes "length"
+ uint32_t numElements = arrayObj.numElements_;
+ // We can't use `ToJSValue(.., ValType::I32, ..)` here since it will
+ // treat the integer as signed, which it isn't. `vp.set(..)` will
+ // coerce correctly to a JS::Value, though.
+ vp.set(NumberValue(numElements));
+ return true;
+ }
+ return ToJSValue(cx, arrayObj.data_ + offset.get(), type, vp);
+}
+
+bool WasmGcObject::isRuntimeSubtypeOf(
+ const wasm::TypeDef* parentTypeDef) const {
+ return TypeDef::isSubTypeOf(&typeDef(), parentTypeDef);
+}
+
+bool WasmGcObject::obj_newEnumerate(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector properties,
+ bool enumerableOnly) {
+ MOZ_ASSERT(obj->is<WasmGcObject>());
+ Rooted<WasmGcObject*> typedObj(cx, &obj->as<WasmGcObject>());
+
+ size_t indexCount = 0;
+ size_t otherCount = 0;
+ switch (typedObj->kind()) {
+ case wasm::TypeDefKind::Struct: {
+ indexCount = typedObj->typeDef().structType().fields_.length();
+ break;
+ }
+ case wasm::TypeDefKind::Array: {
+ indexCount = typedObj->as<WasmArrayObject>().numElements_;
+ otherCount = 1;
+ break;
+ }
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+
+ if (!properties.reserve(indexCount + otherCount)) {
+ return false;
+ }
+ RootedId id(cx);
+ for (size_t index = 0; index < indexCount; index++) {
+ id = PropertyKey::Int(int32_t(index));
+ properties.infallibleAppend(id);
+ }
+
+ if (typedObj->kind() == wasm::TypeDefKind::Array) {
+ properties.infallibleAppend(NameToId(cx->runtime()->commonNames->length));
+ }
+
+ return true;
+}
+
+static void WriteValTo(const Val& val, FieldType ty, void* dest) {
+ switch (ty.kind()) {
+ case FieldType::I8:
+ *((uint8_t*)dest) = val.i32();
+ break;
+ case FieldType::I16:
+ *((uint16_t*)dest) = val.i32();
+ break;
+ case FieldType::I32:
+ *((uint32_t*)dest) = val.i32();
+ break;
+ case FieldType::I64:
+ *((uint64_t*)dest) = val.i64();
+ break;
+ case FieldType::F32:
+ *((float*)dest) = val.f32();
+ break;
+ case FieldType::F64:
+ *((double*)dest) = val.f64();
+ break;
+ case FieldType::V128:
+ *((V128*)dest) = val.v128();
+ break;
+ case FieldType::Ref:
+ *((GCPtr<AnyRef>*)dest) = val.ref();
+ break;
+ }
+}
+
+//=========================================================================
+// WasmArrayObject
+
+/* static */
+gc::AllocKind WasmArrayObject::allocKind() {
+ return gc::GetGCObjectKindForBytes(sizeof(WasmArrayObject));
+}
+
+/* static */
+template <bool ZeroFields>
+WasmArrayObject* WasmArrayObject::createArray(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap, uint32_t numElements) {
+ const TypeDef* typeDef = typeDefData->typeDef;
+ STATIC_ASSERT_WASMARRAYELEMENTS_NUMELEMENTS_IS_U32;
+ MOZ_ASSERT(typeDef->kind() == wasm::TypeDefKind::Array);
+
+ // Calculate the byte length of the outline storage, being careful to check
+ // for overflow. Note this logic assumes that MaxArrayPayloadBytes is
+ // within uint32_t range.
+ CheckedUint32 outlineBytes = typeDef->arrayType().elementType_.size();
+ outlineBytes *= numElements;
+ if (!outlineBytes.isValid() ||
+ outlineBytes.value() > uint32_t(MaxArrayPayloadBytes)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_ARRAY_IMP_LIMIT);
+ return nullptr;
+ }
+
+ // Allocate the outline data before allocating the object so that we can
+ // infallibly initialize the pointer on the array object after it is
+ // allocated.
+ Nursery& nursery = cx->nursery();
+ PointerAndUint7 outlineData(nullptr, 0);
+ if (outlineBytes.value() > 0) {
+ outlineData = nursery.mallocedBlockCache().alloc(outlineBytes.value());
+ if (!outlineData.pointer()) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+
+ // It's unfortunate that `arrayObj` has to be rooted, since this is a hot
+ // path and rooting costs around 15 instructions. It is the call to
+ // registerTrailer that makes it necessary.
+ Rooted<WasmArrayObject*> arrayObj(cx);
+ arrayObj =
+ (WasmArrayObject*)WasmGcObject::create(cx, typeDefData, initialHeap);
+ if (!arrayObj) {
+ ReportOutOfMemory(cx);
+ if (outlineData.pointer()) {
+ nursery.mallocedBlockCache().free(outlineData);
+ }
+ return nullptr;
+ }
+
+ arrayObj->numElements_ = numElements;
+ arrayObj->data_ = (uint8_t*)outlineData.pointer();
+ if (outlineData.pointer()) {
+ if constexpr (ZeroFields) {
+ memset(outlineData.pointer(), 0, outlineBytes.value());
+ }
+ if (js::gc::IsInsideNursery(arrayObj)) {
+ // We need to register the OOL area with the nursery, so it will be
+      // freed after a minor GC if `arrayObj` doesn't make it into
+ // the tenured heap.
+ if (!nursery.registerTrailer(outlineData, outlineBytes.value())) {
+ nursery.mallocedBlockCache().free(outlineData);
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+ }
+
+ return arrayObj;
+}
+
+template WasmArrayObject* WasmArrayObject::createArray<true>(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap, uint32_t numElements);
+template WasmArrayObject* WasmArrayObject::createArray<false>(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap, uint32_t numElements);
+
+/* static */
+void WasmArrayObject::obj_trace(JSTracer* trc, JSObject* object) {
+ WasmArrayObject& arrayObj = object->as<WasmArrayObject>();
+ uint8_t* data = arrayObj.data_;
+ if (!data) {
+ MOZ_ASSERT(arrayObj.numElements_ == 0);
+ return;
+ }
+
+ const auto& typeDef = arrayObj.typeDef();
+ const auto& arrayType = typeDef.arrayType();
+ if (!arrayType.elementType_.isRefRepr()) {
+ return;
+ }
+
+ uint32_t numElements = arrayObj.numElements_;
+ MOZ_ASSERT(numElements > 0);
+ uint32_t elemSize = arrayType.elementType_.size();
+ for (uint32_t i = 0; i < numElements; i++) {
+ GCPtr<JSObject*>* objectPtr =
+ reinterpret_cast<GCPtr<JSObject*>*>(data + i * elemSize);
+ TraceNullableEdge(trc, objectPtr, "reference-obj");
+ }
+}
+
+/* static */
+void WasmArrayObject::obj_finalize(JS::GCContext* gcx, JSObject* object) {
+ WasmArrayObject& arrayObj = object->as<WasmArrayObject>();
+ MOZ_ASSERT((arrayObj.data_ == nullptr) == (arrayObj.numElements_ == 0));
+ if (arrayObj.data_) {
+ js_free(arrayObj.data_);
+ arrayObj.data_ = nullptr;
+ }
+}
+
+/* static */
+size_t WasmArrayObject::obj_moved(JSObject* obj, JSObject* old) {
+ MOZ_ASSERT(!IsInsideNursery(obj));
+ if (IsInsideNursery(old)) {
+ // It's been tenured.
+ MOZ_ASSERT(obj->isTenured());
+ WasmArrayObject& arrayObj = obj->as<WasmArrayObject>();
+ if (arrayObj.data_) {
+ Nursery& nursery = obj->runtimeFromMainThread()->gc.nursery();
+ nursery.unregisterTrailer(arrayObj.data_);
+ }
+ }
+ return 0;
+}
+
+void WasmArrayObject::storeVal(const Val& val, uint32_t itemIndex) {
+ const ArrayType& arrayType = typeDef().arrayType();
+ size_t elementSize = arrayType.elementType_.size();
+ MOZ_ASSERT(itemIndex < numElements_);
+ uint8_t* data = data_ + elementSize * itemIndex;
+ WriteValTo(val, arrayType.elementType_, data);
+}
+
+void WasmArrayObject::fillVal(const Val& val, uint32_t itemIndex,
+ uint32_t len) {
+ const ArrayType& arrayType = typeDef().arrayType();
+ size_t elementSize = arrayType.elementType_.size();
+ uint8_t* data = data_ + elementSize * itemIndex;
+ MOZ_ASSERT(itemIndex <= numElements_ && len <= numElements_ - itemIndex);
+ for (uint32_t i = 0; i < len; i++) {
+ WriteValTo(val, arrayType.elementType_, data);
+ data += elementSize;
+ }
+}
+
+static const JSClassOps WasmArrayObjectClassOps = {
+ nullptr, /* addProperty */
+ nullptr, /* delProperty */
+ nullptr, /* enumerate */
+ WasmGcObject::obj_newEnumerate,
+ nullptr, /* resolve */
+ nullptr, /* mayResolve */
+ WasmArrayObject::obj_finalize, /* finalize */
+ nullptr, /* call */
+ nullptr, /* construct */
+ WasmArrayObject::obj_trace,
+};
+static const ClassExtension WasmArrayObjectClassExt = {
+ WasmArrayObject::obj_moved /* objectMovedOp */
+};
+const JSClass WasmArrayObject::class_ = {
+ "WasmArrayObject",
+ JSClass::NON_NATIVE | JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_BACKGROUND_FINALIZE | JSCLASS_SKIP_NURSERY_FINALIZE,
+ &WasmArrayObjectClassOps,
+ JS_NULL_CLASS_SPEC,
+ &WasmArrayObjectClassExt,
+ &WasmGcObject::objectOps_};
+
+//=========================================================================
+// WasmStructObject
+
+/* static */
+const JSClass* js::WasmStructObject::classForTypeDef(
+ const wasm::TypeDef* typeDef) {
+ MOZ_ASSERT(typeDef->kind() == wasm::TypeDefKind::Struct);
+ size_t nbytes = typeDef->structType().size_;
+ return nbytes > WasmStructObject_MaxInlineBytes
+ ? &WasmStructObject::classOutline_
+ : &WasmStructObject::classInline_;
+}
+
+/* static */
+js::gc::AllocKind js::WasmStructObject::allocKindForTypeDef(
+ const wasm::TypeDef* typeDef) {
+ MOZ_ASSERT(typeDef->kind() == wasm::TypeDefKind::Struct);
+ size_t nbytes = typeDef->structType().size_;
+
+ // `nbytes` is the total required size for all struct fields, including
+  // padding.  What we need is the size of the resulting WasmStructObject,
+ // ignoring any space used for out-of-line data. First, restrict `nbytes`
+ // to cover just the inline data.
+ if (nbytes > WasmStructObject_MaxInlineBytes) {
+ nbytes = WasmStructObject_MaxInlineBytes;
+ }
+
+ // Now convert it to size of the WasmStructObject as a whole.
+ nbytes = sizeOfIncludingInlineData(nbytes);
+
+ return gc::GetGCObjectKindForBytes(nbytes);
+}
+
+/* static MOZ_NEVER_INLINE */
+template <bool ZeroFields>
+WasmStructObject* WasmStructObject::createStructOOL(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap, uint32_t inlineBytes, uint32_t outlineBytes) {
+ // This method is called as the slow path from the (inlineable)
+ // WasmStructObject::createStruct. It handles the case where an object
+ // needs OOL storage. It doesn't handle the non-OOL case at all.
+
+ // Allocate the outline data area before allocating the object so that we can
+ // infallibly initialize the outline data area.
+ Nursery& nursery = cx->nursery();
+ PointerAndUint7 outlineData =
+ nursery.mallocedBlockCache().alloc(outlineBytes);
+ if (MOZ_UNLIKELY(!outlineData.pointer())) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ // See corresponding comment in WasmArrayObject::createArray.
+ Rooted<WasmStructObject*> structObj(cx);
+ structObj =
+ (WasmStructObject*)WasmGcObject::create(cx, typeDefData, initialHeap);
+ if (MOZ_UNLIKELY(!structObj)) {
+ ReportOutOfMemory(cx);
+ if (outlineData.pointer()) {
+ nursery.mallocedBlockCache().free(outlineData);
+ }
+ return nullptr;
+ }
+
+ // Initialize the outline data fields
+ structObj->outlineData_ = (uint8_t*)outlineData.pointer();
+ if constexpr (ZeroFields) {
+ memset(&(structObj->inlineData_[0]), 0, inlineBytes);
+ memset(outlineData.pointer(), 0, outlineBytes);
+ }
+ if (MOZ_LIKELY(js::gc::IsInsideNursery(structObj))) {
+ // See corresponding comment in WasmArrayObject::createArray.
+ if (!nursery.registerTrailer(outlineData, outlineBytes)) {
+ nursery.mallocedBlockCache().free(outlineData);
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+
+ return structObj;
+}
+
+template WasmStructObject* WasmStructObject::createStruct<true>(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap);
+template WasmStructObject* WasmStructObject::createStruct<false>(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap);
+
+/* static */
+void WasmStructObject::obj_trace(JSTracer* trc, JSObject* object) {
+ WasmStructObject& structObj = object->as<WasmStructObject>();
+
+ const auto& structType = structObj.typeDef().structType();
+ for (uint32_t offset : structType.inlineTraceOffsets_) {
+ GCPtr<JSObject*>* objectPtr =
+ reinterpret_cast<GCPtr<JSObject*>*>(&structObj.inlineData_[0] + offset);
+ TraceNullableEdge(trc, objectPtr, "reference-obj");
+ }
+ for (uint32_t offset : structType.outlineTraceOffsets_) {
+ GCPtr<JSObject*>* objectPtr =
+ reinterpret_cast<GCPtr<JSObject*>*>(structObj.outlineData_ + offset);
+ TraceNullableEdge(trc, objectPtr, "reference-obj");
+ }
+}
+
+/* static */
+void WasmStructObject::obj_finalize(JS::GCContext* gcx, JSObject* object) {
+ WasmStructObject& structObj = object->as<WasmStructObject>();
+
+ if (structObj.outlineData_) {
+ js_free(structObj.outlineData_);
+ structObj.outlineData_ = nullptr;
+ }
+}
+
+/* static */
+size_t WasmStructObject::obj_moved(JSObject* obj, JSObject* old) {
+ MOZ_ASSERT(!IsInsideNursery(obj));
+ if (IsInsideNursery(old)) {
+ // It's been tenured.
+ MOZ_ASSERT(obj->isTenured());
+ WasmStructObject& structObj = obj->as<WasmStructObject>();
+ // WasmStructObject::classForTypeDef ensures we only get called for
+ // structs with OOL data. Hence:
+ MOZ_ASSERT(structObj.outlineData_);
+ Nursery& nursery = obj->runtimeFromMainThread()->gc.nursery();
+ nursery.unregisterTrailer(structObj.outlineData_);
+ }
+ return 0;
+}
+
+void WasmStructObject::storeVal(const Val& val, uint32_t fieldIndex) {
+  const StructType& structType = typeDef().structType();
+  // Check the index before using it to access fields_.
+  MOZ_ASSERT(fieldIndex < structType.fields_.length());
+  FieldType fieldType = structType.fields_[fieldIndex].type;
+  uint32_t fieldOffset = structType.fields_[fieldIndex].offset;
+
+ bool areaIsOutline;
+ uint32_t areaOffset;
+ fieldOffsetToAreaAndOffset(fieldType, fieldOffset, &areaIsOutline,
+ &areaOffset);
+
+ uint8_t* data;
+ if (areaIsOutline) {
+ data = outlineData_ + areaOffset;
+ } else {
+ data = inlineData_ + areaOffset;
+ }
+
+ WriteValTo(val, fieldType, data);
+}
+
+static const JSClassOps WasmStructObjectOutlineClassOps = {
+ nullptr, /* addProperty */
+ nullptr, /* delProperty */
+ nullptr, /* enumerate */
+ WasmGcObject::obj_newEnumerate,
+ nullptr, /* resolve */
+ nullptr, /* mayResolve */
+ WasmStructObject::obj_finalize, /* finalize */
+ nullptr, /* call */
+ nullptr, /* construct */
+ WasmStructObject::obj_trace,
+};
+static const ClassExtension WasmStructObjectOutlineClassExt = {
+ WasmStructObject::obj_moved /* objectMovedOp */
+};
+const JSClass WasmStructObject::classOutline_ = {
+ "WasmStructObject",
+ JSClass::NON_NATIVE | JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_BACKGROUND_FINALIZE | JSCLASS_SKIP_NURSERY_FINALIZE,
+ &WasmStructObjectOutlineClassOps,
+ JS_NULL_CLASS_SPEC,
+ &WasmStructObjectOutlineClassExt,
+ &WasmGcObject::objectOps_};
+
+// Structs that only have inline data get a different class without a
+// finalizer. This class should otherwise be identical to the class for
+// structs with outline data.
+static const JSClassOps WasmStructObjectInlineClassOps = {
+ nullptr, /* addProperty */
+ nullptr, /* delProperty */
+ nullptr, /* enumerate */
+ WasmGcObject::obj_newEnumerate,
+ nullptr, /* resolve */
+ nullptr, /* mayResolve */
+ nullptr, /* finalize */
+ nullptr, /* call */
+ nullptr, /* construct */
+ WasmStructObject::obj_trace,
+};
+static const ClassExtension WasmStructObjectInlineClassExt = {
+ nullptr /* objectMovedOp */
+};
+const JSClass WasmStructObject::classInline_ = {
+ "WasmStructObject",
+ JSClass::NON_NATIVE | JSCLASS_DELAY_METADATA_BUILDER,
+ &WasmStructObjectInlineClassOps,
+ JS_NULL_CLASS_SPEC,
+ &WasmStructObjectInlineClassExt,
+ &WasmGcObject::objectOps_};
diff --git a/js/src/wasm/WasmGcObject.h b/js/src/wasm/WasmGcObject.h
new file mode 100644
index 0000000000..f3d82c97fe
--- /dev/null
+++ b/js/src/wasm/WasmGcObject.h
@@ -0,0 +1,396 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef wasm_WasmGcObject_h
+#define wasm_WasmGcObject_h
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Allocator.h"
+#include "gc/Pretenuring.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/JSObject.h"
+#include "wasm/WasmInstanceData.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValType.h"
+
+using js::wasm::FieldType;
+
+namespace js {
+
+//=========================================================================
+// WasmGcObject
+
+class WasmGcObject : public JSObject {
+ protected:
+ const wasm::SuperTypeVector* superTypeVector_;
+
+ static const ObjectOps objectOps_;
+
+ [[nodiscard]] static bool obj_lookupProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ MutableHandleObject objp,
+ PropertyResult* propp);
+
+ [[nodiscard]] static bool obj_defineProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result);
+
+ [[nodiscard]] static bool obj_hasProperty(JSContext* cx, HandleObject obj,
+ HandleId id, bool* foundp);
+
+ [[nodiscard]] static bool obj_getProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp);
+
+ [[nodiscard]] static bool obj_setProperty(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue v,
+ HandleValue receiver,
+ ObjectOpResult& result);
+
+ [[nodiscard]] static bool obj_getOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<mozilla::Maybe<PropertyDescriptor>> desc);
+
+ [[nodiscard]] static bool obj_deleteProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ ObjectOpResult& result);
+
+ // PropOffset is a uint32_t that is used to carry information about the
+  // location of a value from WasmGcObject::lookupProperty to
+ // WasmGcObject::loadValue. It is distinct from a normal uint32_t to
+ // emphasise the fact that it cannot be interpreted as an offset in any
+ // single contiguous area of memory:
+ //
+ // * If the object in question is a WasmStructObject, it is the value of
+ // `wasm::StructField::offset` for the relevant field, without regard to
+ // the inline/outline split.
+ //
+ // * If the object in question is a WasmArrayObject, then
+ // - u32 == UINT32_MAX (0xFFFF'FFFF) means the "length" property
+ // is requested
+ // - u32 < UINT32_MAX means the array element starting at that byte
+ // offset in WasmArrayObject::data_. It is not an array index value.
+ // See WasmGcObject::lookupProperty for details.
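+  //
+  // For example (values illustrative only): for a WasmArrayObject whose
+  // element type is i64, the indexed property 3 is carried as the PropOffset
+  // 24 (3 * sizeof(int64_t) bytes into data_), while the "length" property
+  // is carried as UINT32_MAX.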
+ class PropOffset {
+ uint32_t u32_;
+
+ public:
+ PropOffset() : u32_(0) {}
+ uint32_t get() const { return u32_; }
+ void set(uint32_t u32) { u32_ = u32; }
+ };
+
+ [[nodiscard]] bool lookupProperty(JSContext* cx,
+ js::Handle<WasmGcObject*> object, jsid id,
+ PropOffset* offset, wasm::FieldType* type);
+ [[nodiscard]] bool hasProperty(JSContext* cx,
+ js::Handle<WasmGcObject*> object, jsid id) {
+ WasmGcObject::PropOffset offset;
+ wasm::FieldType type;
+ return lookupProperty(cx, object, id, &offset, &type);
+ }
+
+ bool loadValue(JSContext* cx, const WasmGcObject::PropOffset& offset,
+ wasm::FieldType type, MutableHandleValue vp);
+
+ public:
+ const wasm::SuperTypeVector& superTypeVector() const {
+ return *superTypeVector_;
+ }
+
+ static size_t offsetOfSuperTypeVector() {
+ return offsetof(WasmGcObject, superTypeVector_);
+ }
+
+ // These are both expensive in that they involve a double indirection.
+ // Avoid them if possible.
+ const wasm::TypeDef& typeDef() const { return *superTypeVector().typeDef(); }
+ wasm::TypeDefKind kind() const { return superTypeVector().typeDef()->kind(); }
+
+ [[nodiscard]] bool isRuntimeSubtypeOf(
+ const wasm::TypeDef* parentTypeDef) const;
+
+ [[nodiscard]] static bool obj_newEnumerate(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector properties,
+ bool enumerableOnly);
+
+ protected:
+ // Create the GcObject (struct/array-specific fields are uninitialised).
+ // The type, shape, class pointer, alloc site and alloc kind are taken
+ // from `typeDefData`; the initial heap must be specified separately.
+ static WasmGcObject* create(JSContext* cx,
+ wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap);
+};
+
+//=========================================================================
+// WasmArrayObject
+
+// Class for a wasm array.  It contains a pointer to the array contents, which
+// live in the C++ heap.
+
+class WasmArrayObject : public WasmGcObject {
+ public:
+ static const JSClass class_;
+
+ // The number of elements in the array.
+ uint32_t numElements_;
+
+ // Owned data pointer, holding `numElements_` entries. This is null if
+ // `numElements_` is zero; otherwise it must be non-null. See bug 1812283.
+ uint8_t* data_;
+
+ // AllocKind for object creation
+ static gc::AllocKind allocKind();
+
+ // Creates a new array typed object, optionally initialized to zero, for the
+ // specified number of elements. Reports an error if the number of elements
+ // is too large, or if there is an out of memory error. The element type,
+ // shape, class pointer, alloc site and alloc kind are taken from
+ // `typeDefData`; the initial heap must be specified separately.
+ template <bool ZeroFields = true>
+ static WasmArrayObject* createArray(JSContext* cx,
+ wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap,
+ uint32_t numElements);
+
+ // JIT accessors
+ static constexpr size_t offsetOfNumElements() {
+ return offsetof(WasmArrayObject, numElements_);
+ }
+ static constexpr size_t offsetOfData() {
+ return offsetof(WasmArrayObject, data_);
+ }
+
+ // Tracing and finalization
+ static void obj_trace(JSTracer* trc, JSObject* object);
+ static void obj_finalize(JS::GCContext* gcx, JSObject* object);
+ static size_t obj_moved(JSObject* obj, JSObject* old);
+
+ void storeVal(const wasm::Val& val, uint32_t itemIndex);
+ void fillVal(const wasm::Val& val, uint32_t itemIndex, uint32_t len);
+};
+
+// Helper to mark all locations that assume that the type of
+// WasmArrayObject::numElements_ is uint32_t.
+#define STATIC_ASSERT_WASMARRAYELEMENTS_NUMELEMENTS_IS_U32 \
+ static_assert(sizeof(js::WasmArrayObject::numElements_) == sizeof(uint32_t))
+
+//=========================================================================
+// WasmStructObject
+
+// Class for a wasm struct. It has inline data and, if the inline area is
+// insufficient, a pointer to outline data that lives in the C++ heap.
+// Computing the field offsets is somewhat tricky; see block comment on `class
+// StructLayout` for background.
+
+class WasmStructObject : public WasmGcObject {
+ public:
+ static const JSClass classInline_;
+ static const JSClass classOutline_;
+
+ // Owned pointer to a malloc'd block containing out-of-line fields, or
+ // nullptr if none. Note that MIR alias analysis assumes this is readonly
+ // for the life of the object; do not change it once the object is created.
+ // See MWasmLoadObjectField::congruentTo.
+ uint8_t* outlineData_;
+
+ // The inline (wasm-struct-level) data fields. This must be a multiple of
+ // 16 bytes long in order to ensure that no field gets split across the
+ // inline-outline boundary. As a refinement, we request this field to begin
+ // at an 8-aligned offset relative to the start of the object, so as to
+ // guarantee that `double` typed fields are not subject to misaligned-access
+  // penalties on any target, while wasting at most 4 bytes of space.
+ //
+ // `inlineData_` is in reality a variable length block with maximum size
+ // WasmStructObject_MaxInlineBytes bytes. Do not add any (C++-level) fields
+ // after this point!
+ alignas(8) uint8_t inlineData_[0];
+
+ // This tells us how big the object is if we know the number of inline bytes
+ // it was created with.
+ static inline size_t sizeOfIncludingInlineData(size_t sizeOfInlineData) {
+ size_t n = sizeof(WasmStructObject) + sizeOfInlineData;
+ MOZ_ASSERT(n <= JSObject::MAX_BYTE_SIZE);
+ return n;
+ }
+
+ static const JSClass* classForTypeDef(const wasm::TypeDef* typeDef);
+ static js::gc::AllocKind allocKindForTypeDef(const wasm::TypeDef* typeDef);
+
+ // Slow path for ::createStruct, in which we know we need to generate a
+ // struct with an out-of-line storage area. This is marked as never-inline
+ // so as to maximise the chance that WasmStructObject::createStruct itself
+ // can be inlined.
+ template <bool ZeroFields>
+ static MOZ_NEVER_INLINE WasmStructObject* createStructOOL(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap, uint32_t inlineBytes, uint32_t outlineBytes);
+
+ // Creates a new struct typed object, optionally initialized to zero.
+  // Reports an error on out-of-memory.  The structure's type, shape,
+ // class pointer, alloc site and alloc kind are taken from `typeDefData`;
+ // the initial heap must be specified separately. For structs with no OOL
+ // storage requirement, the required work is accomplished in-line; otherwise
+ // we slow-path to WasmStructObject::createStructOOL.
+ template <bool ZeroFields = true>
+ static inline WasmStructObject* createStruct(
+ JSContext* cx, wasm::TypeDefInstanceData* typeDefData,
+ js::gc::Heap initialHeap) {
+ const wasm::TypeDef* typeDef = typeDefData->typeDef;
+ MOZ_ASSERT(typeDef->kind() == wasm::TypeDefKind::Struct);
+
+ uint32_t totalBytes = typeDef->structType().size_;
+ uint32_t inlineBytes, outlineBytes;
+ WasmStructObject::getDataByteSizes(totalBytes, &inlineBytes, &outlineBytes);
+
+ if (MOZ_LIKELY(outlineBytes == 0)) {
+ // This doesn't need to be rooted, since all we do with it prior to
+ // return is to zero out the fields (and then only if ZeroFields is true).
+ WasmStructObject* structObj =
+ (WasmStructObject*)WasmGcObject::create(cx, typeDefData, initialHeap);
+ if (MOZ_UNLIKELY(!structObj)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ structObj->outlineData_ = nullptr;
+ if constexpr (ZeroFields) {
+ memset(&(structObj->inlineData_[0]), 0, inlineBytes);
+ }
+ return structObj;
+ }
+
+ // OOL storage is required, so hand off to non-inlined code.
+ return WasmStructObject::createStructOOL<ZeroFields>(
+ cx, typeDefData, initialHeap, inlineBytes, outlineBytes);
+ }
+
+ // Given the total number of data bytes required (including alignment
+ // holes), return the number of inline and outline bytes required.
+ static inline void getDataByteSizes(uint32_t totalBytes,
+ uint32_t* inlineBytes,
+ uint32_t* outlineBytes);
+
+ // Given the offset of a field, produce the offset in `inlineData_` or
+ // `*outlineData_` to use, plus a bool indicating which area it is.
+ // `fieldType` is for assertional purposes only.
+ static inline void fieldOffsetToAreaAndOffset(FieldType fieldType,
+ uint32_t fieldOffset,
+ bool* areaIsOutline,
+ uint32_t* areaOffset);
+
+ // Given the offset of a field, return its actual address. `fieldType` is
+ // for assertional purposes only.
+ inline uint8_t* fieldOffsetToAddress(FieldType fieldType,
+ uint32_t fieldOffset);
+
+ // JIT accessors
+ static constexpr size_t offsetOfOutlineData() {
+ return offsetof(WasmStructObject, outlineData_);
+ }
+ static constexpr size_t offsetOfInlineData() {
+ return offsetof(WasmStructObject, inlineData_);
+ }
+
+ // Tracing and finalization
+ static void obj_trace(JSTracer* trc, JSObject* object);
+ static void obj_finalize(JS::GCContext* gcx, JSObject* object);
+ static size_t obj_moved(JSObject* obj, JSObject* old);
+
+ void storeVal(const wasm::Val& val, uint32_t fieldIndex);
+};
+
+// This is ensured by the use of `alignas` on `WasmStructObject::inlineData_`.
+static_assert((offsetof(WasmStructObject, inlineData_) % 8) == 0);
+
+// MaxInlineBytes must be a multiple of 16 for reasons described in the
+// comment on `class StructLayout`. This unfortunately can't be defined
+// inside the class definition itself because the sizeof(..) expression isn't
+// valid until after the end of the class definition.
+const size_t WasmStructObject_MaxInlineBytes =
+ ((JSObject::MAX_BYTE_SIZE - sizeof(WasmStructObject)) / 16) * 16;
+
+static_assert((WasmStructObject_MaxInlineBytes % 16) == 0);
+
+/*static*/
+inline void WasmStructObject::getDataByteSizes(uint32_t totalBytes,
+ uint32_t* inlineBytes,
+ uint32_t* outlineBytes) {
+ if (MOZ_UNLIKELY(totalBytes > WasmStructObject_MaxInlineBytes)) {
+ *inlineBytes = WasmStructObject_MaxInlineBytes;
+ *outlineBytes = totalBytes - WasmStructObject_MaxInlineBytes;
+ } else {
+ *inlineBytes = totalBytes;
+ *outlineBytes = 0;
+ }
+}
+
+/*static*/
+inline void WasmStructObject::fieldOffsetToAreaAndOffset(FieldType fieldType,
+ uint32_t fieldOffset,
+ bool* areaIsOutline,
+ uint32_t* areaOffset) {
+ if (fieldOffset < WasmStructObject_MaxInlineBytes) {
+ *areaIsOutline = false;
+ *areaOffset = fieldOffset;
+ } else {
+ *areaIsOutline = true;
+ *areaOffset = fieldOffset - WasmStructObject_MaxInlineBytes;
+ }
+ // Assert that the first and last bytes for the field agree on which side of
+ // the inline/outline boundary they live.
+ MOZ_RELEASE_ASSERT(
+ (fieldOffset < WasmStructObject_MaxInlineBytes) ==
+ ((fieldOffset + fieldType.size() - 1) < WasmStructObject_MaxInlineBytes));
+}
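+
+// Worked example (using an assumed, unrealistically small
+// WasmStructObject_MaxInlineBytes of 48, purely for illustration; the real
+// value is derived from JSObject::MAX_BYTE_SIZE above): a field of size 8 at
+// offset 40 is inline with areaOffset 40, since its last byte (47) is still
+// below 48, while a field at offset 48 is outline with areaOffset 0.  A
+// layout in which a field straddled the boundary would trip the release
+// assertion above.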
+
+inline uint8_t* WasmStructObject::fieldOffsetToAddress(FieldType fieldType,
+ uint32_t fieldOffset) {
+ bool areaIsOutline;
+ uint32_t areaOffset;
+ fieldOffsetToAreaAndOffset(fieldType, fieldOffset, &areaIsOutline,
+ &areaOffset);
+ return ((uint8_t*)(areaIsOutline ? outlineData_ : &inlineData_[0])) +
+ areaOffset;
+}
+
+// Ensure that faulting loads/stores for WasmStructObject and WasmArrayObject
+// are in the NULL pointer guard page.
+static_assert(WasmStructObject_MaxInlineBytes <= wasm::NullPtrGuardSize);
+static_assert(sizeof(WasmArrayObject) <= wasm::NullPtrGuardSize);
+
+} // namespace js
+
+//=========================================================================
+// misc
+
+namespace js {
+
+inline bool IsWasmGcObjectClass(const JSClass* class_) {
+ return class_ == &WasmArrayObject::class_ ||
+ class_ == &WasmStructObject::classInline_ ||
+ class_ == &WasmStructObject::classOutline_;
+}
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::WasmGcObject>() const {
+ return js::IsWasmGcObjectClass(getClass());
+}
+
+template <>
+inline bool JSObject::is<js::WasmStructObject>() const {
+ const JSClass* class_ = getClass();
+ return class_ == &js::WasmStructObject::classInline_ ||
+ class_ == &js::WasmStructObject::classOutline_;
+}
+
+#endif /* wasm_WasmGcObject_h */
diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp
new file mode 100644
index 0000000000..503bbd70ee
--- /dev/null
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -0,0 +1,1279 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmGenerator.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/SHA1.h"
+
+#include <algorithm>
+
+#include "jit/Assembler.h"
+#include "jit/JitOptions.h"
+#include "js/Printf.h"
+#include "threading/Thread.h"
+#include "util/Memory.h"
+#include "util/Text.h"
+#include "vm/HelperThreads.h"
+#include "vm/Time.h"
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmStubs.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::MakeEnumeratedRange;
+
+bool CompiledCode::swap(MacroAssembler& masm) {
+ MOZ_ASSERT(bytes.empty());
+ if (!masm.swapBuffer(bytes)) {
+ return false;
+ }
+
+ callSites.swap(masm.callSites());
+ callSiteTargets.swap(masm.callSiteTargets());
+ trapSites.swap(masm.trapSites());
+ symbolicAccesses.swap(masm.symbolicAccesses());
+ tryNotes.swap(masm.tryNotes());
+ codeLabels.swap(masm.codeLabels());
+ return true;
+}
+
+// ****************************************************************************
+// ModuleGenerator
+
+static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
+static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
+
+ModuleGenerator::ModuleGenerator(const CompileArgs& args,
+ ModuleEnvironment* moduleEnv,
+ CompilerEnvironment* compilerEnv,
+ const Atomic<bool>* cancelled,
+ UniqueChars* error,
+ UniqueCharsVector* warnings)
+ : compileArgs_(&args),
+ error_(error),
+ warnings_(warnings),
+ cancelled_(cancelled),
+ moduleEnv_(moduleEnv),
+ compilerEnv_(compilerEnv),
+ linkData_(nullptr),
+ metadataTier_(nullptr),
+ lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
+ masmAlloc_(&lifo_),
+ masm_(masmAlloc_, *moduleEnv, /* limitedSize= */ false),
+ debugTrapCodeOffset_(),
+ lastPatchedCallSite_(0),
+ startOfUnpatchedCallsites_(0),
+ parallel_(false),
+ outstanding_(0),
+ currentTask_(nullptr),
+ batchedBytecode_(0),
+ finishedFuncDefs_(false) {}
+
+ModuleGenerator::~ModuleGenerator() {
+ MOZ_ASSERT_IF(finishedFuncDefs_, !batchedBytecode_);
+ MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_);
+
+ if (parallel_) {
+ if (outstanding_) {
+ AutoLockHelperThreadState lock;
+
+ // Remove any pending compilation tasks from the worklist.
+ size_t removed = RemovePendingWasmCompileTasks(taskState_, mode(), lock);
+ MOZ_ASSERT(outstanding_ >= removed);
+ outstanding_ -= removed;
+
+ // Wait until all active compilation tasks have finished.
+ while (true) {
+ MOZ_ASSERT(outstanding_ >= taskState_.finished().length());
+ outstanding_ -= taskState_.finished().length();
+ taskState_.finished().clear();
+
+ MOZ_ASSERT(outstanding_ >= taskState_.numFailed());
+ outstanding_ -= taskState_.numFailed();
+ taskState_.numFailed() = 0;
+
+ if (!outstanding_) {
+ break;
+ }
+
+ taskState_.condVar().wait(lock); /* failed or finished */
+ }
+ }
+ } else {
+ MOZ_ASSERT(!outstanding_);
+ }
+
+ // Propagate error state.
+ if (error_ && !*error_) {
+ AutoLockHelperThreadState lock;
+ *error_ = std::move(taskState_.errorMessage());
+ }
+}
+
+// This is the highest offset into an Instance's data area that will not
+// overflow a signed 32-bit integer.
+static const uint32_t MaxInstanceDataOffset =
+ INT32_MAX - Instance::offsetOfData();
+
+bool ModuleGenerator::allocateInstanceDataBytes(uint32_t bytes, uint32_t align,
+ uint32_t* instanceDataOffset) {
+ CheckedInt<uint32_t> newInstanceDataLength(metadata_->instanceDataLength);
+
+  // Adjust the current instance data length so that it's aligned to `align`
+ newInstanceDataLength +=
+ ComputeByteAlignment(newInstanceDataLength.value(), align);
+ if (!newInstanceDataLength.isValid()) {
+ return false;
+ }
+
+ // The allocated data is given by the aligned length
+ *instanceDataOffset = newInstanceDataLength.value();
+
+ // Advance the length for `bytes` being allocated
+ newInstanceDataLength += bytes;
+ if (!newInstanceDataLength.isValid()) {
+ return false;
+ }
+
+ // Check that the highest offset into this allocated space would not overflow
+ // a signed 32-bit integer.
+ if (newInstanceDataLength.value() > MaxInstanceDataOffset + 1) {
+ return false;
+ }
+
+ metadata_->instanceDataLength = newInstanceDataLength.value();
+ return true;
+}
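+
+// For example (values purely illustrative): with instanceDataLength == 13, a
+// request for 24 bytes at 8-byte alignment first pads the length up to 16,
+// returns *instanceDataOffset == 16, and leaves instanceDataLength == 40.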
+
+bool ModuleGenerator::allocateInstanceDataBytesN(uint32_t bytes, uint32_t align,
+ uint32_t count,
+ uint32_t* instanceDataOffset) {
+ // The size of each allocation should be a multiple of alignment so that a
+ // contiguous array of allocations will be aligned
+ MOZ_ASSERT(bytes % align == 0);
+
+ // Compute the total bytes being allocated
+ CheckedInt<uint32_t> totalBytes = bytes;
+ totalBytes *= count;
+ if (!totalBytes.isValid()) {
+ return false;
+ }
+
+ // Allocate the bytes
+ return allocateInstanceDataBytes(totalBytes.value(), align,
+ instanceDataOffset);
+}
+
+bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata) {
+ // Perform fallible metadata, linkdata, assumption allocations.
+
+ MOZ_ASSERT(isAsmJS() == !!maybeAsmJSMetadata);
+ if (maybeAsmJSMetadata) {
+ metadata_ = maybeAsmJSMetadata;
+ } else {
+ metadata_ = js_new<Metadata>();
+ if (!metadata_) {
+ return false;
+ }
+ }
+
+ if (compileArgs_->scriptedCaller.filename) {
+ metadata_->filename =
+ DuplicateString(compileArgs_->scriptedCaller.filename.get());
+ if (!metadata_->filename) {
+ return false;
+ }
+
+ metadata_->filenameIsURL = compileArgs_->scriptedCaller.filenameIsURL;
+ } else {
+ MOZ_ASSERT(!compileArgs_->scriptedCaller.filenameIsURL);
+ }
+
+ if (compileArgs_->sourceMapURL) {
+ metadata_->sourceMapURL = DuplicateString(compileArgs_->sourceMapURL.get());
+ if (!metadata_->sourceMapURL) {
+ return false;
+ }
+ }
+
+ linkData_ = js::MakeUnique<LinkData>(tier());
+ if (!linkData_) {
+ return false;
+ }
+
+ metadataTier_ = js::MakeUnique<MetadataTier>(tier());
+ if (!metadataTier_) {
+ return false;
+ }
+
+ // funcToCodeRange maps function indices to code-range indices and all
+ // elements will be initialized by the time module generation is finished.
+
+ if (!metadataTier_->funcToCodeRange.appendN(BAD_CODE_RANGE,
+ moduleEnv_->funcs.length())) {
+ return false;
+ }
+
+ // Pre-reserve space for large Vectors to avoid the significant cost of the
+ // final reallocs. In particular, the MacroAssembler can be enormous, so be
+ // extra conservative. Since large over-reservations may fail when the
+ // actual allocations will succeed, ignore OOM failures. Note,
+ // shrinkStorageToFit calls at the end will trim off unneeded capacity.
+
+ size_t codeSectionSize =
+ moduleEnv_->codeSection ? moduleEnv_->codeSection->size : 0;
+
+ size_t estimatedCodeSize =
+ size_t(1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize));
+ (void)masm_.reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess));
+
+ (void)metadataTier_->codeRanges.reserve(2 * moduleEnv_->numFuncDefs());
+
+ const size_t ByteCodesPerCallSite = 50;
+ (void)metadataTier_->callSites.reserve(codeSectionSize /
+ ByteCodesPerCallSite);
+
+ const size_t ByteCodesPerOOBTrap = 10;
+ (void)metadataTier_->trapSites[Trap::OutOfBounds].reserve(
+ codeSectionSize / ByteCodesPerOOBTrap);
+
+ // Allocate space in instance for declarations that need it
+ MOZ_ASSERT(metadata_->instanceDataLength == 0);
+
+ // Allocate space for type definitions
+ if (!allocateInstanceDataBytesN(
+ sizeof(TypeDefInstanceData), alignof(TypeDefInstanceData),
+ moduleEnv_->types->length(), &moduleEnv_->typeDefsOffsetStart)) {
+ return false;
+ }
+ metadata_->typeDefsOffsetStart = moduleEnv_->typeDefsOffsetStart;
+
+ // Allocate space for every function import
+ if (!allocateInstanceDataBytesN(
+ sizeof(FuncImportInstanceData), alignof(FuncImportInstanceData),
+ moduleEnv_->numFuncImports, &moduleEnv_->funcImportsOffsetStart)) {
+ return false;
+ }
+
+ // Allocate space for every table
+ if (!allocateInstanceDataBytesN(
+ sizeof(TableInstanceData), alignof(TableInstanceData),
+ moduleEnv_->tables.length(), &moduleEnv_->tablesOffsetStart)) {
+ return false;
+ }
+ metadata_->tablesOffsetStart = moduleEnv_->tablesOffsetStart;
+
+ // Allocate space for every tag
+ if (!allocateInstanceDataBytesN(
+ sizeof(TagInstanceData), alignof(TagInstanceData),
+ moduleEnv_->tags.length(), &moduleEnv_->tagsOffsetStart)) {
+ return false;
+ }
+ metadata_->tagsOffsetStart = moduleEnv_->tagsOffsetStart;
+
+ // Allocate space for every global that requires it
+ for (GlobalDesc& global : moduleEnv_->globals) {
+ if (global.isConstant()) {
+ continue;
+ }
+
+ uint32_t width = global.isIndirect() ? sizeof(void*) : global.type().size();
+
+ uint32_t instanceDataOffset;
+ if (!allocateInstanceDataBytes(width, width, &instanceDataOffset)) {
+ return false;
+ }
+
+ global.setOffset(instanceDataOffset);
+ }
+
+ // Initialize function import metadata
+ if (!metadataTier_->funcImports.resize(moduleEnv_->numFuncImports)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < moduleEnv_->numFuncImports; i++) {
+ metadataTier_->funcImports[i] =
+ FuncImport(moduleEnv_->funcs[i].typeIndex,
+ moduleEnv_->offsetOfFuncImportInstanceData(i));
+ }
+
+ // Share type definitions with metadata
+ metadata_->types = moduleEnv_->types;
+
+ // Accumulate all exported functions:
+ // - explicitly marked as such;
+ // - implicitly exported by being an element of function tables;
+ // - implicitly exported by being the start function;
+ // - implicitly exported by being used in global ref.func initializer
+  // ModuleEnvironment accumulates this information for us during decoding;
+  // here we transfer it to the FuncExportVector stored in Metadata.
+
+ uint32_t exportedFuncCount = 0;
+ for (const FuncDesc& func : moduleEnv_->funcs) {
+ if (func.isExported()) {
+ exportedFuncCount++;
+ }
+ }
+ if (!metadataTier_->funcExports.reserve(exportedFuncCount)) {
+ return false;
+ }
+
+ for (uint32_t funcIndex = 0; funcIndex < moduleEnv_->funcs.length();
+ funcIndex++) {
+ const FuncDesc& func = moduleEnv_->funcs[funcIndex];
+
+ if (!func.isExported()) {
+ continue;
+ }
+
+ metadataTier_->funcExports.infallibleEmplaceBack(
+ FuncExport(func.typeIndex, funcIndex, func.isEager()));
+ }
+
+ // Determine whether parallel or sequential compilation is to be used and
+ // initialize the CompileTasks that will be used in either mode.
+
+ MOZ_ASSERT(GetHelperThreadCount() > 1);
+
+ uint32_t numTasks;
+ if (CanUseExtraThreads() && GetHelperThreadCPUCount() > 1) {
+ parallel_ = true;
+ numTasks = 2 * GetMaxWasmCompilationThreads();
+ } else {
+ numTasks = 1;
+ }
+
+ if (!tasks_.initCapacity(numTasks)) {
+ return false;
+ }
+ for (size_t i = 0; i < numTasks; i++) {
+ tasks_.infallibleEmplaceBack(*moduleEnv_, *compilerEnv_, taskState_,
+ COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
+ }
+
+ if (!freeTasks_.reserve(numTasks)) {
+ return false;
+ }
+ for (size_t i = 0; i < numTasks; i++) {
+ freeTasks_.infallibleAppend(&tasks_[i]);
+ }
+
+ // Fill in function stubs for each import so that imported functions can be
+ // used in all the places that normal function definitions can (table
+ // elements, export calls, etc).
+
+ CompiledCode& importCode = tasks_[0].output;
+ MOZ_ASSERT(importCode.empty());
+
+ if (!GenerateImportFunctions(*moduleEnv_, metadataTier_->funcImports,
+ &importCode)) {
+ return false;
+ }
+
+ if (!linkCompiledCode(importCode)) {
+ return false;
+ }
+
+ importCode.clear();
+ return true;
+}
+
+bool ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const {
+ return metadataTier_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE;
+}
+
+const CodeRange& ModuleGenerator::funcCodeRange(uint32_t funcIndex) const {
+ MOZ_ASSERT(funcIsCompiled(funcIndex));
+ const CodeRange& cr =
+ metadataTier_->codeRanges[metadataTier_->funcToCodeRange[funcIndex]];
+ MOZ_ASSERT(cr.isFunction());
+ return cr;
+}
+
+static bool InRange(uint32_t caller, uint32_t callee) {
+ // We assume JumpImmediateRange is defined conservatively enough that the
+ // slight difference between 'caller' (which is really the return address
+ // offset) and the actual base of the relative displacement computation
+ // isn't significant.
+ uint32_t range = std::min(JitOptions.jumpThreshold, JumpImmediateRange);
+ if (caller < callee) {
+ return callee - caller < range;
+ }
+ return caller - callee < range;
+}
+
+using OffsetMap =
+ HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
+using TrapMaybeOffsetArray =
+ EnumeratedArray<Trap, Trap::Limit, Maybe<uint32_t>>;
+
+bool ModuleGenerator::linkCallSites() {
+ AutoCreatedBy acb(masm_, "linkCallSites");
+
+ masm_.haltingAlign(CodeAlignment);
+
+ // Create far jumps for calls that have relative offsets that may otherwise
+ // go out of range. This method is called both between function bodies (at a
+ // frequency determined by the ISA's jump range) and once at the very end of
+ // a module's codegen after all possible calls/traps have been emitted.
+
+ OffsetMap existingCallFarJumps;
+ for (; lastPatchedCallSite_ < metadataTier_->callSites.length();
+ lastPatchedCallSite_++) {
+ const CallSite& callSite = metadataTier_->callSites[lastPatchedCallSite_];
+ const CallSiteTarget& target = callSiteTargets_[lastPatchedCallSite_];
+ uint32_t callerOffset = callSite.returnAddressOffset();
+ switch (callSite.kind()) {
+ case CallSiteDesc::Import:
+ case CallSiteDesc::Indirect:
+ case CallSiteDesc::IndirectFast:
+ case CallSiteDesc::Symbolic:
+ case CallSiteDesc::Breakpoint:
+ case CallSiteDesc::EnterFrame:
+ case CallSiteDesc::LeaveFrame:
+ case CallSiteDesc::FuncRef:
+ case CallSiteDesc::FuncRefFast:
+ break;
+ case CallSiteDesc::Func: {
+ if (funcIsCompiled(target.funcIndex())) {
+ uint32_t calleeOffset =
+ funcCodeRange(target.funcIndex()).funcUncheckedCallEntry();
+ if (InRange(callerOffset, calleeOffset)) {
+ masm_.patchCall(callerOffset, calleeOffset);
+ break;
+ }
+ }
+
+ OffsetMap::AddPtr p =
+ existingCallFarJumps.lookupForAdd(target.funcIndex());
+ if (!p) {
+ Offsets offsets;
+ offsets.begin = masm_.currentOffset();
+ if (!callFarJumps_.emplaceBack(target.funcIndex(),
+ masm_.farJumpWithPatch())) {
+ return false;
+ }
+ offsets.end = masm_.currentOffset();
+ if (masm_.oom()) {
+ return false;
+ }
+ if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
+ offsets)) {
+ return false;
+ }
+ if (!existingCallFarJumps.add(p, target.funcIndex(), offsets.begin)) {
+ return false;
+ }
+ }
+
+ masm_.patchCall(callerOffset, p->value());
+ break;
+ }
+ }
+ }
+
+ masm_.flushBuffer();
+ return !masm_.oom();
+}
+
+void ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex,
+ const CodeRange& codeRange) {
+ switch (codeRange.kind()) {
+ case CodeRange::Function:
+ MOZ_ASSERT(metadataTier_->funcToCodeRange[codeRange.funcIndex()] ==
+ BAD_CODE_RANGE);
+ metadataTier_->funcToCodeRange[codeRange.funcIndex()] = codeRangeIndex;
+ break;
+ case CodeRange::InterpEntry:
+ metadataTier_->lookupFuncExport(codeRange.funcIndex())
+ .initEagerInterpEntryOffset(codeRange.begin());
+ break;
+ case CodeRange::JitEntry:
+ // Nothing to do: jit entries are linked in the jump tables.
+ break;
+ case CodeRange::ImportJitExit:
+ metadataTier_->funcImports[codeRange.funcIndex()].initJitExitOffset(
+ codeRange.begin());
+ break;
+ case CodeRange::ImportInterpExit:
+ metadataTier_->funcImports[codeRange.funcIndex()].initInterpExitOffset(
+ codeRange.begin());
+ break;
+ case CodeRange::DebugTrap:
+ MOZ_ASSERT(!debugTrapCodeOffset_);
+ debugTrapCodeOffset_ = codeRange.begin();
+ break;
+ case CodeRange::TrapExit:
+ MOZ_ASSERT(!linkData_->trapOffset);
+ linkData_->trapOffset = codeRange.begin();
+ break;
+ case CodeRange::Throw:
+ // Jumped to by other stubs, so nothing to do.
+ break;
+ case CodeRange::FarJumpIsland:
+ case CodeRange::BuiltinThunk:
+ MOZ_CRASH("Unexpected CodeRange kind");
+ }
+}
+
+// Append every element from `srcVec` where `filterOp(srcElem) == true`.
+// Applies `mutateOp(dstElem)` to every element that is appended.
+template <class Vec, class FilterOp, class MutateOp>
+static bool AppendForEach(Vec* dstVec, const Vec& srcVec, FilterOp filterOp,
+ MutateOp mutateOp) {
+  // Eagerly grow the destination vector by the full length of the source
+  // vector.  Any filtered-out elements will be trimmed later.
+ if (!dstVec->growByUninitialized(srcVec.length())) {
+ return false;
+ }
+
+ using T = typename Vec::ElementType;
+
+ T* dstBegin = dstVec->begin();
+ T* dstEnd = dstVec->end();
+
+  // The destination grew by srcVec.length() elements above, so the first
+  // uninitialized element sits srcVec.length() entries before the end.
+ T* dst = dstEnd - srcVec.length();
+
+ for (const T* src = srcVec.begin(); src != srcVec.end(); src++) {
+ if (!filterOp(src)) {
+ continue;
+ }
+ new (dst) T(*src);
+ mutateOp(dst - dstBegin, dst);
+ dst++;
+ }
+
+ // Trim off the filtered out elements that were eagerly added at the
+ // beginning
+ size_t newSize = dst - dstBegin;
+ if (newSize != dstVec->length()) {
+ dstVec->shrinkTo(newSize);
+ }
+
+ return true;
+}
+
+template <typename T>
+bool FilterNothing(const T* element) {
+ return true;
+}
+
+// The same as the above `AppendForEach`, without performing any filtering.
+template <class Vec, class MutateOp>
+static bool AppendForEach(Vec* dstVec, const Vec& srcVec, MutateOp mutateOp) {
+ using T = typename Vec::ElementType;
+ return AppendForEach(dstVec, srcVec, &FilterNothing<T>, mutateOp);
+}
+
+bool ModuleGenerator::linkCompiledCode(CompiledCode& code) {
+ AutoCreatedBy acb(masm_, "ModuleGenerator::linkCompiledCode");
+ JitContext jcx;
+
+ // Before merging in new code, if calls in a prior code range might go out of
+ // range, insert far jumps to extend the range.
+
+ if (!InRange(startOfUnpatchedCallsites_,
+ masm_.size() + code.bytes.length())) {
+ startOfUnpatchedCallsites_ = masm_.size();
+ if (!linkCallSites()) {
+ return false;
+ }
+ }
+
+  // All code offsets in 'code' must be incremented by the offset at which the
+  // code is appended into the overall module.
+
+ masm_.haltingAlign(CodeAlignment);
+ const size_t offsetInModule = masm_.size();
+ if (!masm_.appendRawCode(code.bytes.begin(), code.bytes.length())) {
+ return false;
+ }
+
+ auto codeRangeOp = [offsetInModule, this](uint32_t codeRangeIndex,
+ CodeRange* codeRange) {
+ codeRange->offsetBy(offsetInModule);
+ noteCodeRange(codeRangeIndex, *codeRange);
+ };
+ if (!AppendForEach(&metadataTier_->codeRanges, code.codeRanges,
+ codeRangeOp)) {
+ return false;
+ }
+
+ auto callSiteOp = [=](uint32_t, CallSite* cs) {
+ cs->offsetBy(offsetInModule);
+ };
+ if (!AppendForEach(&metadataTier_->callSites, code.callSites, callSiteOp)) {
+ return false;
+ }
+
+ if (!callSiteTargets_.appendAll(code.callSiteTargets)) {
+ return false;
+ }
+
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ auto trapSiteOp = [=](uint32_t, TrapSite* ts) {
+ ts->offsetBy(offsetInModule);
+ };
+ if (!AppendForEach(&metadataTier_->trapSites[trap], code.trapSites[trap],
+ trapSiteOp)) {
+ return false;
+ }
+ }
+
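+  // Record where each symbolic access must be patched; the actual addresses
+  // are bound later, when the module's code segment is linked.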
+ for (const SymbolicAccess& access : code.symbolicAccesses) {
+ uint32_t patchAt = offsetInModule + access.patchAt.offset();
+ if (!linkData_->symbolicLinks[access.target].append(patchAt)) {
+ return false;
+ }
+ }
+
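+  // Code labels become internal links: patch locations and targets that are
+  // both relative to the module's code and are resolved once the final code
+  // segment is in place.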
+ for (const CodeLabel& codeLabel : code.codeLabels) {
+ LinkData::InternalLink link;
+ link.patchAtOffset = offsetInModule + codeLabel.patchAt().offset();
+ link.targetOffset = offsetInModule + codeLabel.target().offset();
+#ifdef JS_CODELABEL_LINKMODE
+ link.mode = codeLabel.linkMode();
+#endif
+ if (!linkData_->internalLinks.append(link)) {
+ return false;
+ }
+ }
+
+ for (size_t i = 0; i < code.stackMaps.length(); i++) {
+ StackMaps::Maplet maplet = code.stackMaps.move(i);
+ maplet.offsetBy(offsetInModule);
+ if (!metadataTier_->stackMaps.add(maplet)) {
+ // This function is now the only owner of maplet.map, so we'd better
+ // free it right now.
+ maplet.map->destroy();
+ return false;
+ }
+ }
+
+ auto tryNoteFilter = [](const TryNote* tn) {
+ // Filter out all try notes that were never given a try body. This may
+ // happen due to dead code elimination.
+ return tn->hasTryBody();
+ };
+ auto tryNoteOp = [=](uint32_t, TryNote* tn) { tn->offsetBy(offsetInModule); };
+ return AppendForEach(&metadataTier_->tryNotes, code.tryNotes, tryNoteFilter,
+ tryNoteOp);
+}
+
+static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
+ MOZ_ASSERT(task->lifo.isEmpty());
+ MOZ_ASSERT(task->output.empty());
+
+ switch (task->compilerEnv.tier()) {
+ case Tier::Optimized:
+ if (!IonCompileFunctions(task->moduleEnv, task->compilerEnv, task->lifo,
+ task->inputs, &task->output, error)) {
+ return false;
+ }
+ break;
+ case Tier::Baseline:
+ if (!BaselineCompileFunctions(task->moduleEnv, task->compilerEnv,
+ task->lifo, task->inputs, &task->output,
+ error)) {
+ return false;
+ }
+ break;
+ }
+
+ MOZ_ASSERT(task->lifo.isEmpty());
+ MOZ_ASSERT(task->inputs.length() == task->output.codeRanges.length());
+ task->inputs.clear();
+ return true;
+}
+
+void CompileTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
+ UniqueChars error;
+ bool ok;
+
+ {
+ AutoUnlockHelperThreadState unlock(lock);
+ ok = ExecuteCompileTask(this, &error);
+ }
+
+ // Don't release the lock between updating our state and returning from this
+ // method.
+
+ if (!ok || !state.finished().append(this)) {
+ state.numFailed()++;
+ if (!state.errorMessage()) {
+ state.errorMessage() = std::move(error);
+ }
+ }
+
+ state.condVar().notify_one(); /* failed or finished */
+}
+
+ThreadType CompileTask::threadType() {
+ switch (compilerEnv.mode()) {
+ case CompileMode::Once:
+ case CompileMode::Tier1:
+ return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER1;
+ case CompileMode::Tier2:
+ return ThreadType::THREAD_TYPE_WASM_COMPILE_TIER2;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+bool ModuleGenerator::locallyCompileCurrentTask() {
+ if (!ExecuteCompileTask(currentTask_, error_)) {
+ return false;
+ }
+ if (!finishTask(currentTask_)) {
+ return false;
+ }
+ currentTask_ = nullptr;
+ batchedBytecode_ = 0;
+ return true;
+}
+
+bool ModuleGenerator::finishTask(CompileTask* task) {
+ AutoCreatedBy acb(masm_, "ModuleGenerator::finishTask");
+
+ masm_.haltingAlign(CodeAlignment);
+
+ if (!linkCompiledCode(task->output)) {
+ return false;
+ }
+
+ task->output.clear();
+
+ MOZ_ASSERT(task->inputs.empty());
+ MOZ_ASSERT(task->output.empty());
+ MOZ_ASSERT(task->lifo.isEmpty());
+ freeTasks_.infallibleAppend(task);
+ return true;
+}
+
+bool ModuleGenerator::launchBatchCompile() {
+ MOZ_ASSERT(currentTask_);
+
+ if (cancelled_ && *cancelled_) {
+ return false;
+ }
+
+ if (!parallel_) {
+ return locallyCompileCurrentTask();
+ }
+
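+  // Hand the current batch off to a helper thread; it is handed back through
+  // taskState_.finished() (or recorded in numFailed()) when it completes.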
+ if (!StartOffThreadWasmCompile(currentTask_, mode())) {
+ return false;
+ }
+ outstanding_++;
+ currentTask_ = nullptr;
+ batchedBytecode_ = 0;
+ return true;
+}
+
+bool ModuleGenerator::finishOutstandingTask() {
+ MOZ_ASSERT(parallel_);
+
+ CompileTask* task = nullptr;
+ {
+ AutoLockHelperThreadState lock;
+ while (true) {
+ MOZ_ASSERT(outstanding_ > 0);
+
+ if (taskState_.numFailed() > 0) {
+ return false;
+ }
+
+ if (!taskState_.finished().empty()) {
+ outstanding_--;
+ task = taskState_.finished().popCopy();
+ break;
+ }
+
+ taskState_.condVar().wait(lock); /* failed or finished */
+ }
+ }
+
+ // Call outside of the compilation lock.
+ return finishTask(task);
+}
+
+bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
+ uint32_t lineOrBytecode,
+ const uint8_t* begin, const uint8_t* end,
+ Uint32Vector&& lineNums) {
+ MOZ_ASSERT(!finishedFuncDefs_);
+ MOZ_ASSERT(funcIndex < moduleEnv_->numFuncs());
+
+ uint32_t threshold;
+ switch (tier()) {
+ case Tier::Baseline:
+ threshold = JitOptions.wasmBatchBaselineThreshold;
+ break;
+ case Tier::Optimized:
+ threshold = JitOptions.wasmBatchIonThreshold;
+ break;
+ default:
+ MOZ_CRASH("Invalid tier value");
+ break;
+ }
+
+ uint32_t funcBytecodeLength = end - begin;
+
+ // Do not go over the threshold if we can avoid it: spin off the compilation
+ // before appending the function if we would go over. (Very large single
+ // functions may still exceed the threshold but this is fine; it'll be very
+ // uncommon and is in any case safely handled by the MacroAssembler's buffer
+ // limit logic.)
+
+ if (currentTask_ && currentTask_->inputs.length() &&
+ batchedBytecode_ + funcBytecodeLength > threshold) {
+ if (!launchBatchCompile()) {
+ return false;
+ }
+ }
+
+ if (!currentTask_) {
+ if (freeTasks_.empty() && !finishOutstandingTask()) {
+ return false;
+ }
+ currentTask_ = freeTasks_.popCopy();
+ }
+
+ if (!currentTask_->inputs.emplaceBack(funcIndex, lineOrBytecode, begin, end,
+ std::move(lineNums))) {
+ return false;
+ }
+
+ batchedBytecode_ += funcBytecodeLength;
+ MOZ_ASSERT(batchedBytecode_ <= MaxCodeSectionBytes);
+ return true;
+}
+
+bool ModuleGenerator::finishFuncDefs() {
+ MOZ_ASSERT(!finishedFuncDefs_);
+
+ if (currentTask_ && !locallyCompileCurrentTask()) {
+ return false;
+ }
+
+ finishedFuncDefs_ = true;
+ return true;
+}
+
+bool ModuleGenerator::finishCodegen() {
+ // Now that all functions and stubs are generated and their CodeRanges
+ // known, patch all calls (which can emit far jumps) and far jumps. Linking
+ // can emit tiny far-jump stubs, so there is an ordering dependency here.
+
+ if (!linkCallSites()) {
+ return false;
+ }
+
+ for (CallFarJump far : callFarJumps_) {
+ masm_.patchFarJump(far.jump,
+ funcCodeRange(far.funcIndex).funcUncheckedCallEntry());
+ }
+
+ metadataTier_->debugTrapOffset = debugTrapCodeOffset_;
+
+ // None of the linking or far-jump operations should emit masm metadata.
+
+ MOZ_ASSERT(masm_.callSites().empty());
+ MOZ_ASSERT(masm_.callSiteTargets().empty());
+ MOZ_ASSERT(masm_.trapSites().empty());
+ MOZ_ASSERT(masm_.symbolicAccesses().empty());
+ MOZ_ASSERT(masm_.tryNotes().empty());
+ MOZ_ASSERT(masm_.codeLabels().empty());
+
+ masm_.finish();
+ return !masm_.oom();
+}
+
+bool ModuleGenerator::finishMetadataTier() {
+ // The stackmaps aren't yet sorted. Do so now, since we'll need to
+ // binary-search them at GC time.
+ metadataTier_->stackMaps.finishAndSort();
+
+ // The try notes also need to be sorted to simplify lookup.
+ std::sort(metadataTier_->tryNotes.begin(), metadataTier_->tryNotes.end());
+
+#ifdef DEBUG
+  // Check that the stackmaps contain no duplicates, since duplicates could
+  // lead to ambiguity about stack-slot pointerness.
+ const uint8_t* previousNextInsnAddr = nullptr;
+ for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
+ const StackMaps::Maplet& maplet = metadataTier_->stackMaps.get(i);
+ MOZ_ASSERT_IF(i > 0, uintptr_t(maplet.nextInsnAddr) >
+ uintptr_t(previousNextInsnAddr));
+ previousNextInsnAddr = maplet.nextInsnAddr;
+ }
+
+  // Assert that all metadata that must be sorted is in fact sorted.
+ uint32_t last = 0;
+ for (const CodeRange& codeRange : metadataTier_->codeRanges) {
+ MOZ_ASSERT(codeRange.begin() >= last);
+ last = codeRange.end();
+ }
+
+ last = 0;
+ for (const CallSite& callSite : metadataTier_->callSites) {
+ MOZ_ASSERT(callSite.returnAddressOffset() >= last);
+ last = callSite.returnAddressOffset();
+ }
+
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ last = 0;
+ for (const TrapSite& trapSite : metadataTier_->trapSites[trap]) {
+ MOZ_ASSERT(trapSite.pcOffset >= last);
+ last = trapSite.pcOffset;
+ }
+ }
+
+  // Try notes should be sorted so that the ends of their ranges are in
+  // ascending order, which ensures that the innermost catch handler is chosen.
+ last = 0;
+ for (const TryNote& tryNote : metadataTier_->tryNotes) {
+ MOZ_ASSERT(tryNote.tryBodyEnd() >= last);
+ MOZ_ASSERT(tryNote.tryBodyEnd() > tryNote.tryBodyBegin());
+ last = tryNote.tryBodyBegin();
+ }
+#endif
+
+ // These Vectors can get large and the excess capacity can be significant,
+ // so realloc them down to size.
+
+ metadataTier_->funcToCodeRange.shrinkStorageToFit();
+ metadataTier_->codeRanges.shrinkStorageToFit();
+ metadataTier_->callSites.shrinkStorageToFit();
+ metadataTier_->trapSites.shrinkStorageToFit();
+ metadataTier_->tryNotes.shrinkStorageToFit();
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ metadataTier_->trapSites[trap].shrinkStorageToFit();
+ }
+
+ return true;
+}
+
+UniqueCodeTier ModuleGenerator::finishCodeTier() {
+ MOZ_ASSERT(finishedFuncDefs_);
+
+ while (outstanding_ > 0) {
+ if (!finishOutstandingTask()) {
+ return nullptr;
+ }
+ }
+
+#ifdef DEBUG
+ for (uint32_t codeRangeIndex : metadataTier_->funcToCodeRange) {
+ MOZ_ASSERT(codeRangeIndex != BAD_CODE_RANGE);
+ }
+#endif
+
+ // Now that all imports/exports are known, we can generate a special
+ // CompiledCode containing stubs.
+
+ CompiledCode& stubCode = tasks_[0].output;
+ MOZ_ASSERT(stubCode.empty());
+
+ if (!GenerateStubs(*moduleEnv_, metadataTier_->funcImports,
+ metadataTier_->funcExports, &stubCode)) {
+ return nullptr;
+ }
+
+ if (!linkCompiledCode(stubCode)) {
+ return nullptr;
+ }
+
+ // Finish linking and metadata.
+
+ if (!finishCodegen()) {
+ return nullptr;
+ }
+
+ if (!finishMetadataTier()) {
+ return nullptr;
+ }
+
+ UniqueModuleSegment segment =
+ ModuleSegment::create(tier(), masm_, *linkData_);
+ if (!segment) {
+ warnf("failed to allocate executable memory for module");
+ return nullptr;
+ }
+
+ metadataTier_->stackMaps.offsetBy(uintptr_t(segment->base()));
+
+#ifdef DEBUG
+ // Check that each stackmap is associated with a plausible instruction.
+ for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
+ MOZ_ASSERT(IsValidStackMapKey(compilerEnv_->debugEnabled(),
+ metadataTier_->stackMaps.get(i).nextInsnAddr),
+ "wasm stackmap does not reference a valid insn");
+ }
+#endif
+
+ return js::MakeUnique<CodeTier>(std::move(metadataTier_), std::move(segment));
+}
+
+SharedMetadata ModuleGenerator::finishMetadata(const Bytes& bytecode) {
+ // Finish initialization of Metadata, which is only needed for constructing
+ // the initial Module, not for tier-2 compilation.
+ MOZ_ASSERT(mode() != CompileMode::Tier2);
+
+ // Copy over data from the ModuleEnvironment.
+
+ metadata_->memory = moduleEnv_->memory;
+ metadata_->startFuncIndex = moduleEnv_->startFuncIndex;
+ metadata_->tables = std::move(moduleEnv_->tables);
+ metadata_->globals = std::move(moduleEnv_->globals);
+ metadata_->tags = std::move(moduleEnv_->tags);
+ metadata_->nameCustomSectionIndex = moduleEnv_->nameCustomSectionIndex;
+ metadata_->moduleName = moduleEnv_->moduleName;
+ metadata_->funcNames = std::move(moduleEnv_->funcNames);
+ metadata_->omitsBoundsChecks = moduleEnv_->hugeMemoryEnabled();
+
+ // Copy over additional debug information.
+
+ if (compilerEnv_->debugEnabled()) {
+ metadata_->debugEnabled = true;
+
+ const size_t numFuncs = moduleEnv_->funcs.length();
+ if (!metadata_->debugFuncTypeIndices.resize(numFuncs)) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < numFuncs; i++) {
+ metadata_->debugFuncTypeIndices[i] = moduleEnv_->funcs[i].typeIndex;
+ }
+
+ static_assert(sizeof(ModuleHash) <= sizeof(mozilla::SHA1Sum::Hash),
+ "The ModuleHash size shall not exceed the SHA1 hash size.");
+ mozilla::SHA1Sum::Hash hash;
+ mozilla::SHA1Sum sha1Sum;
+ sha1Sum.update(bytecode.begin(), bytecode.length());
+ sha1Sum.finish(hash);
+ memcpy(metadata_->debugHash, hash, sizeof(ModuleHash));
+ }
+
+ MOZ_ASSERT_IF(moduleEnv_->nameCustomSectionIndex, !!metadata_->namePayload);
+
+ // Metadata shouldn't be mutably modified after finishMetadata().
+ SharedMetadata metadata = metadata_;
+ metadata_ = nullptr;
+ return metadata;
+}
+
+SharedModule ModuleGenerator::finishModule(
+ const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* maybeTier2Listener) {
+ MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1);
+
+ UniqueCodeTier codeTier = finishCodeTier();
+ if (!codeTier) {
+ return nullptr;
+ }
+
+ JumpTables jumpTables;
+ if (!jumpTables.init(mode(), codeTier->segment(),
+ codeTier->metadata().codeRanges)) {
+ return nullptr;
+ }
+
+ // Copy over data from the Bytecode, which is going away at the end of
+ // compilation.
+
+ DataSegmentVector dataSegments;
+ if (!dataSegments.reserve(moduleEnv_->dataSegments.length())) {
+ return nullptr;
+ }
+ for (const DataSegmentEnv& srcSeg : moduleEnv_->dataSegments) {
+ MutableDataSegment dstSeg = js_new<DataSegment>();
+ if (!dstSeg) {
+ return nullptr;
+ }
+ if (!dstSeg->init(bytecode, srcSeg)) {
+ return nullptr;
+ }
+ dataSegments.infallibleAppend(std::move(dstSeg));
+ }
+
+ CustomSectionVector customSections;
+ if (!customSections.reserve(moduleEnv_->customSections.length())) {
+ return nullptr;
+ }
+ for (const CustomSectionEnv& srcSec : moduleEnv_->customSections) {
+ CustomSection sec;
+ if (!sec.name.append(bytecode.begin() + srcSec.nameOffset,
+ srcSec.nameLength)) {
+ return nullptr;
+ }
+ MutableBytes payload = js_new<ShareableBytes>();
+ if (!payload) {
+ return nullptr;
+ }
+ if (!payload->append(bytecode.begin() + srcSec.payloadOffset,
+ srcSec.payloadLength)) {
+ return nullptr;
+ }
+ sec.payload = std::move(payload);
+ customSections.infallibleAppend(std::move(sec));
+ }
+
+ if (moduleEnv_->nameCustomSectionIndex) {
+ metadata_->namePayload =
+ customSections[*moduleEnv_->nameCustomSectionIndex].payload;
+ }
+
+ SharedMetadata metadata = finishMetadata(bytecode.bytes);
+ if (!metadata) {
+ return nullptr;
+ }
+
+ MutableCode code =
+ js_new<Code>(std::move(codeTier), *metadata, std::move(jumpTables));
+ if (!code || !code->initialize(*linkData_)) {
+ return nullptr;
+ }
+
+ const ShareableBytes* debugBytecode = nullptr;
+ if (compilerEnv_->debugEnabled()) {
+ MOZ_ASSERT(mode() == CompileMode::Once);
+ MOZ_ASSERT(tier() == Tier::Debug);
+ debugBytecode = &bytecode;
+ }
+
+ // All the components are finished, so create the complete Module and start
+ // tier-2 compilation if requested.
+
+ MutableModule module = js_new<Module>(
+ *code, std::move(moduleEnv_->imports), std::move(moduleEnv_->exports),
+ std::move(dataSegments), std::move(moduleEnv_->elemSegments),
+ std::move(customSections), debugBytecode);
+ if (!module) {
+ return nullptr;
+ }
+
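+  // When serialization testing is enabled, round-trip the module through
+  // serialize/deserialize so that the deserialization path is exercised too.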
+ if (!isAsmJS() && compileArgs_->features.testSerialization) {
+ MOZ_RELEASE_ASSERT(mode() == CompileMode::Once &&
+ tier() == Tier::Serialized);
+
+ Bytes serializedBytes;
+ if (!module->serialize(*linkData_, &serializedBytes)) {
+ return nullptr;
+ }
+
+ MutableModule deserializedModule =
+ Module::deserialize(serializedBytes.begin(), serializedBytes.length());
+ if (!deserializedModule) {
+ return nullptr;
+ }
+ module = deserializedModule;
+
+ // Perform storeOptimizedEncoding here instead of below so we don't have to
+ // re-serialize the module.
+ if (maybeTier2Listener) {
+ maybeTier2Listener->storeOptimizedEncoding(serializedBytes.begin(),
+ serializedBytes.length());
+ maybeTier2Listener = nullptr;
+ }
+ }
+
+ if (mode() == CompileMode::Tier1) {
+ module->startTier2(*compileArgs_, bytecode, maybeTier2Listener);
+ } else if (tier() == Tier::Serialized && maybeTier2Listener) {
+ Bytes bytes;
+ if (module->serialize(*linkData_, &bytes)) {
+ maybeTier2Listener->storeOptimizedEncoding(bytes.begin(), bytes.length());
+ }
+ }
+
+ return module;
+}
+
+bool ModuleGenerator::finishTier2(const Module& module) {
+ MOZ_ASSERT(mode() == CompileMode::Tier2);
+ MOZ_ASSERT(tier() == Tier::Optimized);
+ MOZ_ASSERT(!compilerEnv_->debugEnabled());
+
+ if (cancelled_ && *cancelled_) {
+ return false;
+ }
+
+ UniqueCodeTier codeTier = finishCodeTier();
+ if (!codeTier) {
+ return false;
+ }
+
+ if (MOZ_UNLIKELY(JitOptions.wasmDelayTier2)) {
+ // Introduce an artificial delay when testing wasmDelayTier2, since we
+ // want to exercise both tier1 and tier2 code in this case.
+ ThisThread::SleepMilliseconds(500);
+ }
+
+ return module.finishTier2(*linkData_, std::move(codeTier));
+}
+
+void ModuleGenerator::warnf(const char* msg, ...) {
+ if (!warnings_) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, msg);
+ UniqueChars str(JS_vsmprintf(msg, ap));
+ va_end(ap);
+ if (!str) {
+ return;
+ }
+
+ (void)warnings_->append(std::move(str));
+}
+
+size_t CompiledCode::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t trapSitesSize = 0;
+ for (const TrapSiteVector& vec : trapSites) {
+ trapSitesSize += vec.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ return bytes.sizeOfExcludingThis(mallocSizeOf) +
+ codeRanges.sizeOfExcludingThis(mallocSizeOf) +
+ callSites.sizeOfExcludingThis(mallocSizeOf) +
+ callSiteTargets.sizeOfExcludingThis(mallocSizeOf) + trapSitesSize +
+ symbolicAccesses.sizeOfExcludingThis(mallocSizeOf) +
+ tryNotes.sizeOfExcludingThis(mallocSizeOf) +
+ codeLabels.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t CompileTask::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return lifo.sizeOfExcludingThis(mallocSizeOf) +
+ inputs.sizeOfExcludingThis(mallocSizeOf) +
+ output.sizeOfExcludingThis(mallocSizeOf);
+}
diff --git a/js/src/wasm/WasmGenerator.h b/js/src/wasm/WasmGenerator.h
new file mode 100644
index 0000000000..032158b7a7
--- /dev/null
+++ b/js/src/wasm/WasmGenerator.h
@@ -0,0 +1,264 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_generator_h
+#define wasm_generator_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "jit/MacroAssembler.h"
+#include "threading/ProtectedData.h"
+#include "vm/HelperThreadTask.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmValidate.h"
+
+namespace JS {
+class OptimizedEncodingListener;
+}
+
+namespace js {
+namespace wasm {
+
+struct CompileTask;
+using CompileTaskPtrVector = Vector<CompileTask*, 0, SystemAllocPolicy>;
+
+// FuncCompileInput contains the input for compiling a single function.
+
+struct FuncCompileInput {
+ const uint8_t* begin;
+ const uint8_t* end;
+ uint32_t index;
+ uint32_t lineOrBytecode;
+ Uint32Vector callSiteLineNums;
+
+ FuncCompileInput(uint32_t index, uint32_t lineOrBytecode,
+ const uint8_t* begin, const uint8_t* end,
+ Uint32Vector&& callSiteLineNums)
+ : begin(begin),
+ end(end),
+ index(index),
+ lineOrBytecode(lineOrBytecode),
+ callSiteLineNums(std::move(callSiteLineNums)) {}
+};
+
+using FuncCompileInputVector = Vector<FuncCompileInput, 8, SystemAllocPolicy>;
+
+// CompiledCode contains the resulting code and metadata for a set of compiled
+// input functions or stubs.
+
+struct CompiledCode {
+ Bytes bytes;
+ CodeRangeVector codeRanges;
+ CallSiteVector callSites;
+ CallSiteTargetVector callSiteTargets;
+ TrapSiteVectorArray trapSites;
+ SymbolicAccessVector symbolicAccesses;
+ jit::CodeLabelVector codeLabels;
+ StackMaps stackMaps;
+ TryNoteVector tryNotes;
+
+ [[nodiscard]] bool swap(jit::MacroAssembler& masm);
+
+ void clear() {
+ bytes.clear();
+ codeRanges.clear();
+ callSites.clear();
+ callSiteTargets.clear();
+ trapSites.clear();
+ symbolicAccesses.clear();
+ codeLabels.clear();
+ stackMaps.clear();
+ tryNotes.clear();
+ MOZ_ASSERT(empty());
+ }
+
+ bool empty() {
+ return bytes.empty() && codeRanges.empty() && callSites.empty() &&
+ callSiteTargets.empty() && trapSites.empty() &&
+ symbolicAccesses.empty() && codeLabels.empty() && tryNotes.empty() &&
+ stackMaps.empty();
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+// The CompileTaskState of a ModuleGenerator contains the mutable state shared
+// between helper threads executing CompileTasks. Each CompileTask started on a
+// helper thread eventually either ends up in the 'finished' list or increments
+// 'numFailed'.
+
+struct CompileTaskState {
+ HelperThreadLockData<CompileTaskPtrVector> finished_;
+ HelperThreadLockData<uint32_t> numFailed_;
+ HelperThreadLockData<UniqueChars> errorMessage_;
+ HelperThreadLockData<ConditionVariable> condVar_;
+
+ CompileTaskState() : numFailed_(0) {}
+ ~CompileTaskState() {
+ MOZ_ASSERT(finished_.refNoCheck().empty());
+ MOZ_ASSERT(!numFailed_.refNoCheck());
+ }
+
+ CompileTaskPtrVector& finished() { return finished_.ref(); }
+ uint32_t& numFailed() { return numFailed_.ref(); }
+ UniqueChars& errorMessage() { return errorMessage_.ref(); }
+ ConditionVariable& condVar() { return condVar_.ref(); }
+};
+
+// A CompileTask holds a batch of input functions that are to be compiled on a
+// helper thread as well as, eventually, the results of compilation.
+
+struct CompileTask : public HelperThreadTask {
+ const ModuleEnvironment& moduleEnv;
+ const CompilerEnvironment& compilerEnv;
+
+ CompileTaskState& state;
+ LifoAlloc lifo;
+ FuncCompileInputVector inputs;
+ CompiledCode output;
+
+ CompileTask(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv, CompileTaskState& state,
+ size_t defaultChunkSize)
+ : moduleEnv(moduleEnv),
+ compilerEnv(compilerEnv),
+ state(state),
+ lifo(defaultChunkSize) {}
+
+ virtual ~CompileTask() = default;
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+ ThreadType threadType() override;
+};
+
+// A ModuleGenerator encapsulates the creation of a wasm module. During the
+// lifetime of a ModuleGenerator, each function body is passed to
+// compileFuncDef() and compiled in batches. After all functions have been
+// compiled, finishModule() or finishTier2() must be called to complete the
+// compilation and extract the resulting wasm module.
+
+class MOZ_STACK_CLASS ModuleGenerator {
+ using CompileTaskVector = Vector<CompileTask, 0, SystemAllocPolicy>;
+ using CodeOffsetVector = Vector<jit::CodeOffset, 0, SystemAllocPolicy>;
+ struct CallFarJump {
+ uint32_t funcIndex;
+ jit::CodeOffset jump;
+ CallFarJump(uint32_t fi, jit::CodeOffset j) : funcIndex(fi), jump(j) {}
+ };
+ using CallFarJumpVector = Vector<CallFarJump, 0, SystemAllocPolicy>;
+
+ // Constant parameters
+ SharedCompileArgs const compileArgs_;
+ UniqueChars* const error_;
+ UniqueCharsVector* const warnings_;
+ const Atomic<bool>* const cancelled_;
+ ModuleEnvironment* const moduleEnv_;
+ CompilerEnvironment* const compilerEnv_;
+
+ // Data that is moved into the result of finish()
+ UniqueLinkData linkData_;
+ UniqueMetadataTier metadataTier_;
+ MutableMetadata metadata_;
+
+ // Data scoped to the ModuleGenerator's lifetime
+ CompileTaskState taskState_;
+ LifoAlloc lifo_;
+ jit::TempAllocator masmAlloc_;
+ jit::WasmMacroAssembler masm_;
+ Uint32Vector funcToCodeRange_;
+ uint32_t debugTrapCodeOffset_;
+ CallFarJumpVector callFarJumps_;
+ CallSiteTargetVector callSiteTargets_;
+ uint32_t lastPatchedCallSite_;
+ uint32_t startOfUnpatchedCallsites_;
+
+ // Parallel compilation
+ bool parallel_;
+ uint32_t outstanding_;
+ CompileTaskVector tasks_;
+ CompileTaskPtrVector freeTasks_;
+ CompileTask* currentTask_;
+ uint32_t batchedBytecode_;
+
+ // Assertions
+ DebugOnly<bool> finishedFuncDefs_;
+
+ bool allocateInstanceDataBytes(uint32_t bytes, uint32_t align,
+ uint32_t* instanceDataOffset);
+ bool allocateInstanceDataBytesN(uint32_t bytes, uint32_t align,
+ uint32_t count, uint32_t* instanceDataOffset);
+
+ bool funcIsCompiled(uint32_t funcIndex) const;
+ const CodeRange& funcCodeRange(uint32_t funcIndex) const;
+ bool linkCallSites();
+ void noteCodeRange(uint32_t codeRangeIndex, const CodeRange& codeRange);
+ bool linkCompiledCode(CompiledCode& code);
+ bool locallyCompileCurrentTask();
+ bool finishTask(CompileTask* task);
+ bool launchBatchCompile();
+ bool finishOutstandingTask();
+ bool finishCodegen();
+ bool finishMetadataTier();
+ UniqueCodeTier finishCodeTier();
+ SharedMetadata finishMetadata(const Bytes& bytecode);
+
+ bool isAsmJS() const { return moduleEnv_->isAsmJS(); }
+ Tier tier() const { return compilerEnv_->tier(); }
+ CompileMode mode() const { return compilerEnv_->mode(); }
+ bool debugEnabled() const { return compilerEnv_->debugEnabled(); }
+
+ void warnf(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);
+
+ public:
+ ModuleGenerator(const CompileArgs& args, ModuleEnvironment* moduleEnv,
+ CompilerEnvironment* compilerEnv,
+ const Atomic<bool>* cancelled, UniqueChars* error,
+ UniqueCharsVector* warnings);
+ ~ModuleGenerator();
+ [[nodiscard]] bool init(Metadata* maybeAsmJSMetadata = nullptr);
+
+ // Before finishFuncDefs() is called, compileFuncDef() must be called once
+ // for each funcIndex in the range [0, env->numFuncDefs()).
+
+ [[nodiscard]] bool compileFuncDef(
+ uint32_t funcIndex, uint32_t lineOrBytecode, const uint8_t* begin,
+ const uint8_t* end, Uint32Vector&& callSiteLineNums = Uint32Vector());
+
+ // Must be called after the last compileFuncDef() and before finishModule()
+ // or finishTier2().
+
+ [[nodiscard]] bool finishFuncDefs();
+
+ // If env->mode is Once or Tier1, finishModule() must be called to generate
+ // a new Module. Otherwise, if env->mode is Tier2, finishTier2() must be
+ // called to augment the given Module with tier 2 code.
+
+ SharedModule finishModule(
+ const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* maybeTier2Listener = nullptr);
+ [[nodiscard]] bool finishTier2(const Module& module);
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_generator_h
diff --git a/js/src/wasm/WasmInitExpr.cpp b/js/src/wasm/WasmInitExpr.cpp
new file mode 100644
index 0000000000..91a9cb3b69
--- /dev/null
+++ b/js/src/wasm/WasmInitExpr.cpp
@@ -0,0 +1,664 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmInitExpr.h"
+
+#include "mozilla/Maybe.h"
+
+#include "js/Value.h"
+
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmUtility.h"
+#include "wasm/WasmValidate.h"
+
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::wasm;
+
+static bool ValidateInitExpr(Decoder& d, ModuleEnvironment* env,
+ ValType expected,
+ uint32_t maxInitializedGlobalsIndexPlus1,
+ Maybe<LitVal>* literal) {
+ ValidatingOpIter iter(*env, d, ValidatingOpIter::InitExpr);
+
+ if (!iter.startInitExpr(expected, maxInitializedGlobalsIndexPlus1)) {
+ return false;
+ }
+
+  // Perform trivial constant recovery. This is done so that codegen can
+  // generate optimal code for global.get on immutable globals with simple
+  // initializers.
+  //
+  // We simply update the last-seen literal value while validating an
+  // instruction that produces a literal value, and clear it when validating
+  // an instruction that produces a dynamic value. The last value seen is the
+  // literal for this init expression, if any. This is correct because no
+  // drops or control flow are allowed in init expressions.
+ *literal = Nothing();
+
+ while (true) {
+ OpBytes op;
+ if (!iter.readOp(&op)) {
+ return false;
+ }
+
+#if defined(ENABLE_WASM_EXTENDED_CONST) || defined(ENABLE_WASM_GC)
+ Nothing nothing;
+#endif
+ NothingVector nothings{};
+ ResultType unusedType;
+
+ switch (op.b0) {
+ case uint16_t(Op::End): {
+ LabelKind kind;
+ if (!iter.readEnd(&kind, &unusedType, &nothings, &nothings)) {
+ return false;
+ }
+ MOZ_ASSERT(kind == LabelKind::Body);
+ iter.popEnd();
+ if (iter.controlStackEmpty()) {
+ return iter.endInitExpr();
+ }
+ break;
+ }
+ case uint16_t(Op::GlobalGet): {
+ uint32_t index;
+ if (!iter.readGetGlobal(&index)) {
+ return false;
+ }
+ *literal = Nothing();
+ break;
+ }
+ case uint16_t(Op::I32Const): {
+ int32_t c;
+ if (!iter.readI32Const(&c)) {
+ return false;
+ }
+ *literal = Some(LitVal(uint32_t(c)));
+ break;
+ }
+ case uint16_t(Op::I64Const): {
+ int64_t c;
+ if (!iter.readI64Const(&c)) {
+ return false;
+ }
+ *literal = Some(LitVal(uint64_t(c)));
+ break;
+ }
+ case uint16_t(Op::F32Const): {
+ float c;
+ if (!iter.readF32Const(&c)) {
+ return false;
+ }
+ *literal = Some(LitVal(c));
+ break;
+ }
+ case uint16_t(Op::F64Const): {
+ double c;
+ if (!iter.readF64Const(&c)) {
+ return false;
+ }
+ *literal = Some(LitVal(c));
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case uint16_t(Op::SimdPrefix): {
+ if (!env->simdAvailable()) {
+ return d.fail("v128 not enabled");
+ }
+ if (op.b1 != uint32_t(SimdOp::V128Const)) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ V128 c;
+ if (!iter.readV128Const(&c)) {
+ return false;
+ }
+ *literal = Some(LitVal(c));
+ break;
+ }
+#endif
+ case uint16_t(Op::RefFunc): {
+ uint32_t funcIndex;
+ if (!iter.readRefFunc(&funcIndex)) {
+ return false;
+ }
+ env->declareFuncExported(funcIndex, /* eager */ false,
+ /* canRefFunc */ true);
+ *literal = Nothing();
+ break;
+ }
+ case uint16_t(Op::RefNull): {
+ RefType type;
+ if (!iter.readRefNull(&type)) {
+ return false;
+ }
+ *literal = Some(LitVal(ValType(type)));
+ break;
+ }
+#ifdef ENABLE_WASM_EXTENDED_CONST
+ case uint16_t(Op::I32Add):
+ case uint16_t(Op::I32Sub):
+ case uint16_t(Op::I32Mul): {
+ if (!env->extendedConstEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ if (!iter.readBinary(ValType::I32, &nothing, &nothing)) {
+ return false;
+ }
+ *literal = Nothing();
+ break;
+ }
+ case uint16_t(Op::I64Add):
+ case uint16_t(Op::I64Sub):
+ case uint16_t(Op::I64Mul): {
+ if (!env->extendedConstEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ if (!iter.readBinary(ValType::I64, &nothing, &nothing)) {
+ return false;
+ }
+ *literal = Nothing();
+ break;
+ }
+#endif
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::GcPrefix): {
+ if (!env->gcEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew): {
+ uint32_t typeIndex;
+ if (!iter.readStructNew(&typeIndex, &nothings)) {
+ return false;
+ }
+ break;
+ }
+ case uint32_t(GcOp::StructNewDefault): {
+ uint32_t typeIndex;
+ if (!iter.readStructNewDefault(&typeIndex)) {
+ return false;
+ }
+ break;
+ }
+ case uint32_t(GcOp::ArrayNew): {
+ uint32_t typeIndex;
+ if (!iter.readArrayNew(&typeIndex, &nothing, &nothing)) {
+ return false;
+ }
+ break;
+ }
+ case uint32_t(GcOp::ArrayNewFixed): {
+ uint32_t typeIndex, len;
+ if (!iter.readArrayNewFixed(&typeIndex, &len, &nothings)) {
+ return false;
+ }
+ break;
+ }
+ case uint32_t(GcOp::ArrayNewDefault): {
+ uint32_t typeIndex;
+ if (!iter.readArrayNewDefault(&typeIndex, &nothing)) {
+ return false;
+ }
+ break;
+ }
+ default: {
+ return iter.unrecognizedOpcode(&op);
+ }
+ }
+ *literal = Nothing();
+ break;
+ }
+#endif
+ default: {
+ return iter.unrecognizedOpcode(&op);
+ }
+ }
+ }
+}
+
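+// Evaluates an already-validated constant expression by interpreting its
+// bytecode against a given instance, leaving a single Val as the result.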
+class MOZ_STACK_CLASS InitExprInterpreter {
+ public:
+ explicit InitExprInterpreter(JSContext* cx,
+ Handle<WasmInstanceObject*> instanceObj)
+ : features(FeatureArgs::build(cx, FeatureOptions())),
+ stack(cx),
+ instanceObj(cx, instanceObj),
+ types(instanceObj->instance().metadata().types) {}
+
+ bool evaluate(JSContext* cx, Decoder& d);
+
+ Val result() {
+ MOZ_ASSERT(stack.length() == 1);
+ return stack.popCopy();
+ }
+
+ private:
+ FeatureArgs features;
+ RootedValVectorN<48> stack;
+ Rooted<WasmInstanceObject*> instanceObj;
+ SharedTypeContext types;
+
+ Instance& instance() { return instanceObj->instance(); }
+
+ [[nodiscard]] bool pushI32(int32_t c) {
+ return stack.append(Val(uint32_t(c)));
+ }
+ [[nodiscard]] bool pushI64(int64_t c) {
+ return stack.append(Val(uint64_t(c)));
+ }
+ [[nodiscard]] bool pushF32(float c) { return stack.append(Val(c)); }
+ [[nodiscard]] bool pushF64(double c) { return stack.append(Val(c)); }
+ [[nodiscard]] bool pushV128(V128 c) { return stack.append(Val(c)); }
+ [[nodiscard]] bool pushRef(ValType type, AnyRef ref) {
+ return stack.append(Val(type, ref));
+ }
+ [[nodiscard]] bool pushFuncRef(HandleFuncRef ref) {
+ return stack.append(Val(RefType::func(), ref));
+ }
+
+#if defined(ENABLE_WASM_EXTENDED_CONST) || defined(ENABLE_WASM_GC)
+ int32_t popI32() {
+ uint32_t result = stack.back().i32();
+ stack.popBack();
+ return int32_t(result);
+ }
+#endif
+#ifdef ENABLE_WASM_EXTENDED_CONST
+ int64_t popI64() {
+ uint64_t result = stack.back().i64();
+ stack.popBack();
+ return int64_t(result);
+ }
+#endif
+
+ bool evalGlobalGet(JSContext* cx, uint32_t index) {
+ RootedVal val(cx);
+ instance().constantGlobalGet(index, &val);
+ return stack.append(val);
+ }
+ bool evalI32Const(int32_t c) { return pushI32(c); }
+ bool evalI64Const(int64_t c) { return pushI64(c); }
+ bool evalF32Const(float c) { return pushF32(c); }
+ bool evalF64Const(double c) { return pushF64(c); }
+ bool evalV128Const(V128 c) { return pushV128(c); }
+ bool evalRefFunc(JSContext* cx, uint32_t funcIndex) {
+ RootedFuncRef func(cx, FuncRef::fromJSFunction(nullptr));
+ if (!instance().constantRefFunc(funcIndex, &func)) {
+ return false;
+ }
+ return pushFuncRef(func);
+ }
+ bool evalRefNull(RefType type) { return pushRef(type, AnyRef::null()); }
+#ifdef ENABLE_WASM_EXTENDED_CONST
+ bool evalI32Add() {
+ uint32_t b = popI32();
+ uint32_t a = popI32();
+ return pushI32(a + b);
+ }
+ bool evalI32Sub() {
+ uint32_t b = popI32();
+ uint32_t a = popI32();
+ return pushI32(a - b);
+ }
+ bool evalI32Mul() {
+ uint32_t b = popI32();
+ uint32_t a = popI32();
+ return pushI32(a * b);
+ }
+ bool evalI64Add() {
+ uint64_t b = popI64();
+ uint64_t a = popI64();
+ return pushI64(a + b);
+ }
+ bool evalI64Sub() {
+ uint64_t b = popI64();
+ uint64_t a = popI64();
+ return pushI64(a - b);
+ }
+ bool evalI64Mul() {
+ uint64_t b = popI64();
+ uint64_t a = popI64();
+ return pushI64(a * b);
+ }
+#endif // ENABLE_WASM_EXTENDED_CONST
+#ifdef ENABLE_WASM_GC
+ bool evalStructNew(JSContext* cx, uint32_t typeIndex) {
+ const TypeDef& typeDef = instance().metadata().types->type(typeIndex);
+ const StructType& structType = typeDef.structType();
+
+ Rooted<WasmStructObject*> structObj(
+ cx, instance().constantStructNewDefault(cx, typeIndex));
+ if (!structObj) {
+ return false;
+ }
+
+ uint32_t numFields = structType.fields_.length();
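+    // The field values were pushed in order, so pop them in reverse and store
+    // them from the last field back to the first.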
+ for (uint32_t forwardIndex = 0; forwardIndex < numFields; forwardIndex++) {
+ uint32_t reverseIndex = numFields - forwardIndex - 1;
+ const Val& val = stack.back();
+ structObj->storeVal(val, reverseIndex);
+ stack.popBack();
+ }
+
+ return pushRef(RefType::fromTypeDef(&typeDef, false),
+ AnyRef::fromJSObject(structObj));
+ }
+
+ bool evalStructNewDefault(JSContext* cx, uint32_t typeIndex) {
+ Rooted<WasmStructObject*> structObj(
+ cx, instance().constantStructNewDefault(cx, typeIndex));
+ if (!structObj) {
+ return false;
+ }
+
+ const TypeDef& typeDef = instance().metadata().types->type(typeIndex);
+ return pushRef(RefType::fromTypeDef(&typeDef, false),
+ AnyRef::fromJSObject(structObj));
+ }
+
+ bool evalArrayNew(JSContext* cx, uint32_t typeIndex) {
+ uint32_t numElements = popI32();
+ Rooted<WasmArrayObject*> arrayObj(
+ cx, instance().constantArrayNewDefault(cx, typeIndex, numElements));
+ if (!arrayObj) {
+ return false;
+ }
+
+ const Val& val = stack.back();
+ arrayObj->fillVal(val, 0, numElements);
+ stack.popBack();
+
+ const TypeDef& typeDef = instance().metadata().types->type(typeIndex);
+ return pushRef(RefType::fromTypeDef(&typeDef, false),
+ AnyRef::fromJSObject(arrayObj));
+ }
+
+ bool evalArrayNewDefault(JSContext* cx, uint32_t typeIndex) {
+ uint32_t numElements = popI32();
+ Rooted<WasmArrayObject*> arrayObj(
+ cx, instance().constantArrayNewDefault(cx, typeIndex, numElements));
+ if (!arrayObj) {
+ return false;
+ }
+
+ const TypeDef& typeDef = instance().metadata().types->type(typeIndex);
+ return pushRef(RefType::fromTypeDef(&typeDef, false),
+ AnyRef::fromJSObject(arrayObj));
+ }
+
+ bool evalArrayNewFixed(JSContext* cx, uint32_t typeIndex,
+ uint32_t numElements) {
+ Rooted<WasmArrayObject*> arrayObj(
+ cx, instance().constantArrayNewDefault(cx, typeIndex, numElements));
+ if (!arrayObj) {
+ return false;
+ }
+
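+    // The element values were pushed in order, so pop them in reverse and
+    // store them from the last element back to the first.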
+ for (uint32_t forwardIndex = 0; forwardIndex < numElements;
+ forwardIndex++) {
+ uint32_t reverseIndex = numElements - forwardIndex - 1;
+ const Val& val = stack.back();
+ arrayObj->storeVal(val, reverseIndex);
+ stack.popBack();
+ }
+
+ const TypeDef& typeDef = instance().metadata().types->type(typeIndex);
+ return pushRef(RefType::fromTypeDef(&typeDef, false),
+ AnyRef::fromJSObject(arrayObj));
+ }
+#endif // ENABLE_WASM_GC
+};
+
+bool InitExprInterpreter::evaluate(JSContext* cx, Decoder& d) {
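+// CHECK evaluates the given expression; on failure it propagates false,
+// otherwise it breaks out of the switch to decode the next opcode.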
+#define CHECK(c) \
+ if (!(c)) return false; \
+ break
+
+ while (true) {
+ OpBytes op;
+ if (!d.readOp(&op)) {
+ return false;
+ }
+
+ switch (op.b0) {
+ case uint16_t(Op::End): {
+ return true;
+ }
+ case uint16_t(Op::GlobalGet): {
+ uint32_t index;
+ if (!d.readGlobalIndex(&index)) {
+ return false;
+ }
+ CHECK(evalGlobalGet(cx, index));
+ }
+ case uint16_t(Op::I32Const): {
+ int32_t c;
+ if (!d.readI32Const(&c)) {
+ return false;
+ }
+ CHECK(evalI32Const(c));
+ }
+ case uint16_t(Op::I64Const): {
+ int64_t c;
+ if (!d.readI64Const(&c)) {
+ return false;
+ }
+ CHECK(evalI64Const(c));
+ }
+ case uint16_t(Op::F32Const): {
+ float c;
+ if (!d.readF32Const(&c)) {
+ return false;
+ }
+ CHECK(evalF32Const(c));
+ }
+ case uint16_t(Op::F64Const): {
+ double c;
+ if (!d.readF64Const(&c)) {
+ return false;
+ }
+ CHECK(evalF64Const(c));
+ }
+#ifdef ENABLE_WASM_SIMD
+ case uint16_t(Op::SimdPrefix): {
+ MOZ_RELEASE_ASSERT(op.b1 == uint32_t(SimdOp::V128Const));
+ V128 c;
+ if (!d.readV128Const(&c)) {
+ return false;
+ }
+ CHECK(evalV128Const(c));
+ }
+#endif
+ case uint16_t(Op::RefFunc): {
+ uint32_t funcIndex;
+ if (!d.readFuncIndex(&funcIndex)) {
+ return false;
+ }
+ CHECK(evalRefFunc(cx, funcIndex));
+ }
+ case uint16_t(Op::RefNull): {
+ RefType type;
+ if (!d.readRefNull(*types, features, &type)) {
+ return false;
+ }
+ CHECK(evalRefNull(type));
+ }
+#ifdef ENABLE_WASM_EXTENDED_CONST
+ case uint16_t(Op::I32Add): {
+ if (!d.readBinary()) {
+ return false;
+ }
+ CHECK(evalI32Add());
+ }
+ case uint16_t(Op::I32Sub): {
+ if (!d.readBinary()) {
+ return false;
+ }
+ CHECK(evalI32Sub());
+ }
+ case uint16_t(Op::I32Mul): {
+ if (!d.readBinary()) {
+ return false;
+ }
+ CHECK(evalI32Mul());
+ }
+ case uint16_t(Op::I64Add): {
+ if (!d.readBinary()) {
+ return false;
+ }
+ CHECK(evalI64Add());
+ }
+ case uint16_t(Op::I64Sub): {
+ if (!d.readBinary()) {
+ return false;
+ }
+ CHECK(evalI64Sub());
+ }
+ case uint16_t(Op::I64Mul): {
+ if (!d.readBinary()) {
+ return false;
+ }
+ CHECK(evalI64Mul());
+ }
+#endif
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::GcPrefix): {
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew): {
+ uint32_t typeIndex;
+ if (!d.readTypeIndex(&typeIndex)) {
+ return false;
+ }
+ CHECK(evalStructNew(cx, typeIndex));
+ }
+ case uint32_t(GcOp::StructNewDefault): {
+ uint32_t typeIndex;
+ if (!d.readTypeIndex(&typeIndex)) {
+ return false;
+ }
+ CHECK(evalStructNewDefault(cx, typeIndex));
+ }
+ case uint32_t(GcOp::ArrayNew): {
+ uint32_t typeIndex;
+ if (!d.readTypeIndex(&typeIndex)) {
+ return false;
+ }
+ CHECK(evalArrayNew(cx, typeIndex));
+ }
+ case uint32_t(GcOp::ArrayNewFixed): {
+ uint32_t typeIndex, len;
+ if (!d.readTypeIndex(&typeIndex)) {
+ return false;
+ }
+ if (!d.readVarU32(&len)) {
+ return false;
+ }
+ CHECK(evalArrayNewFixed(cx, typeIndex, len));
+ }
+ case uint32_t(GcOp::ArrayNewDefault): {
+ uint32_t typeIndex;
+ if (!d.readTypeIndex(&typeIndex)) {
+ return false;
+ }
+ CHECK(evalArrayNewDefault(cx, typeIndex));
+ }
+ default: {
+ MOZ_CRASH();
+ }
+ }
+ break;
+ }
+#endif
+ default: {
+ MOZ_CRASH();
+ }
+ }
+ }
+
+#undef CHECK
+}
+
+bool InitExpr::decodeAndValidate(Decoder& d, ModuleEnvironment* env,
+ ValType expected,
+ uint32_t maxInitializedGlobalsIndexPlus1,
+ InitExpr* expr) {
+ Maybe<LitVal> literal = Nothing();
+ const uint8_t* exprStart = d.currentPosition();
+ if (!ValidateInitExpr(d, env, expected, maxInitializedGlobalsIndexPlus1,
+ &literal)) {
+ return false;
+ }
+ const uint8_t* exprEnd = d.currentPosition();
+ size_t exprSize = exprEnd - exprStart;
+
+ MOZ_ASSERT(expr->kind_ == InitExprKind::None);
+ expr->type_ = expected;
+
+ if (literal) {
+ expr->kind_ = InitExprKind::Literal;
+ expr->literal_ = *literal;
+ return true;
+ }
+
+ expr->kind_ = InitExprKind::Variable;
+ return expr->bytecode_.reserve(exprSize) &&
+ expr->bytecode_.append(exprStart, exprEnd);
+}
+
+bool InitExpr::evaluate(JSContext* cx, Handle<WasmInstanceObject*> instanceObj,
+ MutableHandleVal result) const {
+ MOZ_ASSERT(kind_ != InitExprKind::None);
+
+ if (isLiteral()) {
+ result.set(Val(literal()));
+ return true;
+ }
+
+ UniqueChars error;
+ Decoder d(bytecode_.begin(), bytecode_.end(), 0, &error);
+ InitExprInterpreter interp(cx, instanceObj);
+ if (!interp.evaluate(cx, d)) {
+    // This expression should have been validated already, so the only possible
+    // failure here is OOM, which is reported by the absence of an error
+    // message.
+ MOZ_RELEASE_ASSERT(!error);
+ return false;
+ }
+
+ result.set(interp.result());
+ return true;
+}
+
+bool InitExpr::clone(const InitExpr& src) {
+ kind_ = src.kind_;
+ MOZ_ASSERT(bytecode_.empty());
+ if (!bytecode_.appendAll(src.bytecode_)) {
+ return false;
+ }
+ literal_ = src.literal_;
+ type_ = src.type_;
+ return true;
+}
+
+size_t InitExpr::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return bytecode_.sizeOfExcludingThis(mallocSizeOf);
+}
diff --git a/js/src/wasm/WasmInitExpr.h b/js/src/wasm/WasmInitExpr.h
new file mode 100644
index 0000000000..385f1c6b66
--- /dev/null
+++ b/js/src/wasm/WasmInitExpr.h
@@ -0,0 +1,101 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_initexpr_h
+#define wasm_initexpr_h
+
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmValType.h"
+#include "wasm/WasmValue.h"
+
+namespace js {
+namespace wasm {
+
+class Decoder;
+struct ModuleEnvironment;
+
+enum class InitExprKind {
+ None,
+ Literal,
+ Variable,
+};
+
+// An InitExpr describes a deferred initializer expression, used to initialize
+// a global or a table element offset. Such expressions are created during
+// decoding and actually executed on module instantiation.
+
+class InitExpr {
+ InitExprKind kind_;
+ // The bytecode for this constant expression if this is not a literal.
+ Bytes bytecode_;
+ // The value if this is a literal.
+ LitVal literal_;
+ // The value type of this constant expression in either case.
+ ValType type_;
+
+ public:
+ InitExpr() : kind_(InitExprKind::None) {}
+
+ explicit InitExpr(LitVal literal)
+ : kind_(InitExprKind::Literal),
+ literal_(literal),
+ type_(literal.type()) {}
+
+ // Decode and validate a constant expression given at the current
+ // position of the decoder. Upon failure, the decoder contains the failure
+ // message or else the failure was an OOM.
+ static bool decodeAndValidate(Decoder& d, ModuleEnvironment* env,
+ ValType expected,
+ uint32_t maxInitializedGlobalsIndexPlus1,
+ InitExpr* expr);
+
+  // Evaluate the constant expression in the given context. This may only
+  // fail due to an OOM, as all InitExprs are required to have been validated.
+ bool evaluate(JSContext* cx, Handle<WasmInstanceObject*> instanceObj,
+ MutableHandleVal result) const;
+
+ bool isLiteral() const { return kind_ == InitExprKind::Literal; }
+
+ // Gets the result of this expression if it was determined to be a literal.
+ LitVal literal() const {
+ MOZ_ASSERT(isLiteral());
+ return literal_;
+ }
+
+ // Get the type of the resulting value of this expression.
+ ValType type() const { return type_; }
+
+ // Allow moving, but not implicit copying
+ InitExpr(const InitExpr&) = delete;
+ InitExpr& operator=(const InitExpr&) = delete;
+ InitExpr(InitExpr&&) = default;
+ InitExpr& operator=(InitExpr&&) = default;
+
+ // Allow explicit cloning
+ [[nodiscard]] bool clone(const InitExpr& src);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(InitExpr);
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_initexpr_h
diff --git a/js/src/wasm/WasmInstance-inl.h b/js/src/wasm/WasmInstance-inl.h
new file mode 100644
index 0000000000..519e718cf9
--- /dev/null
+++ b/js/src/wasm/WasmInstance-inl.h
@@ -0,0 +1,32 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef wasm_Instance_inl_h
+#define wasm_Instance_inl_h
+
+#include "wasm/WasmInstance.h"
+
+#include "wasm/WasmCode.h"
+
+namespace js {
+namespace wasm {
+
+const CodeTier& Instance::code(Tier t) const { return code_->codeTier(t); }
+
+uint8_t* Instance::codeBase(Tier t) const { return code_->segment(t).base(); }
+
+const MetadataTier& Instance::metadata(Tier t) const {
+ return code_->metadata(t);
+}
+
+const Metadata& Instance::metadata() const { return code_->metadata(); }
+
+bool Instance::isAsmJS() const { return metadata().isAsmJS(); }
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_Instance_inl_h
diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
new file mode 100644
index 0000000000..557ed4e988
--- /dev/null
+++ b/js/src/wasm/WasmInstance.cpp
@@ -0,0 +1,2759 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmInstance-inl.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "jsmath.h"
+
+#include "gc/Marking.h"
+#include "jit/AtomicOperations.h"
+#include "jit/Disassemble.h"
+#include "jit/JitCommon.h"
+#include "jit/JitRuntime.h"
+#include "jit/Registers.h"
+#include "js/ForOfIterator.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Stack.h" // JS::NativeStackLimitMin
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/BigIntType.h"
+#include "vm/Compartment.h"
+#include "vm/ErrorObject.h"
+#include "vm/Interpreter.h"
+#include "vm/Iteration.h"
+#include "vm/JitActivation.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmDebugFrame.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmMemory.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValType.h"
+#include "wasm/WasmValue.h"
+
+#include "gc/StoreBuffer-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::BitwiseCast;
+using mozilla::CheckedUint32;
+using mozilla::DebugOnly;
+
+// Instance must be aligned at least as much as any of the integer, float,
+// or SIMD values that we'd like to store in it.
+static_assert(alignof(Instance) >=
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegisters::RegisterContent)));
+
+// The instance data area must be aligned at least as much as the Instance
+// itself. Together with the assertion above, this is sufficient for all data
+// types we care about, including SIMD values.
+static_assert(Instance::offsetOfData() % alignof(Instance) == 0);
+
+// We want the memory base to be the first field, and accessible with no
+// offset. This incidentally is also an assertion that there is no superclass
+// with fields.
+static_assert(Instance::offsetOfMemoryBase() == 0);
+
+// We want instance fields that are commonly accessed by the JIT to have
+// compact encodings. A limit of less than 128 bytes is chosen to fit within
+// the signed 8-bit mod r/m x86 encoding.
+static_assert(Instance::offsetOfLastCommonJitField() < 128);
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Functions and invocation.
+
+TypeDefInstanceData* Instance::typeDefInstanceData(uint32_t typeIndex) const {
+ TypeDefInstanceData* instanceData =
+ (TypeDefInstanceData*)(data() + metadata().typeDefsOffsetStart);
+ return &instanceData[typeIndex];
+}
+
+const void* Instance::addressOfGlobalCell(const GlobalDesc& global) const {
+ const void* cell = data() + global.offset();
+ // Indirect globals store a pointer to their cell in the instance global
+ // data. Dereference it to find the real cell.
+ if (global.isIndirect()) {
+ cell = *(const void**)cell;
+ }
+ return cell;
+}
+
+FuncImportInstanceData& Instance::funcImportInstanceData(const FuncImport& fi) {
+ return *(FuncImportInstanceData*)(data() + fi.instanceOffset());
+}
+
+TableInstanceData& Instance::tableInstanceData(uint32_t tableIndex) const {
+ TableInstanceData* instanceData =
+ (TableInstanceData*)(data() + metadata().tablesOffsetStart);
+ return instanceData[tableIndex];
+}
+
+TagInstanceData& Instance::tagInstanceData(uint32_t tagIndex) const {
+ TagInstanceData* instanceData =
+ (TagInstanceData*)(data() + metadata().tagsOffsetStart);
+ return instanceData[tagIndex];
+}
+
+static bool UnpackResults(JSContext* cx, const ValTypeVector& resultTypes,
+ const Maybe<char*> stackResultsArea, uint64_t* argv,
+ MutableHandleValue rval) {
+ if (!stackResultsArea) {
+ MOZ_ASSERT(resultTypes.length() <= 1);
+ // Result is either one scalar value to unpack to a wasm value, or
+ // an ignored value for a zero-valued function.
+ if (resultTypes.length() == 1) {
+ return ToWebAssemblyValue(cx, rval, resultTypes[0], argv, true);
+ }
+ return true;
+ }
+
+ MOZ_ASSERT(stackResultsArea.isSome());
+ Rooted<ArrayObject*> array(cx);
+ if (!IterableToArray(cx, rval, &array)) {
+ return false;
+ }
+
+ if (resultTypes.length() != array->length()) {
+ UniqueChars expected(JS_smprintf("%zu", resultTypes.length()));
+ UniqueChars got(JS_smprintf("%u", array->length()));
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_WRONG_NUMBER_OF_VALUES, expected.get(),
+ got.get());
+ return false;
+ }
+
+ DebugOnly<uint64_t> previousOffset = ~(uint64_t)0;
+
+ ABIResultIter iter(ResultType::Vector(resultTypes));
+ // The values are converted in the order they are pushed on the
+ // abstract WebAssembly stack; switch to iterate in push order.
+ while (!iter.done()) {
+ iter.next();
+ }
+ DebugOnly<bool> seenRegisterResult = false;
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ MOZ_ASSERT(!seenRegisterResult);
+ // Use rval as a scratch area to hold the extracted result.
+ rval.set(array->getDenseElement(iter.index()));
+ if (result.inRegister()) {
+ // Currently, if a function type has results, there can be only
+ // one register result. If there is only one result, it is
+ // returned as a scalar and not an iterable, so we don't get here.
+ // If there are multiple results, we extract the register result
+      // and set `argv[0]` to the extracted result, to be returned by
+ // register in the stub. The register result follows any stack
+ // results, so this preserves conversion order.
+ if (!ToWebAssemblyValue(cx, rval, result.type(), argv, true)) {
+ return false;
+ }
+ seenRegisterResult = true;
+ continue;
+ }
+ uint32_t result_size = result.size();
+ MOZ_ASSERT(result_size == 4 || result_size == 8);
+#ifdef DEBUG
+ if (previousOffset == ~(uint64_t)0) {
+ previousOffset = (uint64_t)result.stackOffset();
+ } else {
+ MOZ_ASSERT(previousOffset - (uint64_t)result_size ==
+ (uint64_t)result.stackOffset());
+ previousOffset -= (uint64_t)result_size;
+ }
+#endif
+ char* loc = stackResultsArea.value() + result.stackOffset();
+ if (!ToWebAssemblyValue(cx, rval, result.type(), loc, result_size == 8)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
+ unsigned argc, uint64_t* argv) {
+ AssertRealmUnchanged aru(cx);
+
+ Tier tier = code().bestTier();
+
+ const FuncImport& fi = metadata(tier).funcImports[funcImportIndex];
+ const FuncType& funcType = metadata().getFuncImportType(fi);
+
+ ArgTypeVector argTypes(funcType);
+ InvokeArgs args(cx);
+ if (!args.init(cx, argTypes.lengthWithoutStackResults())) {
+ return false;
+ }
+
+ if (funcType.hasUnexposableArgOrRet()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+
+ MOZ_ASSERT(argTypes.lengthWithStackResults() == argc);
+ Maybe<char*> stackResultPointer;
+ size_t lastBoxIndexPlusOne = 0;
+ {
+ JS::AutoAssertNoGC nogc;
+ for (size_t i = 0; i < argc; i++) {
+ const void* rawArgLoc = &argv[i];
+ if (argTypes.isSyntheticStackResultPointerArg(i)) {
+ stackResultPointer = Some(*(char**)rawArgLoc);
+ continue;
+ }
+ size_t naturalIndex = argTypes.naturalIndex(i);
+ ValType type = funcType.args()[naturalIndex];
+      // Avoid creating boxed values here so as not to trigger GC.
+ if (ToJSValueMayGC(type)) {
+ lastBoxIndexPlusOne = i + 1;
+ continue;
+ }
+ MutableHandleValue argValue = args[naturalIndex];
+ if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
+ return false;
+ }
+ }
+ }
+
+  // Visit the arguments that require allocation in a second loop, after the
+  // rest of the arguments have been converted.
+ for (size_t i = 0; i < lastBoxIndexPlusOne; i++) {
+ if (argTypes.isSyntheticStackResultPointerArg(i)) {
+ continue;
+ }
+ const void* rawArgLoc = &argv[i];
+ size_t naturalIndex = argTypes.naturalIndex(i);
+ ValType type = funcType.args()[naturalIndex];
+ if (!ToJSValueMayGC(type)) {
+ continue;
+ }
+ MOZ_ASSERT(!type.isRefRepr());
+ // The conversions are safe here because source values are not references
+ // and will not be moved.
+ MutableHandleValue argValue = args[naturalIndex];
+ if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
+ return false;
+ }
+ }
+
+ FuncImportInstanceData& import = funcImportInstanceData(fi);
+ Rooted<JSObject*> importCallable(cx, import.callable);
+ MOZ_ASSERT(cx->realm() == importCallable->nonCCWRealm());
+
+ RootedValue fval(cx, ObjectValue(*importCallable));
+ RootedValue thisv(cx, UndefinedValue());
+ RootedValue rval(cx);
+ if (!Call(cx, fval, thisv, args, &rval)) {
+ return false;
+ }
+
+ if (!UnpackResults(cx, funcType.results(), stackResultPointer, argv, &rval)) {
+ return false;
+ }
+
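+  // The call and result conversion succeeded.  The remainder of this function
+  // decides whether the import's exit stub can be patched from the generic
+  // interpreter exit to the faster JIT exit: the callee must be a
+  // JIT-compiled JSFunction whose signature admits a JIT exit.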
+ if (!JitOptions.enableWasmJitExit) {
+ return true;
+ }
+
+ // The import may already have become optimized.
+ for (auto t : code().tiers()) {
+ void* jitExitCode = codeBase(t) + fi.jitExitCodeOffset();
+ if (import.code == jitExitCode) {
+ return true;
+ }
+ }
+
+ void* jitExitCode = codeBase(tier) + fi.jitExitCodeOffset();
+
+ if (!importCallable->is<JSFunction>()) {
+ return true;
+ }
+
+ // Test if the function is JIT compiled.
+ if (!importCallable->as<JSFunction>().hasBytecode()) {
+ return true;
+ }
+
+ JSScript* script = importCallable->as<JSFunction>().nonLazyScript();
+ if (!script->hasJitScript()) {
+ return true;
+ }
+
+ // Skip if the function does not have a signature that allows for a JIT exit.
+ if (!funcType.canHaveJitExit()) {
+ return true;
+ }
+
+ // Let's optimize it!
+
+ import.code = jitExitCode;
+ return true;
+}
+
+/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
+Instance::callImport_general(Instance* instance, int32_t funcImportIndex,
+ int32_t argc, uint64_t* argv) {
+ JSContext* cx = instance->cx();
+ return instance->callImport(cx, funcImportIndex, argc, argv);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Atomic operations and shared memory.
+
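+// Shared implementation of the memory.atomic.wait32/wait64 instructions.  The
+// return value follows the wasm encoding: 0 for "ok" (woken), 1 for
+// "not-equal", 2 for "timed-out", and -1 to signal a trap that has already
+// been reported.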
+template <typename ValT, typename PtrT>
+static int32_t PerformWait(Instance* instance, PtrT byteOffset, ValT value,
+ int64_t timeout_ns) {
+ JSContext* cx = instance->cx();
+
+ if (!instance->memory()->isShared()) {
+ ReportTrapError(cx, JSMSG_WASM_NONSHARED_WAIT);
+ return -1;
+ }
+
+ if (byteOffset & (sizeof(ValT) - 1)) {
+ ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
+ return -1;
+ }
+
+ if (byteOffset + sizeof(ValT) > instance->memory()->volatileMemoryLength()) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ mozilla::Maybe<mozilla::TimeDuration> timeout;
+ if (timeout_ns >= 0) {
+ timeout = mozilla::Some(
+ mozilla::TimeDuration::FromMicroseconds(double(timeout_ns) / 1000));
+ }
+
+ MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
+ switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(),
+ size_t(byteOffset), value, timeout)) {
+ case FutexThread::WaitResult::OK:
+ return 0;
+ case FutexThread::WaitResult::NotEqual:
+ return 1;
+ case FutexThread::WaitResult::TimedOut:
+ return 2;
+ case FutexThread::WaitResult::Error:
+ return -1;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+/* static */ int32_t Instance::wait_i32_m32(Instance* instance,
+ uint32_t byteOffset, int32_t value,
+ int64_t timeout_ns) {
+ MOZ_ASSERT(SASigWaitI32M32.failureMode == FailureMode::FailOnNegI32);
+ return PerformWait(instance, byteOffset, value, timeout_ns);
+}
+
+/* static */ int32_t Instance::wait_i32_m64(Instance* instance,
+ uint64_t byteOffset, int32_t value,
+ int64_t timeout_ns) {
+ MOZ_ASSERT(SASigWaitI32M64.failureMode == FailureMode::FailOnNegI32);
+ return PerformWait(instance, byteOffset, value, timeout_ns);
+}
+
+/* static */ int32_t Instance::wait_i64_m32(Instance* instance,
+ uint32_t byteOffset, int64_t value,
+ int64_t timeout_ns) {
+ MOZ_ASSERT(SASigWaitI64M32.failureMode == FailureMode::FailOnNegI32);
+ return PerformWait(instance, byteOffset, value, timeout_ns);
+}
+
+/* static */ int32_t Instance::wait_i64_m64(Instance* instance,
+ uint64_t byteOffset, int64_t value,
+ int64_t timeout_ns) {
+ MOZ_ASSERT(SASigWaitI64M64.failureMode == FailureMode::FailOnNegI32);
+ return PerformWait(instance, byteOffset, value, timeout_ns);
+}
+
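+// Shared implementation of the memory.atomic.notify instruction.  Returns the
+// number of waiters woken, 0 for non-shared memories (after the alignment and
+// bounds checks), or -1 to signal a reported trap.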
+template <typename PtrT>
+static int32_t PerformWake(Instance* instance, PtrT byteOffset, int32_t count) {
+ JSContext* cx = instance->cx();
+
+ // The alignment guard is not in the wasm spec as of 2017-11-02, but is
+ // considered likely to appear, as 4-byte alignment is required for WAKE by
+ // the spec's validation algorithm.
+
+ if (byteOffset & 3) {
+ ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
+ return -1;
+ }
+
+ if (byteOffset >= instance->memory()->volatileMemoryLength()) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ if (!instance->memory()->isShared()) {
+ return 0;
+ }
+
+ MOZ_ASSERT(byteOffset <= SIZE_MAX, "Bounds check is broken");
+ int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(),
+ size_t(byteOffset), int64_t(count));
+
+ if (woken > INT32_MAX) {
+ ReportTrapError(cx, JSMSG_WASM_WAKE_OVERFLOW);
+ return -1;
+ }
+
+ return int32_t(woken);
+}
+
+/* static */ int32_t Instance::wake_m32(Instance* instance, uint32_t byteOffset,
+ int32_t count) {
+ MOZ_ASSERT(SASigWakeM32.failureMode == FailureMode::FailOnNegI32);
+ return PerformWake(instance, byteOffset, count);
+}
+
+/* static */ int32_t Instance::wake_m64(Instance* instance, uint64_t byteOffset,
+ int32_t count) {
+  MOZ_ASSERT(SASigWakeM64.failureMode == FailureMode::FailOnNegI32);
+ return PerformWake(instance, byteOffset, count);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Bulk memory operations.
+
+/* static */ uint32_t Instance::memoryGrow_m32(Instance* instance,
+ uint32_t delta) {
+ MOZ_ASSERT(SASigMemoryGrowM32.failureMode == FailureMode::Infallible);
+ MOZ_ASSERT(!instance->isAsmJS());
+
+ JSContext* cx = instance->cx();
+ Rooted<WasmMemoryObject*> memory(cx, instance->memory_);
+
+ // It is safe to cast to uint32_t, as all limits have been checked inside
+ // grow() and will not have been exceeded for a 32-bit memory.
+ uint32_t ret = uint32_t(WasmMemoryObject::grow(memory, uint64_t(delta), cx));
+
+ // If there has been a moving grow, this Instance should have been notified.
+ MOZ_RELEASE_ASSERT(instance->memoryBase_ ==
+ instance->memory_->buffer().dataPointerEither());
+
+ return ret;
+}
+
+/* static */ uint64_t Instance::memoryGrow_m64(Instance* instance,
+ uint64_t delta) {
+ MOZ_ASSERT(SASigMemoryGrowM64.failureMode == FailureMode::Infallible);
+ MOZ_ASSERT(!instance->isAsmJS());
+
+ JSContext* cx = instance->cx();
+ Rooted<WasmMemoryObject*> memory(cx, instance->memory_);
+
+ uint64_t ret = WasmMemoryObject::grow(memory, delta, cx);
+
+ // If there has been a moving grow, this Instance should have been notified.
+ MOZ_RELEASE_ASSERT(instance->memoryBase_ ==
+ instance->memory_->buffer().dataPointerEither());
+
+ return ret;
+}
+
+/* static */ uint32_t Instance::memorySize_m32(Instance* instance) {
+ MOZ_ASSERT(SASigMemorySizeM32.failureMode == FailureMode::Infallible);
+
+ // This invariant must hold when running Wasm code. Assert it here so we can
+ // write tests for cross-realm calls.
+ DebugOnly<JSContext*> cx = instance->cx();
+ MOZ_ASSERT(cx->realm() == instance->realm());
+
+ Pages pages = instance->memory()->volatilePages();
+#ifdef JS_64BIT
+ // Ensure that the memory size is no more than 4GiB.
+ MOZ_ASSERT(pages <= Pages(MaxMemory32LimitField));
+#endif
+ return uint32_t(pages.value());
+}
+
+/* static */ uint64_t Instance::memorySize_m64(Instance* instance) {
+ MOZ_ASSERT(SASigMemorySizeM64.failureMode == FailureMode::Infallible);
+
+ // This invariant must hold when running Wasm code. Assert it here so we can
+ // write tests for cross-realm calls.
+ DebugOnly<JSContext*> cx = instance->cx();
+ MOZ_ASSERT(cx->realm() == instance->realm());
+
+ Pages pages = instance->memory()->volatilePages();
+#ifdef JS_64BIT
+ MOZ_ASSERT(pages <= Pages(MaxMemory64LimitField));
+#endif
+ return pages.value();
+}
+
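+// WasmMemoryCopy is the bounds-checked core of memory.copy.  MemoryCopy and
+// MemoryCopyShared instantiate it for unshared and shared memories
+// respectively, the latter using a memmove that is safe under racy access.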
+template <typename T, typename F, typename I>
+inline int32_t WasmMemoryCopy(JSContext* cx, T memBase, size_t memLen,
+ I dstByteOffset, I srcByteOffset, I len,
+ F memMove) {
+ if (!MemoryBoundsCheck(dstByteOffset, len, memLen) ||
+ !MemoryBoundsCheck(srcByteOffset, len, memLen)) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ memMove(memBase + uintptr_t(dstByteOffset),
+ memBase + uintptr_t(srcByteOffset), size_t(len));
+ return 0;
+}
+
+template <typename I>
+inline int32_t MemoryCopy(JSContext* cx, I dstByteOffset, I srcByteOffset,
+ I len, uint8_t* memBase) {
+ const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
+ size_t memLen = rawBuf->byteLength();
+ return WasmMemoryCopy(cx, memBase, memLen, dstByteOffset, srcByteOffset, len,
+ memmove);
+}
+
+template <typename I>
+inline int32_t MemoryCopyShared(JSContext* cx, I dstByteOffset, I srcByteOffset,
+ I len, uint8_t* memBase) {
+ using RacyMemMove =
+ void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
+
+ const WasmSharedArrayRawBuffer* rawBuf =
+ WasmSharedArrayRawBuffer::fromDataPtr(memBase);
+ size_t memLen = rawBuf->volatileByteLength();
+
+ return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
+ cx, SharedMem<uint8_t*>::shared(memBase), memLen, dstByteOffset,
+ srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
+}
+
+/* static */ int32_t Instance::memCopy_m32(Instance* instance,
+ uint32_t dstByteOffset,
+ uint32_t srcByteOffset, uint32_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemCopyM32.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
+}
+
+/* static */ int32_t Instance::memCopyShared_m32(Instance* instance,
+ uint32_t dstByteOffset,
+ uint32_t srcByteOffset,
+ uint32_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemCopySharedM32.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
+}
+
+/* static */ int32_t Instance::memCopy_m64(Instance* instance,
+ uint64_t dstByteOffset,
+ uint64_t srcByteOffset, uint64_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemCopyM64.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryCopy(cx, dstByteOffset, srcByteOffset, len, memBase);
+}
+
+/* static */ int32_t Instance::memCopyShared_m64(Instance* instance,
+ uint64_t dstByteOffset,
+ uint64_t srcByteOffset,
+ uint64_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemCopySharedM64.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryCopyShared(cx, dstByteOffset, srcByteOffset, len, memBase);
+}
+
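+// WasmMemoryFill is the bounds-checked core of memory.fill; MemoryFill and
+// MemoryFillShared instantiate it for unshared and shared memories.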
+template <typename T, typename F, typename I>
+inline int32_t WasmMemoryFill(JSContext* cx, T memBase, size_t memLen,
+ I byteOffset, uint32_t value, I len, F memSet) {
+ if (!MemoryBoundsCheck(byteOffset, len, memLen)) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ // The required write direction is upward, but that is not currently
+ // observable as there are no fences nor any read/write protect operation.
+ memSet(memBase + uintptr_t(byteOffset), int(value), size_t(len));
+ return 0;
+}
+
+template <typename I>
+inline int32_t MemoryFill(JSContext* cx, I byteOffset, uint32_t value, I len,
+ uint8_t* memBase) {
+ const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
+ size_t memLen = rawBuf->byteLength();
+ return WasmMemoryFill(cx, memBase, memLen, byteOffset, value, len, memset);
+}
+
+template <typename I>
+inline int32_t MemoryFillShared(JSContext* cx, I byteOffset, uint32_t value,
+ I len, uint8_t* memBase) {
+ const WasmSharedArrayRawBuffer* rawBuf =
+ WasmSharedArrayRawBuffer::fromDataPtr(memBase);
+ size_t memLen = rawBuf->volatileByteLength();
+ return WasmMemoryFill(cx, SharedMem<uint8_t*>::shared(memBase), memLen,
+ byteOffset, value, len,
+ AtomicOperations::memsetSafeWhenRacy);
+}
+
+/* static */ int32_t Instance::memFill_m32(Instance* instance,
+ uint32_t byteOffset, uint32_t value,
+ uint32_t len, uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemFillM32.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryFill(cx, byteOffset, value, len, memBase);
+}
+
+/* static */ int32_t Instance::memFillShared_m32(Instance* instance,
+ uint32_t byteOffset,
+ uint32_t value, uint32_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemFillSharedM32.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryFillShared(cx, byteOffset, value, len, memBase);
+}
+
+/* static */ int32_t Instance::memFill_m64(Instance* instance,
+ uint64_t byteOffset, uint32_t value,
+ uint64_t len, uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemFillM64.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryFill(cx, byteOffset, value, len, memBase);
+}
+
+/* static */ int32_t Instance::memFillShared_m64(Instance* instance,
+ uint64_t byteOffset,
+ uint32_t value, uint64_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemFillSharedM64.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+ return MemoryFillShared(cx, byteOffset, value, len, memBase);
+}
+
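+// Return true if the proposed memory.init copy would be out of bounds with
+// respect to either the memory or the data segment.  The 64-bit overload also
+// guards against overflow when computing dstOffset + len.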
+static bool BoundsCheckInit(uint32_t dstOffset, uint32_t srcOffset,
+ uint32_t len, size_t memLen, uint32_t segLen) {
+ uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
+ uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
+
+ return dstOffsetLimit > memLen || srcOffsetLimit > segLen;
+}
+
+static bool BoundsCheckInit(uint64_t dstOffset, uint32_t srcOffset,
+ uint32_t len, size_t memLen, uint32_t segLen) {
+ uint64_t dstOffsetLimit = dstOffset + uint64_t(len);
+ uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
+
+ return dstOffsetLimit < dstOffset || dstOffsetLimit > memLen ||
+ srcOffsetLimit > segLen;
+}
+
+template <typename I>
+static int32_t MemoryInit(JSContext* cx, Instance* instance, I dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ const DataSegment* maybeSeg) {
+ if (!maybeSeg) {
+ if (len == 0 && srcOffset == 0) {
+ return 0;
+ }
+
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ const DataSegment& seg = *maybeSeg;
+ MOZ_RELEASE_ASSERT(!seg.active());
+
+ const uint32_t segLen = seg.bytes.length();
+
+ WasmMemoryObject* mem = instance->memory();
+ const size_t memLen = mem->volatileMemoryLength();
+
+ // We are proposing to copy
+ //
+ // seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
+ // to
+ // memoryBase[ dstOffset .. dstOffset + len - 1 ]
+
+ if (BoundsCheckInit(dstOffset, srcOffset, len, memLen, segLen)) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ // The required read/write direction is upward, but that is not currently
+ // observable as there are no fences nor any read/write protect operation.
+ SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
+ if (mem->isShared()) {
+ AtomicOperations::memcpySafeWhenRacy(
+ dataPtr + uintptr_t(dstOffset), (uint8_t*)seg.bytes.begin() + srcOffset,
+ len);
+ } else {
+ uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
+ memcpy(rawBuf + uintptr_t(dstOffset),
+ (const char*)seg.bytes.begin() + srcOffset, len);
+ }
+ return 0;
+}
+
+/* static */ int32_t Instance::memInit_m32(Instance* instance,
+ uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t segIndex) {
+ MOZ_ASSERT(SASigMemInitM32.failureMode == FailureMode::FailOnNegI32);
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
+ "ensured by validation");
+
+ JSContext* cx = instance->cx();
+ return MemoryInit(cx, instance, dstOffset, srcOffset, len,
+ instance->passiveDataSegments_[segIndex]);
+}
+
+/* static */ int32_t Instance::memInit_m64(Instance* instance,
+ uint64_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t segIndex) {
+ MOZ_ASSERT(SASigMemInitM64.failureMode == FailureMode::FailOnNegI32);
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
+ "ensured by validation");
+
+ JSContext* cx = instance->cx();
+ return MemoryInit(cx, instance, dstOffset, srcOffset, len,
+ instance->passiveDataSegments_[segIndex]);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Bulk table operations.
+
+/* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t dstTableIndex,
+ uint32_t srcTableIndex) {
+ MOZ_ASSERT(SASigTableCopy.failureMode == FailureMode::FailOnNegI32);
+
+ JSContext* cx = instance->cx();
+ const SharedTable& srcTable = instance->tables()[srcTableIndex];
+ uint32_t srcTableLen = srcTable->length();
+
+ const SharedTable& dstTable = instance->tables()[dstTableIndex];
+ uint32_t dstTableLen = dstTable->length();
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
+ uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
+
+ if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ bool isOOM = false;
+
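+  // When copying within a single table and the destination lies above the
+  // source, copy backwards so that source entries are read before they are
+  // overwritten.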
+ if (&srcTable == &dstTable && dstOffset > srcOffset) {
+ for (uint32_t i = len; i > 0; i--) {
+ if (!dstTable->copy(cx, *srcTable, dstOffset + (i - 1),
+ srcOffset + (i - 1))) {
+ isOOM = true;
+ break;
+ }
+ }
+ } else if (&srcTable == &dstTable && dstOffset == srcOffset) {
+ // No-op
+ } else {
+ for (uint32_t i = 0; i < len; i++) {
+ if (!dstTable->copy(cx, *srcTable, dstOffset + i, srcOffset + i)) {
+ isOOM = true;
+ break;
+ }
+ }
+ }
+
+ if (isOOM) {
+ return -1;
+ }
+ return 0;
+}
+
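+// Copy `len` entries of `seg`, starting at `srcOffset`, into table number
+// `tableIndex` at `dstOffset`.  Both ranges must already have been bounds
+// checked by the caller.  Returns false on OOM, which will already have been
+// reported.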
+bool Instance::initElems(uint32_t tableIndex, const ElemSegment& seg,
+ uint32_t dstOffset, uint32_t srcOffset, uint32_t len) {
+ Table& table = *tables_[tableIndex];
+ MOZ_ASSERT(dstOffset <= table.length());
+ MOZ_ASSERT(len <= table.length() - dstOffset);
+
+ Tier tier = code().bestTier();
+ const MetadataTier& metadataTier = metadata(tier);
+ const FuncImportVector& funcImports = metadataTier.funcImports;
+ const CodeRangeVector& codeRanges = metadataTier.codeRanges;
+ const Uint32Vector& funcToCodeRange = metadataTier.funcToCodeRange;
+ const Uint32Vector& elemFuncIndices = seg.elemFuncIndices;
+ MOZ_ASSERT(srcOffset <= elemFuncIndices.length());
+ MOZ_ASSERT(len <= elemFuncIndices.length() - srcOffset);
+
+ uint8_t* codeBaseTier = codeBase(tier);
+ for (uint32_t i = 0; i < len; i++) {
+ uint32_t funcIndex = elemFuncIndices[srcOffset + i];
+ if (funcIndex == NullFuncIndex) {
+ table.setNull(dstOffset + i);
+ } else if (!table.isFunction()) {
+ // Note, fnref must be rooted if we do anything more than just store it.
+ void* fnref = Instance::refFunc(this, funcIndex);
+ if (fnref == AnyRef::invalid().forCompiledCode()) {
+ return false; // OOM, which has already been reported.
+ }
+ table.fillAnyRef(dstOffset + i, 1, AnyRef::fromCompiledCode(fnref));
+ } else {
+ if (funcIndex < metadataTier.funcImports.length()) {
+ FuncImportInstanceData& import =
+ funcImportInstanceData(funcImports[funcIndex]);
+ MOZ_ASSERT(import.callable->isCallable());
+ if (import.callable->is<JSFunction>()) {
+ JSFunction* fun = &import.callable->as<JSFunction>();
+ if (IsWasmExportedFunction(fun)) {
+ // This element is a wasm function imported from another
+ // instance. To preserve the === function identity required by
+ // the JS embedding spec, we must set the element to the
+ // imported function's underlying CodeRange.funcCheckedCallEntry and
+ // Instance so that future Table.get()s produce the same
+ // function object as was imported.
+ WasmInstanceObject* calleeInstanceObj =
+ ExportedFunctionToInstanceObject(fun);
+ Instance& calleeInstance = calleeInstanceObj->instance();
+ Tier calleeTier = calleeInstance.code().bestTier();
+ const CodeRange& calleeCodeRange =
+ calleeInstanceObj->getExportedFunctionCodeRange(fun,
+ calleeTier);
+ void* code = calleeInstance.codeBase(calleeTier) +
+ calleeCodeRange.funcCheckedCallEntry();
+ table.setFuncRef(dstOffset + i, code, &calleeInstance);
+ continue;
+ }
+ }
+ }
+ void* code =
+ codeBaseTier +
+ codeRanges[funcToCodeRange[funcIndex]].funcCheckedCallEntry();
+ table.setFuncRef(dstOffset + i, code, this);
+ }
+ }
+ return true;
+}
+
+/* static */ int32_t Instance::tableInit(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t segIndex,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableInit.failureMode == FailureMode::FailOnNegI32);
+
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
+ "ensured by validation");
+
+ JSContext* cx = instance->cx();
+ if (!instance->passiveElemSegments_[segIndex]) {
+ if (len == 0 && srcOffset == 0) {
+ return 0;
+ }
+
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ const ElemSegment& seg = *instance->passiveElemSegments_[segIndex];
+ MOZ_RELEASE_ASSERT(!seg.active());
+ const uint32_t segLen = seg.length();
+
+ const Table& table = *instance->tables()[tableIndex];
+ const uint32_t tableLen = table.length();
+
+ // We are proposing to copy
+ //
+ // seg[ srcOffset .. srcOffset + len - 1 ]
+ // to
+ // tableBase[ dstOffset .. dstOffset + len - 1 ]
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
+ uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
+
+ if (dstOffsetLimit > tableLen || srcOffsetLimit > segLen) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ if (!instance->initElems(tableIndex, seg, dstOffset, srcOffset, len)) {
+ return -1; // OOM, which has already been reported.
+ }
+
+ return 0;
+}
+
+/* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
+ void* value, uint32_t len,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);
+
+ JSContext* cx = instance->cx();
+ Table& table = *instance->tables()[tableIndex];
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t offsetLimit = uint64_t(start) + uint64_t(len);
+
+ if (offsetLimit > table.length()) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ switch (table.repr()) {
+ case TableRepr::Ref:
+ table.fillAnyRef(start, len, AnyRef::fromCompiledCode(value));
+ break;
+ case TableRepr::Func:
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+ table.fillFuncRef(start, len, FuncRef::fromCompiledCode(value), cx);
+ break;
+ }
+
+ return 0;
+}
+
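+// Helpers for the wasm memory.discard operation.  The offset and length must
+// be page aligned and in bounds; the backing buffer is then asked to discard
+// the given range.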
+template <typename I>
+static bool WasmDiscardCheck(Instance* instance, I byteOffset, I byteLen,
+ size_t memLen, bool shared) {
+ JSContext* cx = instance->cx();
+
+ if (byteOffset % wasm::PageSize != 0 || byteLen % wasm::PageSize != 0) {
+ ReportTrapError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
+ return false;
+ }
+
+ if (!MemoryBoundsCheck(byteOffset, byteLen, memLen)) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return false;
+ }
+
+ return true;
+}
+
+template <typename I>
+static int32_t MemDiscardNotShared(Instance* instance, I byteOffset, I byteLen,
+ uint8_t* memBase) {
+ WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
+ size_t memLen = rawBuf->byteLength();
+
+ if (!WasmDiscardCheck(instance, byteOffset, byteLen, memLen, false)) {
+ return -1;
+ }
+ rawBuf->discard(byteOffset, byteLen);
+
+ return 0;
+}
+
+template <typename I>
+static int32_t MemDiscardShared(Instance* instance, I byteOffset, I byteLen,
+ uint8_t* memBase) {
+ WasmSharedArrayRawBuffer* rawBuf =
+ WasmSharedArrayRawBuffer::fromDataPtr(memBase);
+ size_t memLen = rawBuf->volatileByteLength();
+
+ if (!WasmDiscardCheck(instance, byteOffset, byteLen, memLen, true)) {
+ return -1;
+ }
+ rawBuf->discard(byteOffset, byteLen);
+
+ return 0;
+}
+
+/* static */ int32_t Instance::memDiscard_m32(Instance* instance,
+ uint32_t byteOffset,
+ uint32_t byteLen,
+ uint8_t* memBase) {
+ return MemDiscardNotShared(instance, byteOffset, byteLen, memBase);
+}
+
+/* static */ int32_t Instance::memDiscard_m64(Instance* instance,
+ uint64_t byteOffset,
+ uint64_t byteLen,
+ uint8_t* memBase) {
+ return MemDiscardNotShared(instance, byteOffset, byteLen, memBase);
+}
+
+/* static */ int32_t Instance::memDiscardShared_m32(Instance* instance,
+ uint32_t byteOffset,
+ uint32_t byteLen,
+ uint8_t* memBase) {
+ return MemDiscardShared(instance, byteOffset, byteLen, memBase);
+}
+
+/* static */ int32_t Instance::memDiscardShared_m64(Instance* instance,
+ uint64_t byteOffset,
+ uint64_t byteLen,
+ uint8_t* memBase) {
+ return MemDiscardShared(instance, byteOffset, byteLen, memBase);
+}
+
+/* static */ void* Instance::tableGet(Instance* instance, uint32_t index,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);
+
+ JSContext* cx = instance->cx();
+ const Table& table = *instance->tables()[tableIndex];
+ if (index >= table.length()) {
+ ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
+ return AnyRef::invalid().forCompiledCode();
+ }
+
+ switch (table.repr()) {
+ case TableRepr::Ref:
+ return table.getAnyRef(index).forCompiledCode();
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+ RootedFunction fun(cx);
+ if (!table.getFuncRef(cx, index, &fun)) {
+ return AnyRef::invalid().forCompiledCode();
+ }
+ return FuncRef::fromJSFunction(fun).forCompiledCode();
+ }
+ }
+ MOZ_CRASH("Should not happen");
+}
+
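+// table.grow: returns the previous table length on success, or uint32_t(-1)
+// on failure.  On success, the newly added entries are filled with
+// `initValue` when it is non-null.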
+/* static */ uint32_t Instance::tableGrow(Instance* instance, void* initValue,
+ uint32_t delta, uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableGrow.failureMode == FailureMode::Infallible);
+
+ JSContext* cx = instance->cx();
+ RootedAnyRef ref(cx, AnyRef::fromCompiledCode(initValue));
+ Table& table = *instance->tables()[tableIndex];
+
+ uint32_t oldSize = table.grow(delta);
+
+ if (oldSize != uint32_t(-1) && initValue != nullptr) {
+ table.fillUninitialized(oldSize, delta, ref, cx);
+ }
+
+#ifdef DEBUG
+ if (!table.elemType().isNullable()) {
+ table.assertRangeNotNull(oldSize, delta);
+ }
+#endif // DEBUG
+ return oldSize;
+}
+
+/* static */ int32_t Instance::tableSet(Instance* instance, uint32_t index,
+ void* value, uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableSet.failureMode == FailureMode::FailOnNegI32);
+
+ JSContext* cx = instance->cx();
+ Table& table = *instance->tables()[tableIndex];
+
+ if (index >= table.length()) {
+ ReportTrapError(cx, JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ switch (table.repr()) {
+ case TableRepr::Ref:
+ table.fillAnyRef(index, 1, AnyRef::fromCompiledCode(value));
+ break;
+ case TableRepr::Func:
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+ table.fillFuncRef(index, 1, FuncRef::fromCompiledCode(value), cx);
+ break;
+ }
+
+ return 0;
+}
+
+/* static */ uint32_t Instance::tableSize(Instance* instance,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableSize.failureMode == FailureMode::Infallible);
+ Table& table = *instance->tables()[tableIndex];
+ return table.length();
+}
+
+/* static */ void* Instance::refFunc(Instance* instance, uint32_t funcIndex) {
+ MOZ_ASSERT(SASigRefFunc.failureMode == FailureMode::FailOnInvalidRef);
+ JSContext* cx = instance->cx();
+
+ Tier tier = instance->code().bestTier();
+ const MetadataTier& metadataTier = instance->metadata(tier);
+ const FuncImportVector& funcImports = metadataTier.funcImports;
+
+ // If this is an import, we need to recover the original function to maintain
+ // reference equality between a re-exported function and 'ref.func'. The
+ // identity of the imported function object is stable across tiers, which is
+ // what we want.
+ //
+ // Use the imported function only if it is an exported function, otherwise
+ // fall through to get a (possibly new) exported function.
+ if (funcIndex < funcImports.length()) {
+ FuncImportInstanceData& import =
+ instance->funcImportInstanceData(funcImports[funcIndex]);
+ if (import.callable->is<JSFunction>()) {
+ JSFunction* fun = &import.callable->as<JSFunction>();
+ if (IsWasmExportedFunction(fun)) {
+ return FuncRef::fromJSFunction(fun).forCompiledCode();
+ }
+ }
+ }
+
+ RootedFunction fun(cx);
+ Rooted<WasmInstanceObject*> instanceObj(cx, instance->object());
+ if (!WasmInstanceObject::getExportedFunction(cx, instanceObj, funcIndex,
+ &fun)) {
+    // Validation ensures that we always have a valid funcIndex, so we must
+    // have OOM'ed.
+ ReportOutOfMemory(cx);
+ return AnyRef::invalid().forCompiledCode();
+ }
+
+ return FuncRef::fromJSFunction(fun).forCompiledCode();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Segment management.
+
+/* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
+ MOZ_ASSERT(SASigElemDrop.failureMode == FailureMode::FailOnNegI32);
+
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
+ "ensured by validation");
+
+ if (!instance->passiveElemSegments_[segIndex]) {
+ return 0;
+ }
+
+ SharedElemSegment& segRefPtr = instance->passiveElemSegments_[segIndex];
+ MOZ_RELEASE_ASSERT(!segRefPtr->active());
+
+ // Drop this instance's reference to the ElemSegment so it can be released.
+ segRefPtr = nullptr;
+ return 0;
+}
+
+/* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
+ MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
+
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
+ "ensured by validation");
+
+ if (!instance->passiveDataSegments_[segIndex]) {
+ return 0;
+ }
+
+ SharedDataSegment& segRefPtr = instance->passiveDataSegments_[segIndex];
+ MOZ_RELEASE_ASSERT(!segRefPtr->active());
+
+ // Drop this instance's reference to the DataSegment so it can be released.
+ segRefPtr = nullptr;
+ return 0;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Object support.
+
+/* static */ void Instance::postBarrier(Instance* instance,
+ gc::Cell** location) {
+ MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
+ MOZ_ASSERT(location);
+ instance->storeBuffer_->putCell(reinterpret_cast<JSObject**>(location));
+}
+
+/* static */ void Instance::postBarrierPrecise(Instance* instance,
+ JSObject** location,
+ JSObject* prev) {
+ MOZ_ASSERT(SASigPostBarrierPrecise.failureMode == FailureMode::Infallible);
+ postBarrierPreciseWithOffset(instance, location, /*offset=*/0, prev);
+}
+
+/* static */ void Instance::postBarrierPreciseWithOffset(Instance* instance,
+ JSObject** base,
+ uint32_t offset,
+ JSObject* prev) {
+ MOZ_ASSERT(SASigPostBarrierPreciseWithOffset.failureMode ==
+ FailureMode::Infallible);
+ MOZ_ASSERT(base);
+ JSObject** location = (JSObject**)(uintptr_t(base) + size_t(offset));
+ JSObject* next = *location;
+ JSObject::postWriteBarrier(location, prev, next);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// GC and exception handling support.
+
+/* static */ void* Instance::structNew(Instance* instance,
+ TypeDefInstanceData* typeDefData) {
+ MOZ_ASSERT(SASigStructNew.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = instance->cx();
+ // The new struct will be allocated in an initial heap as determined by
+ // pretenuring logic as set up in `Instance::init`.
+ return WasmStructObject::createStruct<true>(
+ cx, typeDefData, typeDefData->allocSite.initialHeap());
+}
+
+/* static */ void* Instance::structNewUninit(Instance* instance,
+ TypeDefInstanceData* typeDefData) {
+ MOZ_ASSERT(SASigStructNew.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = instance->cx();
+ // The new struct will be allocated in an initial heap as determined by
+ // pretenuring logic as set up in `Instance::init`.
+ return WasmStructObject::createStruct<false>(
+ cx, typeDefData, typeDefData->allocSite.initialHeap());
+}
+
+/* static */ void* Instance::arrayNew(Instance* instance, uint32_t numElements,
+ TypeDefInstanceData* typeDefData) {
+ MOZ_ASSERT(SASigArrayNew.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = instance->cx();
+ // The new array will be allocated in an initial heap as determined by
+ // pretenuring logic as set up in `Instance::init`.
+ return WasmArrayObject::createArray<true>(
+ cx, typeDefData, typeDefData->allocSite.initialHeap(), numElements);
+}
+
+/* static */ void* Instance::arrayNewUninit(Instance* instance,
+ uint32_t numElements,
+ TypeDefInstanceData* typeDefData) {
+ MOZ_ASSERT(SASigArrayNew.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = instance->cx();
+ // The new array will be allocated in an initial heap as determined by
+ // pretenuring logic as set up in `Instance::init`.
+ return WasmArrayObject::createArray<false>(
+ cx, typeDefData, typeDefData->allocSite.initialHeap(), numElements);
+}
+
+// Creates an array (WasmArrayObject) containing `numElements` of type
+// described by `typeDef`. Initialises it with data copied from the data
+// segment whose index is `segIndex`, starting at byte offset `segByteOffset`
+// in the segment. Traps if the segment doesn't hold enough bytes to fill the
+// array.
+/* static */ void* Instance::arrayNewData(Instance* instance,
+ uint32_t segByteOffset,
+ uint32_t numElements,
+ TypeDefInstanceData* typeDefData,
+ uint32_t segIndex) {
+ MOZ_ASSERT(SASigArrayNewData.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = instance->cx();
+
+ // Check that the data segment is valid for use.
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
+ "ensured by validation");
+ const DataSegment* seg = instance->passiveDataSegments_[segIndex];
+
+ // `seg` will be nullptr if the segment has already been 'data.drop'ed
+ // (either implicitly in the case of 'active' segments during instantiation,
+ // or explicitly by the data.drop instruction.) In that case we can
+ // continue only if there's no need to copy any data out of it.
+ if (!seg && (numElements != 0 || segByteOffset != 0)) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return nullptr;
+ }
+ // At this point, if `seg` is null then `numElements` and `segByteOffset`
+ // are both zero.
+
+ const TypeDef* typeDef = typeDefData->typeDef;
+ Rooted<WasmArrayObject*> arrayObj(
+ cx,
+ WasmArrayObject::createArray(
+ cx, typeDefData, typeDefData->allocSite.initialHeap(), numElements));
+ if (!arrayObj) {
+ // WasmArrayObject::createArray will have reported OOM.
+ return nullptr;
+ }
+ MOZ_RELEASE_ASSERT(arrayObj->is<WasmArrayObject>());
+
+ if (!seg) {
+ // A zero-length array was requested and has been created, so we're done.
+ return arrayObj;
+ }
+
+ // Compute the number of bytes to copy, ensuring it's below 2^32.
+ CheckedUint32 numBytesToCopy =
+ CheckedUint32(numElements) *
+ CheckedUint32(typeDef->arrayType().elementType_.size());
+ if (!numBytesToCopy.isValid()) {
+ // Because the request implies that 2^32 or more bytes are to be copied.
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return nullptr;
+ }
+
+ // Range-check the copy. The obvious thing to do is to compute the offset
+ // of the last byte to copy, but that would cause underflow in the
+ // zero-length-and-zero-offset case. Instead, compute that value plus one;
+ // in other words the offset of the first byte *not* to copy.
+ CheckedUint32 lastByteOffsetPlus1 =
+ CheckedUint32(segByteOffset) + numBytesToCopy;
+
+ CheckedUint32 numBytesAvailable(seg->bytes.length());
+ if (!lastByteOffsetPlus1.isValid() || !numBytesAvailable.isValid() ||
+ lastByteOffsetPlus1.value() > numBytesAvailable.value()) {
+ // Because the last byte to copy doesn't exist inside `seg->bytes`.
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return nullptr;
+ }
+
+ // Because `numBytesToCopy` is an in-range `CheckedUint32`, the cast to
+ // `size_t` is safe even on a 32-bit target.
+ memcpy(arrayObj->data_, &seg->bytes[segByteOffset],
+ size_t(numBytesToCopy.value()));
+
+ return arrayObj;
+}
+
+// This is almost identical to ::arrayNewData, apart from the final part that
+// actually copies the data. It creates an array (WasmArrayObject)
+// containing `numElements` of type described by `typeDef`. Initialises it
+// with data copied from the element segment whose index is `segIndex`,
+// starting at element number `segElemIndex` in the segment. Traps if the
+// segment doesn't hold enough elements to fill the array.
+/* static */ void* Instance::arrayNewElem(Instance* instance,
+ uint32_t segElemIndex,
+ uint32_t numElements,
+ TypeDefInstanceData* typeDefData,
+ uint32_t segIndex) {
+ MOZ_ASSERT(SASigArrayNewElem.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = instance->cx();
+
+ // Check that the element segment is valid for use.
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
+ "ensured by validation");
+ const ElemSegment* seg = instance->passiveElemSegments_[segIndex];
+
+ // As with ::arrayNewData, if `seg` is nullptr then we can only safely copy
+ // zero elements from it.
+ if (!seg && (numElements != 0 || segElemIndex != 0)) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return nullptr;
+ }
+ // At this point, if `seg` is null then `numElements` and `segElemIndex`
+ // are both zero.
+
+ const TypeDef* typeDef = typeDefData->typeDef;
+
+ // The element segment is an array of uint32_t indicating function indices,
+ // which we'll have to dereference (to produce real function pointers)
+ // before parking them in the array. Hence each array element must be a
+ // machine word.
+ MOZ_RELEASE_ASSERT(typeDef->arrayType().elementType_.size() == sizeof(void*));
+
+ Rooted<WasmArrayObject*> arrayObj(
+ cx,
+ WasmArrayObject::createArray(
+ cx, typeDefData, typeDefData->allocSite.initialHeap(), numElements));
+ if (!arrayObj) {
+ // WasmArrayObject::createArray will have reported OOM.
+ return nullptr;
+ }
+ MOZ_RELEASE_ASSERT(arrayObj->is<WasmArrayObject>());
+
+ if (!seg) {
+ // A zero-length array was requested and has been created, so we're done.
+ return arrayObj;
+ }
+
+ // Range-check the copy. As in ::arrayNewData, compute the index of the
+ // last element to copy, plus one.
+ CheckedUint32 lastIndexPlus1 =
+ CheckedUint32(segElemIndex) + CheckedUint32(numElements);
+
+ CheckedUint32 numElemsAvailable(seg->elemFuncIndices.length());
+ if (!lastIndexPlus1.isValid() || !numElemsAvailable.isValid() ||
+ lastIndexPlus1.value() > numElemsAvailable.value()) {
+ // Because the last element to copy doesn't exist inside
+ // `seg->elemFuncIndices`.
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return nullptr;
+ }
+
+ // Do the initialisation, converting function indices into code pointers as
+ // we go.
+ void** dst = (void**)arrayObj->data_;
+ const uint32_t* src = &seg->elemFuncIndices[segElemIndex];
+ for (uint32_t i = 0; i < numElements; i++) {
+ uint32_t funcIndex = src[i];
+ FieldType elemType = typeDef->arrayType().elementType_;
+ MOZ_RELEASE_ASSERT(elemType.isRefType());
+ RootedVal value(cx, elemType.refType());
+ if (funcIndex == NullFuncIndex) {
+ // value remains null
+ } else {
+ void* funcRef = Instance::refFunc(instance, funcIndex);
+ if (funcRef == AnyRef::invalid().forCompiledCode()) {
+ return nullptr; // OOM, which has already been reported.
+ }
+ value = Val(elemType.refType(), FuncRef::fromCompiledCode(funcRef));
+ }
+ value.get().writeToHeapLocation(&dst[i]);
+ }
+
+ return arrayObj;
+}
+
+/* static */ int32_t Instance::arrayCopy(Instance* instance, void* dstArray,
+ uint32_t dstIndex, void* srcArray,
+ uint32_t srcIndex,
+ uint32_t numElements,
+ uint32_t elementSize) {
+ MOZ_ASSERT(SASigArrayCopy.failureMode == FailureMode::FailOnNegI32);
+ JSContext* cx = instance->cx();
+
+ // At the entry point, `elementSize` may be negative to indicate
+ // reftyped-ness of array elements. That is done in order to avoid having
+ // to pass yet another (boolean) parameter here.
+
+ // "traps if either array is null"
+ if (!srcArray || !dstArray) {
+ ReportTrapError(cx, JSMSG_WASM_DEREF_NULL);
+ return -1;
+ }
+
+ bool elemsAreRefTyped = false;
+ if (int32_t(elementSize) < 0) {
+ elemsAreRefTyped = true;
+ elementSize = uint32_t(-int32_t(elementSize));
+ }
+ MOZ_ASSERT(elementSize >= 1 && elementSize <= 16);
+
+ // Get hold of the two arrays.
+ Rooted<WasmArrayObject*> dstArrayObj(cx,
+ static_cast<WasmArrayObject*>(dstArray));
+ MOZ_RELEASE_ASSERT(dstArrayObj->is<WasmArrayObject>());
+
+ Rooted<WasmArrayObject*> srcArrayObj(cx,
+ static_cast<WasmArrayObject*>(srcArray));
+ MOZ_RELEASE_ASSERT(srcArrayObj->is<WasmArrayObject>());
+
+ // If WasmArrayObject::numElements() is changed to return 64 bits, the
+ // following checking logic will be incorrect.
+ STATIC_ASSERT_WASMARRAYELEMENTS_NUMELEMENTS_IS_U32;
+
+ // "traps if destination + length > len(array1)"
+ uint64_t dstNumElements = uint64_t(dstArrayObj->numElements_);
+ if (uint64_t(dstIndex) + uint64_t(numElements) > dstNumElements) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ // "traps if source + length > len(array2)"
+ uint64_t srcNumElements = uint64_t(srcArrayObj->numElements_);
+ if (uint64_t(srcIndex) + uint64_t(numElements) > srcNumElements) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ // trap if we're asked to copy 2^32 or more bytes on a 32-bit target.
+ uint64_t numBytesToCopy = uint64_t(numElements) * uint64_t(elementSize);
+#ifndef JS_64BIT
+ if (numBytesToCopy > uint64_t(UINT32_MAX)) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+#endif
+ // We're now assured that `numBytesToCopy` can be cast to `size_t` without
+ // overflow.
+
+ // Actually do the copy, taking care to handle cases where the src and dst
+ // areas overlap.
+ uint8_t* srcBase = srcArrayObj->data_;
+ uint8_t* dstBase = dstArrayObj->data_;
+ srcBase += size_t(srcIndex) * size_t(elementSize);
+ dstBase += size_t(dstIndex) * size_t(elementSize);
+
+ if (numBytesToCopy == 0 || srcBase == dstBase) {
+ // Early exit if there's no work to do.
+ return 0;
+ }
+
+ if (!elemsAreRefTyped) {
+ // Hand off to memmove, which is presumably highly optimized.
+ memmove(dstBase, srcBase, size_t(numBytesToCopy));
+ return 0;
+ }
+
+ // We're copying refs; doing that needs suitable GC barrier-ing.
+ uint8_t* nextSrc;
+ uint8_t* nextDst;
+ intptr_t step;
+ if (dstBase < srcBase) {
+ // Moving data backwards in the address space; so iterate forwards through
+ // the array.
+ step = intptr_t(elementSize);
+ nextSrc = srcBase;
+ nextDst = dstBase;
+ } else {
+ // Moving data forwards; so iterate backwards.
+ step = -intptr_t(elementSize);
+ nextSrc = srcBase + size_t(numBytesToCopy) - size_t(elementSize);
+ nextDst = dstBase + size_t(numBytesToCopy) - size_t(elementSize);
+ }
+ // We don't know the type of the elems, only that they are refs. No matter,
+ // we can simply make up a type.
+ RefType aRefType = RefType::eq();
+ // Do the iteration
+ for (size_t i = 0; i < size_t(numElements); i++) {
+ // Copy `elementSize` bytes from `nextSrc` to `nextDst`.
+ RootedVal value(cx, aRefType);
+ value.get().readFromHeapLocation(nextSrc);
+ value.get().writeToHeapLocation(nextDst);
+ nextSrc += step;
+ nextDst += step;
+ }
+
+ return 0;
+}
+
+/* static */ void* Instance::exceptionNew(Instance* instance, JSObject* tag) {
+ MOZ_ASSERT(SASigExceptionNew.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = instance->cx();
+ Rooted<WasmTagObject*> tagObj(cx, &tag->as<WasmTagObject>());
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmException));
+ RootedObject stack(cx, nullptr);
+ return AnyRef::fromJSObject(
+ WasmExceptionObject::create(cx, tagObj, stack, proto))
+ .forCompiledCode();
+}
+
+/* static */ int32_t Instance::throwException(Instance* instance,
+ JSObject* exn) {
+ MOZ_ASSERT(SASigThrowException.failureMode == FailureMode::FailOnNegI32);
+
+ JSContext* cx = instance->cx();
+ RootedValue exnVal(cx, UnboxAnyRef(AnyRef::fromJSObject(exn)));
+ cx->setPendingException(exnVal, nullptr);
+
+ // By always returning -1, we trigger a wasmTrap(Trap::ThrowReported),
+ // and use that to trigger the stack walking for this exception.
+ return -1;
+}
+
+/* static */ int32_t Instance::intrI8VecMul(Instance* instance, uint32_t dest,
+ uint32_t src1, uint32_t src2,
+ uint32_t len, uint8_t* memBase) {
+ MOZ_ASSERT(SASigIntrI8VecMul.failureMode == FailureMode::FailOnNegI32);
+
+ JSContext* cx = instance->cx();
+ const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
+ size_t memLen = rawBuf->byteLength();
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t destLimit = uint64_t(dest) + uint64_t(len);
+ uint64_t src1Limit = uint64_t(src1) + uint64_t(len);
+ uint64_t src2Limit = uint64_t(src2) + uint64_t(len);
+ if (destLimit > memLen || src1Limit > memLen || src2Limit > memLen) {
+ ReportTrapError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ // Basic dot product
+ uint8_t* destPtr = &memBase[dest];
+ uint8_t* src1Ptr = &memBase[src1];
+ uint8_t* src2Ptr = &memBase[src2];
+ while (len > 0) {
+ *destPtr = (*src1Ptr) * (*src2Ptr);
+
+ destPtr++;
+ src1Ptr++;
+ src2Ptr++;
+ len--;
+ }
+
+ return 0;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Instance creation and related.
+
+Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
+ const SharedCode& code, Handle<WasmMemoryObject*> memory,
+ SharedTableVector&& tables, UniqueDebugState maybeDebug)
+ : realm_(cx->realm()),
+ jsJitArgsRectifier_(
+ cx->runtime()->jitRuntime()->getArgumentsRectifier().value),
+ jsJitExceptionHandler_(
+ cx->runtime()->jitRuntime()->getExceptionTail().value),
+ preBarrierCode_(
+ cx->runtime()->jitRuntime()->preBarrier(MIRType::Object).value),
+ storeBuffer_(&cx->runtime()->gc.storeBuffer()),
+ object_(object),
+ code_(std::move(code)),
+ memory_(memory),
+ tables_(std::move(tables)),
+ maybeDebug_(std::move(maybeDebug)),
+ debugFilter_(nullptr),
+ maxInitializedGlobalsIndexPlus1_(0) {}
+
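+// The Instance is allocated together with its trailing instance data.  Extra
+// space is requested so that the start of the Instance can be aligned; the
+// raw allocation is remembered in allocatedBase_ so that destroy() can free
+// it.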
+Instance* Instance::create(JSContext* cx, Handle<WasmInstanceObject*> object,
+ const SharedCode& code, uint32_t instanceDataLength,
+ Handle<WasmMemoryObject*> memory,
+ SharedTableVector&& tables,
+ UniqueDebugState maybeDebug) {
+ void* base = js_calloc(alignof(Instance) + offsetof(Instance, data_) +
+ instanceDataLength);
+ if (!base) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ void* aligned = (void*)AlignBytes(uintptr_t(base), alignof(Instance));
+
+ auto* instance = new (aligned) Instance(
+ cx, object, code, memory, std::move(tables), std::move(maybeDebug));
+ instance->allocatedBase_ = base;
+ return instance;
+}
+
+void Instance::destroy(Instance* instance) {
+ instance->~Instance();
+ js_free(instance->allocatedBase_);
+}
+
+bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ const WasmTagObjectVector& tagObjs,
+ const DataSegmentVector& dataSegments,
+ const ElemSegmentVector& elemSegments) {
+ MOZ_ASSERT(!!maybeDebug_ == metadata().debugEnabled);
+
+#ifdef DEBUG
+ for (auto t : code_->tiers()) {
+ MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
+ }
+#endif
+ MOZ_ASSERT(tables_.length() == metadata().tables.length());
+
+ memoryBase_ =
+ memory_ ? memory_->buffer().dataPointerEither().unwrap() : nullptr;
+ size_t limit = memory_ ? memory_->boundsCheckLimit() : 0;
+#if !defined(JS_64BIT)
+  // We assume that the limit is a 32-bit quantity.
+ MOZ_ASSERT(limit <= UINT32_MAX);
+#endif
+ boundsCheckLimit_ = limit;
+ cx_ = cx;
+ valueBoxClass_ = &WasmValueBox::class_;
+ resetInterrupt(cx);
+ jumpTable_ = code_->tieringJumpTable();
+ debugFilter_ = nullptr;
+ addressOfNeedsIncrementalBarrier_ =
+ cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
+
+ // Initialize function imports in the instance data
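+  // There are three cases: a wasm function exported by another instance is
+  // linked directly to its unchecked entry point; an import for which a
+  // builtin thunk exists calls that thunk; anything else goes through the
+  // generic interpreter exit stub.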
+ Tier callerTier = code_->bestTier();
+ for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) {
+ JSObject* f = funcImports[i];
+ MOZ_ASSERT(f->isCallable());
+ const FuncImport& fi = metadata(callerTier).funcImports[i];
+ const FuncType& funcType = metadata().getFuncImportType(fi);
+ FuncImportInstanceData& import = funcImportInstanceData(fi);
+ import.callable = f;
+ if (f->is<JSFunction>()) {
+ JSFunction* fun = &f->as<JSFunction>();
+ if (!isAsmJS() && IsWasmExportedFunction(fun)) {
+ WasmInstanceObject* calleeInstanceObj =
+ ExportedFunctionToInstanceObject(fun);
+ Instance& calleeInstance = calleeInstanceObj->instance();
+ Tier calleeTier = calleeInstance.code().bestTier();
+ const CodeRange& codeRange =
+ calleeInstanceObj->getExportedFunctionCodeRange(
+ &f->as<JSFunction>(), calleeTier);
+ import.instance = &calleeInstance;
+ import.realm = fun->realm();
+ import.code = calleeInstance.codeBase(calleeTier) +
+ codeRange.funcUncheckedCallEntry();
+ } else if (void* thunk = MaybeGetBuiltinThunk(fun, funcType)) {
+ import.instance = this;
+ import.realm = fun->realm();
+ import.code = thunk;
+ } else {
+ import.instance = this;
+ import.realm = fun->realm();
+ import.code = codeBase(callerTier) + fi.interpExitCodeOffset();
+ }
+ } else {
+ import.instance = this;
+ import.realm = f->nonCCWRealm();
+ import.code = codeBase(callerTier) + fi.interpExitCodeOffset();
+ }
+ }
+
+ // Initialize tables in the instance data
+ for (size_t i = 0; i < tables_.length(); i++) {
+ const TableDesc& td = metadata().tables[i];
+ TableInstanceData& table = tableInstanceData(i);
+ table.length = tables_[i]->length();
+ table.elements = tables_[i]->instanceElements();
+    // Non-imported tables that have an init expression must be initialized
+    // with its evaluated value.
+ if (!td.isImported && td.initExpr) {
+ Rooted<WasmInstanceObject*> instanceObj(cx, object());
+ RootedVal val(cx);
+ if (!td.initExpr->evaluate(cx, instanceObj, &val)) {
+ return false;
+ }
+ RootedAnyRef ref(cx, val.get().ref());
+ tables_[i]->fillUninitialized(0, tables_[i]->length(), ref, cx);
+ }
+ }
+
+#ifdef DEBUG
+ // All (linked) tables with non-nullable types must be initialized.
+ for (size_t i = 0; i < tables_.length(); i++) {
+ const TableDesc& td = metadata().tables[i];
+ if (!td.elemType.isNullable()) {
+ tables_[i]->assertRangeNotNull(0, tables_[i]->length());
+ }
+ }
+#endif // DEBUG
+
+ // Initialize tags in the instance data
+ for (size_t i = 0; i < metadata().tags.length(); i++) {
+ MOZ_ASSERT(tagObjs[i] != nullptr);
+ tagInstanceData(i).object = tagObjs[i];
+ }
+ pendingException_ = nullptr;
+ pendingExceptionTag_ = nullptr;
+
+ // Add debug filtering table.
+ if (metadata().debugEnabled) {
+ size_t numFuncs = metadata().debugNumFuncs();
+ size_t numWords = std::max<size_t>((numFuncs + 31) / 32, 1);
+ debugFilter_ = (uint32_t*)js_calloc(numWords, sizeof(uint32_t));
+ if (!debugFilter_) {
+ return false;
+ }
+ }
+
+ // Add observer if our memory base may grow
+ if (memory_ && memory_->movingGrowable() &&
+ !memory_->addMovingGrowObserver(cx, object_)) {
+ return false;
+ }
+
+ // Add observers if our tables may grow
+ for (const SharedTable& table : tables_) {
+ if (table->movingGrowable() && !table->addMovingGrowObserver(cx, object_)) {
+ return false;
+ }
+ }
+
+ // Initialize type definitions in the instance data.
+ const SharedTypeContext& types = metadata().types;
+ Zone* zone = realm()->zone();
+ for (uint32_t typeIndex = 0; typeIndex < types->length(); typeIndex++) {
+ const TypeDef& typeDef = types->type(typeIndex);
+ TypeDefInstanceData* typeDefData = typeDefInstanceData(typeIndex);
+
+ // Set default field values.
+ new (typeDefData) TypeDefInstanceData();
+
+ // Store the runtime type for this type index
+ typeDefData->typeDef = &typeDef;
+ typeDefData->superTypeVector = typeDef.superTypeVector();
+
+ if (typeDef.kind() == TypeDefKind::Struct ||
+ typeDef.kind() == TypeDefKind::Array) {
+ // Compute the parameters that allocation will use. First, the class
+ // and alloc kind for the type definition.
+ const JSClass* clasp;
+ gc::AllocKind allocKind;
+
+ if (typeDef.kind() == TypeDefKind::Struct) {
+ clasp = WasmStructObject::classForTypeDef(&typeDef);
+ allocKind = WasmStructObject::allocKindForTypeDef(&typeDef);
+ } else {
+ clasp = &WasmArrayObject::class_;
+ allocKind = WasmArrayObject::allocKind();
+ }
+
+ // Move the alloc kind to background if possible
+ if (CanChangeToBackgroundAllocKind(allocKind, clasp)) {
+ allocKind = ForegroundToBackgroundAllocKind(allocKind);
+ }
+
+ // Find the shape using the class and recursion group
+ typeDefData->shape =
+ WasmGCShape::getShape(cx, clasp, cx->realm(), TaggedProto(),
+ &typeDef.recGroup(), ObjectFlags());
+ if (!typeDefData->shape) {
+ return false;
+ }
+
+ typeDefData->clasp = clasp;
+ typeDefData->allocKind = allocKind;
+
+ // Initialize the allocation site for pre-tenuring.
+ typeDefData->allocSite.initWasm(zone);
+ } else if (typeDef.kind() == TypeDefKind::Func) {
+ // Nothing to do; the default values are OK.
+ } else {
+ MOZ_ASSERT(typeDef.kind() == TypeDefKind::None);
+ MOZ_CRASH();
+ }
+ }
+
+ // Initialize globals in the instance data.
+ //
+ // This must be performed after we have initialized runtime types as a global
+ // initializer may reference them.
+ //
+ // We increment `maxInitializedGlobalsIndexPlus1_` every iteration of the
+ // loop, as we call out to `InitExpr::evaluate` which may call
+ // `constantGlobalGet` which uses this value to assert we're never accessing
+ // uninitialized globals.
+ maxInitializedGlobalsIndexPlus1_ = 0;
+ for (size_t i = 0; i < metadata().globals.length();
+ i++, maxInitializedGlobalsIndexPlus1_ = i) {
+ const GlobalDesc& global = metadata().globals[i];
+
+ // Constants are baked into the code, never stored in the global area.
+ if (global.isConstant()) {
+ continue;
+ }
+
+ uint8_t* globalAddr = data() + global.offset();
+ switch (global.kind()) {
+ case GlobalKind::Import: {
+ size_t imported = global.importIndex();
+ if (global.isIndirect()) {
+ *(void**)globalAddr =
+ (void*)&globalObjs[imported]->val().get().cell();
+ } else {
+ globalImportValues[imported].writeToHeapLocation(globalAddr);
+ }
+ break;
+ }
+ case GlobalKind::Variable: {
+ RootedVal val(cx);
+ const InitExpr& init = global.initExpr();
+ Rooted<WasmInstanceObject*> instanceObj(cx, object());
+ if (!init.evaluate(cx, instanceObj, &val)) {
+ return false;
+ }
+
+ if (global.isIndirect()) {
+ // Initialize the cell
+ wasm::GCPtrVal& cell = globalObjs[i]->val();
+ cell = val.get();
+ // Link to the cell
+ void* address = (void*)&cell.get().cell();
+ *(void**)globalAddr = address;
+ } else {
+ val.get().writeToHeapLocation(globalAddr);
+ }
+ break;
+ }
+ case GlobalKind::Constant: {
+ MOZ_CRASH("skipped at the top");
+ }
+ }
+ }
+
+ // All globals were initialized
+ MOZ_ASSERT(maxInitializedGlobalsIndexPlus1_ == metadata().globals.length());
+
+ // Take references to the passive data segments
+ if (!passiveDataSegments_.resize(dataSegments.length())) {
+ return false;
+ }
+ for (size_t i = 0; i < dataSegments.length(); i++) {
+ if (!dataSegments[i]->active()) {
+ passiveDataSegments_[i] = dataSegments[i];
+ }
+ }
+
+ // Take references to the passive element segments
+ if (!passiveElemSegments_.resize(elemSegments.length())) {
+ return false;
+ }
+ for (size_t i = 0; i < elemSegments.length(); i++) {
+ if (elemSegments[i]->kind == ElemSegment::Kind::Passive) {
+ passiveElemSegments_[i] = elemSegments[i];
+ }
+ }
+
+ return true;
+}
+
+Instance::~Instance() {
+ realm_->wasm.unregisterInstance(*this);
+
+ if (debugFilter_) {
+ js_free(debugFilter_);
+ }
+
+ // Any pending exceptions should have been consumed.
+ MOZ_ASSERT(!pendingException_);
+}
+
+void Instance::setInterrupt() {
+ interrupt_ = true;
+ stackLimit_ = JS::NativeStackLimitMin;
+}
+
+bool Instance::isInterrupted() const {
+ return interrupt_ || stackLimit_ == JS::NativeStackLimitMin;
+}
+
+void Instance::resetInterrupt(JSContext* cx) {
+ interrupt_ = false;
+ stackLimit_ = cx->stackLimitForJitCode(JS::StackForUntrustedScript);
+}
+
+bool Instance::debugFilter(uint32_t funcIndex) const {
+ return (debugFilter_[funcIndex / 32] >> funcIndex % 32) & 1;
+}
+
+void Instance::setDebugFilter(uint32_t funcIndex, bool value) {
+ if (value) {
+ debugFilter_[funcIndex / 32] |= (1 << funcIndex % 32);
+ } else {
+ debugFilter_[funcIndex / 32] &= ~(1 << funcIndex % 32);
+ }
+}
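+
+// Worked example for the two accessors above (purely illustrative): a
+// funcIndex of 70 lives in word 70 / 32 == 2 at bit 70 % 32 == 6, so its
+// filter bit is debugFilter_[2] & (1 << 6).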
+
+size_t Instance::memoryMappedSize() const {
+ return memory_->buffer().wasmMappedSize();
+}
+
+bool Instance::memoryAccessInGuardRegion(const uint8_t* addr,
+ unsigned numBytes) const {
+ MOZ_ASSERT(numBytes > 0);
+
+ if (!metadata().usesMemory()) {
+ return false;
+ }
+
+ uint8_t* base = memoryBase().unwrap(/* comparison */);
+ if (addr < base) {
+ return false;
+ }
+
+ size_t lastByteOffset = addr - base + (numBytes - 1);
+ return lastByteOffset >= memory()->volatileMemoryLength() &&
+ lastByteOffset < memoryMappedSize();
+}
+
+void Instance::tracePrivate(JSTracer* trc) {
+  // This method is only called from WasmInstanceObject, so the only reason
+  // TraceEdge is called here is so that the pointer can be updated during a
+  // moving GC.
+ MOZ_ASSERT_IF(trc->isMarkingTracer(), gc::IsMarked(trc->runtime(), object_));
+ TraceEdge(trc, &object_, "wasm instance object");
+
+ // OK to just do one tier here; though the tiers have different funcImports
+ // tables, they share the instance object.
+ for (const FuncImport& fi : metadata(code().stableTier()).funcImports) {
+ TraceNullableEdge(trc, &funcImportInstanceData(fi).callable, "wasm import");
+ }
+
+ for (const SharedTable& table : tables_) {
+ table->trace(trc);
+ }
+
+ for (const GlobalDesc& global : code().metadata().globals) {
+ // Indirect reference globals get traced by the owning WebAssembly.Global.
+ if (!global.type().isRefRepr() || global.isConstant() ||
+ global.isIndirect()) {
+ continue;
+ }
+ GCPtr<JSObject*>* obj = (GCPtr<JSObject*>*)(data() + global.offset());
+ TraceNullableEdge(trc, obj, "wasm reference-typed global");
+ }
+
+ for (uint32_t tagIndex = 0; tagIndex < code().metadata().tags.length();
+ tagIndex++) {
+ TraceNullableEdge(trc, &tagInstanceData(tagIndex).object, "wasm tag");
+ }
+
+ const SharedTypeContext& types = metadata().types;
+ for (uint32_t typeIndex = 0; typeIndex < types->length(); typeIndex++) {
+ TypeDefInstanceData* typeDefData = typeDefInstanceData(typeIndex);
+ TraceNullableEdge(trc, &typeDefData->shape, "wasm shape");
+ }
+
+ TraceNullableEdge(trc, &memory_, "wasm buffer");
+ TraceNullableEdge(trc, &pendingException_, "wasm pending exception value");
+ TraceNullableEdge(trc, &pendingExceptionTag_, "wasm pending exception tag");
+
+ if (maybeDebug_) {
+ maybeDebug_->trace(trc);
+ }
+}
+
+void js::wasm::TraceInstanceEdge(JSTracer* trc, Instance* instance,
+ const char* name) {
+ if (IsTracerKind(trc, JS::TracerKind::Moving)) {
+ // Compacting GC: The Instance does not move so there is nothing to do here.
+ // Reading the object from the instance below would be a data race during
+ // multi-threaded updates. Compacting GC does not rely on graph traversal
+ // to find all edges that need to be updated.
+ return;
+ }
+
+ // Instance fields are traced by the owning WasmInstanceObject's trace
+ // hook. Tracing this ensures they are traced once.
+ JSObject* object = instance->objectUnbarriered();
+ TraceManuallyBarrieredEdge(trc, &object, name);
+}
+
+uintptr_t Instance::traceFrame(JSTracer* trc, const wasm::WasmFrameIter& wfi,
+ uint8_t* nextPC,
+ uintptr_t highestByteVisitedInPrevFrame) {
+ const StackMap* map = code().lookupStackMap(nextPC);
+ if (!map) {
+ return 0;
+ }
+
+ Frame* frame = wfi.frame();
+
+ // |frame| points somewhere in the middle of the area described by |map|.
+ // We have to calculate |scanStart|, the lowest address that is described by
+ // |map|, by consulting |map->frameOffsetFromTop|.
+
+ const size_t numMappedBytes = map->header.numMappedWords * sizeof(void*);
+ const uintptr_t scanStart = uintptr_t(frame) +
+ (map->header.frameOffsetFromTop * sizeof(void*)) -
+ numMappedBytes;
+ MOZ_ASSERT(0 == scanStart % sizeof(void*));
+
+ // Do what we can to assert that, for consecutive wasm frames, their stack
+ // maps also abut exactly. This is a useful sanity check on the sizing of
+ // stackmaps.
+ //
+ // In debug builds, the stackmap construction machinery goes to considerable
+ // efforts to ensure that the stackmaps for consecutive frames abut exactly.
+ // This is so as to ensure there are no areas of stack inadvertently ignored
+ // by a stackmap, nor covered by two stackmaps. Hence any failure of this
+ // assertion is serious and should be investigated.
+#ifndef JS_CODEGEN_ARM64
+ MOZ_ASSERT_IF(highestByteVisitedInPrevFrame != 0,
+ highestByteVisitedInPrevFrame + 1 == scanStart);
+#endif
+
+ uintptr_t* stackWords = (uintptr_t*)scanStart;
+
+ // If we have some exit stub words, this means the map also covers an area
+  // created by an exit stub, and so the highest word of that should be a
+ // constant created by (code created by) GenerateTrapExit.
+ MOZ_ASSERT_IF(
+ map->header.numExitStubWords > 0,
+ stackWords[map->header.numExitStubWords - 1 -
+ TrapExitDummyValueOffsetFromTop] == TrapExitDummyValue);
+
+ // And actually hand them off to the GC.
+ for (uint32_t i = 0; i < map->header.numMappedWords; i++) {
+ if (map->getBit(i) == 0) {
+ continue;
+ }
+
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
+ // not be a traceable JSObject*.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ // This assertion seems at least moderately effective in detecting
+ // discrepancies or misalignments between the map and reality.
+ MOZ_ASSERT(js::gc::IsCellPointerValidOrNull((const void*)stackWords[i]));
+
+ if (stackWords[i]) {
+ TraceRoot(trc, (JSObject**)&stackWords[i],
+ "Instance::traceWasmFrame: normal word");
+ }
+ }
+
+ // Finally, deal with any GC-managed fields in the DebugFrame, if it is
+ // present and those fields may be live.
+ if (map->header.hasDebugFrameWithLiveRefs) {
+ DebugFrame* debugFrame = DebugFrame::from(frame);
+ char* debugFrameP = (char*)debugFrame;
+
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
+ // not be a traceable JSObject*.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ for (size_t i = 0; i < MaxRegisterResults; i++) {
+ if (debugFrame->hasSpilledRegisterRefResult(i)) {
+ char* resultRefP = debugFrameP + DebugFrame::offsetOfRegisterResult(i);
+ TraceNullableRoot(
+ trc, (JSObject**)resultRefP,
+ "Instance::traceWasmFrame: DebugFrame::resultResults_");
+ }
+ }
+
+ if (debugFrame->hasCachedReturnJSValue()) {
+ char* cachedReturnJSValueP =
+ debugFrameP + DebugFrame::offsetOfCachedReturnJSValue();
+ TraceRoot(trc, (js::Value*)cachedReturnJSValueP,
+ "Instance::traceWasmFrame: DebugFrame::cachedReturnJSValue_");
+ }
+ }
+
+ return scanStart + numMappedBytes - 1;
+}
+
+WasmMemoryObject* Instance::memory() const { return memory_; }
+
+SharedMem<uint8_t*> Instance::memoryBase() const {
+ MOZ_ASSERT(metadata().usesMemory());
+ MOZ_ASSERT(memoryBase_ == memory_->buffer().dataPointerEither());
+ return memory_->buffer().dataPointerEither();
+}
+
+SharedArrayRawBuffer* Instance::sharedMemoryBuffer() const {
+ MOZ_ASSERT(memory_->isShared());
+ return memory_->sharedArrayRawBuffer();
+}
+
+WasmInstanceObject* Instance::objectUnbarriered() const {
+ return object_.unbarrieredGet();
+}
+
+WasmInstanceObject* Instance::object() const { return object_; }
+
+static bool EnsureEntryStubs(const Instance& instance, uint32_t funcIndex,
+ const FuncExport** funcExport,
+ void** interpEntry) {
+ Tier tier = instance.code().bestTier();
+
+ size_t funcExportIndex;
+ *funcExport =
+ &instance.metadata(tier).lookupFuncExport(funcIndex, &funcExportIndex);
+
+ const FuncExport& fe = **funcExport;
+ if (fe.hasEagerStubs()) {
+ *interpEntry = instance.codeBase(tier) + fe.eagerInterpEntryOffset();
+ return true;
+ }
+
+ MOZ_ASSERT(!instance.isAsmJS(), "only wasm can lazily export functions");
+
+ // If the best tier is Ion, life is simple: background compilation has
+ // already completed and has been committed, so there's no risk of race
+ // conditions here.
+ //
+ // If the best tier is Baseline, there could be a background compilation
+ // happening at the same time. The background compilation will lock the
+ // first tier lazy stubs first to stop new baseline stubs from being
+ // generated, then the second tier stubs to generate them.
+ //
+ // - either we take the tier1 lazy stub lock before the background
+ // compilation gets it, then we generate the lazy stub for tier1. When the
+ // background thread gets the tier1 lazy stub lock, it will see it has a
+ // lazy stub and will recompile it for tier2.
+ // - or we don't take the lock here first. Background compilation won't
+ // find a lazy stub for this function, thus won't generate it. So we'll do
+ // it ourselves after taking the tier2 lock.
+ //
+ // Also see doc block for stubs in WasmJS.cpp.
+
+ auto stubs = instance.code(tier).lazyStubs().writeLock();
+ *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
+ if (*interpEntry) {
+ return true;
+ }
+
+ // The best tier might have changed after we've taken the lock.
+ Tier prevTier = tier;
+ tier = instance.code().bestTier();
+ const Metadata& metadata = instance.metadata();
+ const CodeTier& codeTier = instance.code(tier);
+ if (tier == prevTier) {
+ if (!stubs->createOneEntryStub(funcExportIndex, metadata, codeTier)) {
+ return false;
+ }
+
+ *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
+ MOZ_ASSERT(*interpEntry);
+ return true;
+ }
+
+ MOZ_RELEASE_ASSERT(prevTier == Tier::Baseline && tier == Tier::Optimized);
+ auto stubs2 = instance.code(tier).lazyStubs().writeLock();
+
+ // If it didn't have a stub in the first tier, background compilation
+ // shouldn't have made one in the second tier.
+ MOZ_ASSERT(!stubs2->hasEntryStub(fe.funcIndex()));
+
+ if (!stubs2->createOneEntryStub(funcExportIndex, metadata, codeTier)) {
+ return false;
+ }
+
+ *interpEntry = stubs2->lookupInterpEntry(fe.funcIndex());
+ MOZ_ASSERT(*interpEntry);
+ return true;
+}
+
+static bool GetInterpEntryAndEnsureStubs(JSContext* cx, Instance& instance,
+ uint32_t funcIndex, CallArgs args,
+ void** interpEntry,
+ const FuncType** funcType) {
+ const FuncExport* funcExport;
+ if (!EnsureEntryStubs(instance, funcIndex, &funcExport, interpEntry)) {
+ return false;
+ }
+
+ *funcType = &instance.metadata().getFuncExportType(*funcExport);
+
+#ifdef DEBUG
+ // EnsureEntryStubs() has ensured proper jit-entry stubs have been created and
+ // installed in funcIndex's JumpTable entry, so check against the presence of
+ // the provisional lazy stub. See also
+ // WasmInstanceObject::getExportedFunction().
+ if (!funcExport->hasEagerStubs() && (*funcType)->canHaveJitEntry()) {
+ if (!EnsureBuiltinThunksInitialized()) {
+ return false;
+ }
+ JSFunction& callee = args.callee().as<JSFunction>();
+ void* provisionalLazyJitEntryStub = ProvisionalLazyJitEntryStub();
+ MOZ_ASSERT(provisionalLazyJitEntryStub);
+ MOZ_ASSERT(callee.isWasmWithJitEntry());
+ MOZ_ASSERT(*callee.wasmJitEntry() != provisionalLazyJitEntryStub);
+ }
+#endif
+ return true;
+}
+
+bool wasm::ResultsToJSValue(JSContext* cx, ResultType type,
+ void* registerResultLoc,
+ Maybe<char*> stackResultsLoc,
+ MutableHandleValue rval, CoercionLevel level) {
+ if (type.empty()) {
+ // No results: set to undefined, and we're done.
+ rval.setUndefined();
+ return true;
+ }
+
+ // If we added support for multiple register results, we'd need to establish a
+ // convention for how to store them to memory in registerResultLoc. For now
+ // we can punt.
+ static_assert(MaxRegisterResults == 1);
+
+ // Stack results written to stackResultsLoc; register result written
+ // to registerResultLoc.
+
+ // First, convert the register return value, and prepare to iterate in
+ // push order. Note that if the register result is a reference type,
+ // it may be unrooted, so ToJSValue_anyref must not GC in that case.
+ ABIResultIter iter(type);
+ DebugOnly<bool> usedRegisterResult = false;
+ for (; !iter.done(); iter.next()) {
+ if (iter.cur().inRegister()) {
+ MOZ_ASSERT(!usedRegisterResult);
+ if (!ToJSValue<DebugCodegenVal>(cx, registerResultLoc, iter.cur().type(),
+ rval, level)) {
+ return false;
+ }
+ usedRegisterResult = true;
+ }
+ }
+ MOZ_ASSERT(usedRegisterResult);
+
+ MOZ_ASSERT((stackResultsLoc.isSome()) == (iter.count() > 1));
+ if (!stackResultsLoc) {
+ // A single result: we're done.
+ return true;
+ }
+
+ // Otherwise, collect results in an array, in push order.
+ Rooted<ArrayObject*> array(cx, NewDenseEmptyArray(cx));
+ if (!array) {
+ return false;
+ }
+ RootedValue tmp(cx);
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ if (result.onStack()) {
+ char* loc = stackResultsLoc.value() + result.stackOffset();
+ if (!ToJSValue<DebugCodegenVal>(cx, loc, result.type(), &tmp, level)) {
+ return false;
+ }
+ if (!NewbornArrayPush(cx, array, tmp)) {
+ return false;
+ }
+ } else {
+ if (!NewbornArrayPush(cx, array, rval)) {
+ return false;
+ }
+ }
+ }
+ rval.set(ObjectValue(*array));
+ return true;
+}
+
+class MOZ_RAII ReturnToJSResultCollector {
+ class MOZ_RAII StackResultsRooter : public JS::CustomAutoRooter {
+ ReturnToJSResultCollector& collector_;
+
+ public:
+ StackResultsRooter(JSContext* cx, ReturnToJSResultCollector& collector)
+ : JS::CustomAutoRooter(cx), collector_(collector) {}
+
+ void trace(JSTracer* trc) final {
+ for (ABIResultIter iter(collector_.type_); !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (result.onStack() && result.type().isRefRepr()) {
+ char* loc = collector_.stackResultsArea_.get() + result.stackOffset();
+ JSObject** refLoc = reinterpret_cast<JSObject**>(loc);
+ TraceNullableRoot(trc, refLoc, "StackResultsRooter::trace");
+ }
+ }
+ }
+ };
+ friend class StackResultsRooter;
+
+ ResultType type_;
+ UniquePtr<char[], JS::FreePolicy> stackResultsArea_;
+ Maybe<StackResultsRooter> rooter_;
+
+ public:
+ explicit ReturnToJSResultCollector(const ResultType& type) : type_(type){};
+ bool init(JSContext* cx) {
+ bool needRooter = false;
+ ABIResultIter iter(type_);
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (result.onStack() && result.type().isRefRepr()) {
+ needRooter = true;
+ }
+ }
+ uint32_t areaBytes = iter.stackBytesConsumedSoFar();
+ MOZ_ASSERT_IF(needRooter, areaBytes > 0);
+ if (areaBytes > 0) {
+ // It is necessary to zero storage for ref results, and it doesn't
+ // hurt to do so for other POD results.
+ stackResultsArea_ = cx->make_zeroed_pod_array<char>(areaBytes);
+ if (!stackResultsArea_) {
+ return false;
+ }
+ if (needRooter) {
+ rooter_.emplace(cx, *this);
+ }
+ }
+ return true;
+ }
+
+ void* stackResultsArea() {
+ MOZ_ASSERT(stackResultsArea_);
+ return stackResultsArea_.get();
+ }
+
+ bool collect(JSContext* cx, void* registerResultLoc, MutableHandleValue rval,
+ CoercionLevel level) {
+ Maybe<char*> stackResultsLoc =
+ stackResultsArea_ ? Some(stackResultsArea_.get()) : Nothing();
+ return ResultsToJSValue(cx, type_, registerResultLoc, stackResultsLoc, rval,
+ level);
+ }
+};
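+
+// Typical usage (see Instance::callExport below): construct the collector
+// with the export's ResultType, call init() to allocate and root any
+// stack-results area, pass stackResultsArea() as the synthetic stack-results
+// pointer argument, and finally call collect() to convert the wasm results
+// into a JS value.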
+
+bool Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args,
+ CoercionLevel level) {
+ if (memory_) {
+ // If there has been a moving grow, this Instance should have been notified.
+ MOZ_RELEASE_ASSERT(memory_->buffer().dataPointerEither() == memoryBase());
+ }
+
+ void* interpEntry;
+ const FuncType* funcType;
+ if (!GetInterpEntryAndEnsureStubs(cx, *this, funcIndex, args, &interpEntry,
+ &funcType)) {
+ return false;
+ }
+
+ // Lossless coercions can handle unexposable arguments or returns. This is
+ // only available in testing code.
+ if (level != CoercionLevel::Lossless && funcType->hasUnexposableArgOrRet()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+
+ ArgTypeVector argTypes(*funcType);
+ ResultType resultType(ResultType::Vector(funcType->results()));
+ ReturnToJSResultCollector results(resultType);
+ if (!results.init(cx)) {
+ return false;
+ }
+
+ // The calling convention for an external call into wasm is to pass an
+ // array of 16-byte values where each value contains either a coerced int32
+ // (in the low word), or a double value (in the low dword) value, with the
+ // coercions specified by the wasm signature. The external entry point
+ // unpacks this array into the system-ABI-specified registers and stack
+ // memory and then calls into the internal entry point. The return value is
+ // stored in the first element of the array (which, therefore, must have
+ // length >= 1).
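+  //
+  // As a rough illustration (hypothetical signature): for an export of type
+  // (i32, f64) -> i32 with no stack results, exportArgs[0].lo holds the
+  // coerced int32 argument and later receives the int32 result, while
+  // exportArgs[1].lo holds the f64 argument.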
+ Vector<ExportArg, 8> exportArgs(cx);
+ if (!exportArgs.resize(
+ std::max<size_t>(1, argTypes.lengthWithStackResults()))) {
+ return false;
+ }
+
+ ASSERT_ANYREF_IS_JSOBJECT;
+ Rooted<GCVector<JSObject*, 8, SystemAllocPolicy>> refs(cx);
+
+ DebugCodegen(DebugChannel::Function, "wasm-function[%d] arguments [",
+ funcIndex);
+ RootedValue v(cx);
+ for (size_t i = 0; i < argTypes.lengthWithStackResults(); ++i) {
+ void* rawArgLoc = &exportArgs[i];
+ if (argTypes.isSyntheticStackResultPointerArg(i)) {
+ *reinterpret_cast<void**>(rawArgLoc) = results.stackResultsArea();
+ continue;
+ }
+ size_t naturalIdx = argTypes.naturalIndex(i);
+ v = naturalIdx < args.length() ? args[naturalIdx] : UndefinedValue();
+ ValType type = funcType->arg(naturalIdx);
+ if (!ToWebAssemblyValue<DebugCodegenVal>(cx, v, type, rawArgLoc, true,
+ level)) {
+ return false;
+ }
+ if (type.isRefRepr()) {
+ void* ptr = *reinterpret_cast<void**>(rawArgLoc);
+ // Store in rooted array until no more GC is possible.
+ RootedAnyRef ref(cx, AnyRef::fromCompiledCode(ptr));
+ ASSERT_ANYREF_IS_JSOBJECT;
+ if (!refs.emplaceBack(ref.get().asJSObject())) {
+ return false;
+ }
+ DebugCodegen(DebugChannel::Function, "/(#%d)", int(refs.length() - 1));
+ }
+ }
+
+ // Copy over reference values from the rooted array, if any.
+ if (refs.length() > 0) {
+ DebugCodegen(DebugChannel::Function, "; ");
+ size_t nextRef = 0;
+ for (size_t i = 0; i < argTypes.lengthWithStackResults(); ++i) {
+ if (argTypes.isSyntheticStackResultPointerArg(i)) {
+ continue;
+ }
+ size_t naturalIdx = argTypes.naturalIndex(i);
+ ValType type = funcType->arg(naturalIdx);
+ if (type.isRefRepr()) {
+ void** rawArgLoc = (void**)&exportArgs[i];
+ *rawArgLoc = refs[nextRef++];
+ DebugCodegen(DebugChannel::Function, " ref(#%d) := %p ",
+ int(nextRef - 1), *rawArgLoc);
+ }
+ }
+ refs.clear();
+ }
+
+ DebugCodegen(DebugChannel::Function, "]\n");
+
+ // Ensure pending exception is cleared before and after (below) call.
+ MOZ_ASSERT(!pendingException_);
+
+ {
+ JitActivation activation(cx);
+
+ // Call the per-exported-function trampoline created by GenerateEntry.
+ auto funcPtr = JS_DATA_TO_FUNC_PTR(ExportFuncPtr, interpEntry);
+ if (!CALL_GENERATED_2(funcPtr, exportArgs.begin(), this)) {
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(!pendingException_);
+
+ if (isAsmJS() && args.isConstructing()) {
+ // By spec, when a JS function is called as a constructor and this
+    // function returns a primitive type, which is the case for all asm.js
+ // exported functions, the returned value is discarded and an empty
+ // object is returned instead.
+ PlainObject* obj = NewPlainObject(cx);
+ if (!obj) {
+ return false;
+ }
+ args.rval().set(ObjectValue(*obj));
+ return true;
+ }
+
+ // Note that we're not rooting the register result, if any; we depend
+  // on ReturnToJSResultCollector::collect to root the value on our behalf,
+ // before causing any GC.
+ void* registerResultLoc = &exportArgs[0];
+ DebugCodegen(DebugChannel::Function, "wasm-function[%d]; results [",
+ funcIndex);
+ if (!results.collect(cx, registerResultLoc, args.rval(), level)) {
+ return false;
+ }
+ DebugCodegen(DebugChannel::Function, "]\n");
+
+ return true;
+}
+
+static JSObject* GetExceptionTag(JSObject* exn) {
+ return exn->is<WasmExceptionObject>() ? &exn->as<WasmExceptionObject>().tag()
+ : nullptr;
+}
+
+void Instance::setPendingException(HandleAnyRef exn) {
+ pendingException_ = exn.get().asJSObject();
+ pendingExceptionTag_ = GetExceptionTag(exn.get().asJSObject());
+}
+
+void Instance::constantGlobalGet(uint32_t globalIndex,
+ MutableHandleVal result) {
+ MOZ_RELEASE_ASSERT(globalIndex < maxInitializedGlobalsIndexPlus1_);
+ const GlobalDesc& global = metadata().globals[globalIndex];
+
+ // Constant globals are baked into the code and never stored in global data.
+ if (global.isConstant()) {
+ // We can just re-evaluate the global initializer to get the value.
+ result.set(Val(global.constantValue()));
+ return;
+ }
+
+ // Otherwise, we need to load the initialized value from its cell.
+ const void* cell = addressOfGlobalCell(global);
+ result.address()->initFromHeapLocation(global.type(), cell);
+}
+
+bool Instance::constantRefFunc(uint32_t funcIndex,
+ MutableHandleFuncRef result) {
+ void* fnref = Instance::refFunc(this, funcIndex);
+ if (fnref == AnyRef::invalid().forCompiledCode()) {
+ return false; // OOM, which has already been reported.
+ }
+ result.set(FuncRef::fromCompiledCode(fnref));
+ return true;
+}
+
+WasmStructObject* Instance::constantStructNewDefault(JSContext* cx,
+ uint32_t typeIndex) {
+ TypeDefInstanceData* typeDefData = typeDefInstanceData(typeIndex);
+ // We assume that constant structs will have a long lifetime and hence
+ // allocate them directly in the tenured heap.
+ return WasmStructObject::createStruct(cx, typeDefData, gc::Heap::Tenured);
+}
+
+WasmArrayObject* Instance::constantArrayNewDefault(JSContext* cx,
+ uint32_t typeIndex,
+ uint32_t numElements) {
+ TypeDefInstanceData* typeDefData = typeDefInstanceData(typeIndex);
+ // We assume that constant arrays will have a long lifetime and hence
+ // allocate them directly in the tenured heap.
+ return WasmArrayObject::createArray(cx, typeDefData, gc::Heap::Tenured,
+ numElements);
+}
+
+JSAtom* Instance::getFuncDisplayAtom(JSContext* cx, uint32_t funcIndex) const {
+ // The "display name" of a function is primarily shown in Error.stack which
+ // also includes location, so use getFuncNameBeforeLocation.
+ UTF8Bytes name;
+ if (!metadata().getFuncNameBeforeLocation(funcIndex, &name)) {
+ return nullptr;
+ }
+
+ return AtomizeUTF8Chars(cx, name.begin(), name.length());
+}
+
+void Instance::ensureProfilingLabels(bool profilingEnabled) const {
+ return code_->ensureProfilingLabels(profilingEnabled);
+}
+
+void Instance::onMovingGrowMemory() {
+ MOZ_ASSERT(!isAsmJS());
+ MOZ_ASSERT(!memory_->isShared());
+
+ ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
+ memoryBase_ = buffer.dataPointer();
+ size_t limit = memory_->boundsCheckLimit();
+#if !defined(JS_64BIT)
+ // We assume that the limit is a 32-bit quantity
+ MOZ_ASSERT(limit <= UINT32_MAX);
+#endif
+ boundsCheckLimit_ = limit;
+}
+
+void Instance::onMovingGrowTable(const Table* theTable) {
+ MOZ_ASSERT(!isAsmJS());
+
+ // `theTable` has grown and we must update cached data for it. Importantly,
+  // we may have cached those data in more than one location: we'll have
+ // cached them once for each time the table was imported into this instance.
+ //
+ // When an instance is registered as an observer of a table it is only
+ // registered once, regardless of how many times the table was imported.
+ // Thus when a table is grown, onMovingGrowTable() is only invoked once for
+ // the table.
+ //
+ // Ergo we must go through the entire list of tables in the instance here
+ // and check for the table in all the cached-data slots; we can't exit after
+ // the first hit.
+
+ for (uint32_t i = 0; i < tables_.length(); i++) {
+ if (tables_[i] == theTable) {
+ TableInstanceData& table = tableInstanceData(i);
+ table.length = tables_[i]->length();
+ table.elements = tables_[i]->instanceElements();
+ }
+ }
+}
+
+JSString* Instance::createDisplayURL(JSContext* cx) {
+ // In the best case, we simply have a URL, from a streaming compilation of a
+ // fetched Response.
+
+ if (metadata().filenameIsURL) {
+ const char* filename = metadata().filename.get();
+ return NewStringCopyUTF8N(cx, JS::UTF8Chars(filename, strlen(filename)));
+ }
+
+ // Otherwise, build wasm module URL from following parts:
+ // - "wasm:" as protocol;
+ // - URI encoded filename from metadata (if can be encoded), plus ":";
+ // - 64-bit hash of the module bytes (as hex dump).
+
+ JSStringBuilder result(cx);
+ if (!result.append("wasm:")) {
+ return nullptr;
+ }
+
+ if (const char* filename = metadata().filename.get()) {
+    // EncodeURI can fail due to invalid chars or OOM -- fail only
+    // during OOM.
+ JSString* filenamePrefix = EncodeURI(cx, filename, strlen(filename));
+ if (!filenamePrefix) {
+ if (cx->isThrowingOutOfMemory()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!cx->isThrowingOverRecursed());
+ cx->clearPendingException();
+ return nullptr;
+ }
+
+ if (!result.append(filenamePrefix)) {
+ return nullptr;
+ }
+ }
+
+ if (metadata().debugEnabled) {
+ if (!result.append(":")) {
+ return nullptr;
+ }
+
+ const ModuleHash& hash = metadata().debugHash;
+ for (unsigned char byte : hash) {
+ unsigned char digit1 = byte / 16, digit2 = byte % 16;
+ if (!result.append(
+ (char)(digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10))) {
+ return nullptr;
+ }
+ if (!result.append(
+ (char)(digit2 < 10 ? digit2 + '0' : digit2 + 'a' - 10))) {
+ return nullptr;
+ }
+ }
+ }
+
+ return result.finishString();
+}
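+
+// As a rough illustration (hypothetical values): a debug-enabled module whose
+// metadata filename is "add.js" would get a display URL resembling
+// "wasm:add.js:0f1e2d3c...", i.e. "wasm:", the URI-encoded filename, ":", and
+// the hex dump of the debug hash.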
+
+WasmBreakpointSite* Instance::getOrCreateBreakpointSite(JSContext* cx,
+ uint32_t offset) {
+ MOZ_ASSERT(debugEnabled());
+ return debug().getOrCreateBreakpointSite(cx, this, offset);
+}
+
+void Instance::destroyBreakpointSite(JS::GCContext* gcx, uint32_t offset) {
+ MOZ_ASSERT(debugEnabled());
+ return debug().destroyBreakpointSite(gcx, this, offset);
+}
+
+void Instance::disassembleExport(JSContext* cx, uint32_t funcIndex, Tier tier,
+ PrintCallback printString) const {
+ const MetadataTier& metadataTier = metadata(tier);
+ const FuncExport& funcExport = metadataTier.lookupFuncExport(funcIndex);
+ const CodeRange& range = metadataTier.codeRange(funcExport);
+ const CodeTier& codeTier = code(tier);
+ const ModuleSegment& segment = codeTier.segment();
+
+ MOZ_ASSERT(range.begin() < segment.length());
+ MOZ_ASSERT(range.end() < segment.length());
+
+ uint8_t* functionCode = segment.base() + range.begin();
+ jit::Disassemble(functionCode, range.end() - range.begin(), printString);
+}
+
+void Instance::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode,
+ Table::SeenSet* seenTables, size_t* code,
+ size_t* data) const {
+ *data += mallocSizeOf(this);
+ for (const SharedTable& table : tables_) {
+ *data += table->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenTables);
+ }
+
+ if (maybeDebug_) {
+ maybeDebug_->addSizeOfMisc(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+ }
+
+ code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+//
+// Reporting of errors that are traps.
+
+void wasm::ReportTrapError(JSContext* cx, unsigned errorNumber) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
+
+ if (cx->isThrowingOutOfMemory()) {
+ return;
+ }
+
+  // Mark the exception as thrown from a trap to prevent it from being handled
+ // by wasm exception handlers.
+ RootedValue exn(cx);
+ if (!cx->getPendingException(&exn)) {
+ return;
+ }
+
+ MOZ_ASSERT(exn.isObject() && exn.toObject().is<ErrorObject>());
+ exn.toObject().as<ErrorObject>().setFromWasmTrap();
+}
diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
new file mode 100644
index 0000000000..466eac73b1
--- /dev/null
+++ b/js/src/wasm/WasmInstance.h
@@ -0,0 +1,512 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_instance_h
+#define wasm_instance_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Barrier.h"
+#include "gc/Zone.h"
+#include "js/Stack.h" // JS::NativeStackLimit
+#include "js/TypeDecls.h"
+#include "vm/SharedMem.h"
+#include "wasm/WasmExprType.h" // for ResultType
+#include "wasm/WasmLog.h" // for PrintCallback
+#include "wasm/WasmShareable.h" // for SeenSet
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmValue.h"
+
+namespace js {
+
+class SharedArrayRawBuffer;
+class WasmBreakpointSite;
+
+class WasmStructObject;
+class WasmArrayObject;
+
+namespace gc {
+class StoreBuffer;
+} // namespace gc
+
+namespace wasm {
+
+using mozilla::Atomic;
+
+class FuncImport;
+struct FuncImportInstanceData;
+class GlobalDesc;
+struct TableDesc;
+struct TableInstanceData;
+struct TagDesc;
+struct TagInstanceData;
+struct TypeDefInstanceData;
+class WasmFrameIter;
+
+// Instance represents a wasm instance and provides all the support for runtime
+// execution of code in the instance. Instances share various immutable data
+// structures with the Module from which they were instantiated and other
+// instances instantiated from the same Module. However, an Instance has no
+// direct reference to its source Module, which allows a Module to be destroyed
+// while it still has live Instances.
+//
+// The instance's code may be shared among multiple instances.
+//
+// An Instance is also known as a 'TlsData'. They used to be separate objects,
+// but have now been unified. Extant references to 'TlsData' will be cleaned
+// up over time.
+class alignas(16) Instance {
+ // NOTE: The first fields of Instance are reserved for commonly accessed data
+ // from the JIT, such that they have as small an offset as possible. See the
+ // next note for the end of this region.
+
+ // Pointer to the base of the default memory (or null if there is none).
+ uint8_t* memoryBase_;
+
+ // Bounds check limit in bytes (or zero if there is no memory). This is
+ // 64-bits on 64-bit systems so as to allow for heap lengths up to and beyond
+ // 4GB, and 32-bits on 32-bit systems, where heaps are limited to 2GB.
+ //
+ // See "Linear memory addresses and bounds checking" in WasmMemory.cpp.
+ uintptr_t boundsCheckLimit_;
+
+ // Null or a pointer to a per-process builtin thunk that will invoke the Debug
+ // Trap Handler.
+ void* debugTrapHandler_;
+
+ // The containing JS::Realm.
+ JS::Realm* realm_;
+
+ // The containing JSContext.
+ JSContext* cx_;
+
+ // The pending exception that was found during stack unwinding after a throw.
+ //
+  // - Only non-null while unwinding the control stack from a wasm-exit stub
+ // until the nearest enclosing Wasm try-catch or try-delegate block.
+ // - Set by wasm::HandleThrow, unset by Instance::consumePendingException.
+ // - If the unwind target is a `try-delegate`, it is unset by the delegated
+ // try-catch block or function body block.
+ GCPtr<JSObject*> pendingException_;
+ // The tag object of the pending exception.
+ GCPtr<JSObject*> pendingExceptionTag_;
+
+ // Usually equal to cx->stackLimitForJitCode(JS::StackForUntrustedScript),
+  // but can be racily set to trigger an immediate trap as an opportunity to
+ // CheckForInterrupt without an additional branch.
+ Atomic<JS::NativeStackLimit, mozilla::Relaxed> stackLimit_;
+
+ // Set to 1 when wasm should call CheckForInterrupt.
+ Atomic<uint32_t, mozilla::Relaxed> interrupt_;
+
+ // The address of the realm()->zone()->needsIncrementalBarrier(). This is
+  // specific to this instance and not a process-wide field, and so it cannot
+ // be linked into code.
+ const JS::shadow::Zone::BarrierState* addressOfNeedsIncrementalBarrier_;
+
+ public:
+ // NOTE: All fields commonly accessed by the JIT must be above this method,
+ // and this method adapted for the last field present. This method is used
+ // to assert that we can use compact offsets on x86(-64) for these fields.
+ // We cannot have the assertion here, due to C++ 'offsetof' rules.
+ static constexpr size_t offsetOfLastCommonJitField() {
+ return offsetof(Instance, addressOfNeedsIncrementalBarrier_);
+ }
+
+ private:
+ // When compiling with tiering, the jumpTable has one entry for each
+ // baseline-compiled function.
+ void** jumpTable_;
+
+ // General scratch storage for the baseline compiler, which can't always use
+ // the stack for this.
+ uint32_t baselineScratch_[2];
+
+  // The class_ of WasmValueBox; this is a per-process value. We could patch
+ // this into code, but the only use-sites are register restricted and cannot
+ // easily use a symbolic address.
+ const JSClass* valueBoxClass_;
+
+ // Address of the JitRuntime's arguments rectifier trampoline
+ void* jsJitArgsRectifier_;
+
+ // Address of the JitRuntime's exception handler trampoline
+ void* jsJitExceptionHandler_;
+
+ // Address of the JitRuntime's object prebarrier trampoline
+ void* preBarrierCode_;
+
+ // Address of the store buffer for this instance
+ gc::StoreBuffer* storeBuffer_;
+
+ // Weak pointer to WasmInstanceObject that owns this instance
+ WeakHeapPtr<WasmInstanceObject*> object_;
+
+ // The wasm::Code for this instance
+ const SharedCode code_;
+
+ // The memory for this instance, if any
+ const GCPtr<WasmMemoryObject*> memory_;
+
+ // The tables for this instance, if any
+ const SharedTableVector tables_;
+
+ // Passive data segments for use with bulk memory instructions
+ DataSegmentVector passiveDataSegments_;
+
+ // Passive elem segments for use with bulk memory instructions
+ ElemSegmentVector passiveElemSegments_;
+
+ // The wasm::DebugState for this instance, if any
+ const UniqueDebugState maybeDebug_;
+
+ // If debugging, this is a per-funcIndex bit table denoting whether debugging
+ // is currently enabled for the function within the instance. The flag is set
+ // if any breakpoint or function entry or exit point needs to be visited. It
+ // is OK to conservatively set this flag, but there is very significant
+ // overhead to taking a breakpoint trap, so managing it precisely is
+ // worthwhile.
+ uint32_t* debugFilter_;
+
+ // The exclusive maximum index of a global that has been initialized so far.
+ uint32_t maxInitializedGlobalsIndexPlus1_;
+
+ // Pointer that should be freed (due to padding before the Instance).
+ void* allocatedBase_;
+
+ // The data must be the last field. Globals for the module start here
+ // and are inline in this structure. 16-byte alignment is required for SIMD
+ // data.
+ MOZ_ALIGNED_DECL(16, char data_);
+
+ // Internal helpers:
+ TypeDefInstanceData* typeDefInstanceData(uint32_t typeIndex) const;
+ const void* addressOfGlobalCell(const GlobalDesc& globalDesc) const;
+ FuncImportInstanceData& funcImportInstanceData(const FuncImport& fi);
+ TableInstanceData& tableInstanceData(uint32_t tableIndex) const;
+ TagInstanceData& tagInstanceData(uint32_t tagIndex) const;
+
+ // Only WasmInstanceObject can call the private trace function.
+ friend class js::WasmInstanceObject;
+ void tracePrivate(JSTracer* trc);
+
+ bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc,
+ uint64_t* argv);
+
+ Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
+ const SharedCode& code, Handle<WasmMemoryObject*> memory,
+ SharedTableVector&& tables, UniqueDebugState maybeDebug);
+ ~Instance();
+
+ public:
+ static Instance* create(JSContext* cx, Handle<WasmInstanceObject*> object,
+ const SharedCode& code, uint32_t instanceDataLength,
+ Handle<WasmMemoryObject*> memory,
+ SharedTableVector&& tables,
+ UniqueDebugState maybeDebug);
+ static void destroy(Instance* instance);
+
+ bool init(JSContext* cx, const JSObjectVector& funcImports,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ const WasmTagObjectVector& tagObjs,
+ const DataSegmentVector& dataSegments,
+ const ElemSegmentVector& elemSegments);
+
+ // Trace any GC roots on the stack, for the frame associated with |wfi|,
+ // whose next instruction to execute is |nextPC|.
+ //
+ // For consistency checking of StackMap sizes in debug builds, this also
+ // takes |highestByteVisitedInPrevFrame|, which is the address of the
+ // highest byte scanned in the frame below this one on the stack, and in
+ // turn it returns the address of the highest byte scanned in this frame.
+ uintptr_t traceFrame(JSTracer* trc, const wasm::WasmFrameIter& wfi,
+ uint8_t* nextPC,
+ uintptr_t highestByteVisitedInPrevFrame);
+
+ static constexpr size_t offsetOfMemoryBase() {
+ return offsetof(Instance, memoryBase_);
+ }
+ static constexpr size_t offsetOfBoundsCheckLimit() {
+ return offsetof(Instance, boundsCheckLimit_);
+ }
+ static constexpr size_t offsetOfDebugTrapHandler() {
+ return offsetof(Instance, debugTrapHandler_);
+ }
+
+ static constexpr size_t offsetOfRealm() { return offsetof(Instance, realm_); }
+ static constexpr size_t offsetOfCx() { return offsetof(Instance, cx_); }
+ static constexpr size_t offsetOfValueBoxClass() {
+ return offsetof(Instance, valueBoxClass_);
+ }
+ static constexpr size_t offsetOfPendingException() {
+ return offsetof(Instance, pendingException_);
+ }
+ static constexpr size_t offsetOfPendingExceptionTag() {
+ return offsetof(Instance, pendingExceptionTag_);
+ }
+ static constexpr size_t offsetOfStackLimit() {
+ return offsetof(Instance, stackLimit_);
+ }
+ static constexpr size_t offsetOfInterrupt() {
+ return offsetof(Instance, interrupt_);
+ }
+ static constexpr size_t offsetOfAddressOfNeedsIncrementalBarrier() {
+ return offsetof(Instance, addressOfNeedsIncrementalBarrier_);
+ }
+ static constexpr size_t offsetOfJumpTable() {
+ return offsetof(Instance, jumpTable_);
+ }
+ static constexpr size_t offsetOfBaselineScratch() {
+ return offsetof(Instance, baselineScratch_);
+ }
+ static constexpr size_t sizeOfBaselineScratch() {
+ return sizeof(baselineScratch_);
+ }
+ static constexpr size_t offsetOfJSJitArgsRectifier() {
+ return offsetof(Instance, jsJitArgsRectifier_);
+ }
+ static constexpr size_t offsetOfJSJitExceptionHandler() {
+ return offsetof(Instance, jsJitExceptionHandler_);
+ }
+ static constexpr size_t offsetOfPreBarrierCode() {
+ return offsetof(Instance, preBarrierCode_);
+ }
+ static constexpr size_t offsetOfDebugFilter() {
+ return offsetof(Instance, debugFilter_);
+ }
+ static constexpr size_t offsetOfData() { return offsetof(Instance, data_); }
+ static constexpr size_t offsetInData(size_t offset) {
+ return offsetOfData() + offset;
+ }
+
+ JSContext* cx() const { return cx_; }
+ void* debugTrapHandler() const { return debugTrapHandler_; }
+ void setDebugTrapHandler(void* newHandler) { debugTrapHandler_ = newHandler; }
+ JS::Realm* realm() const { return realm_; }
+ bool debugEnabled() const { return !!maybeDebug_; }
+ DebugState& debug() { return *maybeDebug_; }
+ uint8_t* data() const { return (uint8_t*)&data_; }
+ const SharedTableVector& tables() const { return tables_; }
+ SharedMem<uint8_t*> memoryBase() const;
+ WasmMemoryObject* memory() const;
+ size_t memoryMappedSize() const;
+ SharedArrayRawBuffer* sharedMemoryBuffer() const; // never null
+ bool memoryAccessInGuardRegion(const uint8_t* addr, unsigned numBytes) const;
+
+ // Methods to set, test and clear the interrupt fields. Both interrupt
+ // fields are Relaxed and so no consistency/ordering can be assumed.
+
+ void setInterrupt();
+ bool isInterrupted() const;
+ void resetInterrupt(JSContext* cx);
+
+ bool debugFilter(uint32_t funcIndex) const;
+ void setDebugFilter(uint32_t funcIndex, bool value);
+
+ const Code& code() const { return *code_; }
+ inline const CodeTier& code(Tier t) const;
+ inline uint8_t* codeBase(Tier t) const;
+ inline const MetadataTier& metadata(Tier t) const;
+ inline const Metadata& metadata() const;
+ inline bool isAsmJS() const;
+
+ // This method returns a pointer to the GC object that owns this Instance.
+ // Instances may be reached via weak edges (e.g., Realm::instances_)
+  // so this performs a read-barrier on the returned object unless the barrier
+ // is explicitly waived.
+
+ WasmInstanceObject* object() const;
+ WasmInstanceObject* objectUnbarriered() const;
+
+ // Execute the given export given the JS call arguments, storing the return
+ // value in args.rval.
+
+ [[nodiscard]] bool callExport(JSContext* cx, uint32_t funcIndex,
+ CallArgs args,
+ CoercionLevel level = CoercionLevel::Spec);
+
+ // Exception handling support
+
+ void setPendingException(HandleAnyRef exn);
+
+ // Constant expression support
+
+ void constantGlobalGet(uint32_t globalIndex, MutableHandleVal result);
+ [[nodiscard]] bool constantRefFunc(uint32_t funcIndex,
+ MutableHandleFuncRef result);
+ WasmStructObject* constantStructNewDefault(JSContext* cx, uint32_t typeIndex);
+ WasmArrayObject* constantArrayNewDefault(JSContext* cx, uint32_t typeIndex,
+ uint32_t numElements);
+
+ // Return the name associated with a given function index, or generate one
+ // if none was given by the module.
+
+ JSAtom* getFuncDisplayAtom(JSContext* cx, uint32_t funcIndex) const;
+ void ensureProfilingLabels(bool profilingEnabled) const;
+
+ // Called by Wasm(Memory|Table)Object when a moving resize occurs:
+
+ void onMovingGrowMemory();
+ void onMovingGrowTable(const Table* theTable);
+
+ // Called to apply a single ElemSegment at a given offset, assuming
+ // that all bounds validation has already been performed.
+
+ [[nodiscard]] bool initElems(uint32_t tableIndex, const ElemSegment& seg,
+ uint32_t dstOffset, uint32_t srcOffset,
+ uint32_t len);
+
+ // Debugger support:
+
+ JSString* createDisplayURL(JSContext* cx);
+ WasmBreakpointSite* getOrCreateBreakpointSite(JSContext* cx, uint32_t offset);
+ void destroyBreakpointSite(JS::GCContext* gcx, uint32_t offset);
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, SeenSet<Metadata>* seenMetadata,
+ SeenSet<Code>* seenCode, SeenSet<Table>* seenTables,
+ size_t* code, size_t* data) const;
+
+ // Wasm disassembly support
+
+ void disassembleExport(JSContext* cx, uint32_t funcIndex, Tier tier,
+ PrintCallback printString) const;
+
+ public:
+ // Functions to be called directly from wasm code.
+ static int32_t callImport_general(Instance*, int32_t, int32_t, uint64_t*);
+ static uint32_t memoryGrow_m32(Instance* instance, uint32_t delta);
+ static uint64_t memoryGrow_m64(Instance* instance, uint64_t delta);
+ static uint32_t memorySize_m32(Instance* instance);
+ static uint64_t memorySize_m64(Instance* instance);
+ static int32_t memCopy_m32(Instance* instance, uint32_t dstByteOffset,
+ uint32_t srcByteOffset, uint32_t len,
+ uint8_t* memBase);
+ static int32_t memCopyShared_m32(Instance* instance, uint32_t dstByteOffset,
+ uint32_t srcByteOffset, uint32_t len,
+ uint8_t* memBase);
+ static int32_t memCopy_m64(Instance* instance, uint64_t dstByteOffset,
+ uint64_t srcByteOffset, uint64_t len,
+ uint8_t* memBase);
+ static int32_t memCopyShared_m64(Instance* instance, uint64_t dstByteOffset,
+ uint64_t srcByteOffset, uint64_t len,
+ uint8_t* memBase);
+ static int32_t memFill_m32(Instance* instance, uint32_t byteOffset,
+ uint32_t value, uint32_t len, uint8_t* memBase);
+ static int32_t memFillShared_m32(Instance* instance, uint32_t byteOffset,
+ uint32_t value, uint32_t len,
+ uint8_t* memBase);
+ static int32_t memFill_m64(Instance* instance, uint64_t byteOffset,
+ uint32_t value, uint64_t len, uint8_t* memBase);
+ static int32_t memFillShared_m64(Instance* instance, uint64_t byteOffset,
+ uint32_t value, uint64_t len,
+ uint8_t* memBase);
+ static int32_t memInit_m32(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t segIndex);
+ static int32_t memInit_m64(Instance* instance, uint64_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t segIndex);
+ static int32_t dataDrop(Instance* instance, uint32_t segIndex);
+ static int32_t tableCopy(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t dstTableIndex, uint32_t srcTableIndex);
+ static int32_t tableFill(Instance* instance, uint32_t start, void* value,
+ uint32_t len, uint32_t tableIndex);
+ static int32_t memDiscard_m32(Instance* instance, uint32_t byteOffset,
+ uint32_t byteLen, uint8_t* memBase);
+ static int32_t memDiscardShared_m32(Instance* instance, uint32_t byteOffset,
+ uint32_t byteLen, uint8_t* memBase);
+ static int32_t memDiscard_m64(Instance* instance, uint64_t byteOffset,
+ uint64_t byteLen, uint8_t* memBase);
+ static int32_t memDiscardShared_m64(Instance* instance, uint64_t byteOffset,
+ uint64_t byteLen, uint8_t* memBase);
+ static void* tableGet(Instance* instance, uint32_t index,
+ uint32_t tableIndex);
+ static uint32_t tableGrow(Instance* instance, void* initValue, uint32_t delta,
+ uint32_t tableIndex);
+ static int32_t tableSet(Instance* instance, uint32_t index, void* value,
+ uint32_t tableIndex);
+ static uint32_t tableSize(Instance* instance, uint32_t tableIndex);
+ static int32_t tableInit(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len, uint32_t segIndex,
+ uint32_t tableIndex);
+ static int32_t elemDrop(Instance* instance, uint32_t segIndex);
+ static int32_t wait_i32_m32(Instance* instance, uint32_t byteOffset,
+ int32_t value, int64_t timeout);
+ static int32_t wait_i32_m64(Instance* instance, uint64_t byteOffset,
+ int32_t value, int64_t timeout);
+ static int32_t wait_i64_m32(Instance* instance, uint32_t byteOffset,
+ int64_t value, int64_t timeout);
+ static int32_t wait_i64_m64(Instance* instance, uint64_t byteOffset,
+ int64_t value, int64_t timeout);
+ static int32_t wake_m32(Instance* instance, uint32_t byteOffset,
+ int32_t count);
+ static int32_t wake_m64(Instance* instance, uint64_t byteOffset,
+ int32_t count);
+ static void* refFunc(Instance* instance, uint32_t funcIndex);
+ static void postBarrier(Instance* instance, gc::Cell** location);
+ static void postBarrierPrecise(Instance* instance, JSObject** location,
+ JSObject* prev);
+ static void postBarrierPreciseWithOffset(Instance* instance, JSObject** base,
+ uint32_t offset, JSObject* prev);
+ static void* exceptionNew(Instance* instance, JSObject* tag);
+ static int32_t throwException(Instance* instance, JSObject* exn);
+ static void* structNew(Instance* instance, TypeDefInstanceData* typeDefData);
+ static void* structNewUninit(Instance* instance,
+ TypeDefInstanceData* typeDefData);
+ static void* arrayNew(Instance* instance, uint32_t numElements,
+ TypeDefInstanceData* typeDefData);
+ static void* arrayNewUninit(Instance* instance, uint32_t numElements,
+ TypeDefInstanceData* typeDefData);
+ static void* arrayNewData(Instance* instance, uint32_t segByteOffset,
+ uint32_t numElements,
+ TypeDefInstanceData* typeDefData,
+ uint32_t segIndex);
+ static void* arrayNewElem(Instance* instance, uint32_t segElemIndex,
+ uint32_t numElements,
+ TypeDefInstanceData* typeDefData,
+ uint32_t segIndex);
+ static int32_t arrayCopy(Instance* instance, void* dstArray,
+ uint32_t dstIndex, void* srcArray, uint32_t srcIndex,
+ uint32_t numElements, uint32_t elementSize);
+ static int32_t refTest(Instance* instance, void* refPtr,
+ const wasm::TypeDef* typeDef);
+ static int32_t intrI8VecMul(Instance* instance, uint32_t dest, uint32_t src1,
+ uint32_t src2, uint32_t len, uint8_t* memBase);
+};
+
+bool ResultsToJSValue(JSContext* cx, ResultType type, void* registerResultLoc,
+ Maybe<char*> stackResultsLoc, MutableHandleValue rval,
+ CoercionLevel level = CoercionLevel::Spec);
+
+// Report an error to `cx` and mark it as a 'trap' so that it cannot be caught
+// by wasm exception handlers.
+void ReportTrapError(JSContext* cx, unsigned errorNumber);
+
+// Instance is not a GC thing itself but contains GC thing pointers. Ensure they
+// are traced appropriately.
+void TraceInstanceEdge(JSTracer* trc, Instance* instance, const char* name);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_instance_h
diff --git a/js/src/wasm/WasmInstanceData.h b/js/src/wasm/WasmInstanceData.h
new file mode 100644
index 0000000000..b1be3f02a2
--- /dev/null
+++ b/js/src/wasm/WasmInstanceData.h
@@ -0,0 +1,137 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_instance_data_h
+#define wasm_instance_data_h
+
+#include <stdint.h>
+
+#include "NamespaceImports.h"
+
+#include "gc/Allocator.h"
+#include "gc/Pretenuring.h"
+#include "js/Utility.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace wasm {
+
+// ExportArg holds the unboxed operands to the wasm entry trampoline which can
+// be called through an ExportFuncPtr.
+
+struct ExportArg {
+ uint64_t lo;
+ uint64_t hi;
+};
+
+using ExportFuncPtr = int32_t (*)(ExportArg*, Instance*);
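+
+// A minimal sketch of how an ExportFuncPtr is invoked (see
+// Instance::callExport in WasmInstance.cpp), where `instance` is the callee's
+// Instance*:
+//
+//   auto funcPtr = JS_DATA_TO_FUNC_PTR(ExportFuncPtr, interpEntry);
+//   if (!CALL_GENERATED_2(funcPtr, exportArgs.begin(), instance)) {
+//     return false;  // propagate the pending exception or trap
+//   }
+//
+// On return, the first ExportArg doubles as the register-result slot.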
+
+// TypeDefInstanceData describes the runtime information associated with a
+// module's type definition. This is accessed directly from JIT code and the
+// Instance.
+
+struct TypeDefInstanceData {
+ TypeDefInstanceData()
+ : typeDef(nullptr),
+ superTypeVector(nullptr),
+ shape(nullptr),
+ clasp(nullptr),
+ allocKind(gc::AllocKind::LIMIT) {}
+
+ // The canonicalized pointer to this type definition. This is kept alive by
+ // the type context associated with the instance.
+ const wasm::TypeDef* typeDef;
+
+ // The supertype vector for this type definition. This is also kept alive
+ // by the type context associated with the instance.
+ //
+ const wasm::SuperTypeVector* superTypeVector;
+
+ // The remaining fields are only meaningful for, and used by, structs and
+ // arrays.
+ GCPtr<Shape*> shape;
+ const JSClass* clasp;
+ // The allocation site for GC types. This is used for pre-tenuring.
+ gc::AllocSite allocSite;
+ gc::AllocKind allocKind;
+};
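+
+// A condensed sketch of how Instance::init (WasmInstance.cpp) fills this in
+// for struct and array type definitions (with `clasp`, `allocKind` and `zone`
+// computed earlier in init):
+//
+//   typeDefData->shape =
+//       WasmGCShape::getShape(cx, clasp, cx->realm(), TaggedProto(),
+//                             &typeDef.recGroup(), ObjectFlags());
+//   typeDefData->clasp = clasp;
+//   typeDefData->allocKind = allocKind;
+//   typeDefData->allocSite.initWasm(zone);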
+
+// FuncImportInstanceData describes the region of wasm global memory allocated
+// in the instance's thread-local storage for a function import. This is
+// accessed directly from JIT code and mutated by Instance as exits become
+// optimized and deoptimized.
+
+struct FuncImportInstanceData {
+ // The code to call at an import site: a wasm callee, a thunk into C++, or a
+ // thunk into JIT code.
+ void* code;
+
+ // The callee's Instance pointer, which must be loaded to InstanceReg
+ // (along with any pinned registers) before calling 'code'.
+ Instance* instance;
+
+ // The callee function's realm.
+ JS::Realm* realm;
+
+ // A GC pointer which keeps the callee alive and is used to recover import
+ // values for lazy table initialization.
+ GCPtr<JSObject*> callable;
+ static_assert(sizeof(GCPtr<JSObject*>) == sizeof(void*), "for JIT access");
+};
+
+// TableInstanceData describes the region of wasm global memory allocated in the
+// instance's thread-local storage which is accessed directly from JIT code
+// to bounds-check and index the table.
+
+struct TableInstanceData {
+ // Length of the table in number of elements (not bytes).
+ uint32_t length;
+
+ // Pointer to the array of elements (which can have various representations).
+ // For tables of anyref this is null.
+ // For tables of functions, this is a pointer to the array of code pointers.
+ void* elements;
+};
+
+// TagInstanceData describes the instance state associated with a tag.
+
+struct TagInstanceData {
+ GCPtr<WasmTagObject*> object;
+};
+
+// Table element for TableRepr::Func which carries both the code pointer and
+// an instance pointer (and thus anything reachable through the instance).
+
+struct FunctionTableElem {
+ // The code to call when calling this element. The table ABI is the system
+ // ABI with the additional ABI requirements that:
+ // - InstanceReg and any pinned registers have been loaded appropriately
+ // - if this is a heterogeneous table that requires a signature check,
+ // WasmTableCallSigReg holds the signature id.
+ void* code;
+
+  // The pointer to the callee's Instance. This must be loaded into
+ // InstanceReg before calling 'code'.
+ Instance* instance;
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_instance_data_h
diff --git a/js/src/wasm/WasmIntrinsic.cpp b/js/src/wasm/WasmIntrinsic.cpp
new file mode 100644
index 0000000000..cc43ed887c
--- /dev/null
+++ b/js/src/wasm/WasmIntrinsic.cpp
@@ -0,0 +1,239 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmIntrinsic.h"
+
+#include "util/Text.h"
+#include "vm/GlobalObject.h"
+
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmIntrinsicGenerated.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmValidate.h"
+
+using namespace js;
+using namespace js::wasm;
+
+#define INTR_DECL(op, export, sa_name, abitype, entry, idx) \
+ static const ValType Intrinsic##op##_Params[] = \
+ DECLARE_INTRINSIC_SAS_PARAM_VALTYPES_##op; \
+ \
+ const Intrinsic Intrinsic##op = { \
+ export, \
+ mozilla::Span<const ValType>(Intrinsic##op##_Params), \
+ SASig##sa_name, \
+ };
+
+FOR_EACH_INTRINSIC(INTR_DECL)
+#undef INTR_DECL
+
+bool Intrinsic::funcType(FuncType* type) const {
+ ValTypeVector paramVec;
+ if (!paramVec.append(params.data(), params.data() + params.size())) {
+ return false;
+ }
+ *type = FuncType(std::move(paramVec), ValTypeVector());
+ return true;
+}
+
+/* static */
+const Intrinsic& Intrinsic::getFromId(IntrinsicId id) {
+ switch (id) {
+#define OP(op, export, sa_name, abitype, entry, idx) \
+ case IntrinsicId::op: \
+ return Intrinsic##op;
+ FOR_EACH_INTRINSIC(OP)
+#undef OP
+ default:
+ MOZ_CRASH("unexpected intrinsic");
+ }
+}
+
+bool EncodeIntrinsicBody(const Intrinsic& intrinsic, IntrinsicId id,
+ Bytes* body) {
+ Encoder encoder(*body);
+ if (!EncodeLocalEntries(encoder, ValTypeVector())) {
+ return false;
+ }
+ for (uint32_t i = 0; i < intrinsic.params.size(); i++) {
+ if (!encoder.writeOp(Op::LocalGet) || !encoder.writeVarU32(i)) {
+ return false;
+ }
+ }
+ if (!encoder.writeOp(MozOp::Intrinsic)) {
+ return false;
+ }
+ if (!encoder.writeVarU32(uint32_t(id))) {
+ return false;
+ }
+ if (!encoder.writeOp(Op::End)) {
+ return false;
+ }
+ return true;
+}
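+
+// For a hypothetical two-parameter intrinsic, the body encoded above is just:
+//
+//   local.get 0
+//   local.get 1
+//   MozOp::Intrinsic <id>
+//   end
+//
+// i.e. the synthetic function forwards all of its locals to the intrinsic
+// opcode and returns no results.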
+
+bool wasm::CompileIntrinsicModule(JSContext* cx,
+ const mozilla::Span<IntrinsicId> ids,
+ Shareable sharedMemory,
+ MutableHandle<WasmModuleObject*> result) {
+ // Create the options manually, enabling intrinsics
+ FeatureOptions featureOptions;
+ featureOptions.intrinsics = true;
+
+ // Initialize the compiler environment, choosing the best tier possible
+ SharedCompileArgs compileArgs = CompileArgs::buildAndReport(
+ cx, ScriptedCaller(), featureOptions, /* reportOOM */ true);
+ if (!compileArgs) {
+ return false;
+ }
+ CompilerEnvironment compilerEnv(
+ CompileMode::Once, IonAvailable(cx) ? Tier::Optimized : Tier::Baseline,
+ DebugEnabled::False);
+ compilerEnv.computeParameters();
+
+ // Build a module environment
+ ModuleEnvironment moduleEnv(compileArgs->features);
+ if (!moduleEnv.init()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Add (import (memory 0))
+ CacheableName emptyString;
+ CacheableName memoryString;
+ if (!CacheableName::fromUTF8Chars("memory", &memoryString)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ if (!moduleEnv.imports.append(Import(std::move(emptyString),
+ std::move(memoryString),
+ DefinitionKind::Memory))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ moduleEnv.memory = Some(MemoryDesc(Limits(0, Nothing(), sharedMemory)));
+
+ // Add (type (func (params ...))) for each intrinsic. The function types will
+ // be deduplicated by the runtime
+ for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
+ const IntrinsicId& id = ids[funcIndex];
+ const Intrinsic& intrinsic = Intrinsic::getFromId(id);
+
+ FuncType type;
+ if (!intrinsic.funcType(&type) ||
+ !moduleEnv.types->addType(std::move(type))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Add (func (type $i)) declarations. Do this after all types have been added
+ // as the function declaration metadata uses pointers into the type vectors
+ // that must be stable.
+ for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
+ FuncDesc decl(&(*moduleEnv.types)[funcIndex].funcType(), funcIndex);
+ if (!moduleEnv.funcs.append(decl)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ moduleEnv.declareFuncExported(funcIndex, true, false);
+ }
+
+ // Add (export "$name" (func $i)) declarations.
+ for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
+ const Intrinsic& intrinsic = Intrinsic::getFromId(ids[funcIndex]);
+
+ CacheableName exportName;
+ if (!CacheableName::fromUTF8Chars(intrinsic.exportName, &exportName) ||
+ !moduleEnv.exports.append(Export(std::move(exportName), funcIndex,
+ DefinitionKind::Function))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Compile the module functions
+ UniqueChars error;
+ ModuleGenerator mg(*compileArgs, &moduleEnv, &compilerEnv, nullptr, &error,
+ nullptr);
+ if (!mg.init(nullptr)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Prepare and compile function bodies
+ Vector<Bytes, 1, SystemAllocPolicy> bodies;
+ if (!bodies.reserve(ids.size())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
+ IntrinsicId id = ids[funcIndex];
+ const Intrinsic& intrinsic = Intrinsic::getFromId(ids[funcIndex]);
+
+    // Compilation may be done on other threads; ModuleGenerator requires
+ // that function bodies live until after finishFuncDefs().
+ bodies.infallibleAppend(Bytes());
+ Bytes& bytecode = bodies.back();
+
+    // Encode a function body that calls the intrinsic using our builtin
+    // opcode, then launch a compile task.
+ if (!EncodeIntrinsicBody(intrinsic, id, &bytecode) ||
+ !mg.compileFuncDef(funcIndex, 0, bytecode.begin(),
+ bytecode.begin() + bytecode.length())) {
+ // This must be an OOM and will be reported by the caller
+ MOZ_ASSERT(!error);
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ // Finish and block on function compilation
+ if (!mg.finishFuncDefs()) {
+ // This must be an OOM and will be reported by the caller
+ MOZ_ASSERT(!error);
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+  // Create a dummy bytecode vector that will not be used.
+ SharedBytes bytecode = js_new<ShareableBytes>();
+ if (!bytecode) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Finish the module
+ SharedModule module = mg.finishModule(*bytecode, nullptr);
+ if (!module) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Create a WasmModuleObject for the module, and return it
+ RootedObject proto(
+ cx, GlobalObject::getOrCreatePrototype(cx, JSProto_WasmModule));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ result.set(WasmModuleObject::create(cx, *module, proto));
+ return !!result;
+}
diff --git a/js/src/wasm/WasmIntrinsic.h b/js/src/wasm/WasmIntrinsic.h
new file mode 100644
index 0000000000..ff4bc2d002
--- /dev/null
+++ b/js/src/wasm/WasmIntrinsic.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_intrinsic_h
+#define wasm_intrinsic_h
+
+#include "mozilla/Span.h"
+
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCompileArgs.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmTypeDef.h"
+
+namespace js {
+namespace wasm {
+
+// An intrinsic is a natively implemented function that may be compiled into an
+// 'intrinsic module', which may be instantiated with a provided memory,
+// yielding an exported WebAssembly function that wraps the intrinsic.
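+//
+// The set of available intrinsics is described in WasmIntrinsic.yaml; at build
+// time that description is turned into the generated WasmIntrinsicGenerated.h
+// (by GenerateIntrinsics.py, as assumed here), from which WasmIntrinsic.cpp
+// declares one Intrinsic record per operation.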
+struct Intrinsic {
+ // The name of the intrinsic as it is exported
+ const char* exportName;
+ // The params taken by the intrinsic. No results are required for intrinsics
+ // at this time, so we omit them
+ mozilla::Span<const ValType> params;
+ // The signature of the builtin that implements the intrinsic
+ const SymbolicAddressSignature& signature;
+
+ // Allocate a FuncType for this intrinsic, returning false for OOM
+ bool funcType(FuncType* type) const;
+
+ // Get the Intrinsic for an IntrinsicId. IntrinsicId must be validated.
+ static const Intrinsic& getFromId(IntrinsicId id);
+};
+
+// Compile and return the intrinsic module for a given set of operations.
+bool CompileIntrinsicModule(JSContext* cx, const mozilla::Span<IntrinsicId> ids,
+ Shareable sharedMemory,
+ MutableHandle<WasmModuleObject*> result);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_intrinsic_h
diff --git a/js/src/wasm/WasmIntrinsic.yaml b/js/src/wasm/WasmIntrinsic.yaml
new file mode 100644
index 0000000000..7867604915
--- /dev/null
+++ b/js/src/wasm/WasmIntrinsic.yaml
@@ -0,0 +1,201 @@
+# This file contains the information needed to define wasm intrinsic operations.
+
+# i8vecmul(dest: i32, src1: i32, src2: i32, len: i32)
+# Performs pairwise multiplication of two i8 vectors of length 'len' located
+# at 'src1' and 'src2'. The output is written to 'dest'. This is used as a
+# basic self-test for intrinsics.
+- op: I8VecMul
+ symbolic_address:
+ name: IntrI8VecMul
+ type: Args_Int32_GeneralInt32Int32Int32Int32General
+ entry: Instance::intrI8VecMul
+ export: i8vecmul
+ params:
+ - I32
+ - I32
+ - I32
+ - I32
+
+#if defined(ENABLE_WASM_MOZ_INTGEMM)
+
+# Intrinsics for integer matrix multiplication followed by addition of bias.
+# Please refer to @TOPSRCDIR/js/src/intgemm/IntegerGemmIntrinsic.h for more details on these intrinsics.
+
+
+# Prepare B for the Matrix Multiply intrinsic from Input matrix B.
+#
+# Quantization is performed on the input.
+# The final prepared B is in a CPU-dependent format and can be used as an input to the matrix
+# multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_b(const float* inputMatrixB, float scale, float zeroPoint, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
+# int8_prepare_b(inputMatrixB: i32, scale: f32, zeroPoint: f32, rowsB: i32, colsB: i32, outputMatrixB: i32)
+- op: I8PrepareB
+ symbolic_address:
+ name: IntrI8PrepareB
+ type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareB
+ export: int8_prepare_b
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+
+
+# Prepare B for the Matrix Multiply intrinsic from transposed version of Input matrix B.
+#
+# Quantization is performed on the floating point values of the input.
+# The final prepared B is in a CPU-dependent format and can be used as an input to the matrix
+# multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_b_from_transposed(const float* inputMatrixBTransposed, float scale, float zeroPoint, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
+# int8_prepare_b_from_transposed(inputMatrixBTransposed: i32, scale: f32, zeroPoint: f32, rowsB: i32, colsB: i32, outputMatrixB: i32)
+- op: I8PrepareBFromTransposed
+ symbolic_address:
+ name: IntrI8PrepareBFromTransposed
+ type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareBFromTransposed
+ export: int8_prepare_b_from_transposed
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+
+
+# Prepare B for the Matrix Multiply intrinsic from a quantized and transposed version of Input
+# matrix B which is also in a CPU-independent format.
+#
+# The final prepared B is in a CPU-dependent format and can be used as an input to the matrix
+# multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_b_from_quantized_transposed(const int8_t* inputMatrixBQuantizedTransposed, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
+# int8_prepare_b_from_quantized_transposed(inputMatrixBQuantizedTransposed: i32, rowsB: i32, colsB: i32, outputMatrixB: i32)
+- op: I8PrepareBFromQuantizedTransposed
+ symbolic_address:
+ name: IntrI8PrepareBFromQuantizedTransposed
+ type: Args_Int32_GeneralInt32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareBFromQuantizedTransposed
+ export: int8_prepare_b_from_quantized_transposed
+ params:
+ - I32
+ - I32
+ - I32
+ - I32
+
+
+# Prepare A for the Matrix Multiply intrinsic from Input matrix A.
+#
+# It performs quantization on the floating point values of the input.
+# The final prepared A might be architecture-dependent, e.g. on some architectures like x86 it
+# might be unsigned (achieved by adding 127 to the quantized values) while on others like Arm it
+# might be signed.
+# The final prepared A can be used as an input to the matrix multiply intrinsic
+# (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_a(const float* inputMatrixA, float scale, float zeroPoint, uint32_t rowsA, uint32_t colsA, int8_t* outputMatrixA)
+# int8_prepare_a(inputMatrixA: i32, scale: f32, zeroPoint: f32, rowsA: i32, colsA: i32, outputMatrixA: i32)
+- op: I8PrepareA
+ symbolic_address:
+ name: IntrI8PrepareA
+ type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareA
+ export: int8_prepare_a
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+
+
+# Prepares bias for the Matrix Multiply intrinsic.
+#
+# It uses the prepared B (which must be obtained by using any of the `int8_prepare_b*` intrinsics) and
+# a bias input to prepare the final bias.
+#
+# The final bias can be used as an input to the matrix multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_bias(const int8_t* inputMatrixBPrepared, float scaleA, float zeroPointA, float scaleB, float zeroPointB, uint32_t rowsB, uint32_t colsB, const float* inputBias, float* output)
+# int8_prepare_bias(inputMatrixBPrepared: i32, scaleA: f32, zeroPointA: f32, scaleB: f32, zeroPointB: f32, rowsB: i32, colsB: i32, inputBias: i32, output: i32)
+- op: I8PrepareBias
+ symbolic_address:
+ name: IntrI8PrepareBias
+ type: Args_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareBias
+ export: int8_prepare_bias
+ params:
+ - I32
+ - F32
+ - F32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+ - I32
+
+
+# Perform multiplication of 2 matrices followed by adding a bias.
+#
+# i.e. Output = inputMatrixAPrepared * inputMatrixBPrepared + inputBiasPrepared
+#
+# The inputs of this intrinsic must be obtained by using `int8_prepare_a`,
+# one of the `int8_prepare_b*` intrinsics, and `int8_prepare_bias`, respectively.
+#
+# int8_multiply_and_add_bias(const int8_t* inputMatrixAPrepared, float scaleA, float zeroPointA,
+# const int8_t* inputMatrixBPrepared, float scaleB, float zeroPointB,
+# const float* inputBiasPrepared, float unquantMultiplier,
+# uint32_t rowsA, uint32_t width, uint32_t colsB, float* output)
+# int8_multiply_and_add_bias(inputMatrixAPrepared: i32, scaleA: f32, zeroPointA: f32,
+# inputMatrixBPrepared: i32, scaleB: f32, zeroPointB: f32,
+# inputBiasPrepared: i32, unquantMultiplier: f32,
+# rowsA: i32, width: i32, colsB: i32, output: i32)
+- op: I8MultiplyAndAddBias
+ symbolic_address:
+ name: IntrI8MultiplyAndAddBias
+ type: Args_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General
+ entry: intgemm::IntrI8MultiplyAndAddBias
+ export: int8_multiply_and_add_bias
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - F32
+ - F32
+ - I32
+ - F32
+ - I32
+ - I32
+ - I32
+ - I32
+
+
+# Select a subset of columns of prepared B.
+#
+# Indices of the columns to be selected are specified by an array.
+#
+# int8_select_columns_of_b(const int8_t* inputMatrixBPrepared, uint32_t rowsB, uint32_t colsB, const uint32_t* colIndexList, const uint32_t sizeColIndexList, int8_t* output)
+# int8_select_columns_of_b(inputMatrixBPrepared: i32, rowsB: i32, colsB: i32, colIndexList: i32, sizeColIndexList: i32, output: i32)
+- op: I8SelectColumnsOfB
+ symbolic_address:
+ name: IntrI8SelectColumnsOfB
+ type: Args_Int32_GeneralInt32Int32Int32Int32Int32Int32General
+ entry: intgemm::IntrI8SelectColumnsOfB
+ export: int8_select_columns_of_b
+ params:
+ - I32
+ - I32
+ - I32
+ - I32
+ - I32
+ - I32
+
+#endif // ENABLE_WASM_MOZ_INTGEMM
diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
new file mode 100644
index 0000000000..2f5e3f7233
--- /dev/null
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -0,0 +1,8691 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmIonCompile.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "jit/ABIArgGenerator.h"
+#include "jit/CodeGenerator.h"
+#include "jit/CompileInfo.h"
+#include "jit/Ion.h"
+#include "jit/IonOptimizationLevels.h"
+#include "jit/MIR.h"
+#include "jit/ShuffleAnalysis.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmIntrinsic.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+namespace {
+
+using BlockVector = Vector<MBasicBlock*, 8, SystemAllocPolicy>;
+using DefVector = Vector<MDefinition*, 8, SystemAllocPolicy>;
+
+// To compile try-catch blocks, we extend the IonCompilePolicy's ControlItem
+// from being just an MBasicBlock* to a Control structure collecting additional
+// information.
+using ControlInstructionVector =
+ Vector<MControlInstruction*, 8, SystemAllocPolicy>;
+
+struct Control {
+ MBasicBlock* block;
+  // For a try-catch ControlItem, when its block's LabelKind is Try, this
+  // collects branches that are later bound to create the try's landing pad.
+ ControlInstructionVector tryPadPatches;
+
+ Control() : block(nullptr) {}
+
+ explicit Control(MBasicBlock* block) : block(block) {}
+
+ public:
+ void setBlock(MBasicBlock* newBlock) { block = newBlock; }
+};
+
+// [SMDOC] WebAssembly Exception Handling in Ion
+// =======================================================
+//
+// ## Throwing instructions
+//
+// Wasm exceptions can be thrown by either a throw instruction (local throw),
+// or by a wasm call.
+//
+// ## The "catching try control"
+//
+// We know we are in try-code if there is a surrounding ControlItem with
+// LabelKind::Try. The innermost such control is called the
+// "catching try control".
+//
+// ## Throws without a catching try control
+//
+// Such throws are implemented with an instance call that triggers the exception
+// unwinding runtime. The exception unwinding runtime will not return to the
+// function.
+//
+// ## "landing pad" and "pre-pad" blocks
+//
+// When an exception is thrown, the unwinder will search for the nearest
+// enclosing try block and redirect control flow to it. The code that executes
+// before any catch blocks is called the 'landing pad'. The landing pad is
+// responsible for:
+//  1. Consuming the pending exception state from
+//     Instance::pendingException(Tag)
+//  2. Branching to the correct catch block, or else rethrowing
+//
+// There is one landing pad for each try block. The immediate predecessors of
+// the landing pad are called 'pre-pad' blocks. There is one pre-pad block per
+// throwing instruction.
+//
+// ## Creating pre-pad blocks
+//
+// There are two possible sorts of pre-pad blocks, depending on whether we
+// are branching after a local throw instruction, or after a wasm call:
+//
+// - If we encounter a local throw, we create the exception and tag objects,
+// store them to Instance::pendingException(Tag), and then jump to the
+// landing pad.
+//
+// - If we encounter a wasm call, we construct an MWasmCallCatchable, a
+//   control instruction that branches either to a fallthrough block or
+//   to a pre-pad block.
+//
+// The pre-pad block for a wasm call is empty except for a jump to the
+// landing pad. It only exists to avoid critical edges which, when split, would
+// violate the invariants of MWasmCallCatchable. The pending exception state
+// is taken care of by the unwinder.
+//
+// Each pre-pad ends with a pending jump to the landing pad. The pending jumps
+// to the landing pad are tracked in `tryPadPatches`. These are called
+// "pad patches".
+//
+// ## Creating the landing pad
+//
+// When we exit try-code, we check if tryPadPatches has captured any control
+// instructions (pad patches). If not, we don't compile any catches and we mark
+// the rest as dead code.
+//
+// If there are pre-pad blocks, we join them to create a landing pad (or just
+// "pad"). The pad's last two slots are the caught exception, and the
+// exception's tag object.
+//
+// There are three different forms of try-catch/catch_all Wasm instructions,
+// which result in different forms of landing pad.
+//
+// 1. A catchless try, so a Wasm instruction of the form "try ... end".
+// - In this case, we end the pad by rethrowing the caught exception.
+//
+// 2. A single catch_all after a try.
+// - If the first catch after a try is a catch_all, then there won't be
+// any more catches, but we need the exception and its tag object, in
+// case the code in a catch_all contains "rethrow" instructions.
+//    - The Wasm instruction "rethrow" gets the exception and tag object to
+// rethrow from the last two slots of the landing pad which, due to
+// validation, is the l'th surrounding ControlItem.
+//    - We immediately jump (GoTo) to a new block after the pad and pop both the
+// exception and tag object, as we don't need them anymore in this case.
+//
+// 3. Otherwise, there is one or more catch code blocks following.
+// - In this case, we construct the landing pad by creating a sequence
+// of compare and branch blocks that compare the pending exception tag
+// object to the tag object of the current tagged catch block. This is
+// done incrementally as we visit each tagged catch block in the bytecode
+// stream. At every step, we update the ControlItem's block to point to
+// the next block to be created in the landing pad sequence. The final
+// block will either be a rethrow, if there is no catch_all, or else a
+// jump to a catch_all block.
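+//
+// As a purely illustrative sketch, the three forms in (flat) wasm text:
+//
+//   try ... end                                             ;; form 1, catchless
+//   try ... catch_all ... end                               ;; form 2
+//   try ... catch $t1 ... catch $t2 ... catch_all ... end   ;; form 3
+//                                                           ;; (catch_all optional)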
+
+struct IonCompilePolicy {
+ // We store SSA definitions in the value stack.
+ using Value = MDefinition*;
+ using ValueVector = DefVector;
+
+ // We store loop headers and then/else blocks in the control flow stack.
+ // In the case of try-catch control blocks, we collect additional information
+ // regarding the possible paths from throws and calls to a landing pad, as
+ // well as information on the landing pad's handlers (its catches).
+ using ControlItem = Control;
+};
+
+using IonOpIter = OpIter<IonCompilePolicy>;
+
+class FunctionCompiler;
+
+// CallCompileState describes a call that is being compiled.
+
+class CallCompileState {
+ // A generator object that is passed each argument as it is compiled.
+ WasmABIArgGenerator abi_;
+
+ // Accumulates the register arguments while compiling arguments.
+ MWasmCallBase::Args regArgs_;
+
+ // Reserved argument for passing Instance* to builtin instance method calls.
+ ABIArg instanceArg_;
+
+ // The stack area in which the callee will write stack return values, or
+ // nullptr if no stack results.
+ MWasmStackResultArea* stackResultArea_ = nullptr;
+
+ // Only FunctionCompiler should be directly manipulating CallCompileState.
+ friend class FunctionCompiler;
+};
+
+// Encapsulates the compilation of a single function in an asm.js module. The
+// function compiler handles the creation and final backend compilation of the
+// MIR graph.
+class FunctionCompiler {
+ struct ControlFlowPatch {
+ MControlInstruction* ins;
+ uint32_t index;
+ ControlFlowPatch(MControlInstruction* ins, uint32_t index)
+ : ins(ins), index(index) {}
+ };
+
+ using ControlFlowPatchVector = Vector<ControlFlowPatch, 0, SystemAllocPolicy>;
+ using ControlFlowPatchVectorVector =
+ Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>;
+
+ const ModuleEnvironment& moduleEnv_;
+ IonOpIter iter_;
+ const FuncCompileInput& func_;
+ const ValTypeVector& locals_;
+ size_t lastReadCallSite_;
+
+ TempAllocator& alloc_;
+ MIRGraph& graph_;
+ const CompileInfo& info_;
+ MIRGenerator& mirGen_;
+
+ MBasicBlock* curBlock_;
+ uint32_t maxStackArgBytes_;
+
+ uint32_t loopDepth_;
+ uint32_t blockDepth_;
+ ControlFlowPatchVectorVector blockPatches_;
+
+ // Instance pointer argument to the current function.
+ MWasmParameter* instancePointer_;
+ MWasmParameter* stackResultPointer_;
+
+ // Reference to masm.tryNotes_
+ wasm::TryNoteVector& tryNotes_;
+
+ public:
+ FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
+ const FuncCompileInput& func, const ValTypeVector& locals,
+ MIRGenerator& mirGen, TryNoteVector& tryNotes)
+ : moduleEnv_(moduleEnv),
+ iter_(moduleEnv, decoder),
+ func_(func),
+ locals_(locals),
+ lastReadCallSite_(0),
+ alloc_(mirGen.alloc()),
+ graph_(mirGen.graph()),
+ info_(mirGen.outerInfo()),
+ mirGen_(mirGen),
+ curBlock_(nullptr),
+ maxStackArgBytes_(0),
+ loopDepth_(0),
+ blockDepth_(0),
+ instancePointer_(nullptr),
+ stackResultPointer_(nullptr),
+ tryNotes_(tryNotes) {}
+
+ const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }
+
+ IonOpIter& iter() { return iter_; }
+ TempAllocator& alloc() const { return alloc_; }
+ // FIXME(1401675): Replace with BlockType.
+ uint32_t funcIndex() const { return func_.index; }
+ const FuncType& funcType() const {
+ return *moduleEnv_.funcs[func_.index].type;
+ }
+
+ BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
+ BytecodeOffset bytecodeIfNotAsmJS() const {
+ return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
+ }
+
+ [[nodiscard]] bool init() {
+ // Prepare the entry block for MIR generation:
+
+ const ArgTypeVector args(funcType());
+
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ if (!newBlock(/* prev */ nullptr, &curBlock_)) {
+ return false;
+ }
+
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
+ curBlock_->add(ins);
+ if (args.isSyntheticStackResultPointerArg(i.index())) {
+ MOZ_ASSERT(stackResultPointer_ == nullptr);
+ stackResultPointer_ = ins;
+ } else {
+ curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
+ ins);
+ }
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ }
+
+ // Set up a parameter that receives the hidden instance pointer argument.
+ instancePointer_ =
+ MWasmParameter::New(alloc(), ABIArg(InstanceReg), MIRType::Pointer);
+ curBlock_->add(instancePointer_);
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+
+ for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
+ i++) {
+ ValType slotValType = locals_[i];
+#ifndef ENABLE_WASM_SIMD
+ if (slotValType == ValType::V128) {
+ return iter().fail("Ion has no SIMD support yet");
+ }
+#endif
+ MDefinition* zero = constantZeroOfValType(slotValType);
+ curBlock_->initSlot(info().localSlot(i), zero);
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void finish() {
+ mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
+
+ MOZ_ASSERT(loopDepth_ == 0);
+ MOZ_ASSERT(blockDepth_ == 0);
+#ifdef DEBUG
+ for (ControlFlowPatchVector& patches : blockPatches_) {
+ MOZ_ASSERT(patches.empty());
+ }
+#endif
+ MOZ_ASSERT(inDeadCode());
+ MOZ_ASSERT(done(), "all bytes must be consumed");
+ MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
+ }
+
+ /************************* Read-only interface (after local scope setup) */
+
+ MIRGenerator& mirGen() const { return mirGen_; }
+ MIRGraph& mirGraph() const { return graph_; }
+ const CompileInfo& info() const { return info_; }
+
+ MDefinition* getLocalDef(unsigned slot) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ return curBlock_->getSlot(info().localSlot(slot));
+ }
+
+ const ValTypeVector& locals() const { return locals_; }
+
+ /*********************************************************** Constants ***/
+
+ MDefinition* constantF32(float f) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
+ curBlock_->add(cst);
+ return cst;
+ }
+ // Hide all other overloads, to guarantee no implicit argument conversion.
+ template <typename T>
+ MDefinition* constantF32(T) = delete;
+
+ MDefinition* constantF64(double d) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
+ curBlock_->add(cst);
+ return cst;
+ }
+ template <typename T>
+ MDefinition* constantF64(T) = delete;
+
+ MDefinition* constantI32(int32_t i) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MConstant* constant =
+ MConstant::New(alloc(), Int32Value(i), MIRType::Int32);
+ curBlock_->add(constant);
+ return constant;
+ }
+ template <typename T>
+ MDefinition* constantI32(T) = delete;
+
+ MDefinition* constantI64(int64_t i) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MConstant* constant = MConstant::NewInt64(alloc(), i);
+ curBlock_->add(constant);
+ return constant;
+ }
+ template <typename T>
+ MDefinition* constantI64(T) = delete;
+
+ // Produce an MConstant of the machine's target int type (Int32 or Int64).
+ MDefinition* constantTargetWord(intptr_t n) {
+ return targetIs64Bit() ? constantI64(int64_t(n)) : constantI32(int32_t(n));
+ }
+ template <typename T>
+ MDefinition* constantTargetWord(T) = delete;
+
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* constantV128(V128 v) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
+ alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
+ curBlock_->add(constant);
+ return constant;
+ }
+ template <typename T>
+ MDefinition* constantV128(T) = delete;
+#endif
+
+ MDefinition* constantNullRef() {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ // MConstant has a lot of baggage so we don't use that here.
+ MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ // Produce a zero constant for the specified ValType.
+ MDefinition* constantZeroOfValType(ValType valType) {
+ switch (valType.kind()) {
+ case ValType::I32:
+ return constantI32(0);
+ case ValType::I64:
+ return constantI64(int64_t(0));
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+ return constantV128(V128(0));
+#endif
+ case ValType::F32:
+ return constantF32(0.0f);
+ case ValType::F64:
+ return constantF64(0.0);
+ case ValType::Ref:
+ return constantNullRef();
+ default:
+ MOZ_CRASH();
+ }
+ }
+
+ /***************************** Code generation (after local scope setup) */
+
+ void fence() {
+ if (inDeadCode()) {
+ return;
+ }
+ MWasmFence* ins = MWasmFence::New(alloc());
+ curBlock_->add(ins);
+ }
+
+ template <class T>
+ MDefinition* unary(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* unary(MDefinition* op, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), op, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), lhs, rhs);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), lhs, rhs, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ MWasmBinaryBitwise::SubOpcode subOpc) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), lhs, rhs, type, subOpc);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ bool mustPreserveNaN(MIRType type) {
+ return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
+ }
+
+ MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ // wasm can't fold x - 0.0 because of NaN with custom payloads.
+ MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ bool isMax) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ if (mustPreserveNaN(type)) {
+ // Convert signaling NaN to quiet NaNs.
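+      // (IEEE 754 arithmetic on a signaling NaN yields a quiet NaN, and
+      // x - 0.0 leaves every other value unchanged, so the subtractions below
+      // quiet the operands without affecting results.)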
+ MDefinition* zero = constantZeroOfValType(ValType::fromMIRType(type));
+ lhs = sub(lhs, zero, type);
+ rhs = sub(rhs, zero, type);
+ }
+
+ MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ MMul::Mode mode) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ // wasm can't fold x * 1.0 because of NaN with custom payloads.
+ auto* ins =
+ MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ bool unsignd) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ bool trapOnError = !moduleEnv().isAsmJS();
+ if (!unsignd && type == MIRType::Int32) {
+ // Enforce the signedness of the operation by coercing the operands
+ // to signed. Otherwise, operands that "look" unsigned to Ion but
+      // are not unsigned to Baldr (e.g., unsigned right shifts) may lead to
+ // the operation being executed unsigned. Applies to mod() as well.
+ //
+ // Do this for Int32 only since Int64 is not subject to the same
+ // issues.
+ //
+ // Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
+ // but it doesn't matter: they're not codegen'd to calls since inputs
+ // already are int32.
+ auto* lhs2 = createTruncateToInt32(lhs);
+ curBlock_->add(lhs2);
+ lhs = lhs2;
+ auto* rhs2 = createTruncateToInt32(rhs);
+ curBlock_->add(rhs2);
+ rhs = rhs2;
+ }
+
+    // For x86 and ARM we implement i64 div via a C++ builtin.
+    // A call to a C++ builtin requires the instance pointer.
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ if (type == MIRType::Int64) {
+ auto* ins =
+ MWasmBuiltinDivI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
+ trapOnError, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+#endif
+
+ auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
+ bytecodeOffset(), mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MInstruction* createTruncateToInt32(MDefinition* op) {
+ if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
+ return MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_);
+ }
+
+ return MTruncateToInt32::New(alloc(), op);
+ }
+
+ MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ bool unsignd) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ bool trapOnError = !moduleEnv().isAsmJS();
+ if (!unsignd && type == MIRType::Int32) {
+ // See block comment in div().
+ auto* lhs2 = createTruncateToInt32(lhs);
+ curBlock_->add(lhs2);
+ lhs = lhs2;
+ auto* rhs2 = createTruncateToInt32(rhs);
+ curBlock_->add(rhs2);
+ rhs = rhs2;
+ }
+
+    // For x86 and ARM we implement i64 mod via a C++ builtin.
+    // A call to a C++ builtin requires the instance pointer.
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ if (type == MIRType::Int64) {
+ auto* ins =
+ MWasmBuiltinModI64::New(alloc(), lhs, rhs, instancePointer_, unsignd,
+ trapOnError, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+#endif
+
+ // Should be handled separately because we call BuiltinThunk for this case
+    // and so need to add the dependency on instancePointer.
+ if (type == MIRType::Double) {
+ auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, instancePointer_,
+ type, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
+ bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* bitnot(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MBitNot::New(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
+ MDefinition* condExpr) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
+ uint32_t targetSize) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MInstruction* ins;
+ switch (targetSize) {
+ case 4: {
+ MSignExtendInt32::Mode mode;
+ switch (srcSize) {
+ case 1:
+ mode = MSignExtendInt32::Byte;
+ break;
+ case 2:
+ mode = MSignExtendInt32::Half;
+ break;
+ default:
+ MOZ_CRASH("Bad sign extension");
+ }
+ ins = MSignExtendInt32::New(alloc(), op, mode);
+ break;
+ }
+ case 8: {
+ MSignExtendInt64::Mode mode;
+ switch (srcSize) {
+ case 1:
+ mode = MSignExtendInt64::Byte;
+ break;
+ case 2:
+ mode = MSignExtendInt64::Half;
+ break;
+ case 4:
+ mode = MSignExtendInt64::Word;
+ break;
+ default:
+ MOZ_CRASH("Bad sign extension");
+ }
+ ins = MSignExtendInt64::New(alloc(), op, mode);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Bad sign extension");
+ }
+ }
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
+ bool isUnsigned) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+#if defined(JS_CODEGEN_ARM)
+ auto* ins = MBuiltinInt64ToFloatingPoint::New(
+ alloc(), op, instancePointer_, type, bytecodeOffset(), isUnsigned);
+#else
+ auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
+ isUnsigned);
+#endif
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
+ bool left) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MRotate::New(alloc(), input, count, type, left);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* truncate(MDefinition* op, TruncFlags flags) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+#if defined(JS_CODEGEN_ARM)
+ MDefinition* truncateWithInstance(MDefinition* op, TruncFlags flags) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, instancePointer_,
+ flags, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+#endif
+
+ MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
+ MCompare::CompareType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ void assign(unsigned slot, MDefinition* def) {
+ if (inDeadCode()) {
+ return;
+ }
+ curBlock_->setSlot(info().localSlot(slot), def);
+ }
+
+ MDefinition* compareIsNull(MDefinition* value, JSOp compareOp) {
+ MDefinition* nullVal = constantNullRef();
+ if (!nullVal) {
+ return nullptr;
+ }
+ return compare(value, nullVal, compareOp, MCompare::Compare_RefOrNull);
+ }
+
+ [[nodiscard]] bool refAsNonNull(MDefinition* value) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ auto* ins = MWasmTrapIfNull::New(
+ alloc(), value, wasm::Trap::NullPointerDereference, bytecodeOffset());
+
+ curBlock_->add(ins);
+ return true;
+ }
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ [[nodiscard]] bool brOnNull(uint32_t relativeDepth, const DefVector& values,
+ const ResultType& type, MDefinition* condition) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* fallthroughBlock = nullptr;
+ if (!newBlock(curBlock_, &fallthroughBlock)) {
+ return false;
+ }
+
+ MDefinition* check = compareIsNull(condition, JSOp::Eq);
+ if (!check) {
+ return false;
+ }
+ MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
+ if (!test ||
+ !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
+ return false;
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(test);
+ curBlock_ = fallthroughBlock;
+ return true;
+ }
+
+ [[nodiscard]] bool brOnNonNull(uint32_t relativeDepth,
+ const DefVector& values,
+ const ResultType& type,
+ MDefinition* condition) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* fallthroughBlock = nullptr;
+ if (!newBlock(curBlock_, &fallthroughBlock)) {
+ return false;
+ }
+
+ MDefinition* check = compareIsNull(condition, JSOp::Ne);
+ if (!check) {
+ return false;
+ }
+ MTest* test = MTest::New(alloc(), check, nullptr, fallthroughBlock);
+ if (!test ||
+ !addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
+ return false;
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(test);
+ curBlock_ = fallthroughBlock;
+ return true;
+ }
+
+#endif // ENABLE_WASM_FUNCTION_REFERENCES
+
+#ifdef ENABLE_WASM_SIMD
+ // About Wasm SIMD as supported by Ion:
+ //
+ // The expectation is that Ion will only ever support SIMD on x86 and x64,
+ // since ARMv7 will cease to be a tier-1 platform soon, and MIPS64 will never
+ // implement SIMD.
+ //
+ // The division of the operations into MIR nodes reflects that expectation,
+  // and is a good fit for x86/x64. Should the expectation change, we'll
+ // possibly want to re-architect the SIMD support to be a little more general.
+ //
+ // Most SIMD operations map directly to a single MIR node that ultimately ends
+ // up being expanded in the macroassembler.
+ //
+ // Some SIMD operations that do have a complete macroassembler expansion are
+ // open-coded into multiple MIR nodes here; in some cases that's just
+ // convenience, in other cases it may also allow them to benefit from Ion
+ // optimizations. The reason for the expansions will be documented by a
+ // comment.
+
+ // (v128,v128) -> v128 effect-free binary operations
+ MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
+ bool commutative, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
+ rhs->type() == MIRType::Simd128);
+
+ auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128,i32) -> v128 effect-free shift operations
+ MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
+ rhs->type() == MIRType::Int32);
+
+ int32_t maskBits;
+ if (MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
+ MDefinition* mask = constantI32(maskBits);
+ auto* rhs2 = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
+ curBlock_->add(rhs2);
+ rhs = rhs2;
+ }
+
+ auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128,scalar,imm) -> v128
+ MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
+ uint32_t laneIndex, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128);
+
+ auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (scalar) -> v128 effect-free unary operations
+ MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128) -> v128 effect-free unary operations
+ MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(src->type() == MIRType::Simd128);
+ auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128, imm) -> scalar effect-free unary operations
+ MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
+ uint32_t imm = 0) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(src->type() == MIRType::Simd128);
+ auto* ins =
+ MWasmReduceSimd128::New(alloc(), src, op, outType.toMIRType(), imm);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128, v128, v128) -> v128 effect-free operations
+ MDefinition* ternarySimd128(MDefinition* v0, MDefinition* v1, MDefinition* v2,
+ SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(v0->type() == MIRType::Simd128 &&
+ v1->type() == MIRType::Simd128 &&
+ v2->type() == MIRType::Simd128);
+
+ auto* ins = MWasmTernarySimd128::New(alloc(), v0, v1, v2, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128, v128, imm_v128) -> v128 effect-free operations
+ MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(v1->type() == MIRType::Simd128);
+ MOZ_ASSERT(v2->type() == MIRType::Simd128);
+ auto* ins = BuildWasmShuffleSimd128(
+ alloc(), reinterpret_cast<int8_t*>(control.bytes), v1, v2);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // Also see below for SIMD memory references
+
+#endif // ENABLE_WASM_SIMD
+
+ /************************************************ Linear memory accesses */
+
+ // For detailed information about memory accesses, see "Linear memory
+ // addresses and bounds checking" in WasmMemory.cpp.
+
+ private:
+ // If the platform does not have a HeapReg, load the memory base from
+  // the instance.
+ MWasmLoadInstance* maybeLoadMemoryBase() {
+ MWasmLoadInstance* load = nullptr;
+#ifdef JS_CODEGEN_X86
+ AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
+ ? AliasSet::None()
+ : AliasSet::Load(AliasSet::WasmHeapMeta);
+ load = MWasmLoadInstance::New(alloc(), instancePointer_,
+ wasm::Instance::offsetOfMemoryBase(),
+ MIRType::Pointer, aliases);
+ curBlock_->add(load);
+#endif
+ return load;
+ }
+
+ public:
+ // A value holding the memory base, whether that's HeapReg or some other
+ // register.
+ MWasmHeapBase* memoryBase() {
+ MWasmHeapBase* base = nullptr;
+ AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
+ ? AliasSet::None()
+ : AliasSet::Load(AliasSet::WasmHeapMeta);
+ base = MWasmHeapBase::New(alloc(), instancePointer_, aliases);
+ curBlock_->add(base);
+ return base;
+ }
+
+ private:
+ // If the bounds checking strategy requires it, load the bounds check limit
+ // from the instance.
+ MWasmLoadInstance* maybeLoadBoundsCheckLimit(MIRType type) {
+ MOZ_ASSERT(type == MIRType::Int32 || type == MIRType::Int64);
+ if (moduleEnv_.hugeMemoryEnabled()) {
+ return nullptr;
+ }
+ AliasSet aliases = !moduleEnv_.memory->canMovingGrow()
+ ? AliasSet::None()
+ : AliasSet::Load(AliasSet::WasmHeapMeta);
+ auto* load = MWasmLoadInstance::New(
+ alloc(), instancePointer_, wasm::Instance::offsetOfBoundsCheckLimit(),
+ type, aliases);
+ curBlock_->add(load);
+ return load;
+ }
+
+ // Return true if the access requires an alignment check. If so, sets
+ // *mustAdd to true if the offset must be added to the pointer before
+ // checking.
+ bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
+ bool* mustAdd) {
+ MOZ_ASSERT(!*mustAdd);
+
+ // asm.js accesses are always aligned and need no checks.
+ if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
+ return false;
+ }
+
+ // If the EA is known and aligned it will need no checks.
+ if (base->isConstant()) {
+ // We only care about the low bits, so overflow is OK, as is chopping off
+ // the high bits of an i64 pointer.
+ uint32_t ptr = 0;
+ if (isMem64()) {
+ ptr = uint32_t(base->toConstant()->toInt64());
+ } else {
+ ptr = base->toConstant()->toInt32();
+ }
+ if (((ptr + access->offset64()) & (access->byteSize() - 1)) == 0) {
+ return false;
+ }
+ }
+
+ // If the offset is aligned then the EA is just the pointer, for
+ // the purposes of this check.
+ *mustAdd = (access->offset64() & (access->byteSize() - 1)) != 0;
+ return true;
+ }
+
+ // Fold a constant base into the offset and make the base 0, provided the
+ // offset stays below the guard limit. The reason for folding the base into
+ // the offset rather than vice versa is that a small offset can be ignored
+ // by both explicit bounds checking and bounds check elimination.
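+  //
+  // For example (illustrative): a constant base of 48 with an access offset of
+  // 16 becomes a base of 0 with an offset of 64, provided 48 + 16 stays below
+  // the offset guard limit.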
+ void foldConstantPointer(MemoryAccessDesc* access, MDefinition** base) {
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ if ((*base)->isConstant()) {
+ uint64_t basePtr = 0;
+ if (isMem64()) {
+ basePtr = uint64_t((*base)->toConstant()->toInt64());
+ } else {
+ basePtr = uint64_t(int64_t((*base)->toConstant()->toInt32()));
+ }
+
+ uint64_t offset = access->offset64();
+
+ if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
+ offset += uint32_t(basePtr);
+ access->setOffset32(uint32_t(offset));
+ *base = isMem64() ? constantI64(int64_t(0)) : constantI32(0);
+ }
+ }
+ }
+
+ // If the offset must be added because it is large or because the true EA must
+ // be checked, compute the effective address, trapping on overflow.
+ void maybeComputeEffectiveAddress(MemoryAccessDesc* access,
+ MDefinition** base, bool mustAddOffset) {
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ if (access->offset64() >= offsetGuardLimit ||
+ access->offset64() > UINT32_MAX || mustAddOffset ||
+ !JitOptions.wasmFoldOffsets) {
+ *base = computeEffectiveAddress(*base, access);
+ }
+ }
+
+ MWasmLoadInstance* needBoundsCheck() {
+#ifdef JS_64BIT
+ // For 32-bit base pointers:
+ //
+ // If the bounds check uses the full 64 bits of the bounds check limit, then
+ // the base pointer must be zero-extended to 64 bits before checking and
+ // wrapped back to 32-bits after Spectre masking. (And it's important that
+ // the value we end up with has flowed through the Spectre mask.)
+ //
+ // If the memory's max size is known to be smaller than 64K pages exactly,
+ // we can use a 32-bit check and avoid extension and wrapping.
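+    // (With 64 KiB wasm pages, 0x100000000 bytes is exactly 65536 pages, so
+    // "smaller than 64K pages" is the same as "the limit fits in 32 bits".)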
+ static_assert(0x100000000 % PageSize == 0);
+ bool mem32LimitIs64Bits = isMem32() &&
+ !moduleEnv_.memory->boundsCheckLimitIs32Bits() &&
+ MaxMemoryPages(moduleEnv_.memory->indexType()) >=
+ Pages(0x100000000 / PageSize);
+#else
+ // On 32-bit platforms we have no more than 2GB memory and the limit for a
+ // 32-bit base pointer is never a 64-bit value.
+ bool mem32LimitIs64Bits = false;
+#endif
+ return maybeLoadBoundsCheckLimit(
+ mem32LimitIs64Bits || isMem64() ? MIRType::Int64 : MIRType::Int32);
+ }
+
+ void performBoundsCheck(MDefinition** base,
+ MWasmLoadInstance* boundsCheckLimit) {
+ // At the outset, actualBase could be the result of pretty much any integer
+ // operation, or it could be the load of an integer constant. If its type
+ // is i32, we may assume the value has a canonical representation for the
+ // platform, see doc block in MacroAssembler.h.
+ MDefinition* actualBase = *base;
+
+ // Extend an i32 index value to perform a 64-bit bounds check if the memory
+ // can be 4GB or larger.
+ bool extendAndWrapIndex =
+ isMem32() && boundsCheckLimit->type() == MIRType::Int64;
+ if (extendAndWrapIndex) {
+ auto* extended = MWasmExtendU32Index::New(alloc(), actualBase);
+ curBlock_->add(extended);
+ actualBase = extended;
+ }
+
+ auto* ins =
+ MWasmBoundsCheck::New(alloc(), actualBase, boundsCheckLimit,
+ bytecodeOffset(), MWasmBoundsCheck::Memory0);
+ curBlock_->add(ins);
+ actualBase = ins;
+
+ // If we're masking, then we update *base to create a dependency chain
+ // through the masked index. But we will first need to wrap the index
+ // value if it was extended above.
+ if (JitOptions.spectreIndexMasking) {
+ if (extendAndWrapIndex) {
+ auto* wrapped = MWasmWrapU32Index::New(alloc(), actualBase);
+ curBlock_->add(wrapped);
+ actualBase = wrapped;
+ }
+ *base = actualBase;
+ }
+ }
+
+ // Perform all necessary checking before a wasm heap access, based on the
+ // attributes of the access and base pointer.
+ //
+ // For 64-bit indices on platforms that are limited to indices that fit into
+ // 32 bits (all 32-bit platforms and mips64), this returns a bounds-checked
+ // `base` that has type Int32. Lowering code depends on this and will assert
+ // that the base has this type. See the end of this function.
+
+ void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
+ MDefinition** base) {
+ MOZ_ASSERT(!inDeadCode());
+ MOZ_ASSERT(!moduleEnv_.isAsmJS());
+
+ // Attempt to fold an offset into a constant base pointer so as to simplify
+ // the addressing expression. This may update *base.
+ foldConstantPointer(access, base);
+
+ // Determine whether an alignment check is needed and whether the offset
+ // must be checked too.
+ bool mustAddOffsetForAlignmentCheck = false;
+ bool alignmentCheck =
+ needAlignmentCheck(access, *base, &mustAddOffsetForAlignmentCheck);
+
+ // If bounds checking or alignment checking requires it, compute the
+ // effective address: add the offset into the pointer and trap on overflow.
+ // This may update *base.
+ maybeComputeEffectiveAddress(access, base, mustAddOffsetForAlignmentCheck);
+
+ // Emit the alignment check if necessary; it traps if it fails.
+ if (alignmentCheck) {
+ curBlock_->add(MWasmAlignmentCheck::New(
+ alloc(), *base, access->byteSize(), bytecodeOffset()));
+ }
+
+ // Emit the bounds check if necessary; it traps if it fails. This may
+ // update *base.
+ MWasmLoadInstance* boundsCheckLimit = needBoundsCheck();
+ if (boundsCheckLimit) {
+ performBoundsCheck(base, boundsCheckLimit);
+ }
+
+#ifndef JS_64BIT
+ if (isMem64()) {
+ // We must have had an explicit bounds check (or one was elided if it was
+ // proved redundant), and on 32-bit systems the index will for sure fit in
+ // 32 bits: the max memory is 2GB. So chop the index down to 32-bit to
+ // simplify the back-end.
+ MOZ_ASSERT((*base)->type() == MIRType::Int64);
+ MOZ_ASSERT(!moduleEnv_.hugeMemoryEnabled());
+ auto* chopped = MWasmWrapU32Index::New(alloc(), *base);
+ MOZ_ASSERT(chopped->type() == MIRType::Int32);
+ curBlock_->add(chopped);
+ *base = chopped;
+ }
+#endif
+ }
+
+ bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
+ if (result == ValType::I64 && access->byteSize() <= 4) {
+ // These smaller accesses should all be zero-extending.
+ MOZ_ASSERT(!isSignedIntType(access->type()));
+ return true;
+ }
+ return false;
+ }
+
+ public:
+ bool isMem32() { return moduleEnv_.memory->indexType() == IndexType::I32; }
+ bool isMem64() { return moduleEnv_.memory->indexType() == IndexType::I64; }
+
+  // Sometimes we need to determine the memory type before the opcode reader
+  // (which will reject a memory opcode when there is no memory) gets a chance
+  // to do so. This predicate is safe to call even in that case.
+ bool isNoMemOrMem32() {
+ return !moduleEnv_.usesMemory() ||
+ moduleEnv_.memory->indexType() == IndexType::I32;
+ }
+
+ // Add the offset into the pointer to yield the EA; trap on overflow.
+ MDefinition* computeEffectiveAddress(MDefinition* base,
+ MemoryAccessDesc* access) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ uint64_t offset = access->offset64();
+ if (offset == 0) {
+ return base;
+ }
+ auto* ins = MWasmAddOffset::New(alloc(), base, offset, bytecodeOffset());
+ curBlock_->add(ins);
+ access->clearOffset();
+ return ins;
+ }
+
+ MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
+ ValType result) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
+ MInstruction* load = nullptr;
+ if (moduleEnv_.isAsmJS()) {
+ MOZ_ASSERT(access->offset64() == 0);
+ MWasmLoadInstance* boundsCheckLimit =
+ maybeLoadBoundsCheckLimit(MIRType::Int32);
+ load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
+ access->type());
+ } else {
+ checkOffsetAndAlignmentAndBounds(access, &base);
+#ifndef JS_64BIT
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+#endif
+ load = MWasmLoad::New(alloc(), memoryBase, base, *access,
+ result.toMIRType());
+ }
+ if (!load) {
+ return nullptr;
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
+ void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
+ if (inDeadCode()) {
+ return;
+ }
+
+ MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
+ MInstruction* store = nullptr;
+ if (moduleEnv_.isAsmJS()) {
+ MOZ_ASSERT(access->offset64() == 0);
+ MWasmLoadInstance* boundsCheckLimit =
+ maybeLoadBoundsCheckLimit(MIRType::Int32);
+ store = MAsmJSStoreHeap::New(alloc(), memoryBase, base, boundsCheckLimit,
+ access->type(), v);
+ } else {
+ checkOffsetAndAlignmentAndBounds(access, &base);
+#ifndef JS_64BIT
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+#endif
+ store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
+ }
+ if (!store) {
+ return;
+ }
+ curBlock_->add(store);
+ }
+
+ MDefinition* atomicCompareExchangeHeap(MDefinition* base,
+ MemoryAccessDesc* access,
+ ValType result, MDefinition* oldv,
+ MDefinition* newv) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ checkOffsetAndAlignmentAndBounds(access, &base);
+#ifndef JS_64BIT
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+#endif
+
+ if (isSmallerAccessForI64(result, access)) {
+ auto* cvtOldv =
+ MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
+ curBlock_->add(cvtOldv);
+ oldv = cvtOldv;
+
+ auto* cvtNewv =
+ MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
+ curBlock_->add(cvtNewv);
+ newv = cvtNewv;
+ }
+
+ MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
+ MInstruction* cas = MWasmCompareExchangeHeap::New(
+ alloc(), bytecodeOffset(), memoryBase, base, *access, oldv, newv,
+ instancePointer_);
+ if (!cas) {
+ return nullptr;
+ }
+ curBlock_->add(cas);
+
+ if (isSmallerAccessForI64(result, access)) {
+ cas = MExtendInt32ToInt64::New(alloc(), cas, true);
+ curBlock_->add(cas);
+ }
+
+ return cas;
+ }
+
+ MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
+ ValType result, MDefinition* value) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ checkOffsetAndAlignmentAndBounds(access, &base);
+#ifndef JS_64BIT
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+#endif
+
+ if (isSmallerAccessForI64(result, access)) {
+ auto* cvtValue =
+ MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
+ curBlock_->add(cvtValue);
+ value = cvtValue;
+ }
+
+ MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
+ MInstruction* xchg =
+ MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
+ base, *access, value, instancePointer_);
+ if (!xchg) {
+ return nullptr;
+ }
+ curBlock_->add(xchg);
+
+ if (isSmallerAccessForI64(result, access)) {
+ xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
+ curBlock_->add(xchg);
+ }
+
+ return xchg;
+ }
+
+ MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
+ MemoryAccessDesc* access, ValType result,
+ MDefinition* value) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ checkOffsetAndAlignmentAndBounds(access, &base);
+#ifndef JS_64BIT
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+#endif
+
+ if (isSmallerAccessForI64(result, access)) {
+ auto* cvtValue =
+ MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
+ curBlock_->add(cvtValue);
+ value = cvtValue;
+ }
+
+ MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
+ MInstruction* binop =
+ MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
+ base, *access, value, instancePointer_);
+ if (!binop) {
+ return nullptr;
+ }
+ curBlock_->add(binop);
+
+ if (isSmallerAccessForI64(result, access)) {
+ binop = MExtendInt32ToInt64::New(alloc(), binop, true);
+ curBlock_->add(binop);
+ }
+
+ return binop;
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* loadSplatSimd128(Scalar::Type viewType,
+ const LinearMemoryAddress<MDefinition*>& addr,
+ wasm::SimdOp splatOp) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+
+    // Generate better code (on x86) by folding the splat into the load where
+    // possible. If AVX2 is enabled, more broadcast operators are available.
+ if (viewType == Scalar::Float64
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ || (js::jit::CPUInfo::IsAVX2Present() &&
+ (viewType == Scalar::Uint8 || viewType == Scalar::Uint16 ||
+ viewType == Scalar::Float32))
+# endif
+ ) {
+ access.setSplatSimd128Load();
+ return load(addr.base, &access, ValType::V128);
+ }
+
+ ValType resultType = ValType::I32;
+ if (viewType == Scalar::Float32) {
+ resultType = ValType::F32;
+ splatOp = wasm::SimdOp::F32x4Splat;
+ }
+ auto* scalar = load(addr.base, &access, resultType);
+ if (!inDeadCode() && !scalar) {
+ return nullptr;
+ }
+ return scalarToSimd128(scalar, splatOp);
+ }
+
+ MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
+ wasm::SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ // Generate better code (on x86) by loading as a double with an
+ // operation that sign extends directly.
+ MemoryAccessDesc access(Scalar::Float64, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+ access.setWidenSimd128Load(op);
+ return load(addr.base, &access, ValType::V128);
+ }
+
+ MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
+ const LinearMemoryAddress<MDefinition*>& addr) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+ access.setZeroExtendSimd128Load();
+ return load(addr.base, &access, ValType::V128);
+ }
+
+ MDefinition* loadLaneSimd128(uint32_t laneSize,
+ const LinearMemoryAddress<MDefinition*>& addr,
+ uint32_t laneIndex, MDefinition* src) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+ MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
+ MDefinition* base = addr.base;
+ MOZ_ASSERT(!moduleEnv_.isAsmJS());
+ checkOffsetAndAlignmentAndBounds(&access, &base);
+# ifndef JS_64BIT
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+# endif
+ MInstruction* load = MWasmLoadLaneSimd128::New(
+ alloc(), memoryBase, base, access, laneSize, laneIndex, src);
+ if (!load) {
+ return nullptr;
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
+ void storeLaneSimd128(uint32_t laneSize,
+ const LinearMemoryAddress<MDefinition*>& addr,
+ uint32_t laneIndex, MDefinition* src) {
+ if (inDeadCode()) {
+ return;
+ }
+ MemoryAccessDesc access(Scalar::Simd128, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+ MWasmLoadInstance* memoryBase = maybeLoadMemoryBase();
+ MDefinition* base = addr.base;
+ MOZ_ASSERT(!moduleEnv_.isAsmJS());
+ checkOffsetAndAlignmentAndBounds(&access, &base);
+# ifndef JS_64BIT
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+# endif
+ MInstruction* store = MWasmStoreLaneSimd128::New(
+ alloc(), memoryBase, base, access, laneSize, laneIndex, src);
+ if (!store) {
+ return;
+ }
+ curBlock_->add(store);
+ }
+#endif // ENABLE_WASM_SIMD
+
+ /************************************************ Global variable accesses */
+
+ MDefinition* loadGlobalVar(unsigned instanceDataOffset, bool isConst,
+ bool isIndirect, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MInstruction* load;
+ if (isIndirect) {
+ // Pull a pointer to the value out of Instance::globalArea, then
+ // load from that pointer. Note that the pointer is immutable
+ // even though the value it points at may change, hence the use of
+ // |true| for the first node's |isConst| value, irrespective of
+ // the |isConst| formal parameter to this method. The latter
+ // applies to the denoted value as a whole.
+ auto* cellPtr = MWasmLoadInstanceDataField::New(
+ alloc(), MIRType::Pointer, instanceDataOffset,
+ /*isConst=*/true, instancePointer_);
+ curBlock_->add(cellPtr);
+ load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
+ } else {
+ // Pull the value directly out of Instance::globalArea.
+ load = MWasmLoadInstanceDataField::New(alloc(), type, instanceDataOffset,
+ isConst, instancePointer_);
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
+ [[nodiscard]] bool storeGlobalVar(uint32_t lineOrBytecode,
+ uint32_t instanceDataOffset,
+ bool isIndirect, MDefinition* v) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ if (isIndirect) {
+ // Pull a pointer to the value out of Instance::globalArea, then
+ // store through that pointer.
+ auto* valueAddr = MWasmLoadInstanceDataField::New(
+ alloc(), MIRType::Pointer, instanceDataOffset,
+ /*isConst=*/true, instancePointer_);
+ curBlock_->add(valueAddr);
+
+ // Handle a store to a ref-typed field specially
+ if (v->type() == MIRType::RefOrNull) {
+ // Load the previous value for the post-write barrier
+ auto* prevValue =
+ MWasmLoadGlobalCell::New(alloc(), MIRType::RefOrNull, valueAddr);
+ curBlock_->add(prevValue);
+
+ // Store the new value
+ auto* store =
+ MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
+ /*valueOffset=*/0, v, AliasSet::WasmGlobalCell,
+ WasmPreBarrierKind::Normal);
+ curBlock_->add(store);
+
+ // Call the post-write barrier
+ return postBarrierPrecise(lineOrBytecode, valueAddr, prevValue);
+ }
+
+ auto* store = MWasmStoreGlobalCell::New(alloc(), v, valueAddr);
+ curBlock_->add(store);
+ return true;
+ }
+ // Or else store the value directly in Instance::globalArea.
+
+ // Handle a store to a ref-typed field specially
+ if (v->type() == MIRType::RefOrNull) {
+ // Compute the address of the ref-typed global
+ auto* valueAddr = MWasmDerivedPointer::New(
+ alloc(), instancePointer_,
+ wasm::Instance::offsetInData(instanceDataOffset));
+ curBlock_->add(valueAddr);
+
+ // Load the previous value for the post-write barrier
+ auto* prevValue =
+ MWasmLoadGlobalCell::New(alloc(), MIRType::RefOrNull, valueAddr);
+ curBlock_->add(prevValue);
+
+ // Store the new value
+ auto* store =
+ MWasmStoreRef::New(alloc(), instancePointer_, valueAddr,
+ /*valueOffset=*/0, v, AliasSet::WasmInstanceData,
+ WasmPreBarrierKind::Normal);
+ curBlock_->add(store);
+
+ // Call the post-write barrier
+ return postBarrierPrecise(lineOrBytecode, valueAddr, prevValue);
+ }
+
+ auto* store = MWasmStoreInstanceDataField::New(alloc(), instanceDataOffset,
+ v, instancePointer_);
+ curBlock_->add(store);
+ return true;
+ }
+
+ MDefinition* loadTableField(uint32_t tableIndex, unsigned fieldOffset,
+ MIRType type) {
+ uint32_t instanceDataOffset = wasm::Instance::offsetInData(
+ moduleEnv_.offsetOfTableInstanceData(tableIndex) + fieldOffset);
+ auto* load =
+ MWasmLoadInstance::New(alloc(), instancePointer_, instanceDataOffset,
+ type, AliasSet::Load(AliasSet::WasmTableMeta));
+ curBlock_->add(load);
+ return load;
+ }
+
+ MDefinition* loadTableLength(uint32_t tableIndex) {
+ return loadTableField(tableIndex, offsetof(TableInstanceData, length),
+ MIRType::Int32);
+ }
+
+ MDefinition* loadTableElements(uint32_t tableIndex) {
+ return loadTableField(tableIndex, offsetof(TableInstanceData, elements),
+ MIRType::Pointer);
+ }
+
+ MDefinition* tableGetAnyRef(uint32_t tableIndex, MDefinition* index) {
+ // Load the table length and perform a bounds check with spectre index
+ // masking
+ auto* length = loadTableLength(tableIndex);
+ auto* check = MWasmBoundsCheck::New(
+ alloc(), index, length, bytecodeOffset(), MWasmBoundsCheck::Unknown);
+ curBlock_->add(check);
+ if (JitOptions.spectreIndexMasking) {
+ index = check;
+ }
+
+ // Load the table elements and load the element
+ auto* elements = loadTableElements(tableIndex);
+ auto* element = MWasmLoadTableElement::New(alloc(), elements, index);
+ curBlock_->add(element);
+ return element;
+ }
+
+ [[nodiscard]] bool tableSetAnyRef(uint32_t tableIndex, MDefinition* index,
+ MDefinition* value,
+ uint32_t lineOrBytecode) {
+ // Load the table length and perform a bounds check with spectre index
+ // masking
+ auto* length = loadTableLength(tableIndex);
+ auto* check = MWasmBoundsCheck::New(
+ alloc(), index, length, bytecodeOffset(), MWasmBoundsCheck::Unknown);
+ curBlock_->add(check);
+ if (JitOptions.spectreIndexMasking) {
+ index = check;
+ }
+
+ // Load the table elements
+ auto* elements = loadTableElements(tableIndex);
+
+ // Load the previous value
+ auto* prevValue = MWasmLoadTableElement::New(alloc(), elements, index);
+ curBlock_->add(prevValue);
+
+ // Compute the value's location for the post barrier
+ auto* loc =
+ MWasmDerivedIndexPointer::New(alloc(), elements, index, ScalePointer);
+ curBlock_->add(loc);
+
+ // Store the new value
+ auto* store = MWasmStoreRef::New(
+ alloc(), instancePointer_, loc, /*valueOffset=*/0, value,
+ AliasSet::WasmTableElement, WasmPreBarrierKind::Normal);
+ curBlock_->add(store);
+
+ // Perform the post barrier
+ return postBarrierPrecise(lineOrBytecode, loc, prevValue);
+ }
+
+ void addInterruptCheck() {
+ if (inDeadCode()) {
+ return;
+ }
+ curBlock_->add(
+ MWasmInterruptCheck::New(alloc(), instancePointer_, bytecodeOffset()));
+ }
+
+ // Perform a post-write barrier to update the generational store buffer. This
+ // version will remove a previous store buffer entry if it is no longer
+ // needed.
+ [[nodiscard]] bool postBarrierPrecise(uint32_t lineOrBytecode,
+ MDefinition* valueAddr,
+ MDefinition* value) {
+ return emitInstanceCall2(lineOrBytecode, SASigPostBarrierPrecise, valueAddr,
+ value);
+ }
+
+ // Perform a post-write barrier to update the generational store buffer. This
+ // version will remove a previous store buffer entry if it is no longer
+ // needed.
+ [[nodiscard]] bool postBarrierPreciseWithOffset(uint32_t lineOrBytecode,
+ MDefinition* valueBase,
+ uint32_t valueOffset,
+ MDefinition* value) {
+ MDefinition* valueOffsetDef = constantI32(int32_t(valueOffset));
+ if (!valueOffsetDef) {
+ return false;
+ }
+ return emitInstanceCall3(lineOrBytecode, SASigPostBarrierPreciseWithOffset,
+ valueBase, valueOffsetDef, value);
+ }
+
+ // Perform a post-write barrier to update the generational store buffer. This
+  // version is the most efficient and only requires the address at which the
+  // value is stored and the new value. It does not remove a previous store
+  // buffer entry if that entry is no longer needed; use a precise post-write
+  // barrier for that.
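+  // Trade-off sketch: the precise variant above is an instance call that can
+  // also drop a stale store buffer entry; this variant emits a single
+  // MWasmPostWriteBarrier instruction and never removes entries.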
+ [[nodiscard]] bool postBarrier(uint32_t lineOrBytecode, MDefinition* object,
+ MDefinition* valueBase, uint32_t valueOffset,
+ MDefinition* newValue) {
+ auto* barrier = MWasmPostWriteBarrier::New(
+ alloc(), instancePointer_, object, valueBase, valueOffset, newValue);
+ if (!barrier) {
+ return false;
+ }
+ curBlock_->add(barrier);
+ return true;
+ }
+
+ /***************************************************************** Calls */
+
+ // The IonMonkey backend maintains a single stack offset (from the stack
+ // pointer to the base of the frame) by adding the total amount of spill
+ // space required plus the maximum stack required for argument passing.
+ // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
+ // manually accumulate, for the entire function, the maximum required stack
+ // space for argument passing. (This is passed to the CodeGenerator via
+  // MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
+ // stack space required for each individual call (as determined by the call
+ // ABI).
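+  // For example (illustrative figures only): if a function contains three
+  // calls whose ABIs need 16, 32, and 8 bytes of outgoing argument stack
+  // space, maxStackArgBytes_ ends up as max(16, 32, 8) = 32 and the frame
+  // reserves 32 bytes for argument passing.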
+
+ // Operations that modify a CallCompileState.
+
+ [[nodiscard]] bool passInstance(MIRType instanceType,
+ CallCompileState* args) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ // Should only pass an instance once. And it must be a non-GC pointer.
+ MOZ_ASSERT(args->instanceArg_ == ABIArg());
+ MOZ_ASSERT(instanceType == MIRType::Pointer);
+ args->instanceArg_ = args->abi_.next(MIRType::Pointer);
+ return true;
+ }
+
+ // Do not call this directly. Call one of the passArg() variants instead.
+ [[nodiscard]] bool passArgWorker(MDefinition* argDef, MIRType type,
+ CallCompileState* call) {
+ ABIArg arg = call->abi_.next(type);
+ switch (arg.kind()) {
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR: {
+ auto mirLow =
+ MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
+ curBlock_->add(mirLow);
+ auto mirHigh =
+ MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
+ curBlock_->add(mirHigh);
+ return call->regArgs_.append(
+ MWasmCallBase::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
+ call->regArgs_.append(
+ MWasmCallBase::Arg(AnyRegister(arg.gpr64().high), mirHigh));
+ }
+#endif
+ case ABIArg::GPR:
+ case ABIArg::FPU:
+ return call->regArgs_.append(MWasmCallBase::Arg(arg.reg(), argDef));
+ case ABIArg::Stack: {
+ auto* mir =
+ MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
+ curBlock_->add(mir);
+ return true;
+ }
+ case ABIArg::Uninitialized:
+ MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
+ }
+ MOZ_CRASH("Unknown ABIArg kind.");
+ }
+
+ template <typename SpanT>
+ [[nodiscard]] bool passArgs(const DefVector& argDefs, SpanT types,
+ CallCompileState* call) {
+ MOZ_ASSERT(argDefs.length() == types.size());
+ for (uint32_t i = 0; i < argDefs.length(); i++) {
+ MDefinition* def = argDefs[i];
+ ValType type = types[i];
+ if (!passArg(def, type, call)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool passArg(MDefinition* argDef, MIRType type,
+ CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+ return passArgWorker(argDef, type, call);
+ }
+
+ [[nodiscard]] bool passArg(MDefinition* argDef, ValType type,
+ CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+ return passArgWorker(argDef, type.toMIRType(), call);
+ }
+
+ // If the call returns results on the stack, prepare a stack area to receive
+ // them, and pass the address of the stack area to the callee as an additional
+ // argument.
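+  // Sketch: for a signature whose results do not all fit in registers, the
+  // trailing results are given (stackOffset, MIRType) slots in an
+  // MWasmStackResultArea, and the area's address is passed as an extra
+  // pointer argument.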
+ [[nodiscard]] bool passStackResultAreaCallArg(const ResultType& resultType,
+ CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+ ABIResultIter iter(resultType);
+ while (!iter.done() && iter.cur().inRegister()) {
+ iter.next();
+ }
+ if (iter.done()) {
+ // No stack results.
+ return true;
+ }
+
+ auto* stackResultArea = MWasmStackResultArea::New(alloc());
+ if (!stackResultArea) {
+ return false;
+ }
+ if (!stackResultArea->init(alloc(), iter.remaining())) {
+ return false;
+ }
+ for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
+ MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
+ iter.cur().type().toMIRType());
+ stackResultArea->initResult(iter.index() - base, loc);
+ }
+ curBlock_->add(stackResultArea);
+ if (!passArg(stackResultArea, MIRType::Pointer, call)) {
+ return false;
+ }
+ call->stackResultArea_ = stackResultArea;
+ return true;
+ }
+
+ [[nodiscard]] bool finishCall(CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ if (!call->regArgs_.append(
+ MWasmCallBase::Arg(AnyRegister(InstanceReg), instancePointer_))) {
+ return false;
+ }
+
+ uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
+
+ maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
+ return true;
+ }
+
+ // Wrappers for creating various kinds of calls.
+
+ [[nodiscard]] bool collectUnaryCallResult(MIRType type,
+ MDefinition** result) {
+ MInstruction* def;
+ switch (type) {
+ case MIRType::Int32:
+ def = MWasmRegisterResult::New(alloc(), MIRType::Int32, ReturnReg);
+ break;
+ case MIRType::Int64:
+ def = MWasmRegister64Result::New(alloc(), ReturnReg64);
+ break;
+ case MIRType::Float32:
+ def = MWasmFloatRegisterResult::New(alloc(), type, ReturnFloat32Reg);
+ break;
+ case MIRType::Double:
+ def = MWasmFloatRegisterResult::New(alloc(), type, ReturnDoubleReg);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ def = MWasmFloatRegisterResult::New(alloc(), type, ReturnSimd128Reg);
+ break;
+#endif
+ case MIRType::RefOrNull:
+ def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull, ReturnReg);
+ break;
+ default:
+ MOZ_CRASH("unexpected MIRType result for builtin call");
+ }
+
+ if (!def) {
+ return false;
+ }
+
+ curBlock_->add(def);
+ *result = def;
+
+ return true;
+ }
+
+ [[nodiscard]] bool collectCallResults(const ResultType& type,
+ MWasmStackResultArea* stackResultArea,
+ DefVector* results) {
+ if (!results->reserve(type.length())) {
+ return false;
+ }
+
+ // The result iterator goes in the order in which results would be popped
+ // off; we want the order in which they would be pushed.
+ ABIResultIter iter(type);
+ uint32_t stackResultCount = 0;
+ while (!iter.done()) {
+ if (iter.cur().onStack()) {
+ stackResultCount++;
+ }
+ iter.next();
+ }
+
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ if (!mirGen().ensureBallast()) {
+ return false;
+ }
+ const ABIResult& result = iter.cur();
+ MInstruction* def;
+ if (result.inRegister()) {
+ switch (result.type().kind()) {
+ case wasm::ValType::I32:
+ def =
+ MWasmRegisterResult::New(alloc(), MIRType::Int32, result.gpr());
+ break;
+ case wasm::ValType::I64:
+ def = MWasmRegister64Result::New(alloc(), result.gpr64());
+ break;
+ case wasm::ValType::F32:
+ def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
+ result.fpr());
+ break;
+ case wasm::ValType::F64:
+ def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
+ result.fpr());
+ break;
+ case wasm::ValType::Ref:
+ def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull,
+ result.gpr());
+ break;
+ case wasm::ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ def = MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128,
+ result.fpr());
+#else
+ return this->iter().fail("Ion has no SIMD support yet");
+#endif
+ }
+ } else {
+ MOZ_ASSERT(stackResultArea);
+ MOZ_ASSERT(stackResultCount);
+ uint32_t idx = --stackResultCount;
+ def = MWasmStackResult::New(alloc(), stackResultArea, idx);
+ }
+
+ if (!def) {
+ return false;
+ }
+ curBlock_->add(def);
+ results->infallibleAppend(def);
+ }
+
+ MOZ_ASSERT(results->length() == type.length());
+
+ return true;
+ }
+
+ [[nodiscard]] bool catchableCall(const CallSiteDesc& desc,
+ const CalleeDesc& callee,
+ const MWasmCallBase::Args& args,
+ const ArgTypeVector& argTypes,
+ MDefinition* indexOrRef = nullptr) {
+ MWasmCallTryDesc tryDesc;
+ if (!beginTryCall(&tryDesc)) {
+ return false;
+ }
+
+ MInstruction* ins;
+ if (tryDesc.inTry) {
+ ins = MWasmCallCatchable::New(alloc(), desc, callee, args,
+ StackArgAreaSizeUnaligned(argTypes),
+ tryDesc, indexOrRef);
+ } else {
+ ins = MWasmCallUncatchable::New(alloc(), desc, callee, args,
+ StackArgAreaSizeUnaligned(argTypes),
+ indexOrRef);
+ }
+ if (!ins) {
+ return false;
+ }
+ curBlock_->add(ins);
+
+ return finishTryCall(&tryDesc);
+ }
+
+ [[nodiscard]] bool callDirect(const FuncType& funcType, uint32_t funcIndex,
+ uint32_t lineOrBytecode,
+ const CallCompileState& call,
+ DefVector* results) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Func);
+ ResultType resultType = ResultType::Vector(funcType.results());
+ auto callee = CalleeDesc::function(funcIndex);
+ ArgTypeVector args(funcType);
+
+ if (!catchableCall(desc, callee, call.regArgs_, args)) {
+ return false;
+ }
+ return collectCallResults(resultType, call.stackResultArea_, results);
+ }
+
+ [[nodiscard]] bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
+ MDefinition* index, uint32_t lineOrBytecode,
+ const CallCompileState& call,
+ DefVector* results) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ const FuncType& funcType = (*moduleEnv_.types)[funcTypeIndex].funcType();
+ CallIndirectId callIndirectId =
+ CallIndirectId::forFuncType(moduleEnv_, funcTypeIndex);
+
+ CalleeDesc callee;
+ if (moduleEnv_.isAsmJS()) {
+ MOZ_ASSERT(tableIndex == 0);
+ MOZ_ASSERT(callIndirectId.kind() == CallIndirectIdKind::AsmJS);
+ uint32_t tableIndex = moduleEnv_.asmJSSigToTableIndex[funcTypeIndex];
+ const TableDesc& table = moduleEnv_.tables[tableIndex];
+ MOZ_ASSERT(IsPowerOfTwo(table.initialLength));
+
+ MDefinition* mask = constantI32(int32_t(table.initialLength - 1));
+ MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
+ curBlock_->add(maskedIndex);
+
+ index = maskedIndex;
+ callee = CalleeDesc::asmJSTable(moduleEnv_, tableIndex);
+ } else {
+ MOZ_ASSERT(callIndirectId.kind() != CallIndirectIdKind::AsmJS);
+ const TableDesc& table = moduleEnv_.tables[tableIndex];
+ callee =
+ CalleeDesc::wasmTable(moduleEnv_, table, tableIndex, callIndirectId);
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Indirect);
+ ArgTypeVector args(funcType);
+ ResultType resultType = ResultType::Vector(funcType.results());
+
+ if (!catchableCall(desc, callee, call.regArgs_, args, index)) {
+ return false;
+ }
+ return collectCallResults(resultType, call.stackResultArea_, results);
+ }
+
+ [[nodiscard]] bool callImport(unsigned instanceDataOffset,
+ uint32_t lineOrBytecode,
+ const CallCompileState& call,
+ const FuncType& funcType, DefVector* results) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Import);
+ auto callee = CalleeDesc::import(instanceDataOffset);
+ ArgTypeVector args(funcType);
+ ResultType resultType = ResultType::Vector(funcType.results());
+
+ if (!catchableCall(desc, callee, call.regArgs_, args)) {
+ return false;
+ }
+ return collectCallResults(resultType, call.stackResultArea_, results);
+ }
+
+ [[nodiscard]] bool builtinCall(const SymbolicAddressSignature& builtin,
+ uint32_t lineOrBytecode,
+ const CallCompileState& call,
+ MDefinition** def) {
+ if (inDeadCode()) {
+ *def = nullptr;
+ return true;
+ }
+
+ MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
+ auto callee = CalleeDesc::builtin(builtin.identity);
+ auto* ins = MWasmCallUncatchable::New(alloc(), desc, callee, call.regArgs_,
+ StackArgAreaSizeUnaligned(builtin));
+ if (!ins) {
+ return false;
+ }
+
+ curBlock_->add(ins);
+
+ return collectUnaryCallResult(builtin.retType, def);
+ }
+
+ [[nodiscard]] bool builtinInstanceMethodCall(
+ const SymbolicAddressSignature& builtin, uint32_t lineOrBytecode,
+ const CallCompileState& call, MDefinition** def = nullptr) {
+ MOZ_ASSERT_IF(!def, builtin.retType == MIRType::None);
+ if (inDeadCode()) {
+ if (def) {
+ *def = nullptr;
+ }
+ return true;
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
+ auto* ins = MWasmCallUncatchable::NewBuiltinInstanceMethodCall(
+ alloc(), desc, builtin.identity, builtin.failureMode, call.instanceArg_,
+ call.regArgs_, StackArgAreaSizeUnaligned(builtin));
+ if (!ins) {
+ return false;
+ }
+
+ curBlock_->add(ins);
+
+ return def ? collectUnaryCallResult(builtin.retType, def) : true;
+ }
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ [[nodiscard]] bool callRef(const FuncType& funcType, MDefinition* ref,
+ uint32_t lineOrBytecode,
+ const CallCompileState& call, DefVector* results) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ CalleeDesc callee = CalleeDesc::wasmFuncRef();
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::FuncRef);
+ ArgTypeVector args(funcType);
+ ResultType resultType = ResultType::Vector(funcType.results());
+
+ if (!catchableCall(desc, callee, call.regArgs_, args, ref)) {
+ return false;
+ }
+ return collectCallResults(resultType, call.stackResultArea_, results);
+ }
+
+#endif // ENABLE_WASM_FUNCTION_REFERENCES
+
+ /*********************************************** Control flow generation */
+
+ inline bool inDeadCode() const { return curBlock_ == nullptr; }
+
+ [[nodiscard]] bool returnValues(const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ if (values.empty()) {
+ curBlock_->end(MWasmReturnVoid::New(alloc(), instancePointer_));
+ } else {
+ ResultType resultType = ResultType::Vector(funcType().results());
+ ABIResultIter iter(resultType);
+ // Switch to iterate in FIFO order instead of the default LIFO.
+ while (!iter.done()) {
+ iter.next();
+ }
+ iter.switchToPrev();
+ for (uint32_t i = 0; !iter.done(); iter.prev(), i++) {
+ if (!mirGen().ensureBallast()) {
+ return false;
+ }
+ const ABIResult& result = iter.cur();
+ if (result.onStack()) {
+ MOZ_ASSERT(iter.remaining() > 1);
+ if (result.type().isRefRepr()) {
+ auto* store = MWasmStoreRef::New(
+ alloc(), instancePointer_, stackResultPointer_,
+ result.stackOffset(), values[i], AliasSet::WasmStackResult,
+ WasmPreBarrierKind::None);
+ curBlock_->add(store);
+ } else {
+ auto* store = MWasmStoreStackResult::New(
+ alloc(), stackResultPointer_, result.stackOffset(), values[i]);
+ curBlock_->add(store);
+ }
+ } else {
+ MOZ_ASSERT(iter.remaining() == 1);
+ MOZ_ASSERT(i + 1 == values.length());
+ curBlock_->end(
+ MWasmReturn::New(alloc(), values[i], instancePointer_));
+ }
+ }
+ }
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ void unreachableTrap() {
+ if (inDeadCode()) {
+ return;
+ }
+
+ auto* ins =
+ MWasmTrap::New(alloc(), wasm::Trap::Unreachable, bytecodeOffset());
+ curBlock_->end(ins);
+ curBlock_ = nullptr;
+ }
+
+ private:
+ static uint32_t numPushed(MBasicBlock* block) {
+ return block->stackDepth() - block->info().firstStackSlot();
+ }
+
+ public:
+ [[nodiscard]] bool pushDefs(const DefVector& defs) {
+ if (inDeadCode()) {
+ return true;
+ }
+ MOZ_ASSERT(numPushed(curBlock_) == 0);
+ if (!curBlock_->ensureHasSlots(defs.length())) {
+ return false;
+ }
+ for (MDefinition* def : defs) {
+ MOZ_ASSERT(def->type() != MIRType::None);
+ curBlock_->push(def);
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool popPushedDefs(DefVector* defs) {
+ size_t n = numPushed(curBlock_);
+ if (!defs->resizeUninitialized(n)) {
+ return false;
+ }
+ for (; n > 0; n--) {
+ MDefinition* def = curBlock_->pop();
+ MOZ_ASSERT(def->type() != MIRType::Value);
+ (*defs)[n - 1] = def;
+ }
+ return true;
+ }
+
+ private:
+ [[nodiscard]] bool addJoinPredecessor(const DefVector& defs,
+ MBasicBlock** joinPred) {
+ *joinPred = curBlock_;
+ if (inDeadCode()) {
+ return true;
+ }
+ return pushDefs(defs);
+ }
+
+ public:
+ [[nodiscard]] bool branchAndStartThen(MDefinition* cond,
+ MBasicBlock** elseBlock) {
+ if (inDeadCode()) {
+ *elseBlock = nullptr;
+ } else {
+ MBasicBlock* thenBlock;
+ if (!newBlock(curBlock_, &thenBlock)) {
+ return false;
+ }
+ if (!newBlock(curBlock_, elseBlock)) {
+ return false;
+ }
+
+ curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));
+
+ curBlock_ = thenBlock;
+ mirGraph().moveBlockToEnd(curBlock_);
+ }
+
+ return startBlock();
+ }
+
+ [[nodiscard]] bool switchToElse(MBasicBlock* elseBlock,
+ MBasicBlock** thenJoinPred) {
+ DefVector values;
+ if (!finishBlock(&values)) {
+ return false;
+ }
+
+ if (!elseBlock) {
+ *thenJoinPred = nullptr;
+ } else {
+ if (!addJoinPredecessor(values, thenJoinPred)) {
+ return false;
+ }
+
+ curBlock_ = elseBlock;
+ mirGraph().moveBlockToEnd(curBlock_);
+ }
+
+ return startBlock();
+ }
+
+ [[nodiscard]] bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
+ DefVector values;
+ if (!finishBlock(&values)) {
+ return false;
+ }
+
+ if (!thenJoinPred && inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* elseJoinPred;
+ if (!addJoinPredecessor(values, &elseJoinPred)) {
+ return false;
+ }
+
+ mozilla::Array<MBasicBlock*, 2> blocks;
+ size_t numJoinPreds = 0;
+ if (thenJoinPred) {
+ blocks[numJoinPreds++] = thenJoinPred;
+ }
+ if (elseJoinPred) {
+ blocks[numJoinPreds++] = elseJoinPred;
+ }
+
+ if (numJoinPreds == 0) {
+ return true;
+ }
+
+ MBasicBlock* join;
+ if (!goToNewBlock(blocks[0], &join)) {
+ return false;
+ }
+ for (size_t i = 1; i < numJoinPreds; ++i) {
+ if (!goToExistingBlock(blocks[i], join)) {
+ return false;
+ }
+ }
+
+ curBlock_ = join;
+ return popPushedDefs(defs);
+ }
+
+ [[nodiscard]] bool startBlock() {
+ MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(),
+ blockPatches_[blockDepth_].empty());
+ blockDepth_++;
+ return true;
+ }
+
+ [[nodiscard]] bool finishBlock(DefVector* defs) {
+ MOZ_ASSERT(blockDepth_);
+ uint32_t topLabel = --blockDepth_;
+ return bindBranches(topLabel, defs);
+ }
+
+ [[nodiscard]] bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
+ *loopHeader = nullptr;
+
+ blockDepth_++;
+ loopDepth_++;
+
+ if (inDeadCode()) {
+ return true;
+ }
+
+ // Create the loop header.
+ MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
+ *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
+ MBasicBlock::PENDING_LOOP_HEADER);
+ if (!*loopHeader) {
+ return false;
+ }
+
+ (*loopHeader)->setLoopDepth(loopDepth_);
+ mirGraph().addBlock(*loopHeader);
+ curBlock_->end(MGoto::New(alloc(), *loopHeader));
+
+ DefVector loopParams;
+ if (!iter().getResults(paramCount, &loopParams)) {
+ return false;
+ }
+ for (size_t i = 0; i < paramCount; i++) {
+ MPhi* phi = MPhi::New(alloc(), loopParams[i]->type());
+ if (!phi) {
+ return false;
+ }
+ if (!phi->reserveLength(2)) {
+ return false;
+ }
+ (*loopHeader)->addPhi(phi);
+ phi->addInput(loopParams[i]);
+ loopParams[i] = phi;
+ }
+ iter().setResults(paramCount, loopParams);
+
+ MBasicBlock* body;
+ if (!goToNewBlock(*loopHeader, &body)) {
+ return false;
+ }
+ curBlock_ = body;
+ return true;
+ }
+
+ private:
+ void fixupRedundantPhis(MBasicBlock* b) {
+ for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
+ MDefinition* def = b->getSlot(i);
+ if (def->isUnused()) {
+ b->setSlot(i, def->toPhi()->getOperand(0));
+ }
+ }
+ }
+
+ [[nodiscard]] bool setLoopBackedge(MBasicBlock* loopEntry,
+ MBasicBlock* loopBody,
+ MBasicBlock* backedge, size_t paramCount) {
+ if (!loopEntry->setBackedgeWasm(backedge, paramCount)) {
+ return false;
+ }
+
+ // Flag all redundant phis as unused.
+ for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd();
+ phi++) {
+ MOZ_ASSERT(phi->numOperands() == 2);
+ if (phi->getOperand(0) == phi->getOperand(1)) {
+ phi->setUnused();
+ }
+ }
+
+ // Fix up phis stored in the slots Vector of pending blocks.
+ for (ControlFlowPatchVector& patches : blockPatches_) {
+ for (ControlFlowPatch& p : patches) {
+ MBasicBlock* block = p.ins->block();
+ if (block->loopDepth() >= loopEntry->loopDepth()) {
+ fixupRedundantPhis(block);
+ }
+ }
+ }
+
+ // The loop body, if any, might be referencing recycled phis too.
+ if (loopBody) {
+ fixupRedundantPhis(loopBody);
+ }
+
+ // Pending jumps to an enclosing try-catch may reference the recycled phis.
+ // We have to search above all enclosing try blocks, as a delegate may move
+ // patches around.
+ for (uint32_t depth = 0; depth < iter().controlStackDepth(); depth++) {
+ LabelKind kind = iter().controlKind(depth);
+ if (kind != LabelKind::Try && kind != LabelKind::Body) {
+ continue;
+ }
+ Control& control = iter().controlItem(depth);
+ for (MControlInstruction* patch : control.tryPadPatches) {
+ MBasicBlock* block = patch->block();
+ if (block->loopDepth() >= loopEntry->loopDepth()) {
+ fixupRedundantPhis(block);
+ }
+ }
+ }
+
+ // Discard redundant phis and add to the free list.
+ for (MPhiIterator phi = loopEntry->phisBegin();
+ phi != loopEntry->phisEnd();) {
+ MPhi* entryDef = *phi++;
+ if (!entryDef->isUnused()) {
+ continue;
+ }
+
+ entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
+ loopEntry->discardPhi(entryDef);
+ mirGraph().addPhiToFreeList(entryDef);
+ }
+
+ return true;
+ }
+
+ public:
+ [[nodiscard]] bool closeLoop(MBasicBlock* loopHeader,
+ DefVector* loopResults) {
+ MOZ_ASSERT(blockDepth_ >= 1);
+ MOZ_ASSERT(loopDepth_);
+
+ uint32_t headerLabel = blockDepth_ - 1;
+
+ if (!loopHeader) {
+ MOZ_ASSERT(inDeadCode());
+ MOZ_ASSERT(headerLabel >= blockPatches_.length() ||
+ blockPatches_[headerLabel].empty());
+ blockDepth_--;
+ loopDepth_--;
+ return true;
+ }
+
+ // Op::Loop doesn't have an implicit backedge so temporarily set
+ // aside the end of the loop body to bind backedges.
+ MBasicBlock* loopBody = curBlock_;
+ curBlock_ = nullptr;
+
+ // As explained in bug 1253544, Ion apparently has an invariant that
+ // there is only one backedge to loop headers. To handle wasm's ability
+ // to have multiple backedges to the same loop header, we bind all those
+ // branches as forward jumps to a single backward jump. This is
+ // unfortunate but the optimizer is able to fold these into single jumps
+ // to backedges.
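+    // For example (hypothetical sketch): in
+    //   (loop $l ... (br_if $l ...) ... (br $l))
+    // both branches to $l are bound as forward jumps into the single block
+    // created by bindBranches below, and only that block ends with the MGoto
+    // backedge to the header.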
+ DefVector backedgeValues;
+ if (!bindBranches(headerLabel, &backedgeValues)) {
+ return false;
+ }
+
+ MOZ_ASSERT(loopHeader->loopDepth() == loopDepth_);
+
+ if (curBlock_) {
+ // We're on the loop backedge block, created by bindBranches.
+ for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
+ curBlock_->pop();
+ }
+
+ if (!pushDefs(backedgeValues)) {
+ return false;
+ }
+
+ MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_);
+ curBlock_->end(MGoto::New(alloc(), loopHeader));
+ if (!setLoopBackedge(loopHeader, loopBody, curBlock_,
+ backedgeValues.length())) {
+ return false;
+ }
+ }
+
+ curBlock_ = loopBody;
+
+ loopDepth_--;
+
+    // If the current block's loop depth is still that of the inner loop body,
+    // correct it.
+ if (curBlock_ && curBlock_->loopDepth() != loopDepth_) {
+ MBasicBlock* out;
+ if (!goToNewBlock(curBlock_, &out)) {
+ return false;
+ }
+ curBlock_ = out;
+ }
+
+ blockDepth_ -= 1;
+ return inDeadCode() || popPushedDefs(loopResults);
+ }
+
+ [[nodiscard]] bool addControlFlowPatch(MControlInstruction* ins,
+ uint32_t relative, uint32_t index) {
+ MOZ_ASSERT(relative < blockDepth_);
+ uint32_t absolute = blockDepth_ - 1 - relative;
+
+ if (absolute >= blockPatches_.length() &&
+ !blockPatches_.resize(absolute + 1)) {
+ return false;
+ }
+
+ return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
+ }
+
+ [[nodiscard]] bool br(uint32_t relativeDepth, const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MGoto* jump = MGoto::New(alloc());
+ if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex)) {
+ return false;
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(jump);
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ [[nodiscard]] bool brIf(uint32_t relativeDepth, const DefVector& values,
+ MDefinition* condition) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* joinBlock = nullptr;
+ if (!newBlock(curBlock_, &joinBlock)) {
+ return false;
+ }
+
+ MTest* test = MTest::New(alloc(), condition, nullptr, joinBlock);
+ if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
+ return false;
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(test);
+ curBlock_ = joinBlock;
+ return true;
+ }
+
+ [[nodiscard]] bool brTable(MDefinition* operand, uint32_t defaultDepth,
+ const Uint32Vector& depths,
+ const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ size_t numCases = depths.length();
+ MOZ_ASSERT(numCases <= INT32_MAX);
+ MOZ_ASSERT(numCases);
+
+ MTableSwitch* table =
+ MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));
+
+ size_t defaultIndex;
+ if (!table->addDefault(nullptr, &defaultIndex)) {
+ return false;
+ }
+ if (!addControlFlowPatch(table, defaultDepth, defaultIndex)) {
+ return false;
+ }
+
+ using IndexToCaseMap =
+ HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
+
+ IndexToCaseMap indexToCase;
+ if (!indexToCase.put(defaultDepth, defaultIndex)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < numCases; i++) {
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+
+ uint32_t depth = depths[i];
+
+ size_t caseIndex;
+ IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
+ if (!p) {
+ if (!table->addSuccessor(nullptr, &caseIndex)) {
+ return false;
+ }
+ if (!addControlFlowPatch(table, depth, caseIndex)) {
+ return false;
+ }
+ if (!indexToCase.add(p, depth, caseIndex)) {
+ return false;
+ }
+ } else {
+ caseIndex = p->value();
+ }
+
+ if (!table->addCase(caseIndex)) {
+ return false;
+ }
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(table);
+ curBlock_ = nullptr;
+
+ return true;
+ }
+
+ /********************************************************** Exceptions ***/
+
+ bool inTryBlock(uint32_t* relativeDepth) {
+ return iter().controlFindInnermost(LabelKind::Try, relativeDepth);
+ }
+
+ bool inTryCode() {
+ uint32_t relativeDepth;
+ return inTryBlock(&relativeDepth);
+ }
+
+ MDefinition* loadTag(uint32_t tagIndex) {
+ MWasmLoadInstanceDataField* tag = MWasmLoadInstanceDataField::New(
+ alloc(), MIRType::RefOrNull,
+ moduleEnv_.offsetOfTagInstanceData(tagIndex), true, instancePointer_);
+ curBlock_->add(tag);
+ return tag;
+ }
+
+ void loadPendingExceptionState(MInstruction** exception, MInstruction** tag) {
+ *exception = MWasmLoadInstance::New(
+ alloc(), instancePointer_, wasm::Instance::offsetOfPendingException(),
+ MIRType::RefOrNull, AliasSet::Load(AliasSet::WasmPendingException));
+ curBlock_->add(*exception);
+
+ *tag = MWasmLoadInstance::New(
+ alloc(), instancePointer_,
+ wasm::Instance::offsetOfPendingExceptionTag(), MIRType::RefOrNull,
+ AliasSet::Load(AliasSet::WasmPendingException));
+ curBlock_->add(*tag);
+ }
+
+ [[nodiscard]] bool setPendingExceptionState(MDefinition* exception,
+ MDefinition* tag) {
+ // Set the pending exception object
+ auto* exceptionAddr = MWasmDerivedPointer::New(
+ alloc(), instancePointer_, Instance::offsetOfPendingException());
+ curBlock_->add(exceptionAddr);
+ auto* setException = MWasmStoreRef::New(
+ alloc(), instancePointer_, exceptionAddr, /*valueOffset=*/0, exception,
+ AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
+ curBlock_->add(setException);
+ if (!postBarrierPrecise(/*lineOrBytecode=*/0, exceptionAddr, exception)) {
+ return false;
+ }
+
+ // Set the pending exception tag object
+ auto* exceptionTagAddr = MWasmDerivedPointer::New(
+ alloc(), instancePointer_, Instance::offsetOfPendingExceptionTag());
+ curBlock_->add(exceptionTagAddr);
+ auto* setExceptionTag = MWasmStoreRef::New(
+ alloc(), instancePointer_, exceptionTagAddr, /*valueOffset=*/0, tag,
+ AliasSet::WasmPendingException, WasmPreBarrierKind::Normal);
+ curBlock_->add(setExceptionTag);
+ return postBarrierPrecise(/*lineOrBytecode=*/0, exceptionTagAddr, tag);
+ }
+
+ [[nodiscard]] bool addPadPatch(MControlInstruction* ins,
+ size_t relativeTryDepth) {
+ Control& tryControl = iter().controlItem(relativeTryDepth);
+ ControlInstructionVector& padPatches = tryControl.tryPadPatches;
+ return padPatches.emplaceBack(ins);
+ }
+
+ [[nodiscard]] bool endWithPadPatch(uint32_t relativeTryDepth) {
+ MGoto* jumpToLandingPad = MGoto::New(alloc());
+ curBlock_->end(jumpToLandingPad);
+ return addPadPatch(jumpToLandingPad, relativeTryDepth);
+ }
+
+ [[nodiscard]] bool delegatePadPatches(const ControlInstructionVector& patches,
+ uint32_t relativeDepth) {
+ if (patches.empty()) {
+ return true;
+ }
+
+ // Find where we are delegating the pad patches to.
+ uint32_t targetRelativeDepth;
+ if (!iter().controlFindInnermostFrom(LabelKind::Try, relativeDepth,
+ &targetRelativeDepth)) {
+ MOZ_ASSERT(relativeDepth <= blockDepth_ - 1);
+ targetRelativeDepth = blockDepth_ - 1;
+ }
+ // Append the delegate's pad patches to the target's.
+ for (MControlInstruction* ins : patches) {
+ if (!addPadPatch(ins, targetRelativeDepth)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool beginTryCall(MWasmCallTryDesc* call) {
+ call->inTry = inTryBlock(&call->relativeTryDepth);
+ if (!call->inTry) {
+ return true;
+ }
+ // Allocate a try note
+ if (!tryNotes_.append(wasm::TryNote())) {
+ return false;
+ }
+ call->tryNoteIndex = tryNotes_.length() - 1;
+ // Allocate blocks for fallthrough and exceptions
+ return newBlock(curBlock_, &call->fallthroughBlock) &&
+ newBlock(curBlock_, &call->prePadBlock);
+ }
+
+ [[nodiscard]] bool finishTryCall(MWasmCallTryDesc* call) {
+ if (!call->inTry) {
+ return true;
+ }
+
+ // Switch to the prePadBlock
+ MBasicBlock* callBlock = curBlock_;
+ curBlock_ = call->prePadBlock;
+
+ // Mark this as the landing pad for the call
+ curBlock_->add(
+ MWasmCallLandingPrePad::New(alloc(), callBlock, call->tryNoteIndex));
+
+ // End with a pending jump to the landing pad
+ if (!endWithPadPatch(call->relativeTryDepth)) {
+ return false;
+ }
+
+ // Compilation continues in the fallthroughBlock.
+ curBlock_ = call->fallthroughBlock;
+ return true;
+ }
+
+ // Create a landing pad for a try block if there are any throwing
+ // instructions.
+ [[nodiscard]] bool createTryLandingPadIfNeeded(Control& control,
+ MBasicBlock** landingPad) {
+ // If there are no pad-patches for this try control, it means there are no
+ // instructions in the try code that could throw an exception. In this
+ // case, all the catches are dead code, and the try code ends up equivalent
+ // to a plain wasm block.
+ ControlInstructionVector& patches = control.tryPadPatches;
+ if (patches.empty()) {
+ *landingPad = nullptr;
+ return true;
+ }
+
+ // Otherwise, if there are (pad-) branches from places in the try code that
+ // may throw an exception, bind these branches to a new landing pad
+ // block. This is done similarly to what is done in bindBranches.
+ MControlInstruction* ins = patches[0];
+ MBasicBlock* pred = ins->block();
+ if (!newBlock(pred, landingPad)) {
+ return false;
+ }
+ ins->replaceSuccessor(0, *landingPad);
+ for (size_t i = 1; i < patches.length(); i++) {
+ ins = patches[i];
+ pred = ins->block();
+ if (!(*landingPad)->addPredecessor(alloc(), pred)) {
+ return false;
+ }
+ ins->replaceSuccessor(0, *landingPad);
+ }
+
+ // Set up the slots in the landing pad block.
+ if (!setupLandingPadSlots(*landingPad)) {
+ return false;
+ }
+
+ // Clear the now bound pad patches.
+ patches.clear();
+ return true;
+ }
+
+ // Consume the pending exception state from instance, and set up the slots
+ // of the landing pad with the exception state.
+ [[nodiscard]] bool setupLandingPadSlots(MBasicBlock* landingPad) {
+ MBasicBlock* prevBlock = curBlock_;
+ curBlock_ = landingPad;
+
+ // Load the pending exception and tag
+ MInstruction* exception;
+ MInstruction* tag;
+ loadPendingExceptionState(&exception, &tag);
+
+ // Clear the pending exception and tag
+ auto* null = constantNullRef();
+ if (!setPendingExceptionState(null, null)) {
+ return false;
+ }
+
+ // Push the exception and its tag on the stack to make them available
+ // to the landing pad blocks.
+ if (!landingPad->ensureHasSlots(2)) {
+ return false;
+ }
+ landingPad->push(exception);
+ landingPad->push(tag);
+
+ curBlock_ = prevBlock;
+ return true;
+ }
+
+ [[nodiscard]] bool startTry(MBasicBlock** curBlock) {
+ *curBlock = curBlock_;
+ return startBlock();
+ }
+
+ [[nodiscard]] bool joinTryOrCatchBlock(Control& control) {
+ // If the try or catch block ended with dead code, there is no need to
+ // do any control flow join.
+ if (inDeadCode()) {
+ return true;
+ }
+
+ // This is a split path which we'll need to join later, using a control
+ // flow patch.
+ MOZ_ASSERT(!curBlock_->hasLastIns());
+ MGoto* jump = MGoto::New(alloc());
+ if (!addControlFlowPatch(jump, 0, MGoto::TargetIndex)) {
+ return false;
+ }
+
+ // Finish the current block with the control flow patch instruction.
+ curBlock_->end(jump);
+ return true;
+ }
+
+ // Finish the previous block (either a try or catch block) and then setup a
+ // new catch block.
+ [[nodiscard]] bool switchToCatch(Control& control, const LabelKind& fromKind,
+ uint32_t tagIndex) {
+ // If there is no control block, then either:
+ // - the entry of the try block is dead code, or
+ // - there is no landing pad for the try-catch.
+ // In either case, any catch will be dead code.
+ if (!control.block) {
+ MOZ_ASSERT(inDeadCode());
+ return true;
+ }
+
+ // Join the previous try or catch block with a patch to the future join of
+ // the whole try-catch block.
+ if (!joinTryOrCatchBlock(control)) {
+ return false;
+ }
+
+ // If we are switching from the try block, create the landing pad. This is
+ // guaranteed to happen once and only once before processing catch blocks.
+ if (fromKind == LabelKind::Try) {
+ MBasicBlock* padBlock = nullptr;
+ if (!createTryLandingPadIfNeeded(control, &padBlock)) {
+ return false;
+ }
+ // Set the control block for this try-catch to the landing pad.
+ control.block = padBlock;
+ }
+
+ // If there is no landing pad, then this and following catches are dead
+ // code.
+ if (!control.block) {
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ // Switch to the landing pad.
+ curBlock_ = control.block;
+
+ // Handle a catch_all by immediately jumping to a new block. We require a
+ // new block (as opposed to just emitting the catch_all code in the current
+ // block) because rethrow requires the exception/tag to be present in the
+ // landing pad's slots, while the catch_all block must not have the
+ // exception/tag in slots.
+ if (tagIndex == CatchAllIndex) {
+ MBasicBlock* catchAllBlock = nullptr;
+ if (!goToNewBlock(curBlock_, &catchAllBlock)) {
+ return false;
+ }
+ // Compilation will continue in the catch_all block.
+ curBlock_ = catchAllBlock;
+      // Remove the tag and exception slots from the block; they are no
+      // longer necessary.
+ curBlock_->pop();
+ curBlock_->pop();
+ return true;
+ }
+
+    // Handle a tagged catch by comparing the pending exception's tag against
+    // this catch's tag, jumping to a catch block if they match, or else to a
+    // fallthrough block to continue the landing pad.
+ MBasicBlock* catchBlock = nullptr;
+ MBasicBlock* fallthroughBlock = nullptr;
+ if (!newBlock(curBlock_, &catchBlock) ||
+ !newBlock(curBlock_, &fallthroughBlock)) {
+ return false;
+ }
+
+ // Get the exception and its tag from the slots we pushed when adding
+ // control flow patches.
+ MDefinition* exceptionTag = curBlock_->pop();
+ MDefinition* exception = curBlock_->pop();
+
+ // Branch to the catch block if the exception's tag matches this catch
+ // block's tag.
+ MDefinition* catchTag = loadTag(tagIndex);
+ MDefinition* matchesCatchTag =
+ compare(exceptionTag, catchTag, JSOp::Eq, MCompare::Compare_RefOrNull);
+ curBlock_->end(
+ MTest::New(alloc(), matchesCatchTag, catchBlock, fallthroughBlock));
+
+ // The landing pad will continue in the fallthrough block
+ control.block = fallthroughBlock;
+
+ // Set up the catch block by extracting the values from the exception
+ // object.
+ curBlock_ = catchBlock;
+
+    // Remove the tag and exception slots from the block; they are no
+    // longer necessary.
+ curBlock_->pop();
+ curBlock_->pop();
+
+ // Extract the exception values for the catch block
+ DefVector values;
+ if (!loadExceptionValues(exception, tagIndex, &values)) {
+ return false;
+ }
+ iter().setResults(values.length(), values);
+ return true;
+ }
+
+ [[nodiscard]] bool loadExceptionValues(MDefinition* exception,
+ uint32_t tagIndex, DefVector* values) {
+ SharedTagType tagType = moduleEnv().tags[tagIndex].type;
+ const ValTypeVector& params = tagType->argTypes_;
+ const TagOffsetVector& offsets = tagType->argOffsets_;
+
+ // Get the data pointer from the exception object
+ auto* data = MWasmLoadField::New(
+ alloc(), exception, WasmExceptionObject::offsetOfData(),
+ MIRType::Pointer, MWideningOp::None, AliasSet::Load(AliasSet::Any));
+ if (!data) {
+ return false;
+ }
+ curBlock_->add(data);
+
+ // Presize the values vector to the number of params
+ if (!values->reserve(params.length())) {
+ return false;
+ }
+
+ // Load each value from the data pointer
+ for (size_t i = 0; i < params.length(); i++) {
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ auto* load = MWasmLoadFieldKA::New(
+ alloc(), exception, data, offsets[i], params[i].toMIRType(),
+ MWideningOp::None, AliasSet::Load(AliasSet::Any));
+ if (!load || !values->append(load)) {
+ return false;
+ }
+ curBlock_->add(load);
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool finishTryCatch(LabelKind kind, Control& control,
+ DefVector* defs) {
+ switch (kind) {
+ case LabelKind::Try: {
+        // This is a catchless try; we must delegate all throwing instructions
+ // to the nearest enclosing try block if one exists, or else to the
+ // body block which will handle it in emitBodyDelegateThrowPad. We
+ // specify a relativeDepth of '1' to delegate outside of the still
+ // active try block.
+ uint32_t relativeDepth = 1;
+ if (!delegatePadPatches(control.tryPadPatches, relativeDepth)) {
+ return false;
+ }
+ break;
+ }
+ case LabelKind::Catch: {
+        // This is a try without a catch_all; we must have a rethrow at the
+        // end of the landing pad (if any).
+ MBasicBlock* padBlock = control.block;
+ if (padBlock) {
+ MBasicBlock* prevBlock = curBlock_;
+ curBlock_ = padBlock;
+ MDefinition* tag = curBlock_->pop();
+ MDefinition* exception = curBlock_->pop();
+ if (!throwFrom(exception, tag)) {
+ return false;
+ }
+ curBlock_ = prevBlock;
+ }
+ break;
+ }
+ case LabelKind::CatchAll:
+ // This is a try with a catch_all, and requires no special handling.
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // Finish the block, joining the try and catch blocks
+ return finishBlock(defs);
+ }
+
+ [[nodiscard]] bool emitBodyDelegateThrowPad(Control& control) {
+ // Create a landing pad for any throwing instructions
+ MBasicBlock* padBlock;
+ if (!createTryLandingPadIfNeeded(control, &padBlock)) {
+ return false;
+ }
+
+ // If no landing pad was necessary, then we don't need to do anything here
+ if (!padBlock) {
+ return true;
+ }
+
+ // Switch to the landing pad and rethrow the exception
+ MBasicBlock* prevBlock = curBlock_;
+ curBlock_ = padBlock;
+ MDefinition* tag = curBlock_->pop();
+ MDefinition* exception = curBlock_->pop();
+ if (!throwFrom(exception, tag)) {
+ return false;
+ }
+ curBlock_ = prevBlock;
+ return true;
+ }
+
+ [[nodiscard]] bool emitNewException(MDefinition* tag,
+ MDefinition** exception) {
+ return emitInstanceCall1(readBytecodeOffset(), SASigExceptionNew, tag,
+ exception);
+ }
+
+ [[nodiscard]] bool emitThrow(uint32_t tagIndex, const DefVector& argValues) {
+ if (inDeadCode()) {
+ return true;
+ }
+ uint32_t bytecodeOffset = readBytecodeOffset();
+
+ // Load the tag
+ MDefinition* tag = loadTag(tagIndex);
+ if (!tag) {
+ return false;
+ }
+
+ // Allocate an exception object
+ MDefinition* exception;
+ if (!emitNewException(tag, &exception)) {
+ return false;
+ }
+
+ // Load the data pointer from the object
+ auto* data = MWasmLoadField::New(
+ alloc(), exception, WasmExceptionObject::offsetOfData(),
+ MIRType::Pointer, MWideningOp::None, AliasSet::Load(AliasSet::Any));
+ if (!data) {
+ return false;
+ }
+ curBlock_->add(data);
+
+ // Store the params into the data pointer
+ SharedTagType tagType = moduleEnv_.tags[tagIndex].type;
+ for (size_t i = 0; i < tagType->argOffsets_.length(); i++) {
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ ValType type = tagType->argTypes_[i];
+ uint32_t offset = tagType->argOffsets_[i];
+
+ if (!type.isRefRepr()) {
+ auto* store = MWasmStoreFieldKA::New(alloc(), exception, data, offset,
+ argValues[i], MNarrowingOp::None,
+ AliasSet::Store(AliasSet::Any));
+ if (!store) {
+ return false;
+ }
+ curBlock_->add(store);
+ continue;
+ }
+
+ // Store the new value
+ auto* store = MWasmStoreFieldRefKA::New(
+ alloc(), instancePointer_, exception, data, offset, argValues[i],
+ AliasSet::Store(AliasSet::Any), Nothing(), WasmPreBarrierKind::None);
+ if (!store) {
+ return false;
+ }
+ curBlock_->add(store);
+
+ // Call the post-write barrier
+ if (!postBarrier(bytecodeOffset, exception, data, offset, argValues[i])) {
+ return false;
+ }
+ }
+
+ // Throw the exception
+ return throwFrom(exception, tag);
+ }
+
+ [[nodiscard]] bool throwFrom(MDefinition* exn, MDefinition* tag) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ // Check if there is a local catching try control, and if so, then add a
+ // pad-patch to its tryPadPatches.
+ uint32_t relativeTryDepth;
+ if (inTryBlock(&relativeTryDepth)) {
+ // Set the pending exception state, the landing pad will read from this
+ if (!setPendingExceptionState(exn, tag)) {
+ return false;
+ }
+
+ // End with a pending jump to the landing pad
+ if (!endWithPadPatch(relativeTryDepth)) {
+ return false;
+ }
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ // If there is no surrounding catching block, call an instance method to
+ // throw the exception.
+ if (!emitInstanceCall1(readBytecodeOffset(), SASigThrowException, exn)) {
+ return false;
+ }
+ unreachableTrap();
+
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ [[nodiscard]] bool emitRethrow(uint32_t relativeDepth) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ Control& control = iter().controlItem(relativeDepth);
+ MBasicBlock* pad = control.block;
+ MOZ_ASSERT(pad);
+ MOZ_ASSERT(pad->nslots() > 1);
+ MOZ_ASSERT(iter().controlKind(relativeDepth) == LabelKind::Catch ||
+ iter().controlKind(relativeDepth) == LabelKind::CatchAll);
+
+    // The exception and its tag are always the last two slots in the landing
+    // pad, with the tag on top.
+ size_t exnSlotPosition = pad->nslots() - 2;
+ MDefinition* tag = pad->getSlot(exnSlotPosition + 1);
+ MDefinition* exception = pad->getSlot(exnSlotPosition);
+ MOZ_ASSERT(exception->type() == MIRType::RefOrNull &&
+ tag->type() == MIRType::RefOrNull);
+ return throwFrom(exception, tag);
+ }
+
+ /*********************************************** Instance call helpers ***/
+
+ // Do not call this function directly -- it offers no protection against
+ // mis-counting of arguments. Instead call one of
+ // ::emitInstanceCall{0,1,2,3,4,5,6}.
+ //
+ // Emits a call to the Instance function indicated by `callee`. This is
+ // assumed to take an Instance pointer as its first argument. The remaining
+ // args are taken from `args`, which is assumed to hold `numArgs` entries.
+ // If `result` is non-null, the MDefinition* holding the return value is
+ // written to `*result`.
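+  //
+  // Usage sketch: postBarrierPrecise above boils down to
+  //   emitInstanceCall2(lineOrBytecode, SASigPostBarrierPrecise, valueAddr,
+  //                     value);
+  // with the Instance pointer supplied separately via passInstance rather
+  // than listed in `args`.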
+ [[nodiscard]] bool emitInstanceCallN(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition** args, size_t numArgs,
+ MDefinition** result = nullptr) {
+ // Check that the first formal parameter is plausibly an Instance pointer.
+ MOZ_ASSERT(callee.numArgs > 0);
+ MOZ_ASSERT(callee.argTypes[0] == MIRType::Pointer);
+ // Check we agree on the number of args.
+ MOZ_ASSERT(numArgs + 1 /* the instance pointer */ == callee.numArgs);
+ // Check we agree on whether a value is returned.
+ MOZ_ASSERT((result == nullptr) == (callee.retType == MIRType::None));
+
+ // If we are in dead code, it can happen that some of the `args` entries
+ // are nullptr, which will look like an OOM to the logic below. So exit
+ // at this point. `passInstance`, `passArg`, `finishCall` and
+ // `builtinInstanceMethodCall` all do nothing in dead code, so it's valid
+ // to exit here.
+ if (inDeadCode()) {
+ if (result) {
+ *result = nullptr;
+ }
+ return true;
+ }
+
+    // Check all args for signs of OOM before attempting to allocate any more
+    // memory.
+ for (size_t i = 0; i < numArgs; i++) {
+ if (!args[i]) {
+ if (result) {
+ *result = nullptr;
+ }
+ return false;
+ }
+ }
+
+ // Finally, construct the call.
+ CallCompileState ccsArgs;
+ if (!passInstance(callee.argTypes[0], &ccsArgs)) {
+ return false;
+ }
+ for (size_t i = 0; i < numArgs; i++) {
+ if (!passArg(args[i], callee.argTypes[i + 1], &ccsArgs)) {
+ return false;
+ }
+ }
+ if (!finishCall(&ccsArgs)) {
+ return false;
+ }
+ return builtinInstanceMethodCall(callee, lineOrBytecode, ccsArgs, result);
+ }
+
+ [[nodiscard]] bool emitInstanceCall0(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition** result = nullptr) {
+ MDefinition* args[0] = {};
+ return emitInstanceCallN(lineOrBytecode, callee, args, 0, result);
+ }
+ [[nodiscard]] bool emitInstanceCall1(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition* arg1,
+ MDefinition** result = nullptr) {
+ MDefinition* args[1] = {arg1};
+ return emitInstanceCallN(lineOrBytecode, callee, args, 1, result);
+ }
+ [[nodiscard]] bool emitInstanceCall2(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition* arg1, MDefinition* arg2,
+ MDefinition** result = nullptr) {
+ MDefinition* args[2] = {arg1, arg2};
+ return emitInstanceCallN(lineOrBytecode, callee, args, 2, result);
+ }
+ [[nodiscard]] bool emitInstanceCall3(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition* arg1, MDefinition* arg2,
+ MDefinition* arg3,
+ MDefinition** result = nullptr) {
+ MDefinition* args[3] = {arg1, arg2, arg3};
+ return emitInstanceCallN(lineOrBytecode, callee, args, 3, result);
+ }
+ [[nodiscard]] bool emitInstanceCall4(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition* arg1, MDefinition* arg2,
+ MDefinition* arg3, MDefinition* arg4,
+ MDefinition** result = nullptr) {
+ MDefinition* args[4] = {arg1, arg2, arg3, arg4};
+ return emitInstanceCallN(lineOrBytecode, callee, args, 4, result);
+ }
+ [[nodiscard]] bool emitInstanceCall5(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition* arg1, MDefinition* arg2,
+ MDefinition* arg3, MDefinition* arg4,
+ MDefinition* arg5,
+ MDefinition** result = nullptr) {
+ MDefinition* args[5] = {arg1, arg2, arg3, arg4, arg5};
+ return emitInstanceCallN(lineOrBytecode, callee, args, 5, result);
+ }
+ [[nodiscard]] bool emitInstanceCall6(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& callee,
+ MDefinition* arg1, MDefinition* arg2,
+ MDefinition* arg3, MDefinition* arg4,
+ MDefinition* arg5, MDefinition* arg6,
+ MDefinition** result = nullptr) {
+ MDefinition* args[6] = {arg1, arg2, arg3, arg4, arg5, arg6};
+ return emitInstanceCallN(lineOrBytecode, callee, args, 6, result);
+ }
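+
+  // Illustrative usage (a sketch based on EmitMemoryGrow further below): an
+  // instance call taking one argument and producing a result looks like
+  //
+  //   MDefinition* ret;
+  //   if (!emitInstanceCall1(bytecodeOffset, SASigMemoryGrowM32, delta, &ret)) {
+  //     return false;
+  //   }
+  //
+  // Each fixed-arity wrapper packs its arguments into an array and forwards
+  // to emitInstanceCallN, which asserts that the argument count and result
+  // presence match the SymbolicAddressSignature.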
+
+ /******************************** WasmGC: low level load/store helpers ***/
+
+  // Given a (FieldType, FieldWideningOp) pair, produce the (MIRType,
+ // MWideningOp) pair that will give the correct operation for reading the
+ // value from memory.
+ static void fieldLoadInfoToMIR(FieldType type, FieldWideningOp wideningOp,
+ MIRType* mirType, MWideningOp* mirWideningOp) {
+ switch (type.kind()) {
+ case FieldType::I8: {
+ switch (wideningOp) {
+ case FieldWideningOp::Signed:
+ *mirType = MIRType::Int32;
+ *mirWideningOp = MWideningOp::FromS8;
+ return;
+ case FieldWideningOp::Unsigned:
+ *mirType = MIRType::Int32;
+ *mirWideningOp = MWideningOp::FromU8;
+ return;
+ default:
+ MOZ_CRASH();
+ }
+ }
+ case FieldType::I16: {
+ switch (wideningOp) {
+ case FieldWideningOp::Signed:
+ *mirType = MIRType::Int32;
+ *mirWideningOp = MWideningOp::FromS16;
+ return;
+ case FieldWideningOp::Unsigned:
+ *mirType = MIRType::Int32;
+ *mirWideningOp = MWideningOp::FromU16;
+ return;
+ default:
+ MOZ_CRASH();
+ }
+ }
+ default: {
+ switch (wideningOp) {
+ case FieldWideningOp::None:
+ *mirType = type.toMIRType();
+ *mirWideningOp = MWideningOp::None;
+ return;
+ default:
+ MOZ_CRASH();
+ }
+ }
+ }
+ }
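+
+  // For example, per the switch above, (FieldType::I8, FieldWideningOp::Signed)
+  // maps to (MIRType::Int32, MWideningOp::FromS8), while a non-packed field
+  // with FieldWideningOp::None maps to (type.toMIRType(), MWideningOp::None).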
+
+ // Given a FieldType, produce the MNarrowingOp required for writing the
+ // value to memory.
+ static MNarrowingOp fieldStoreInfoToMIR(FieldType type) {
+ switch (type.kind()) {
+ case FieldType::I8:
+ return MNarrowingOp::To8;
+ case FieldType::I16:
+ return MNarrowingOp::To16;
+ default:
+ return MNarrowingOp::None;
+ }
+ }
+
+ // Generate a write of `value` at address `base + offset`, where `offset` is
+ // known at JIT time. If the written value is a reftype, the previous value
+ // at `base + offset` will be retrieved and handed off to the post-write
+ // barrier. `keepAlive` will be referenced by the instruction so as to hold
+ // it live (from the GC's point of view).
+ [[nodiscard]] bool writeGcValueAtBasePlusOffset(
+ uint32_t lineOrBytecode, FieldType fieldType, MDefinition* keepAlive,
+ AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
+ uint32_t offset, bool needsTrapInfo, WasmPreBarrierKind preBarrierKind) {
+ MOZ_ASSERT(aliasBitset != 0);
+ MOZ_ASSERT(keepAlive->type() == MIRType::RefOrNull);
+ MOZ_ASSERT(fieldType.widenToValType().toMIRType() == value->type());
+ MNarrowingOp narrowingOp = fieldStoreInfoToMIR(fieldType);
+
+ if (!fieldType.isRefRepr()) {
+ MaybeTrapSiteInfo maybeTrap;
+ if (needsTrapInfo) {
+ maybeTrap.emplace(getTrapSiteInfo());
+ }
+ auto* store = MWasmStoreFieldKA::New(
+ alloc(), keepAlive, base, offset, value, narrowingOp,
+ AliasSet::Store(aliasBitset), maybeTrap);
+ if (!store) {
+ return false;
+ }
+ curBlock_->add(store);
+ return true;
+ }
+
+ // Otherwise it's a ref store. Load the previous value so we can show it
+ // to the post-write barrier.
+ //
+ // Optimisation opportunity: for the case where this field write results
+ // from struct.new, the old value is always zero. So we should synthesise
+ // a suitable zero constant rather than reading it from the object. See
+ // also bug 1799999.
+ MOZ_ASSERT(narrowingOp == MNarrowingOp::None);
+ MOZ_ASSERT(fieldType.widenToValType() == fieldType.valType());
+
+ // Store the new value
+ auto* store = MWasmStoreFieldRefKA::New(
+ alloc(), instancePointer_, keepAlive, base, offset, value,
+ AliasSet::Store(aliasBitset), mozilla::Some(getTrapSiteInfo()),
+ preBarrierKind);
+ if (!store) {
+ return false;
+ }
+ curBlock_->add(store);
+
+ // Call the post-write barrier
+ return postBarrier(lineOrBytecode, keepAlive, base, offset, value);
+ }
+
+ // Generate a write of `value` at address `base + index * scale`, where
+ // `scale` is known at JIT-time. If the written value is a reftype, the
+ // previous value at `base + index * scale` will be retrieved and handed off
+ // to the post-write barrier. `keepAlive` will be referenced by the
+ // instruction so as to hold it live (from the GC's point of view).
+ [[nodiscard]] bool writeGcValueAtBasePlusScaledIndex(
+ uint32_t lineOrBytecode, FieldType fieldType, MDefinition* keepAlive,
+ AliasSet::Flag aliasBitset, MDefinition* value, MDefinition* base,
+ uint32_t scale, MDefinition* index, WasmPreBarrierKind preBarrierKind) {
+ MOZ_ASSERT(aliasBitset != 0);
+ MOZ_ASSERT(keepAlive->type() == MIRType::RefOrNull);
+ MOZ_ASSERT(fieldType.widenToValType().toMIRType() == value->type());
+ MOZ_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8 ||
+ scale == 16);
+
+ // Currently there's no single MIR node that this can be translated into.
+ // So compute the final address "manually", then store directly to that
+ // address. See bug 1802287.
+ MDefinition* scaleDef = constantTargetWord(intptr_t(scale));
+ if (!scaleDef) {
+ return false;
+ }
+ MDefinition* finalAddr = computeBasePlusScaledIndex(base, scaleDef, index);
+ if (!finalAddr) {
+ return false;
+ }
+
+ return writeGcValueAtBasePlusOffset(
+ lineOrBytecode, fieldType, keepAlive, aliasBitset, value, finalAddr,
+ /*offset=*/0,
+ /*needsTrapInfo=*/false, preBarrierKind);
+ }
+
+ // Generate a read from address `base + offset`, where `offset` is known at
+ // JIT time. The loaded value will be widened as described by `fieldType`
+ // and `fieldWideningOp`. `keepAlive` will be referenced by the instruction
+ // so as to hold it live (from the GC's point of view).
+ [[nodiscard]] MDefinition* readGcValueAtBasePlusOffset(
+ FieldType fieldType, FieldWideningOp fieldWideningOp,
+ MDefinition* keepAlive, AliasSet::Flag aliasBitset, MDefinition* base,
+ uint32_t offset, bool needsTrapInfo) {
+ MOZ_ASSERT(aliasBitset != 0);
+ MOZ_ASSERT(keepAlive->type() == MIRType::RefOrNull);
+ MIRType mirType;
+ MWideningOp mirWideningOp;
+ fieldLoadInfoToMIR(fieldType, fieldWideningOp, &mirType, &mirWideningOp);
+ MaybeTrapSiteInfo maybeTrap;
+ if (needsTrapInfo) {
+ maybeTrap.emplace(getTrapSiteInfo());
+ }
+ auto* load = MWasmLoadFieldKA::New(alloc(), keepAlive, base, offset,
+ mirType, mirWideningOp,
+ AliasSet::Load(aliasBitset), maybeTrap);
+ if (!load) {
+ return nullptr;
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
+ // Generate a read from address `base + index * scale`, where `scale` is
+ // known at JIT-time. The loaded value will be widened as described by
+ // `fieldType` and `fieldWideningOp`. `keepAlive` will be referenced by the
+ // instruction so as to hold it live (from the GC's point of view).
+ [[nodiscard]] MDefinition* readGcValueAtBasePlusScaledIndex(
+ FieldType fieldType, FieldWideningOp fieldWideningOp,
+ MDefinition* keepAlive, AliasSet::Flag aliasBitset, MDefinition* base,
+ uint32_t scale, MDefinition* index) {
+ MOZ_ASSERT(aliasBitset != 0);
+ MOZ_ASSERT(keepAlive->type() == MIRType::RefOrNull);
+ MOZ_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8 ||
+ scale == 16);
+
+ // Currently there's no single MIR node that this can be translated into.
+    // So compute the final address "manually", then load directly from that
+    // address. See bug 1802287.
+ MDefinition* scaleDef = constantTargetWord(intptr_t(scale));
+ if (!scaleDef) {
+ return nullptr;
+ }
+ MDefinition* finalAddr = computeBasePlusScaledIndex(base, scaleDef, index);
+ if (!finalAddr) {
+ return nullptr;
+ }
+
+ MIRType mirType;
+ MWideningOp mirWideningOp;
+ fieldLoadInfoToMIR(fieldType, fieldWideningOp, &mirType, &mirWideningOp);
+ auto* load = MWasmLoadFieldKA::New(alloc(), keepAlive, finalAddr,
+ /*offset=*/0, mirType, mirWideningOp,
+ AliasSet::Load(aliasBitset),
+ mozilla::Some(getTrapSiteInfo()));
+ if (!load) {
+ return nullptr;
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
+ /************************************************ WasmGC: type helpers ***/
+
+ // Returns an MDefinition holding the supertype vector for `typeIndex`.
+ [[nodiscard]] MDefinition* loadSuperTypeVector(uint32_t typeIndex) {
+ uint32_t superTypeVectorOffset =
+ moduleEnv().offsetOfSuperTypeVector(typeIndex);
+
+ auto* load = MWasmLoadInstanceDataField::New(
+ alloc(), MIRType::Pointer, superTypeVectorOffset,
+ /*isConst=*/true, instancePointer_);
+ if (!load) {
+ return nullptr;
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
+ [[nodiscard]] MDefinition* loadTypeDefInstanceData(uint32_t typeIndex) {
+ size_t offset = Instance::offsetInData(
+ moduleEnv_.offsetOfTypeDefInstanceData(typeIndex));
+ auto* result = MWasmDerivedPointer::New(alloc(), instancePointer_, offset);
+ if (!result) {
+ return nullptr;
+ }
+ curBlock_->add(result);
+ return result;
+ }
+
+ /********************************************** WasmGC: struct helpers ***/
+
+ // Helper function for EmitStruct{New,Set}: given a MIR pointer to a
+ // WasmStructObject, a MIR pointer to a value, and a field descriptor,
+ // generate MIR to write the value to the relevant field in the object.
+ [[nodiscard]] bool writeValueToStructField(
+ uint32_t lineOrBytecode, const StructField& field,
+ MDefinition* structObject, MDefinition* value,
+ WasmPreBarrierKind preBarrierKind) {
+ FieldType fieldType = field.type;
+ uint32_t fieldOffset = field.offset;
+
+ bool areaIsOutline;
+ uint32_t areaOffset;
+ WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
+ &areaIsOutline, &areaOffset);
+
+ // Make `base` point at the first byte of either the struct object as a
+ // whole or of the out-of-line data area. And adjust `areaOffset`
+ // accordingly.
+ MDefinition* base;
+ bool needsTrapInfo;
+ if (areaIsOutline) {
+ auto* load = MWasmLoadField::New(
+ alloc(), structObject, WasmStructObject::offsetOfOutlineData(),
+ MIRType::Pointer, MWideningOp::None,
+ AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
+ mozilla::Some(getTrapSiteInfo()));
+ if (!load) {
+ return false;
+ }
+ curBlock_->add(load);
+ base = load;
+ needsTrapInfo = false;
+ } else {
+ base = structObject;
+ needsTrapInfo = true;
+ areaOffset += WasmStructObject::offsetOfInlineData();
+ }
+    // The access is to happen at `base + areaOffset`, so to speak.
+ // After this point we must ignore `fieldOffset`.
+
+ // The alias set denoting the field's location, although lacking a
+ // Load-vs-Store indication at this point.
+ AliasSet::Flag fieldAliasSet = areaIsOutline
+ ? AliasSet::WasmStructOutlineDataArea
+ : AliasSet::WasmStructInlineDataArea;
+
+ return writeGcValueAtBasePlusOffset(lineOrBytecode, fieldType, structObject,
+ fieldAliasSet, value, base, areaOffset,
+ needsTrapInfo, preBarrierKind);
+ }
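+
+  // To sketch the two paths above: an inline field is written directly at
+  // `structObject + offsetOfInlineData() + areaOffset`, relying on trap info
+  // for the implicit null check, whereas an out-of-line field first loads the
+  // outline-data pointer (that load performs the null check) and then writes
+  // at `outlineData + areaOffset`.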
+
+ // Helper function for EmitStructGet: given a MIR pointer to a
+ // WasmStructObject, a field descriptor and a field widening operation,
+ // generate MIR to read the value from the relevant field in the object.
+ [[nodiscard]] MDefinition* readValueFromStructField(
+ const StructField& field, FieldWideningOp wideningOp,
+ MDefinition* structObject) {
+ FieldType fieldType = field.type;
+ uint32_t fieldOffset = field.offset;
+
+ bool areaIsOutline;
+ uint32_t areaOffset;
+ WasmStructObject::fieldOffsetToAreaAndOffset(fieldType, fieldOffset,
+ &areaIsOutline, &areaOffset);
+
+ // Make `base` point at the first byte of either the struct object as a
+ // whole or of the out-of-line data area. And adjust `areaOffset`
+ // accordingly.
+ MDefinition* base;
+ bool needsTrapInfo;
+ if (areaIsOutline) {
+ auto* loadOOLptr = MWasmLoadField::New(
+ alloc(), structObject, WasmStructObject::offsetOfOutlineData(),
+ MIRType::Pointer, MWideningOp::None,
+ AliasSet::Load(AliasSet::WasmStructOutlineDataPointer),
+ mozilla::Some(getTrapSiteInfo()));
+ if (!loadOOLptr) {
+ return nullptr;
+ }
+ curBlock_->add(loadOOLptr);
+ base = loadOOLptr;
+ needsTrapInfo = false;
+ } else {
+ base = structObject;
+ needsTrapInfo = true;
+ areaOffset += WasmStructObject::offsetOfInlineData();
+ }
+    // The access is to happen at `base + areaOffset`, so to speak.
+ // After this point we must ignore `fieldOffset`.
+
+ // The alias set denoting the field's location, although lacking a
+ // Load-vs-Store indication at this point.
+ AliasSet::Flag fieldAliasSet = areaIsOutline
+ ? AliasSet::WasmStructOutlineDataArea
+ : AliasSet::WasmStructInlineDataArea;
+
+ return readGcValueAtBasePlusOffset(fieldType, wideningOp, structObject,
+ fieldAliasSet, base, areaOffset,
+ needsTrapInfo);
+ }
+
+ /********************************* WasmGC: address-arithmetic helpers ***/
+
+ inline bool targetIs64Bit() const {
+#ifdef JS_64BIT
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ // Generate MIR to unsigned widen `val` out to the target word size. If
+ // `val` is already at the target word size, this is a no-op. The only
+ // other allowed case is where `val` is Int32 and we're compiling for a
+ // 64-bit target, in which case a widen is generated.
+ [[nodiscard]] MDefinition* unsignedWidenToTargetWord(MDefinition* val) {
+ if (targetIs64Bit()) {
+ if (val->type() == MIRType::Int32) {
+ auto* ext = MExtendInt32ToInt64::New(alloc(), val, /*isUnsigned=*/true);
+ if (!ext) {
+ return nullptr;
+ }
+ curBlock_->add(ext);
+ return ext;
+ }
+ MOZ_ASSERT(val->type() == MIRType::Int64);
+ return val;
+ }
+ MOZ_ASSERT(val->type() == MIRType::Int32);
+ return val;
+ }
+
+ // Compute `base + index * scale`, for both 32- and 64-bit targets. For the
+ // convenience of callers, on a 64-bit target, `index` and `scale` can
+ // (independently) be either Int32 or Int64; in the former case they will be
+ // zero-extended before the multiplication, so that both the multiplication
+ // and addition are done at the target word size.
+ [[nodiscard]] MDefinition* computeBasePlusScaledIndex(MDefinition* base,
+ MDefinition* scale,
+ MDefinition* index) {
+ // On a 32-bit target, require:
+ // base : Int32 (== TargetWordMIRType())
+ // index, scale : Int32
+ // Calculate base +32 (index *32 scale)
+ //
+ // On a 64-bit target, require:
+ // base : Int64 (== TargetWordMIRType())
+ // index, scale: either Int32 or Int64 (any combination is OK)
+    //   Calculate base +64 ((u-widen to 64(index)) *64 (u-widen to 64(scale)))
+ //
+ // Final result type is the same as that of `base`.
+
+ MOZ_ASSERT(base->type() == TargetWordMIRType());
+
+ // Widen `index` if necessary, producing `indexW`.
+ MDefinition* indexW = unsignedWidenToTargetWord(index);
+ if (!indexW) {
+ return nullptr;
+ }
+ // Widen `scale` if necessary, producing `scaleW`.
+ MDefinition* scaleW = unsignedWidenToTargetWord(scale);
+ if (!scaleW) {
+ return nullptr;
+ }
+ // Compute `scaledIndex = indexW * scaleW`.
+ MIRType targetWordType = TargetWordMIRType();
+ bool targetIs64 = targetWordType == MIRType::Int64;
+ MMul* scaledIndex =
+ MMul::NewWasm(alloc(), indexW, scaleW, targetWordType,
+ targetIs64 ? MMul::Mode::Normal : MMul::Mode::Integer,
+ /*mustPreserveNan=*/false);
+ if (!scaledIndex) {
+ return nullptr;
+ }
+ // Compute `result = base + scaledIndex`.
+ curBlock_->add(scaledIndex);
+ MAdd* result = MAdd::NewWasm(alloc(), base, scaledIndex, targetWordType);
+ if (!result) {
+ return nullptr;
+ }
+ curBlock_->add(result);
+ return result;
+ }
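+
+  // As a concrete sketch of the above: on a 64-bit target, with an Int32
+  // `index` and an Int64 `scale`, the emitted MIR is roughly
+  //
+  //   indexW      = MExtendInt32ToInt64(index)   // unsigned widening
+  //   scaledIndex = MMul(indexW, scale)
+  //   result      = MAdd(base, scaledIndex)
+  //
+  // On a 32-bit target no widening is needed and the multiply and add are
+  // 32-bit operations.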
+
+ /********************************************** WasmGC: array helpers ***/
+
+ // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
+ // return the contents of the WasmArrayObject::numElements_ field.
+ // Adds trap site info for the null check.
+ [[nodiscard]] MDefinition* getWasmArrayObjectNumElements(
+ MDefinition* arrayObject) {
+ MOZ_ASSERT(arrayObject->type() == MIRType::RefOrNull);
+
+ auto* numElements = MWasmLoadField::New(
+ alloc(), arrayObject, WasmArrayObject::offsetOfNumElements(),
+ MIRType::Int32, MWideningOp::None,
+ AliasSet::Load(AliasSet::WasmArrayNumElements),
+ mozilla::Some(getTrapSiteInfo()));
+ if (!numElements) {
+ return nullptr;
+ }
+ curBlock_->add(numElements);
+
+ return numElements;
+ }
+
+ // Given `arrayObject`, the address of a WasmArrayObject, generate MIR to
+ // return the contents of the WasmArrayObject::data_ field.
+ [[nodiscard]] MDefinition* getWasmArrayObjectData(MDefinition* arrayObject) {
+ MOZ_ASSERT(arrayObject->type() == MIRType::RefOrNull);
+
+ auto* data = MWasmLoadField::New(
+ alloc(), arrayObject, WasmArrayObject::offsetOfData(),
+ TargetWordMIRType(), MWideningOp::None,
+ AliasSet::Load(AliasSet::WasmArrayDataPointer),
+ mozilla::Some(getTrapSiteInfo()));
+ if (!data) {
+ return nullptr;
+ }
+ curBlock_->add(data);
+
+ return data;
+ }
+
+  // Given a JIT-time-known type index `typeIndex` and a run-time-known number
+  // of elements `numElements`, create MIR to call `Instance::arrayNew`,
+  // producing an array of the relevant type and size, initialized with the
+  // default value of `typeIndex`'s element type.
+ [[nodiscard]] MDefinition* createDefaultInitializedArrayObject(
+ uint32_t lineOrBytecode, uint32_t typeIndex, MDefinition* numElements) {
+ // Get the type definition for the array as a whole.
+ MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
+ if (!typeDefData) {
+ return nullptr;
+ }
+
+ // Create call: arrayObject = Instance::arrayNew(numElements, typeDefData)
+ // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
+ // by this call will trap.
+ MDefinition* arrayObject;
+ if (!emitInstanceCall2(lineOrBytecode, SASigArrayNew, numElements,
+ typeDefData, &arrayObject)) {
+ return nullptr;
+ }
+
+ return arrayObject;
+ }
+
+ [[nodiscard]] MDefinition* createUninitializedArrayObject(
+ uint32_t lineOrBytecode, uint32_t typeIndex, MDefinition* numElements) {
+ // Get the type definition for the array as a whole.
+ MDefinition* typeDefData = loadTypeDefInstanceData(typeIndex);
+ if (!typeDefData) {
+ return nullptr;
+ }
+
+    // Create call: arrayObject = Instance::arrayNewUninit(numElements,
+    //                                                     typeDefData)
+    // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated
+    // by this call will trap.
+ MDefinition* arrayObject;
+ if (!emitInstanceCall2(lineOrBytecode, SASigArrayNewUninit, numElements,
+ typeDefData, &arrayObject)) {
+ return nullptr;
+ }
+
+ return arrayObject;
+ }
+
+ // This emits MIR to perform several actions common to array loads and
+  // stores. Given `arrayObject`, which points to a WasmArrayObject, and an
+ // index value `index`, it:
+ //
+ // * Generates a trap if the array pointer is null
+ // * Gets the size of the array
+ // * Emits a bounds check of `index` against the array size
+ // * Retrieves the OOL object pointer from the array
+  //  * Includes a null check performed via the signal handler.
+ //
+ // The returned value is for the OOL object pointer.
+ [[nodiscard]] MDefinition* setupForArrayAccess(MDefinition* arrayObject,
+ MDefinition* index) {
+ MOZ_ASSERT(arrayObject->type() == MIRType::RefOrNull);
+ MOZ_ASSERT(index->type() == MIRType::Int32);
+
+    // The null check is done in getWasmArrayObjectNumElements.
+
+ // Get the size value for the array.
+ MDefinition* numElements = getWasmArrayObjectNumElements(arrayObject);
+ if (!numElements) {
+ return nullptr;
+ }
+
+ // Create a bounds check.
+ auto* boundsCheck =
+ MWasmBoundsCheck::New(alloc(), index, numElements, bytecodeOffset(),
+ MWasmBoundsCheck::Target::Unknown);
+ if (!boundsCheck) {
+ return nullptr;
+ }
+ curBlock_->add(boundsCheck);
+
+ // Get the address of the first byte of the (OOL) data area.
+ return getWasmArrayObjectData(arrayObject);
+ }
+
+ // This routine generates all MIR required for `array.new`. The returned
+ // value is for the newly created array.
+ [[nodiscard]] MDefinition* createArrayNewCallAndLoop(uint32_t lineOrBytecode,
+ uint32_t typeIndex,
+ MDefinition* numElements,
+ MDefinition* fillValue) {
+ const ArrayType& arrayType = (*moduleEnv_.types)[typeIndex].arrayType();
+
+    // Create the array object, uninitialized.
+ MDefinition* arrayObject =
+ createUninitializedArrayObject(lineOrBytecode, typeIndex, numElements);
+ if (!arrayObject) {
+ return nullptr;
+ }
+
+ mozilla::DebugOnly<MIRType> fillValueMIRType = fillValue->type();
+ FieldType fillValueFieldType = arrayType.elementType_;
+ MOZ_ASSERT(fillValueFieldType.widenToValType().toMIRType() ==
+ fillValueMIRType);
+
+ uint32_t elemSize = fillValueFieldType.size();
+ MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
+
+ // Make `base` point at the first byte of the (OOL) data area.
+ MDefinition* base = getWasmArrayObjectData(arrayObject);
+ if (!base) {
+ return nullptr;
+ }
+
+ // We have:
+ // base : TargetWord
+ // numElements : Int32
+ // fillValue : <any FieldType>
+ // $elemSize = arrayType.elementType_.size(); 1, 2, 4, 8 or 16
+ //
+ // Generate MIR:
+ // <in current block>
+ // limit : TargetWord = base + nElems * elemSize
+ // if (limit == base) goto after; // skip loop if trip count == 0
+ // // optimisation (not done): skip loop if fill value == 0
+ // loop:
+ // ptrPhi = phi(base, ptrNext)
+ // *ptrPhi = fillValue
+ // ptrNext = ptrPhi + $elemSize
+ // if (ptrNext <u limit) goto loop;
+ // after:
+ //
+ // We construct the loop "manually" rather than using
+ // FunctionCompiler::{startLoop,closeLoop} as the latter have awareness of
+ // the wasm view of loops, whereas the loop we're building here is not a
+ // wasm-level loop.
+ // ==== Create the "loop" and "after" blocks ====
+ MBasicBlock* loopBlock;
+ if (!newBlock(curBlock_, &loopBlock, MBasicBlock::LOOP_HEADER)) {
+ return nullptr;
+ }
+ MBasicBlock* afterBlock;
+ if (!newBlock(loopBlock, &afterBlock)) {
+ return nullptr;
+ }
+
+ // ==== Fill in the remainder of the block preceding the loop ====
+ MDefinition* elemSizeDef = constantTargetWord(intptr_t(elemSize));
+ if (!elemSizeDef) {
+ return nullptr;
+ }
+
+ MDefinition* limit =
+ computeBasePlusScaledIndex(base, elemSizeDef, numElements);
+ if (!limit) {
+ return nullptr;
+ }
+
+ // Use JSOp::StrictEq, not ::Eq, so that the comparison (and eventually
+ // the entire initialisation loop) will be folded out in the case where
+ // the number of elements is zero. See MCompare::tryFoldEqualOperands.
+ MDefinition* limitEqualsBase = compare(
+ limit, base, JSOp::StrictEq,
+ targetIs64Bit() ? MCompare::Compare_UInt64 : MCompare::Compare_UInt32);
+ if (!limitEqualsBase) {
+ return nullptr;
+ }
+ MTest* skipIfLimitEqualsBase =
+ MTest::New(alloc(), limitEqualsBase, afterBlock, loopBlock);
+ if (!skipIfLimitEqualsBase) {
+ return nullptr;
+ }
+ curBlock_->end(skipIfLimitEqualsBase);
+ if (!afterBlock->addPredecessor(alloc(), curBlock_)) {
+ return nullptr;
+ }
+ // Optimisation opportunity: if the fill value is zero, maybe we should
+ // likewise skip over the initialisation loop entirely (and, if the zero
+ // value is visible at JIT time, the loop will be removed). For the
+ // reftyped case, that would be a big win since each iteration requires a
+ // call to the post-write barrier routine.
+
+ // ==== Fill in the loop block as best we can ====
+ curBlock_ = loopBlock;
+ MPhi* ptrPhi = MPhi::New(alloc(), TargetWordMIRType());
+ if (!ptrPhi) {
+ return nullptr;
+ }
+ if (!ptrPhi->reserveLength(2)) {
+ return nullptr;
+ }
+ ptrPhi->addInput(base);
+ curBlock_->addPhi(ptrPhi);
+ curBlock_->setLoopDepth(loopDepth_ + 1);
+
+ // Because we have the exact address to hand, use
+ // `writeGcValueAtBasePlusOffset` rather than
+ // `writeGcValueAtBasePlusScaledIndex` to do the store.
+ if (!writeGcValueAtBasePlusOffset(
+ lineOrBytecode, fillValueFieldType, arrayObject,
+ AliasSet::WasmArrayDataArea, fillValue, ptrPhi, /*offset=*/0,
+ /*needsTrapInfo=*/false, WasmPreBarrierKind::None)) {
+ return nullptr;
+ }
+
+ auto* ptrNext =
+ MAdd::NewWasm(alloc(), ptrPhi, elemSizeDef, TargetWordMIRType());
+ if (!ptrNext) {
+ return nullptr;
+ }
+ curBlock_->add(ptrNext);
+ ptrPhi->addInput(ptrNext);
+
+ MDefinition* ptrNextLtuLimit = compare(
+ ptrNext, limit, JSOp::Lt,
+ targetIs64Bit() ? MCompare::Compare_UInt64 : MCompare::Compare_UInt32);
+ if (!ptrNextLtuLimit) {
+ return nullptr;
+ }
+ auto* continueIfPtrNextLtuLimit =
+ MTest::New(alloc(), ptrNextLtuLimit, loopBlock, afterBlock);
+ if (!continueIfPtrNextLtuLimit) {
+ return nullptr;
+ }
+ curBlock_->end(continueIfPtrNextLtuLimit);
+ if (!loopBlock->addPredecessor(alloc(), loopBlock)) {
+ return nullptr;
+ }
+ // ==== Loop block completed ====
+
+ curBlock_ = afterBlock;
+ return arrayObject;
+ }
+
+ /*********************************************** WasmGC: other helpers ***/
+
+ // Generate MIR that causes a trap of kind `trapKind` if `arg` is zero.
+  // Currently `arg` must have type MIRType::Int32, but that requirement could
+  // be relaxed if needed in the future.
+ [[nodiscard]] bool trapIfZero(wasm::Trap trapKind, MDefinition* arg) {
+ MOZ_ASSERT(arg->type() == MIRType::Int32);
+
+ MBasicBlock* trapBlock = nullptr;
+ if (!newBlock(curBlock_, &trapBlock)) {
+ return false;
+ }
+
+ auto* trap = MWasmTrap::New(alloc(), trapKind, bytecodeOffset());
+ if (!trap) {
+ return false;
+ }
+ trapBlock->end(trap);
+
+ MBasicBlock* joinBlock = nullptr;
+ if (!newBlock(curBlock_, &joinBlock)) {
+ return false;
+ }
+
+ auto* test = MTest::New(alloc(), arg, joinBlock, trapBlock);
+ if (!test) {
+ return false;
+ }
+ curBlock_->end(test);
+ curBlock_ = joinBlock;
+ return true;
+ }
+
+ [[nodiscard]] MDefinition* isGcObjectSubtypeOf(MDefinition* object,
+ RefType sourceType,
+ RefType destType) {
+ MInstruction* isSubTypeOf = nullptr;
+ if (destType.isTypeRef()) {
+ uint32_t typeIndex = moduleEnv_.types->indexOf(*destType.typeDef());
+ MDefinition* superSuperTypeVector = loadSuperTypeVector(typeIndex);
+ isSubTypeOf = MWasmGcObjectIsSubtypeOfConcrete::New(
+ alloc(), object, superSuperTypeVector, sourceType, destType);
+ } else {
+ isSubTypeOf = MWasmGcObjectIsSubtypeOfAbstract::New(alloc(), object,
+ sourceType, destType);
+ }
+ MOZ_ASSERT(isSubTypeOf);
+
+ curBlock_->add(isSubTypeOf);
+ return isSubTypeOf;
+ }
+
+  // Generate MIR that attempts to downcast `ref` to `destType`. If the
+  // downcast fails, we trap. If it succeeds, then `ref` can be assumed to
+  // have a type that is a subtype of (or the same as) `destType` after
+  // this point.
+ [[nodiscard]] bool refCast(MDefinition* ref, RefType sourceType,
+ RefType destType) {
+ MDefinition* success = isGcObjectSubtypeOf(ref, sourceType, destType);
+ if (!success) {
+ return false;
+ }
+
+ // Trap if `success` is zero. If it's nonzero, we have established that
+    // `ref <: destType`.
+ return trapIfZero(wasm::Trap::BadCast, success);
+ }
+
+ // Generate MIR that computes a boolean value indicating whether or not it
+ // is possible to downcast `ref` to `destType`.
+ [[nodiscard]] MDefinition* refTest(MDefinition* ref, RefType sourceType,
+ RefType destType) {
+ return isGcObjectSubtypeOf(ref, sourceType, destType);
+ }
+
+ // Generates MIR for br_on_cast and br_on_cast_fail.
+ [[nodiscard]] bool brOnCastCommon(bool onSuccess, uint32_t labelRelativeDepth,
+ RefType sourceType, RefType destType,
+ const ResultType& labelType,
+ const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* fallthroughBlock = nullptr;
+ if (!newBlock(curBlock_, &fallthroughBlock)) {
+ return false;
+ }
+
+ // `values` are the values in the top block-value on the stack. Since the
+ // argument to `br_on_cast{_fail}` is at the top of the stack, it is the
+ // last element in `values`.
+ //
+ // For both br_on_cast and br_on_cast_fail, the OpIter validation routines
+ // ensure that `values` is non-empty (by rejecting the case
+ // `labelType->length() < 1`) and that the last value in `values` is
+ // reftyped.
+ MOZ_RELEASE_ASSERT(values.length() > 0);
+ MDefinition* ref = values.back();
+ MOZ_ASSERT(ref->type() == MIRType::RefOrNull);
+
+ MDefinition* success = isGcObjectSubtypeOf(ref, sourceType, destType);
+ if (!success) {
+ return false;
+ }
+
+ MTest* test;
+ if (onSuccess) {
+ test = MTest::New(alloc(), success, nullptr, fallthroughBlock);
+ if (!test || !addControlFlowPatch(test, labelRelativeDepth,
+ MTest::TrueBranchIndex)) {
+ return false;
+ }
+ } else {
+ test = MTest::New(alloc(), success, fallthroughBlock, nullptr);
+ if (!test || !addControlFlowPatch(test, labelRelativeDepth,
+ MTest::FalseBranchIndex)) {
+ return false;
+ }
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(test);
+ curBlock_ = fallthroughBlock;
+ return true;
+ }
+
+ [[nodiscard]] bool brOnNonStruct(const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* fallthroughBlock = nullptr;
+ if (!newBlock(curBlock_, &fallthroughBlock)) {
+ return false;
+ }
+
+ MOZ_ASSERT(values.length() > 0);
+ MOZ_ASSERT(values.back()->type() == MIRType::RefOrNull);
+
+ MGoto* jump = MGoto::New(alloc(), fallthroughBlock);
+ if (!jump) {
+ return false;
+ }
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(jump);
+ curBlock_ = fallthroughBlock;
+ return true;
+ }
+
+ /************************************************************ DECODING ***/
+
+ // AsmJS adds a line number to `callSiteLineNums` for certain operations that
+ // are represented by a JS call, such as math builtins. We use these line
+ // numbers when calling builtins. This method will read from
+ // `callSiteLineNums` when we are using AsmJS, or else return the current
+ // bytecode offset.
+ //
+ // This method MUST be called from opcodes that AsmJS will emit a call site
+ // line number for, or else the arrays will get out of sync. Other opcodes
+ // must use `readBytecodeOffset` below.
+ uint32_t readCallSiteLineOrBytecode() {
+ if (!func_.callSiteLineNums.empty()) {
+ return func_.callSiteLineNums[lastReadCallSite_++];
+ }
+ return iter_.lastOpcodeOffset();
+ }
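+
+  // For example, in this file EmitCall and the math-builtin emitters use
+  // readCallSiteLineOrBytecode, while opcodes that asm.js does not annotate,
+  // such as EmitSetGlobal, use readBytecodeOffset below.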
+
+ // Return the current bytecode offset.
+ uint32_t readBytecodeOffset() { return iter_.lastOpcodeOffset(); }
+
+ TrapSiteInfo getTrapSiteInfo() {
+ return TrapSiteInfo(wasm::BytecodeOffset(readBytecodeOffset()));
+ }
+
+#if DEBUG
+ bool done() const { return iter_.done(); }
+#endif
+
+ /*************************************************************************/
+ private:
+ [[nodiscard]] bool newBlock(MBasicBlock* pred, MBasicBlock** block,
+ MBasicBlock::Kind kind = MBasicBlock::NORMAL) {
+ *block = MBasicBlock::New(mirGraph(), info(), pred, kind);
+ if (!*block) {
+ return false;
+ }
+ mirGraph().addBlock(*block);
+ (*block)->setLoopDepth(loopDepth_);
+ return true;
+ }
+
+ [[nodiscard]] bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block) {
+ if (!newBlock(pred, block)) {
+ return false;
+ }
+ pred->end(MGoto::New(alloc(), *block));
+ return true;
+ }
+
+ [[nodiscard]] bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next) {
+ MOZ_ASSERT(prev);
+ MOZ_ASSERT(next);
+ prev->end(MGoto::New(alloc(), next));
+ return next->addPredecessor(alloc(), prev);
+ }
+
+ [[nodiscard]] bool bindBranches(uint32_t absolute, DefVector* defs) {
+ if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
+ return inDeadCode() || popPushedDefs(defs);
+ }
+
+ ControlFlowPatchVector& patches = blockPatches_[absolute];
+ MControlInstruction* ins = patches[0].ins;
+ MBasicBlock* pred = ins->block();
+
+ MBasicBlock* join = nullptr;
+ if (!newBlock(pred, &join)) {
+ return false;
+ }
+
+ pred->mark();
+ ins->replaceSuccessor(patches[0].index, join);
+
+ for (size_t i = 1; i < patches.length(); i++) {
+ ins = patches[i].ins;
+
+ pred = ins->block();
+ if (!pred->isMarked()) {
+ if (!join->addPredecessor(alloc(), pred)) {
+ return false;
+ }
+ pred->mark();
+ }
+
+ ins->replaceSuccessor(patches[i].index, join);
+ }
+
+ MOZ_ASSERT_IF(curBlock_, !curBlock_->isMarked());
+ for (uint32_t i = 0; i < join->numPredecessors(); i++) {
+ join->getPredecessor(i)->unmark();
+ }
+
+ if (curBlock_ && !goToExistingBlock(curBlock_, join)) {
+ return false;
+ }
+
+ curBlock_ = join;
+
+ if (!popPushedDefs(defs)) {
+ return false;
+ }
+
+ patches.clear();
+ return true;
+ }
+};
+
+template <>
+MDefinition* FunctionCompiler::unary<MToFloat32>(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MToFloat32::New(alloc(), op, mustPreserveNaN(op->type()));
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MWasmBuiltinTruncateToInt32>(
+ MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MWasmBuiltinTruncateToInt32::New(alloc(), op, instancePointer_,
+ bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MNot>(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MNot::NewInt32(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MAbs>(MDefinition* op, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MAbs::NewWasm(alloc(), op, type);
+ curBlock_->add(ins);
+ return ins;
+}
+
+} // end anonymous namespace
+
+static bool EmitI32Const(FunctionCompiler& f) {
+ int32_t i32;
+ if (!f.iter().readI32Const(&i32)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constantI32(i32));
+ return true;
+}
+
+static bool EmitI64Const(FunctionCompiler& f) {
+ int64_t i64;
+ if (!f.iter().readI64Const(&i64)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constantI64(i64));
+ return true;
+}
+
+static bool EmitF32Const(FunctionCompiler& f) {
+ float f32;
+ if (!f.iter().readF32Const(&f32)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constantF32(f32));
+ return true;
+}
+
+static bool EmitF64Const(FunctionCompiler& f) {
+ double f64;
+ if (!f.iter().readF64Const(&f64)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constantF64(f64));
+ return true;
+}
+
+static bool EmitBlock(FunctionCompiler& f) {
+ ResultType params;
+ return f.iter().readBlock(&params) && f.startBlock();
+}
+
+static bool EmitLoop(FunctionCompiler& f) {
+ ResultType params;
+ if (!f.iter().readLoop(&params)) {
+ return false;
+ }
+
+ MBasicBlock* loopHeader;
+ if (!f.startLoop(&loopHeader, params.length())) {
+ return false;
+ }
+
+ f.addInterruptCheck();
+
+ f.iter().controlItem().setBlock(loopHeader);
+ return true;
+}
+
+static bool EmitIf(FunctionCompiler& f) {
+ ResultType params;
+ MDefinition* condition = nullptr;
+ if (!f.iter().readIf(&params, &condition)) {
+ return false;
+ }
+
+ MBasicBlock* elseBlock;
+ if (!f.branchAndStartThen(condition, &elseBlock)) {
+ return false;
+ }
+
+ f.iter().controlItem().setBlock(elseBlock);
+ return true;
+}
+
+static bool EmitElse(FunctionCompiler& f) {
+ ResultType paramType;
+ ResultType resultType;
+ DefVector thenValues;
+ if (!f.iter().readElse(&paramType, &resultType, &thenValues)) {
+ return false;
+ }
+
+ if (!f.pushDefs(thenValues)) {
+ return false;
+ }
+
+ Control& control = f.iter().controlItem();
+ if (!f.switchToElse(control.block, &control.block)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool EmitEnd(FunctionCompiler& f) {
+ LabelKind kind;
+ ResultType type;
+ DefVector preJoinDefs;
+ DefVector resultsForEmptyElse;
+ if (!f.iter().readEnd(&kind, &type, &preJoinDefs, &resultsForEmptyElse)) {
+ return false;
+ }
+
+ Control& control = f.iter().controlItem();
+ MBasicBlock* block = control.block;
+
+ if (!f.pushDefs(preJoinDefs)) {
+ return false;
+ }
+
+  // Every label case is responsible for popping the control item at the
+  // appropriate time.
+ DefVector postJoinDefs;
+ switch (kind) {
+ case LabelKind::Body:
+ if (!f.emitBodyDelegateThrowPad(control)) {
+ return false;
+ }
+ if (!f.finishBlock(&postJoinDefs)) {
+ return false;
+ }
+ if (!f.returnValues(postJoinDefs)) {
+ return false;
+ }
+ f.iter().popEnd();
+ MOZ_ASSERT(f.iter().controlStackEmpty());
+ return f.iter().endFunction(f.iter().end());
+ case LabelKind::Block:
+ if (!f.finishBlock(&postJoinDefs)) {
+ return false;
+ }
+ f.iter().popEnd();
+ break;
+ case LabelKind::Loop:
+ if (!f.closeLoop(block, &postJoinDefs)) {
+ return false;
+ }
+ f.iter().popEnd();
+ break;
+ case LabelKind::Then: {
+ // If we didn't see an Else, create a trivial else block so that we create
+ // a diamond anyway, to preserve Ion invariants.
+ if (!f.switchToElse(block, &block)) {
+ return false;
+ }
+
+ if (!f.pushDefs(resultsForEmptyElse)) {
+ return false;
+ }
+
+ if (!f.joinIfElse(block, &postJoinDefs)) {
+ return false;
+ }
+ f.iter().popEnd();
+ break;
+ }
+ case LabelKind::Else:
+ if (!f.joinIfElse(block, &postJoinDefs)) {
+ return false;
+ }
+ f.iter().popEnd();
+ break;
+ case LabelKind::Try:
+ case LabelKind::Catch:
+ case LabelKind::CatchAll:
+ if (!f.finishTryCatch(kind, control, &postJoinDefs)) {
+ return false;
+ }
+ f.iter().popEnd();
+ break;
+ }
+
+ MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == type.length());
+ f.iter().setResults(postJoinDefs.length(), postJoinDefs);
+
+ return true;
+}
+
+static bool EmitBr(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ ResultType type;
+ DefVector values;
+ if (!f.iter().readBr(&relativeDepth, &type, &values)) {
+ return false;
+ }
+
+ return f.br(relativeDepth, values);
+}
+
+static bool EmitBrIf(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ ResultType type;
+ DefVector values;
+ MDefinition* condition;
+ if (!f.iter().readBrIf(&relativeDepth, &type, &values, &condition)) {
+ return false;
+ }
+
+ return f.brIf(relativeDepth, values, condition);
+}
+
+static bool EmitBrTable(FunctionCompiler& f) {
+ Uint32Vector depths;
+ uint32_t defaultDepth;
+ ResultType branchValueType;
+ DefVector branchValues;
+ MDefinition* index;
+ if (!f.iter().readBrTable(&depths, &defaultDepth, &branchValueType,
+ &branchValues, &index)) {
+ return false;
+ }
+
+ // If all the targets are the same, or there are no targets, we can just
+ // use a goto. This is not just an optimization: MaybeFoldConditionBlock
+ // assumes that tables have more than one successor.
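+  // For example, a br_table whose listed depths all equal the default depth
+  // compiles to the same MIR as a plain br to that depth.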
+ bool allSameDepth = true;
+ for (uint32_t depth : depths) {
+ if (depth != defaultDepth) {
+ allSameDepth = false;
+ break;
+ }
+ }
+
+ if (allSameDepth) {
+ return f.br(defaultDepth, branchValues);
+ }
+
+ return f.brTable(index, defaultDepth, depths, branchValues);
+}
+
+static bool EmitReturn(FunctionCompiler& f) {
+ DefVector values;
+ if (!f.iter().readReturn(&values)) {
+ return false;
+ }
+
+ return f.returnValues(values);
+}
+
+static bool EmitUnreachable(FunctionCompiler& f) {
+ if (!f.iter().readUnreachable()) {
+ return false;
+ }
+
+ f.unreachableTrap();
+ return true;
+}
+
+static bool EmitTry(FunctionCompiler& f) {
+ ResultType params;
+ if (!f.iter().readTry(&params)) {
+ return false;
+ }
+
+ MBasicBlock* curBlock = nullptr;
+ if (!f.startTry(&curBlock)) {
+ return false;
+ }
+
+ f.iter().controlItem().setBlock(curBlock);
+ return true;
+}
+
+static bool EmitCatch(FunctionCompiler& f) {
+ LabelKind kind;
+ uint32_t tagIndex;
+ ResultType paramType, resultType;
+ DefVector tryValues;
+ if (!f.iter().readCatch(&kind, &tagIndex, &paramType, &resultType,
+ &tryValues)) {
+ return false;
+ }
+
+  // Push the results of the previous block to properly join control flow
+  // after the try and after each handler, as well as potential control flow
+  // patches from other instructions. This is similar to what is done for
+  // if-then-else control flow and for most other control flow joins.
+ if (!f.pushDefs(tryValues)) {
+ return false;
+ }
+
+ return f.switchToCatch(f.iter().controlItem(), kind, tagIndex);
+}
+
+static bool EmitCatchAll(FunctionCompiler& f) {
+ LabelKind kind;
+ ResultType paramType, resultType;
+ DefVector tryValues;
+ if (!f.iter().readCatchAll(&kind, &paramType, &resultType, &tryValues)) {
+ return false;
+ }
+
+  // Push the results of the previous block to properly join control flow
+  // after the try and after each handler, as well as potential control flow
+  // patches from other instructions.
+ if (!f.pushDefs(tryValues)) {
+ return false;
+ }
+
+ return f.switchToCatch(f.iter().controlItem(), kind, CatchAllIndex);
+}
+
+static bool EmitDelegate(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ ResultType resultType;
+ DefVector tryValues;
+ if (!f.iter().readDelegate(&relativeDepth, &resultType, &tryValues)) {
+ return false;
+ }
+
+ Control& control = f.iter().controlItem();
+ MBasicBlock* block = control.block;
+
+ // Unless the entire try-delegate is dead code, delegate any pad-patches from
+ // this try to the next try-block above relativeDepth.
+ if (block) {
+ ControlInstructionVector& delegatePadPatches = control.tryPadPatches;
+ if (!f.delegatePadPatches(delegatePadPatches, relativeDepth)) {
+ return false;
+ }
+ }
+ f.iter().popDelegate();
+
+ // Push the results of the previous block, and join control flow with
+  // potential control flow patches from other instructions in the try code.
+ // This is similar to what is done for EmitEnd.
+ if (!f.pushDefs(tryValues)) {
+ return false;
+ }
+ DefVector postJoinDefs;
+ if (!f.finishBlock(&postJoinDefs)) {
+ return false;
+ }
+ MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == resultType.length());
+ f.iter().setResults(postJoinDefs.length(), postJoinDefs);
+
+ return true;
+}
+
+static bool EmitThrow(FunctionCompiler& f) {
+ uint32_t tagIndex;
+ DefVector argValues;
+ if (!f.iter().readThrow(&tagIndex, &argValues)) {
+ return false;
+ }
+
+ return f.emitThrow(tagIndex, argValues);
+}
+
+static bool EmitRethrow(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ if (!f.iter().readRethrow(&relativeDepth)) {
+ return false;
+ }
+
+ return f.emitRethrow(relativeDepth);
+}
+
+static bool EmitCallArgs(FunctionCompiler& f, const FuncType& funcType,
+ const DefVector& args, CallCompileState* call) {
+ for (size_t i = 0, n = funcType.args().length(); i < n; ++i) {
+ if (!f.mirGen().ensureBallast()) {
+ return false;
+ }
+ if (!f.passArg(args[i], funcType.args()[i], call)) {
+ return false;
+ }
+ }
+
+ ResultType resultType = ResultType::Vector(funcType.results());
+ if (!f.passStackResultAreaCallArg(resultType, call)) {
+ return false;
+ }
+
+ return f.finishCall(call);
+}
+
+static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t funcIndex;
+ DefVector args;
+ if (asmJSFuncDef) {
+ if (!f.iter().readOldCallDirect(f.moduleEnv().numFuncImports, &funcIndex,
+ &args)) {
+ return false;
+ }
+ } else {
+ if (!f.iter().readCall(&funcIndex, &args)) {
+ return false;
+ }
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;
+
+ CallCompileState call;
+ if (!EmitCallArgs(f, funcType, args, &call)) {
+ return false;
+ }
+
+ DefVector results;
+ if (f.moduleEnv().funcIsImport(funcIndex)) {
+ uint32_t instanceDataOffset =
+ f.moduleEnv().offsetOfFuncImportInstanceData(funcIndex);
+ if (!f.callImport(instanceDataOffset, lineOrBytecode, call, funcType,
+ &results)) {
+ return false;
+ }
+ } else {
+ if (!f.callDirect(funcType, funcIndex, lineOrBytecode, call, &results)) {
+ return false;
+ }
+ }
+
+ f.iter().setResults(results.length(), results);
+ return true;
+}
+
+static bool EmitCallIndirect(FunctionCompiler& f, bool oldStyle) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t funcTypeIndex;
+ uint32_t tableIndex;
+ MDefinition* callee;
+ DefVector args;
+ if (oldStyle) {
+ tableIndex = 0;
+ if (!f.iter().readOldCallIndirect(&funcTypeIndex, &callee, &args)) {
+ return false;
+ }
+ } else {
+ if (!f.iter().readCallIndirect(&funcTypeIndex, &tableIndex, &callee,
+ &args)) {
+ return false;
+ }
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const FuncType& funcType = (*f.moduleEnv().types)[funcTypeIndex].funcType();
+
+ CallCompileState call;
+ if (!EmitCallArgs(f, funcType, args, &call)) {
+ return false;
+ }
+
+ DefVector results;
+ if (!f.callIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode, call,
+ &results)) {
+ return false;
+ }
+
+ f.iter().setResults(results.length(), results);
+ return true;
+}
+
+static bool EmitGetLocal(FunctionCompiler& f) {
+ uint32_t id;
+ if (!f.iter().readGetLocal(f.locals(), &id)) {
+ return false;
+ }
+
+ f.iter().setResult(f.getLocalDef(id));
+ return true;
+}
+
+static bool EmitSetLocal(FunctionCompiler& f) {
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readSetLocal(f.locals(), &id, &value)) {
+ return false;
+ }
+
+ f.assign(id, value);
+ return true;
+}
+
+static bool EmitTeeLocal(FunctionCompiler& f) {
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readTeeLocal(f.locals(), &id, &value)) {
+ return false;
+ }
+
+ f.assign(id, value);
+ return true;
+}
+
+static bool EmitGetGlobal(FunctionCompiler& f) {
+ uint32_t id;
+ if (!f.iter().readGetGlobal(&id)) {
+ return false;
+ }
+
+ const GlobalDesc& global = f.moduleEnv().globals[id];
+ if (!global.isConstant()) {
+ f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
+ global.isIndirect(),
+ global.type().toMIRType()));
+ return true;
+ }
+
+ LitVal value = global.constantValue();
+
+ MDefinition* result;
+ switch (value.type().kind()) {
+ case ValType::I32:
+ result = f.constantI32(int32_t(value.i32()));
+ break;
+ case ValType::I64:
+ result = f.constantI64(int64_t(value.i64()));
+ break;
+ case ValType::F32:
+ result = f.constantF32(value.f32());
+ break;
+ case ValType::F64:
+ result = f.constantF64(value.f64());
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ result = f.constantV128(value.v128());
+ break;
+#else
+ return f.iter().fail("Ion has no SIMD support yet");
+#endif
+ case ValType::Ref:
+ MOZ_ASSERT(value.ref().isNull());
+ result = f.constantNullRef();
+ break;
+ default:
+ MOZ_CRASH("unexpected type in EmitGetGlobal");
+ }
+
+ f.iter().setResult(result);
+ return true;
+}
+
+static bool EmitSetGlobal(FunctionCompiler& f) {
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readSetGlobal(&id, &value)) {
+ return false;
+ }
+
+ const GlobalDesc& global = f.moduleEnv().globals[id];
+ MOZ_ASSERT(global.isMutable());
+ return f.storeGlobalVar(bytecodeOffset, global.offset(), global.isIndirect(),
+ value);
+}
+
+static bool EmitTeeGlobal(FunctionCompiler& f) {
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readTeeGlobal(&id, &value)) {
+ return false;
+ }
+
+ const GlobalDesc& global = f.moduleEnv().globals[id];
+ MOZ_ASSERT(global.isMutable());
+
+ return f.storeGlobalVar(bytecodeOffset, global.offset(), global.isIndirect(),
+ value);
+}
+
+template <typename MIRClass>
+static bool EmitUnary(FunctionCompiler& f, ValType operandType) {
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitConversion(FunctionCompiler& f, ValType operandType,
+ ValType resultType) {
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitUnaryWithType(FunctionCompiler& f, ValType operandType,
+ MIRType mirType) {
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input, mirType));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitConversionWithType(FunctionCompiler& f, ValType operandType,
+ ValType resultType, MIRType mirType) {
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input, mirType));
+ return true;
+}
+
+static bool EmitTruncate(FunctionCompiler& f, ValType operandType,
+ ValType resultType, bool isUnsigned,
+ bool isSaturating) {
+ MDefinition* input = nullptr;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ TruncFlags flags = 0;
+ if (isUnsigned) {
+ flags |= TRUNC_UNSIGNED;
+ }
+ if (isSaturating) {
+ flags |= TRUNC_SATURATING;
+ }
+ if (resultType == ValType::I32) {
+ if (f.moduleEnv().isAsmJS()) {
+ if (input && (input->type() == MIRType::Double ||
+ input->type() == MIRType::Float32)) {
+ f.iter().setResult(f.unary<MWasmBuiltinTruncateToInt32>(input));
+ } else {
+ f.iter().setResult(f.unary<MTruncateToInt32>(input));
+ }
+ } else {
+ f.iter().setResult(f.truncate<MWasmTruncateToInt32>(input, flags));
+ }
+ } else {
+ MOZ_ASSERT(resultType == ValType::I64);
+ MOZ_ASSERT(!f.moduleEnv().isAsmJS());
+#if defined(JS_CODEGEN_ARM)
+ f.iter().setResult(f.truncateWithInstance(input, flags));
+#else
+ f.iter().setResult(f.truncate<MWasmTruncateToInt64>(input, flags));
+#endif
+ }
+ return true;
+}
+
+static bool EmitSignExtend(FunctionCompiler& f, uint32_t srcSize,
+ uint32_t targetSize) {
+ MDefinition* input;
+ ValType type = targetSize == 4 ? ValType::I32 : ValType::I64;
+ if (!f.iter().readConversion(type, type, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.signExtend(input, srcSize, targetSize));
+ return true;
+}
+
+static bool EmitExtendI32(FunctionCompiler& f, bool isUnsigned) {
+ MDefinition* input;
+ if (!f.iter().readConversion(ValType::I32, ValType::I64, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.extendI32(input, isUnsigned));
+ return true;
+}
+
+static bool EmitConvertI64ToFloatingPoint(FunctionCompiler& f,
+ ValType resultType, MIRType mirType,
+ bool isUnsigned) {
+ MDefinition* input;
+ if (!f.iter().readConversion(ValType::I64, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.convertI64ToFloatingPoint(input, mirType, isUnsigned));
+ return true;
+}
+
+static bool EmitReinterpret(FunctionCompiler& f, ValType resultType,
+ ValType operandType, MIRType mirType) {
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MWasmReinterpret>(input, mirType));
+ return true;
+}
+
+static bool EmitAdd(FunctionCompiler& f, ValType type, MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.add(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitSub(FunctionCompiler& f, ValType type, MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.sub(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitRotate(FunctionCompiler& f, ValType type, bool isLeftRotation) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs)) {
+ return false;
+ }
+
+ MDefinition* result = f.rotate(lhs, rhs, type.toMIRType(), isLeftRotation);
+ f.iter().setResult(result);
+ return true;
+}
+
+static bool EmitBitNot(FunctionCompiler& f, ValType operandType) {
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.bitnot(input));
+ return true;
+}
+
+static bool EmitBitwiseAndOrXor(FunctionCompiler& f, ValType operandType,
+ MIRType mirType,
+ MWasmBinaryBitwise::SubOpcode subOpc) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.binary<MWasmBinaryBitwise>(lhs, rhs, mirType, subOpc));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitShift(FunctionCompiler& f, ValType operandType,
+ MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.binary<MIRClass>(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitUrsh(FunctionCompiler& f, ValType operandType,
+ MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.ursh(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitMul(FunctionCompiler& f, ValType operandType, MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(
+ f.mul(lhs, rhs, mirType,
+ mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal));
+ return true;
+}
+
+static bool EmitDiv(FunctionCompiler& f, ValType operandType, MIRType mirType,
+ bool isUnsigned) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.div(lhs, rhs, mirType, isUnsigned));
+ return true;
+}
+
+static bool EmitRem(FunctionCompiler& f, ValType operandType, MIRType mirType,
+ bool isUnsigned) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.mod(lhs, rhs, mirType, isUnsigned));
+ return true;
+}
+
+static bool EmitMinMax(FunctionCompiler& f, ValType operandType,
+ MIRType mirType, bool isMax) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.minMax(lhs, rhs, mirType, isMax));
+ return true;
+}
+
+static bool EmitCopySign(FunctionCompiler& f, ValType operandType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.binary<MCopySign>(lhs, rhs, operandType.toMIRType()));
+ return true;
+}
+
+static bool EmitComparison(FunctionCompiler& f, ValType operandType,
+ JSOp compareOp, MCompare::CompareType compareType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readComparison(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.compare(lhs, rhs, compareOp, compareType));
+ return true;
+}
+
+static bool EmitSelect(FunctionCompiler& f, bool typed) {
+ StackType type;
+ MDefinition* trueValue;
+ MDefinition* falseValue;
+ MDefinition* condition;
+ if (!f.iter().readSelect(typed, &type, &trueValue, &falseValue, &condition)) {
+ return false;
+ }
+
+ f.iter().setResult(f.select(trueValue, falseValue, condition));
+ return true;
+}
+
+static bool EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+ auto* ins = f.load(addr.base, &access, type);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitStore(FunctionCompiler& f, ValType resultType,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr,
+ &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+
+ f.store(addr.base, &access, value);
+ return true;
+}
+
+static bool EmitTeeStore(FunctionCompiler& f, ValType resultType,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
+ &value)) {
+ return false;
+ }
+
+ MOZ_ASSERT(f.isMem32()); // asm.js opcode
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+
+ f.store(addr.base, &access, value);
+ return true;
+}
+
+static bool EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
+ &value)) {
+ return false;
+ }
+
+ if (resultType == ValType::F32 && viewType == Scalar::Float64) {
+ value = f.unary<MToDouble>(value);
+ } else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
+ value = f.unary<MToFloat32>(value);
+ } else {
+ MOZ_CRASH("unexpected coerced store");
+ }
+
+ MOZ_ASSERT(f.isMem32()); // asm.js opcode
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+
+ f.store(addr.base, &access, value);
+ return true;
+}
+
+static bool TryInlineUnaryBuiltin(FunctionCompiler& f, SymbolicAddress callee,
+ MDefinition* input) {
+ if (!input) {
+ return false;
+ }
+
+ MOZ_ASSERT(IsFloatingPointType(input->type()));
+
+ RoundingMode mode;
+ if (!IsRoundingFunction(callee, &mode)) {
+ return false;
+ }
+
+ if (!MNearbyInt::HasAssemblerSupport(mode)) {
+ return false;
+ }
+
+ f.iter().setResult(f.nearbyInt(input, mode));
+ return true;
+}
+
+static bool EmitUnaryMathBuiltinCall(FunctionCompiler& f,
+ const SymbolicAddressSignature& callee) {
+ MOZ_ASSERT(callee.numArgs == 1);
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ MDefinition* input;
+ if (!f.iter().readUnary(ValType::fromMIRType(callee.argTypes[0]), &input)) {
+ return false;
+ }
+
+ if (TryInlineUnaryBuiltin(f, callee.identity, input)) {
+ return true;
+ }
+
+ CallCompileState call;
+ if (!f.passArg(input, callee.argTypes[0], &call)) {
+ return false;
+ }
+
+ if (!f.finishCall(&call)) {
+ return false;
+ }
+
+ MDefinition* def;
+ if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
+ return false;
+ }
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool EmitBinaryMathBuiltinCall(FunctionCompiler& f,
+ const SymbolicAddressSignature& callee) {
+ MOZ_ASSERT(callee.numArgs == 2);
+ MOZ_ASSERT(callee.argTypes[0] == callee.argTypes[1]);
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ CallCompileState call;
+ MDefinition* lhs;
+ MDefinition* rhs;
+ // This call to readBinary assumes both operands have the same type.
+ if (!f.iter().readBinary(ValType::fromMIRType(callee.argTypes[0]), &lhs,
+ &rhs)) {
+ return false;
+ }
+
+ if (!f.passArg(lhs, callee.argTypes[0], &call)) {
+ return false;
+ }
+
+ if (!f.passArg(rhs, callee.argTypes[1], &call)) {
+ return false;
+ }
+
+ if (!f.finishCall(&call)) {
+ return false;
+ }
+
+ MDefinition* def;
+ if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
+ return false;
+ }
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool EmitMemoryGrow(FunctionCompiler& f) {
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* delta;
+ if (!f.iter().readMemoryGrow(&delta)) {
+ return false;
+ }
+
+ const SymbolicAddressSignature& callee =
+ f.isNoMemOrMem32() ? SASigMemoryGrowM32 : SASigMemoryGrowM64;
+
+ MDefinition* ret;
+ if (!f.emitInstanceCall1(bytecodeOffset, callee, delta, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitMemorySize(FunctionCompiler& f) {
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ if (!f.iter().readMemorySize()) {
+ return false;
+ }
+
+ const SymbolicAddressSignature& callee =
+ f.isNoMemOrMem32() ? SASigMemorySizeM32 : SASigMemorySizeM64;
+
+ MDefinition* ret;
+ if (!f.emitInstanceCall0(bytecodeOffset, callee, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitAtomicCmpXchg(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* oldValue;
+ MDefinition* newValue;
+ if (!f.iter().readAtomicCmpXchg(&addr, type, byteSize(viewType), &oldValue,
+ &newValue)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Full());
+ auto* ins =
+ f.atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitAtomicLoad(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readAtomicLoad(&addr, type, byteSize(viewType))) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Load());
+ auto* ins = f.load(addr.base, &access, type);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitAtomicRMW(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType, jit::AtomicOp op) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Full());
+ auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitAtomicStore(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readAtomicStore(&addr, type, byteSize(viewType), &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Store());
+ f.store(addr.base, &access, value);
+ return true;
+}
+
+static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
+ MOZ_ASSERT(type == ValType::I32 || type == ValType::I64);
+ MOZ_ASSERT(type.size() == byteSize);
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* expected;
+ MDefinition* timeout;
+ if (!f.iter().readWait(&addr, type, byteSize, &expected, &timeout)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(type == ValType::I32 ? Scalar::Int32 : Scalar::Int64,
+ addr.align, addr.offset, f.bytecodeOffset());
+ MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
+ if (!f.inDeadCode() && !ptr) {
+ return false;
+ }
+
+ const SymbolicAddressSignature& callee =
+ f.isNoMemOrMem32()
+ ? (type == ValType::I32 ? SASigWaitI32M32 : SASigWaitI64M32)
+ : (type == ValType::I32 ? SASigWaitI32M64 : SASigWaitI64M64);
+ MOZ_ASSERT(type.toMIRType() == callee.argTypes[2]);
+
+ MDefinition* ret;
+ if (!f.emitInstanceCall3(bytecodeOffset, callee, ptr, expected, timeout,
+ &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitFence(FunctionCompiler& f) {
+ if (!f.iter().readFence()) {
+ return false;
+ }
+
+ f.fence();
+ return true;
+}
+
+static bool EmitWake(FunctionCompiler& f) {
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* count;
+ if (!f.iter().readWake(&addr, &count)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
+ f.bytecodeOffset());
+ MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
+ if (!f.inDeadCode() && !ptr) {
+ return false;
+ }
+
+ const SymbolicAddressSignature& callee =
+ f.isNoMemOrMem32() ? SASigWakeM32 : SASigWakeM64;
+
+ MDefinition* ret;
+ if (!f.emitInstanceCall2(bytecodeOffset, callee, ptr, count, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitAtomicXchg(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Full());
+ MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitMemCopyCall(FunctionCompiler& f, MDefinition* dst,
+ MDefinition* src, MDefinition* len) {
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* memoryBase = f.memoryBase();
+ const SymbolicAddressSignature& callee =
+ (f.moduleEnv().usesSharedMemory()
+ ? (f.isMem32() ? SASigMemCopySharedM32 : SASigMemCopySharedM64)
+ : (f.isMem32() ? SASigMemCopyM32 : SASigMemCopyM64));
+
+ return f.emitInstanceCall4(bytecodeOffset, callee, dst, src, len, memoryBase);
+}
+
+static bool EmitMemCopyInline(FunctionCompiler& f, MDefinition* dst,
+ MDefinition* src, uint32_t length) {
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef ENABLE_WASM_SIMD
+ size_t numCopies16 = 0;
+ if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
+ numCopies16 = remainder / sizeof(V128);
+ remainder %= sizeof(V128);
+ }
+#endif
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
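+  // For example (illustrative only): with JS_64BIT defined and no 16-byte
+  // path taken, a length of 13 decomposes into numCopies8 = 1, numCopies4 = 1,
+  // numCopies2 = 0 and numCopies1 = 1, i.e. one 8-byte, one 4-byte and one
+  // 1-byte transfer.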
+
+ // Load all source bytes from low to high using the widest transfer width we
+ // can for the system. We will trap without writing anything if any source
+ // byte is out-of-bounds.
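+  // Because every load is emitted before any store, overlapping source and
+  // destination ranges are copied correctly (memmove-style), as memory.copy
+  // requires.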
+ size_t offset = 0;
+ DefVector loadedValues;
+
+#ifdef ENABLE_WASM_SIMD
+ for (uint32_t i = 0; i < numCopies16; i++) {
+ MemoryAccessDesc access(Scalar::Simd128, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::V128);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+
+ offset += sizeof(V128);
+ }
+#endif
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I64);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+
+ offset += sizeof(uint64_t);
+ }
+#endif
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I32);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+
+ offset += sizeof(uint32_t);
+ }
+
+ if (numCopies2) {
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I32);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+
+ offset += sizeof(uint16_t);
+ }
+
+ if (numCopies1) {
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I32);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+ }
+
+ // Store all source bytes to the destination from high to low. We will trap
+ // without writing anything on the first store if any dest byte is
+ // out-of-bounds.
+ offset = length;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ for (uint32_t i = 0; i < numCopies16; i++) {
+ offset -= sizeof(V128);
+
+ MemoryAccessDesc access(Scalar::Simd128, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+#endif
+
+ return true;
+}
+
+static bool EmitMemCopy(FunctionCompiler& f) {
+ MDefinition *dst, *src, *len;
+ uint32_t dstMemIndex;
+ uint32_t srcMemIndex;
+ if (!f.iter().readMemOrTableCopy(true, &dstMemIndex, &dst, &srcMemIndex, &src,
+ &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ if (len->isConstant()) {
+ uint64_t length = f.isMem32() ? len->toConstant()->toInt32()
+ : len->toConstant()->toInt64();
+ static_assert(MaxInlineMemoryCopyLength <= UINT32_MAX);
+ if (length != 0 && length <= MaxInlineMemoryCopyLength) {
+ return EmitMemCopyInline(f, dst, src, uint32_t(length));
+ }
+ }
+
+ return EmitMemCopyCall(f, dst, src, len);
+}
+
+static bool EmitTableCopy(FunctionCompiler& f) {
+ MDefinition *dst, *src, *len;
+ uint32_t dstTableIndex;
+ uint32_t srcTableIndex;
+ if (!f.iter().readMemOrTableCopy(false, &dstTableIndex, &dst, &srcTableIndex,
+ &src, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+ MDefinition* dti = f.constantI32(int32_t(dstTableIndex));
+ MDefinition* sti = f.constantI32(int32_t(srcTableIndex));
+
+ return f.emitInstanceCall5(bytecodeOffset, SASigTableCopy, dst, src, len, dti,
+ sti);
+}
+
+static bool EmitDataOrElemDrop(FunctionCompiler& f, bool isData) {
+ uint32_t segIndexVal = 0;
+ if (!f.iter().readDataOrElemDrop(isData, &segIndexVal)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* segIndex = f.constantI32(int32_t(segIndexVal));
+
+ const SymbolicAddressSignature& callee =
+ isData ? SASigDataDrop : SASigElemDrop;
+ return f.emitInstanceCall1(bytecodeOffset, callee, segIndex);
+}
+
+static bool EmitMemFillCall(FunctionCompiler& f, MDefinition* start,
+ MDefinition* val, MDefinition* len) {
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* memoryBase = f.memoryBase();
+
+ const SymbolicAddressSignature& callee =
+ (f.moduleEnv().usesSharedMemory()
+ ? (f.isMem32() ? SASigMemFillSharedM32 : SASigMemFillSharedM64)
+ : (f.isMem32() ? SASigMemFillM32 : SASigMemFillM64));
+ return f.emitInstanceCall4(bytecodeOffset, callee, start, val, len,
+ memoryBase);
+}
+
+static bool EmitMemFillInline(FunctionCompiler& f, MDefinition* start,
+ MDefinition* val, uint32_t length) {
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
+ uint32_t value = val->toConstant()->toInt32();
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef ENABLE_WASM_SIMD
+ size_t numCopies16 = 0;
+ if (MacroAssembler::SupportsFastUnalignedFPAccesses()) {
+ numCopies16 = remainder / sizeof(V128);
+ remainder %= sizeof(V128);
+ }
+#endif
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
+
+ // Generate splatted definitions for wider fills as needed
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* val16 = numCopies16 ? f.constantV128(V128(value)) : nullptr;
+#endif
+#ifdef JS_64BIT
+ MDefinition* val8 =
+ numCopies8 ? f.constantI64(int64_t(SplatByteToUInt<uint64_t>(value, 8)))
+ : nullptr;
+#endif
+ MDefinition* val4 =
+ numCopies4 ? f.constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 4)))
+ : nullptr;
+ MDefinition* val2 =
+ numCopies2 ? f.constantI32(int32_t(SplatByteToUInt<uint32_t>(value, 2)))
+ : nullptr;
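+  // For example, a fill byte of 0xAB splats to 0xABAB for the 16-bit stores,
+  // 0xABABABAB for the 32-bit stores, and (on 64-bit targets)
+  // 0xABABABABABABABAB for the 64-bit stores.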
+
+ // Store the fill value to the destination from high to low. We will trap
+ // without writing anything on the first store if any dest byte is
+ // out-of-bounds.
+ size_t offset = length;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val);
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val2);
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val4);
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val8);
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ for (uint32_t i = 0; i < numCopies16; i++) {
+ offset -= sizeof(V128);
+
+ MemoryAccessDesc access(Scalar::Simd128, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val16);
+ }
+#endif
+
+ return true;
+}
+
+static bool EmitMemFill(FunctionCompiler& f) {
+ MDefinition *start, *val, *len;
+ if (!f.iter().readMemFill(&start, &val, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ if (len->isConstant() && val->isConstant()) {
+ uint64_t length = f.isMem32() ? len->toConstant()->toInt32()
+ : len->toConstant()->toInt64();
+ static_assert(MaxInlineMemoryFillLength <= UINT32_MAX);
+ if (length != 0 && length <= MaxInlineMemoryFillLength) {
+ return EmitMemFillInline(f, start, val, uint32_t(length));
+ }
+ }
+
+ return EmitMemFillCall(f, start, val, len);
+}
+
+static bool EmitMemOrTableInit(FunctionCompiler& f, bool isMem) {
+ uint32_t segIndexVal = 0, dstTableIndex = 0;
+ MDefinition *dstOff, *srcOff, *len;
+ if (!f.iter().readMemOrTableInit(isMem, &segIndexVal, &dstTableIndex, &dstOff,
+ &srcOff, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* segIndex = f.constantI32(int32_t(segIndexVal));
+
+ if (isMem) {
+ const SymbolicAddressSignature& callee =
+ f.isMem32() ? SASigMemInitM32 : SASigMemInitM64;
+ return f.emitInstanceCall4(bytecodeOffset, callee, dstOff, srcOff, len,
+ segIndex);
+ }
+
+ MDefinition* dti = f.constantI32(int32_t(dstTableIndex));
+ return f.emitInstanceCall5(bytecodeOffset, SASigTableInit, dstOff, srcOff,
+ len, segIndex, dti);
+}
+
+// Note, table.{get,grow,set} on table(funcref) are currently rejected by the
+// verifier.
+
+static bool EmitTableFill(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition *start, *val, *len;
+ if (!f.iter().readTableFill(&tableIndex, &start, &val, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
+ if (!tableIndexArg) {
+ return false;
+ }
+
+ return f.emitInstanceCall4(bytecodeOffset, SASigTableFill, start, val, len,
+ tableIndexArg);
+}
+
+#ifdef ENABLE_WASM_MEMORY_CONTROL
+static bool EmitMemDiscard(FunctionCompiler& f) {
+ MDefinition *start, *len;
+ if (!f.iter().readMemDiscard(&start, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* memoryBase = f.memoryBase();
+
+ const SymbolicAddressSignature& callee =
+ (f.moduleEnv().usesSharedMemory()
+ ? (f.isMem32() ? SASigMemDiscardSharedM32 : SASigMemDiscardSharedM64)
+ : (f.isMem32() ? SASigMemDiscardM32 : SASigMemDiscardM64));
+ return f.emitInstanceCall3(bytecodeOffset, callee, start, len, memoryBase);
+}
+#endif  // ENABLE_WASM_MEMORY_CONTROL
+
+static bool EmitTableGet(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition* index;
+ if (!f.iter().readTableGet(&tableIndex, &index)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const TableDesc& table = f.moduleEnv().tables[tableIndex];
+ if (table.elemType.tableRepr() == TableRepr::Ref) {
+ MDefinition* ret = f.tableGetAnyRef(tableIndex, index);
+ if (!ret) {
+ return false;
+ }
+ f.iter().setResult(ret);
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
+ if (!tableIndexArg) {
+ return false;
+ }
+
+ // The return value here is either null, denoting an error, or a short-lived
+ // pointer to a location containing a possibly-null ref.
+ MDefinition* ret;
+ if (!f.emitInstanceCall2(bytecodeOffset, SASigTableGet, index, tableIndexArg,
+ &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitTableGrow(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition* initValue;
+ MDefinition* delta;
+ if (!f.iter().readTableGrow(&tableIndex, &initValue, &delta)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
+ if (!tableIndexArg) {
+ return false;
+ }
+
+ MDefinition* ret;
+ if (!f.emitInstanceCall3(bytecodeOffset, SASigTableGrow, initValue, delta,
+ tableIndexArg, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitTableSet(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition* index;
+ MDefinition* value;
+ if (!f.iter().readTableSet(&tableIndex, &index, &value)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ const TableDesc& table = f.moduleEnv().tables[tableIndex];
+ if (table.elemType.tableRepr() == TableRepr::Ref) {
+ return f.tableSetAnyRef(tableIndex, index, value, bytecodeOffset);
+ }
+
+ MDefinition* tableIndexArg = f.constantI32(int32_t(tableIndex));
+ if (!tableIndexArg) {
+ return false;
+ }
+
+ return f.emitInstanceCall3(bytecodeOffset, SASigTableSet, index, value,
+ tableIndexArg);
+}
+
+static bool EmitTableSize(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ if (!f.iter().readTableSize(&tableIndex)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MDefinition* length = f.loadTableLength(tableIndex);
+ if (!length) {
+ return false;
+ }
+
+ f.iter().setResult(length);
+ return true;
+}
+
+static bool EmitRefFunc(FunctionCompiler& f) {
+ uint32_t funcIndex;
+ if (!f.iter().readRefFunc(&funcIndex)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+
+ MDefinition* funcIndexArg = f.constantI32(int32_t(funcIndex));
+ if (!funcIndexArg) {
+ return false;
+ }
+
+ // The return value here is either null, denoting an error, or a short-lived
+ // pointer to a location containing a possibly-null ref.
+ MDefinition* ret;
+ if (!f.emitInstanceCall1(bytecodeOffset, SASigRefFunc, funcIndexArg, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitRefNull(FunctionCompiler& f) {
+ RefType type;
+ if (!f.iter().readRefNull(&type)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MDefinition* nullVal = f.constantNullRef();
+ if (!nullVal) {
+ return false;
+ }
+ f.iter().setResult(nullVal);
+ return true;
+}
+
+static bool EmitRefIsNull(FunctionCompiler& f) {
+ MDefinition* input;
+ if (!f.iter().readRefIsNull(&input)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MDefinition* nullVal = f.constantNullRef();
+ if (!nullVal) {
+ return false;
+ }
+ f.iter().setResult(
+ f.compare(input, nullVal, JSOp::Eq, MCompare::Compare_RefOrNull));
+ return true;
+}
+
+#ifdef ENABLE_WASM_SIMD
+static bool EmitConstSimd128(FunctionCompiler& f) {
+ V128 v128;
+ if (!f.iter().readV128Const(&v128)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constantV128(v128));
+ return true;
+}
+
+static bool EmitBinarySimd128(FunctionCompiler& f, bool commutative,
+ SimdOp op) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(ValType::V128, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.binarySimd128(lhs, rhs, commutative, op));
+ return true;
+}
+
+static bool EmitTernarySimd128(FunctionCompiler& f, wasm::SimdOp op) {
+ MDefinition* v0;
+ MDefinition* v1;
+ MDefinition* v2;
+ if (!f.iter().readTernary(ValType::V128, &v0, &v1, &v2)) {
+ return false;
+ }
+
+ f.iter().setResult(f.ternarySimd128(v0, v1, v2, op));
+ return true;
+}
+
+static bool EmitShiftSimd128(FunctionCompiler& f, SimdOp op) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readVectorShift(&lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.shiftSimd128(lhs, rhs, op));
+ return true;
+}
+
+static bool EmitSplatSimd128(FunctionCompiler& f, ValType inType, SimdOp op) {
+ MDefinition* src;
+ if (!f.iter().readConversion(inType, ValType::V128, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.scalarToSimd128(src, op));
+ return true;
+}
+
+static bool EmitUnarySimd128(FunctionCompiler& f, SimdOp op) {
+ MDefinition* src;
+ if (!f.iter().readUnary(ValType::V128, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unarySimd128(src, op));
+ return true;
+}
+
+static bool EmitReduceSimd128(FunctionCompiler& f, SimdOp op) {
+ MDefinition* src;
+ if (!f.iter().readConversion(ValType::V128, ValType::I32, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.reduceSimd128(src, op, ValType::I32));
+ return true;
+}
+
+static bool EmitExtractLaneSimd128(FunctionCompiler& f, ValType outType,
+ uint32_t laneLimit, SimdOp op) {
+ uint32_t laneIndex;
+ MDefinition* src;
+ if (!f.iter().readExtractLane(outType, laneLimit, &laneIndex, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.reduceSimd128(src, op, outType, laneIndex));
+ return true;
+}
+
+static bool EmitReplaceLaneSimd128(FunctionCompiler& f, ValType laneType,
+ uint32_t laneLimit, SimdOp op) {
+ uint32_t laneIndex;
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readReplaceLane(laneType, laneLimit, &laneIndex, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.replaceLaneSimd128(lhs, rhs, laneIndex, op));
+ return true;
+}
+
+static bool EmitShuffleSimd128(FunctionCompiler& f) {
+ MDefinition* v1;
+ MDefinition* v2;
+ V128 control;
+ if (!f.iter().readVectorShuffle(&v1, &v2, &control)) {
+ return false;
+ }
+
+ f.iter().setResult(f.shuffleSimd128(v1, v2, control));
+ return true;
+}
+
+static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
+ wasm::SimdOp splatOp) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoadSplat(Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+
+ f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
+ return true;
+}
+
+static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoadExtend(&addr)) {
+ return false;
+ }
+
+ f.iter().setResult(f.loadExtendSimd128(addr, op));
+ return true;
+}
+
+static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
+ size_t numBytes) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoadSplat(numBytes, &addr)) {
+ return false;
+ }
+
+ f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
+ return true;
+}
+
+static bool EmitLoadLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
+ uint32_t laneIndex;
+ MDefinition* src;
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoadLane(laneSize, &addr, &laneIndex, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.loadLaneSimd128(laneSize, addr, laneIndex, src));
+ return true;
+}
+
+static bool EmitStoreLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
+ uint32_t laneIndex;
+ MDefinition* src;
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readStoreLane(laneSize, &addr, &laneIndex, &src)) {
+ return false;
+ }
+
+ f.storeLaneSimd128(laneSize, addr, laneIndex, src);
+ return true;
+}
+
+#endif // ENABLE_WASM_SIMD
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+static bool EmitRefAsNonNull(FunctionCompiler& f) {
+ MDefinition* value;
+ if (!f.iter().readRefAsNonNull(&value)) {
+ return false;
+ }
+
+ return f.refAsNonNull(value);
+}
+
+static bool EmitBrOnNull(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ ResultType type;
+ DefVector values;
+ MDefinition* condition;
+ if (!f.iter().readBrOnNull(&relativeDepth, &type, &values, &condition)) {
+ return false;
+ }
+
+ return f.brOnNull(relativeDepth, values, type, condition);
+}
+
+static bool EmitBrOnNonNull(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ ResultType type;
+ DefVector values;
+ MDefinition* condition;
+ if (!f.iter().readBrOnNonNull(&relativeDepth, &type, &values, &condition)) {
+ return false;
+ }
+
+ return f.brOnNonNull(relativeDepth, values, type, condition);
+}
+
+static bool EmitCallRef(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const FuncType* funcType;
+ MDefinition* callee;
+ DefVector args;
+
+ if (!f.iter().readCallRef(&funcType, &callee, &args)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ CallCompileState call;
+ if (!EmitCallArgs(f, *funcType, args, &call)) {
+ return false;
+ }
+
+ DefVector results;
+ if (!f.callRef(*funcType, callee, lineOrBytecode, call, &results)) {
+ return false;
+ }
+
+ f.iter().setResults(results.length(), results);
+ return true;
+}
+
+#endif // ENABLE_WASM_FUNCTION_REFERENCES
+
+#ifdef ENABLE_WASM_GC
+
+static bool EmitStructNew(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex;
+ DefVector args;
+ if (!f.iter().readStructNew(&typeIndex, &args)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
+ MOZ_ASSERT(args.length() == structType.fields_.length());
+
+ // Allocate a default initialized struct. This requires the type definition
+ // for the struct.
+ MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
+ if (!typeDefData) {
+ return false;
+ }
+
+ // Create call: structObject = Instance::structNewUninit(typeDefData)
+ MDefinition* structObject;
+ if (!f.emitInstanceCall1(lineOrBytecode, SASigStructNewUninit, typeDefData,
+ &structObject)) {
+ return false;
+ }
+
+ // And fill in the fields.
+ for (uint32_t fieldIndex = 0; fieldIndex < structType.fields_.length();
+ fieldIndex++) {
+ if (!f.mirGen().ensureBallast()) {
+ return false;
+ }
+ const StructField& field = structType.fields_[fieldIndex];
+ if (!f.writeValueToStructField(lineOrBytecode, field, structObject,
+ args[fieldIndex],
+ WasmPreBarrierKind::None)) {
+ return false;
+ }
+ }
+
+ f.iter().setResult(structObject);
+ return true;
+}
+
+static bool EmitStructNewDefault(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex;
+ if (!f.iter().readStructNewDefault(&typeIndex)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Allocate a default initialized struct. This requires the type definition
+ // for the struct.
+ MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
+ if (!typeDefData) {
+ return false;
+ }
+
+ // Create call: structObject = Instance::structNew(typeDefData)
+ MDefinition* structObject;
+ if (!f.emitInstanceCall1(lineOrBytecode, SASigStructNew, typeDefData,
+ &structObject)) {
+ return false;
+ }
+
+ f.iter().setResult(structObject);
+ return true;
+}
+
+static bool EmitStructSet(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex;
+ uint32_t fieldIndex;
+ MDefinition* structObject;
+ MDefinition* value;
+ if (!f.iter().readStructSet(&typeIndex, &fieldIndex, &structObject, &value)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Check for null is done at writeValueToStructField.
+
+ // And fill in the field.
+ const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
+ const StructField& field = structType.fields_[fieldIndex];
+ return f.writeValueToStructField(lineOrBytecode, field, structObject, value,
+ WasmPreBarrierKind::Normal);
+}
+
+static bool EmitStructGet(FunctionCompiler& f, FieldWideningOp wideningOp) {
+ uint32_t typeIndex;
+ uint32_t fieldIndex;
+ MDefinition* structObject;
+ if (!f.iter().readStructGet(&typeIndex, &fieldIndex, wideningOp,
+ &structObject)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Check for null is done at readValueFromStructField.
+
+ // And fetch the data.
+ const StructType& structType = (*f.moduleEnv().types)[typeIndex].structType();
+ const StructField& field = structType.fields_[fieldIndex];
+ MDefinition* load =
+ f.readValueFromStructField(field, wideningOp, structObject);
+ if (!load) {
+ return false;
+ }
+
+ f.iter().setResult(load);
+ return true;
+}
+
+static bool EmitArrayNew(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex;
+ MDefinition* numElements;
+ MDefinition* fillValue;
+ if (!f.iter().readArrayNew(&typeIndex, &numElements, &fillValue)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
+ // this helper will trap.
+ MDefinition* arrayObject = f.createArrayNewCallAndLoop(
+ lineOrBytecode, typeIndex, numElements, fillValue);
+ if (!arrayObject) {
+ return false;
+ }
+
+ f.iter().setResult(arrayObject);
+ return true;
+}
+
+static bool EmitArrayNewDefault(FunctionCompiler& f) {
+ // This is almost identical to EmitArrayNew, except we skip the
+ // initialisation loop.
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex;
+ MDefinition* numElements;
+ if (!f.iter().readArrayNewDefault(&typeIndex, &numElements)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Create the array object, default-initialized.
+ MDefinition* arrayObject = f.createDefaultInitializedArrayObject(
+ lineOrBytecode, typeIndex, numElements);
+ if (!arrayObject) {
+ return false;
+ }
+
+ f.iter().setResult(arrayObject);
+ return true;
+}
+
+static bool EmitArrayNewFixed(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex, numElements;
+ DefVector values;
+
+ if (!f.iter().readArrayNewFixed(&typeIndex, &numElements, &values)) {
+ return false;
+ }
+ MOZ_ASSERT(values.length() == numElements);
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MDefinition* numElementsDef = f.constantI32(int32_t(numElements));
+ if (!numElementsDef) {
+ return false;
+ }
+
+ // Create the array object, default-initialized.
+ MDefinition* arrayObject = f.createDefaultInitializedArrayObject(
+ lineOrBytecode, typeIndex, numElementsDef);
+ if (!arrayObject) {
+ return false;
+ }
+
+ // Make `base` point at the first byte of the (OOL) data area.
+ MDefinition* base = f.getWasmArrayObjectData(arrayObject);
+ if (!base) {
+ return false;
+ }
+
+ // Write each element in turn.
+ const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
+ FieldType elemFieldType = arrayType.elementType_;
+ uint32_t elemSize = elemFieldType.size();
+
+ // How do we know that the offset expression `i * elemSize` below remains
+ // within 2^31 (signed-i32) range? In the worst case we will have 16-byte
+ // values, and there can be at most MaxFunctionBytes expressions, if it were
+ // theoretically possible to generate one expression per instruction byte.
+ // Hence the max offset we can be expected to generate is
+ // `16 * MaxFunctionBytes`.
+ static_assert(16 /* sizeof v128 */ * MaxFunctionBytes <=
+ MaxArrayPayloadBytes);
+ MOZ_RELEASE_ASSERT(numElements <= MaxFunctionBytes);
+
+ for (uint32_t i = 0; i < numElements; i++) {
+ if (!f.mirGen().ensureBallast()) {
+ return false;
+ }
+ // `i * elemSize` is made safe by the assertions above.
+ if (!f.writeGcValueAtBasePlusOffset(
+ lineOrBytecode, elemFieldType, arrayObject,
+ AliasSet::WasmArrayDataArea, values[numElements - 1 - i], base,
+ i * elemSize, false, WasmPreBarrierKind::None)) {
+ return false;
+ }
+ }
+
+ f.iter().setResult(arrayObject);
+ return true;
+}
+
+static bool EmitArrayNewData(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex, segIndex;
+ MDefinition* segByteOffset;
+ MDefinition* numElements;
+ if (!f.iter().readArrayNewData(&typeIndex, &segIndex, &segByteOffset,
+ &numElements)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Get the type definition data for the array as a whole.
+ MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
+ if (!typeDefData) {
+ return false;
+ }
+
+ // Other values we need to pass to the instance call:
+ MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
+ if (!segIndexM) {
+ return false;
+ }
+
+ // Create call:
+ // arrayObject = Instance::arrayNewData(segByteOffset:u32, numElements:u32,
+ // typeDefData:word, segIndex:u32)
+ // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
+ // this call will trap.
+ MDefinition* arrayObject;
+ if (!f.emitInstanceCall4(lineOrBytecode, SASigArrayNewData, segByteOffset,
+ numElements, typeDefData, segIndexM, &arrayObject)) {
+ return false;
+ }
+
+ f.iter().setResult(arrayObject);
+ return true;
+}
+
+static bool EmitArrayNewElem(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex, segIndex;
+ MDefinition* segElemIndex;
+ MDefinition* numElements;
+ if (!f.iter().readArrayNewElem(&typeIndex, &segIndex, &segElemIndex,
+ &numElements)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+  // Get the type definition data for the array as a whole.
+ MDefinition* typeDefData = f.loadTypeDefInstanceData(typeIndex);
+ if (!typeDefData) {
+ return false;
+ }
+
+ // Other values we need to pass to the instance call:
+ MDefinition* segIndexM = f.constantI32(int32_t(segIndex));
+ if (!segIndexM) {
+ return false;
+ }
+
+ // Create call:
+ // arrayObject = Instance::arrayNewElem(segElemIndex:u32, numElements:u32,
+ // typeDefData:word, segIndex:u32)
+ // If the requested size exceeds MaxArrayPayloadBytes, the MIR generated by
+ // this call will trap.
+ MDefinition* arrayObject;
+ if (!f.emitInstanceCall4(lineOrBytecode, SASigArrayNewElem, segElemIndex,
+ numElements, typeDefData, segIndexM, &arrayObject)) {
+ return false;
+ }
+
+ f.iter().setResult(arrayObject);
+ return true;
+}
+
+static bool EmitArraySet(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex;
+ MDefinition* value;
+ MDefinition* index;
+ MDefinition* arrayObject;
+ if (!f.iter().readArraySet(&typeIndex, &value, &index, &arrayObject)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Check for null is done at setupForArrayAccess.
+
+ // Create the object null check and the array bounds check and get the OOL
+ // data pointer.
+ MDefinition* base = f.setupForArrayAccess(arrayObject, index);
+ if (!base) {
+ return false;
+ }
+
+ // And do the store.
+ const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
+ FieldType elemFieldType = arrayType.elementType_;
+ uint32_t elemSize = elemFieldType.size();
+ MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
+
+ return f.writeGcValueAtBasePlusScaledIndex(
+ lineOrBytecode, elemFieldType, arrayObject, AliasSet::WasmArrayDataArea,
+ value, base, elemSize, index, WasmPreBarrierKind::Normal);
+}
+
+static bool EmitArrayGet(FunctionCompiler& f, FieldWideningOp wideningOp) {
+ uint32_t typeIndex;
+ MDefinition* index;
+ MDefinition* arrayObject;
+ if (!f.iter().readArrayGet(&typeIndex, wideningOp, &index, &arrayObject)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Check for null is done at setupForArrayAccess.
+
+ // Create the object null check and the array bounds check and get the OOL
+ // data pointer.
+ MDefinition* base = f.setupForArrayAccess(arrayObject, index);
+ if (!base) {
+ return false;
+ }
+
+ // And do the load.
+ const ArrayType& arrayType = (*f.moduleEnv().types)[typeIndex].arrayType();
+ FieldType elemFieldType = arrayType.elementType_;
+ uint32_t elemSize = elemFieldType.size();
+ MOZ_ASSERT(elemSize >= 1 && elemSize <= 16);
+
+ MDefinition* load = f.readGcValueAtBasePlusScaledIndex(
+ elemFieldType, wideningOp, arrayObject, AliasSet::WasmArrayDataArea, base,
+ elemSize, index);
+ if (!load) {
+ return false;
+ }
+
+ f.iter().setResult(load);
+ return true;
+}
+
+static bool EmitArrayLen(FunctionCompiler& f, bool decodeIgnoredTypeIndex) {
+ MDefinition* arrayObject;
+ if (!f.iter().readArrayLen(decodeIgnoredTypeIndex, &arrayObject)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ // Check for null is done at getWasmArrayObjectNumElements.
+
+ // Get the size value for the array
+ MDefinition* numElements = f.getWasmArrayObjectNumElements(arrayObject);
+ if (!numElements) {
+ return false;
+ }
+
+ f.iter().setResult(numElements);
+ return true;
+}
+
+static bool EmitArrayCopy(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ int32_t elemSize;
+ bool elemsAreRefTyped;
+ MDefinition* dstArrayObject;
+ MDefinition* dstArrayIndex;
+ MDefinition* srcArrayObject;
+ MDefinition* srcArrayIndex;
+ MDefinition* numElements;
+ if (!f.iter().readArrayCopy(&elemSize, &elemsAreRefTyped, &dstArrayObject,
+ &dstArrayIndex, &srcArrayObject, &srcArrayIndex,
+ &numElements)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MOZ_ASSERT_IF(elemsAreRefTyped,
+ size_t(elemSize) == MIRTypeToSize(TargetWordMIRType()));
+ MOZ_ASSERT_IF(!elemsAreRefTyped, elemSize == 1 || elemSize == 2 ||
+ elemSize == 4 || elemSize == 8 ||
+ elemSize == 16);
+
+ // A negative element size is used to inform Instance::arrayCopy that the
+ // values are reftyped. This avoids having to pass it an extra boolean
+ // argument.
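+  // For example, on a 64-bit target an array of reftyped elements is
+  // described by -8, whereas an array of i32 elements is described by 4.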
+ MDefinition* elemSizeDef =
+ f.constantI32(elemsAreRefTyped ? -elemSize : elemSize);
+ if (!elemSizeDef) {
+ return false;
+ }
+
+ // Create call:
+ // Instance::arrayCopy(dstArrayObject:word, dstArrayIndex:u32,
+ // srcArrayObject:word, srcArrayIndex:u32,
+ // numElements:u32,
+ // (elemsAreRefTyped ? -elemSize : elemSize):u32))
+ return f.emitInstanceCall6(lineOrBytecode, SASigArrayCopy, dstArrayObject,
+ dstArrayIndex, srcArrayObject, srcArrayIndex,
+ numElements, elemSizeDef);
+}
+
+static bool EmitRefTestV5(FunctionCompiler& f) {
+ MDefinition* ref;
+ RefType sourceType;
+ uint32_t typeIndex;
+ if (!f.iter().readRefTestV5(&sourceType, &typeIndex, &ref)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const TypeDef& typeDef = f.moduleEnv().types->type(typeIndex);
+ RefType destType = RefType::fromTypeDef(&typeDef, false);
+ MDefinition* success = f.refTest(ref, sourceType, destType);
+ if (!success) {
+ return false;
+ }
+
+ f.iter().setResult(success);
+ return true;
+}
+
+static bool EmitRefCastV5(FunctionCompiler& f) {
+ MDefinition* ref;
+ RefType sourceType;
+ uint32_t typeIndex;
+ if (!f.iter().readRefCastV5(&sourceType, &typeIndex, &ref)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const TypeDef& typeDef = f.moduleEnv().types->type(typeIndex);
+ RefType destType = RefType::fromTypeDef(&typeDef, /*nullable=*/true);
+ if (!f.refCast(ref, sourceType, destType)) {
+ return false;
+ }
+
+ f.iter().setResult(ref);
+ return true;
+}
+
+static bool EmitRefTest(FunctionCompiler& f, bool nullable) {
+ MDefinition* ref;
+ RefType sourceType;
+ RefType destType;
+ if (!f.iter().readRefTest(nullable, &sourceType, &destType, &ref)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MDefinition* success = f.refTest(ref, sourceType, destType);
+ if (!success) {
+ return false;
+ }
+
+ f.iter().setResult(success);
+ return true;
+}
+
+static bool EmitRefCast(FunctionCompiler& f, bool nullable) {
+ MDefinition* ref;
+ RefType sourceType;
+ RefType destType;
+ if (!f.iter().readRefCast(nullable, &sourceType, &destType, &ref)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ if (!f.refCast(ref, sourceType, destType)) {
+ return false;
+ }
+
+ f.iter().setResult(ref);
+ return true;
+}
+
+static bool EmitBrOnCast(FunctionCompiler& f) {
+ bool onSuccess;
+ uint32_t labelRelativeDepth;
+ RefType sourceType;
+ RefType destType;
+ ResultType labelType;
+ DefVector values;
+ if (!f.iter().readBrOnCast(&onSuccess, &labelRelativeDepth, &sourceType,
+ &destType, &labelType, &values)) {
+ return false;
+ }
+
+ return f.brOnCastCommon(onSuccess, labelRelativeDepth, sourceType, destType,
+ labelType, values);
+}
+
+static bool EmitBrOnCastCommonV5(FunctionCompiler& f, bool onSuccess) {
+ uint32_t labelRelativeDepth;
+ RefType sourceType;
+ uint32_t castTypeIndex;
+ ResultType labelType;
+ DefVector values;
+ if (onSuccess
+ ? !f.iter().readBrOnCastV5(&labelRelativeDepth, &sourceType,
+ &castTypeIndex, &labelType, &values)
+ : !f.iter().readBrOnCastFailV5(&labelRelativeDepth, &sourceType,
+ &castTypeIndex, &labelType, &values)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = f.moduleEnv().types->type(castTypeIndex);
+ RefType type = RefType::fromTypeDef(&typeDef, false);
+ return f.brOnCastCommon(onSuccess, labelRelativeDepth, sourceType, type,
+ labelType, values);
+}
+
+static bool EmitBrOnCastHeapV5(FunctionCompiler& f, bool onSuccess,
+ bool nullable) {
+ uint32_t labelRelativeDepth;
+ RefType sourceType;
+ RefType destType;
+ ResultType labelType;
+ DefVector values;
+ if (onSuccess ? !f.iter().readBrOnCastHeapV5(nullable, &labelRelativeDepth,
+ &sourceType, &destType,
+ &labelType, &values)
+ : !f.iter().readBrOnCastFailHeapV5(
+ nullable, &labelRelativeDepth, &sourceType, &destType,
+ &labelType, &values)) {
+ return false;
+ }
+
+ return f.brOnCastCommon(onSuccess, labelRelativeDepth, sourceType, destType,
+ labelType, values);
+}
+
+static bool EmitRefAsStructV5(FunctionCompiler& f) {
+ MDefinition* value;
+ if (!f.iter().readConversion(ValType(RefType::any()),
+ ValType(RefType::struct_().asNonNullable()),
+ &value)) {
+ return false;
+ }
+ f.iter().setResult(value);
+ return true;
+}
+
+static bool EmitBrOnNonStructV5(FunctionCompiler& f) {
+ uint32_t labelRelativeDepth;
+ ResultType labelType;
+ DefVector values;
+ if (!f.iter().readBrOnNonStructV5(&labelRelativeDepth, &labelType, &values)) {
+ return false;
+ }
+ return f.brOnNonStruct(values);
+}
+
+static bool EmitExternInternalize(FunctionCompiler& f) {
+ // extern.internalize is a no-op because anyref and extern share the same
+ // representation
+ MDefinition* ref;
+ if (!f.iter().readRefConversion(RefType::extern_(), RefType::any(), &ref)) {
+ return false;
+ }
+
+ f.iter().setResult(ref);
+ return true;
+}
+
+static bool EmitExternExternalize(FunctionCompiler& f) {
+ // extern.externalize is a no-op because anyref and extern share the same
+ // representation
+ MDefinition* ref;
+ if (!f.iter().readRefConversion(RefType::any(), RefType::extern_(), &ref)) {
+ return false;
+ }
+
+ f.iter().setResult(ref);
+ return true;
+}
+
+#endif // ENABLE_WASM_GC
+
+static bool EmitIntrinsic(FunctionCompiler& f) {
+  // It is almost, but not currently, possible to use
+  // FunctionCompiler::emitInstanceCallN here, since ::emitInstanceCallN
+  // expects an array of arguments along with a size, and that is not what is
+  // available here.  It would become possible if we were prepared to copy
+  // `intrinsic->params` into a fixed-size (16 element?) array, add
+  // `memoryBase`, and make the call.
+ const Intrinsic* intrinsic;
+
+ DefVector params;
+ if (!f.iter().readIntrinsic(&intrinsic, &params)) {
+ return false;
+ }
+
+ uint32_t bytecodeOffset = f.readBytecodeOffset();
+ const SymbolicAddressSignature& callee = intrinsic->signature;
+
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArgs(params, intrinsic->params, &args)) {
+ return false;
+ }
+
+ MDefinition* memoryBase = f.memoryBase();
+ if (!f.passArg(memoryBase, MIRType::Pointer, &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, bytecodeOffset, args);
+}
+
+static bool EmitBodyExprs(FunctionCompiler& f) {
+ if (!f.iter().startFunction(f.funcIndex(), f.locals())) {
+ return false;
+ }
+
+#define CHECK(c) \
+ if (!(c)) return false; \
+ break
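+// CHECK(c) runs a single opcode emitter: it returns false from EmitBodyExprs
+// if the emitter fails, and otherwise breaks out of the enclosing switch.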
+
+ while (true) {
+ if (!f.mirGen().ensureBallast()) {
+ return false;
+ }
+
+ OpBytes op;
+ if (!f.iter().readOp(&op)) {
+ return false;
+ }
+
+ switch (op.b0) {
+ case uint16_t(Op::End):
+ if (!EmitEnd(f)) {
+ return false;
+ }
+ if (f.iter().controlStackEmpty()) {
+ return true;
+ }
+ break;
+
+ // Control opcodes
+ case uint16_t(Op::Unreachable):
+ CHECK(EmitUnreachable(f));
+ case uint16_t(Op::Nop):
+ CHECK(f.iter().readNop());
+ case uint16_t(Op::Block):
+ CHECK(EmitBlock(f));
+ case uint16_t(Op::Loop):
+ CHECK(EmitLoop(f));
+ case uint16_t(Op::If):
+ CHECK(EmitIf(f));
+ case uint16_t(Op::Else):
+ CHECK(EmitElse(f));
+ case uint16_t(Op::Try):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitTry(f));
+ case uint16_t(Op::Catch):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitCatch(f));
+ case uint16_t(Op::CatchAll):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitCatchAll(f));
+ case uint16_t(Op::Delegate):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ if (!EmitDelegate(f)) {
+ return false;
+ }
+ break;
+ case uint16_t(Op::Throw):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitThrow(f));
+ case uint16_t(Op::Rethrow):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitRethrow(f));
+ case uint16_t(Op::Br):
+ CHECK(EmitBr(f));
+ case uint16_t(Op::BrIf):
+ CHECK(EmitBrIf(f));
+ case uint16_t(Op::BrTable):
+ CHECK(EmitBrTable(f));
+ case uint16_t(Op::Return):
+ CHECK(EmitReturn(f));
+
+ // Calls
+ case uint16_t(Op::Call):
+ CHECK(EmitCall(f, /* asmJSFuncDef = */ false));
+ case uint16_t(Op::CallIndirect):
+ CHECK(EmitCallIndirect(f, /* oldStyle = */ false));
+
+ // Parametric operators
+ case uint16_t(Op::Drop):
+ CHECK(f.iter().readDrop());
+ case uint16_t(Op::SelectNumeric):
+ CHECK(EmitSelect(f, /*typed*/ false));
+ case uint16_t(Op::SelectTyped):
+ CHECK(EmitSelect(f, /*typed*/ true));
+
+ // Locals and globals
+ case uint16_t(Op::LocalGet):
+ CHECK(EmitGetLocal(f));
+ case uint16_t(Op::LocalSet):
+ CHECK(EmitSetLocal(f));
+ case uint16_t(Op::LocalTee):
+ CHECK(EmitTeeLocal(f));
+ case uint16_t(Op::GlobalGet):
+ CHECK(EmitGetGlobal(f));
+ case uint16_t(Op::GlobalSet):
+ CHECK(EmitSetGlobal(f));
+ case uint16_t(Op::TableGet):
+ CHECK(EmitTableGet(f));
+ case uint16_t(Op::TableSet):
+ CHECK(EmitTableSet(f));
+
+ // Memory-related operators
+ case uint16_t(Op::I32Load):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I64Load):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int64));
+ case uint16_t(Op::F32Load):
+ CHECK(EmitLoad(f, ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F64Load):
+ CHECK(EmitLoad(f, ValType::F64, Scalar::Float64));
+ case uint16_t(Op::I32Load8S):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Load8U):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Uint8));
+ case uint16_t(Op::I32Load16S):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Load16U):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Uint16));
+ case uint16_t(Op::I64Load8S):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Load8U):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Uint8));
+ case uint16_t(Op::I64Load16S):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Load16U):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Uint16));
+ case uint16_t(Op::I64Load32S):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Load32U):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Uint32));
+ case uint16_t(Op::I32Store):
+ CHECK(EmitStore(f, ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I64Store):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int64));
+ case uint16_t(Op::F32Store):
+ CHECK(EmitStore(f, ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F64Store):
+ CHECK(EmitStore(f, ValType::F64, Scalar::Float64));
+ case uint16_t(Op::I32Store8):
+ CHECK(EmitStore(f, ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Store16):
+ CHECK(EmitStore(f, ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I64Store8):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Store16):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Store32):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int32));
+ case uint16_t(Op::MemorySize):
+ CHECK(EmitMemorySize(f));
+ case uint16_t(Op::MemoryGrow):
+ CHECK(EmitMemoryGrow(f));
+
+ // Constants
+ case uint16_t(Op::I32Const):
+ CHECK(EmitI32Const(f));
+ case uint16_t(Op::I64Const):
+ CHECK(EmitI64Const(f));
+ case uint16_t(Op::F32Const):
+ CHECK(EmitF32Const(f));
+ case uint16_t(Op::F64Const):
+ CHECK(EmitF64Const(f));
+
+ // Comparison operators
+ case uint16_t(Op::I32Eqz):
+ CHECK(EmitConversion<MNot>(f, ValType::I32, ValType::I32));
+ case uint16_t(Op::I32Eq):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Eq, MCompare::Compare_Int32));
+ case uint16_t(Op::I32Ne):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Ne, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LtS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Lt, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LtU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Lt,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I32GtS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Gt, MCompare::Compare_Int32));
+ case uint16_t(Op::I32GtU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Gt,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I32LeS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Le, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LeU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Le,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I32GeS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Ge, MCompare::Compare_Int32));
+ case uint16_t(Op::I32GeU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Ge,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I64Eqz):
+ CHECK(EmitConversion<MNot>(f, ValType::I64, ValType::I32));
+ case uint16_t(Op::I64Eq):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Eq, MCompare::Compare_Int64));
+ case uint16_t(Op::I64Ne):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Ne, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LtS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Lt, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LtU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Lt,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::I64GtS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Gt, MCompare::Compare_Int64));
+ case uint16_t(Op::I64GtU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Gt,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::I64LeS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Le, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LeU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Le,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::I64GeS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Ge, MCompare::Compare_Int64));
+ case uint16_t(Op::I64GeU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Ge,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::F32Eq):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Eq,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Ne):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Ne,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Lt):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Lt,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Gt):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Gt,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Le):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Le,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Ge):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Ge,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F64Eq):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Eq,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Ne):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Ne,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Lt):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Lt,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Gt):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Gt,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Le):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Le,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Ge):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Ge,
+ MCompare::Compare_Double));
+
+ // Numeric operators
+ case uint16_t(Op::I32Clz):
+ CHECK(EmitUnaryWithType<MClz>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Ctz):
+ CHECK(EmitUnaryWithType<MCtz>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Popcnt):
+ CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Add):
+ CHECK(EmitAdd(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Sub):
+ CHECK(EmitSub(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Mul):
+ CHECK(EmitMul(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32DivS):
+ case uint16_t(Op::I32DivU):
+ CHECK(
+ EmitDiv(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32DivU));
+ case uint16_t(Op::I32RemS):
+ case uint16_t(Op::I32RemU):
+ CHECK(
+ EmitRem(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32RemU));
+ case uint16_t(Op::I32And):
+ CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
+ MWasmBinaryBitwise::SubOpcode::And));
+ case uint16_t(Op::I32Or):
+ CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
+ MWasmBinaryBitwise::SubOpcode::Or));
+ case uint16_t(Op::I32Xor):
+ CHECK(EmitBitwiseAndOrXor(f, ValType::I32, MIRType::Int32,
+ MWasmBinaryBitwise::SubOpcode::Xor));
+ case uint16_t(Op::I32Shl):
+ CHECK(EmitShift<MLsh>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32ShrS):
+ CHECK(EmitShift<MRsh>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32ShrU):
+ CHECK(EmitUrsh(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Rotl):
+ case uint16_t(Op::I32Rotr):
+ CHECK(EmitRotate(f, ValType::I32, Op(op.b0) == Op::I32Rotl));
+ case uint16_t(Op::I64Clz):
+ CHECK(EmitUnaryWithType<MClz>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Ctz):
+ CHECK(EmitUnaryWithType<MCtz>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Popcnt):
+ CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Add):
+ CHECK(EmitAdd(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Sub):
+ CHECK(EmitSub(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Mul):
+ CHECK(EmitMul(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64DivS):
+ case uint16_t(Op::I64DivU):
+ CHECK(
+ EmitDiv(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64DivU));
+ case uint16_t(Op::I64RemS):
+ case uint16_t(Op::I64RemU):
+ CHECK(
+ EmitRem(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64RemU));
+ case uint16_t(Op::I64And):
+ CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
+ MWasmBinaryBitwise::SubOpcode::And));
+ case uint16_t(Op::I64Or):
+ CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
+ MWasmBinaryBitwise::SubOpcode::Or));
+ case uint16_t(Op::I64Xor):
+ CHECK(EmitBitwiseAndOrXor(f, ValType::I64, MIRType::Int64,
+ MWasmBinaryBitwise::SubOpcode::Xor));
+ case uint16_t(Op::I64Shl):
+ CHECK(EmitShift<MLsh>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64ShrS):
+ CHECK(EmitShift<MRsh>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64ShrU):
+ CHECK(EmitUrsh(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Rotl):
+ case uint16_t(Op::I64Rotr):
+ CHECK(EmitRotate(f, ValType::I64, Op(op.b0) == Op::I64Rotl));
+ case uint16_t(Op::F32Abs):
+ CHECK(EmitUnaryWithType<MAbs>(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Neg):
+ CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Ceil):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilF));
+ case uint16_t(Op::F32Floor):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorF));
+ case uint16_t(Op::F32Trunc):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncF));
+ case uint16_t(Op::F32Nearest):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntF));
+ case uint16_t(Op::F32Sqrt):
+ CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Add):
+ CHECK(EmitAdd(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Sub):
+ CHECK(EmitSub(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Mul):
+ CHECK(EmitMul(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Div):
+ CHECK(EmitDiv(f, ValType::F32, MIRType::Float32,
+ /* isUnsigned = */ false));
+ case uint16_t(Op::F32Min):
+ case uint16_t(Op::F32Max):
+ CHECK(EmitMinMax(f, ValType::F32, MIRType::Float32,
+ Op(op.b0) == Op::F32Max));
+ case uint16_t(Op::F32CopySign):
+ CHECK(EmitCopySign(f, ValType::F32));
+ case uint16_t(Op::F64Abs):
+ CHECK(EmitUnaryWithType<MAbs>(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Neg):
+ CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Ceil):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilD));
+ case uint16_t(Op::F64Floor):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorD));
+ case uint16_t(Op::F64Trunc):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncD));
+ case uint16_t(Op::F64Nearest):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntD));
+ case uint16_t(Op::F64Sqrt):
+ CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Add):
+ CHECK(EmitAdd(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Sub):
+ CHECK(EmitSub(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Mul):
+ CHECK(EmitMul(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Div):
+ CHECK(EmitDiv(f, ValType::F64, MIRType::Double,
+ /* isUnsigned = */ false));
+ case uint16_t(Op::F64Min):
+ case uint16_t(Op::F64Max):
+ CHECK(EmitMinMax(f, ValType::F64, MIRType::Double,
+ Op(op.b0) == Op::F64Max));
+ case uint16_t(Op::F64CopySign):
+ CHECK(EmitCopySign(f, ValType::F64));
+
+ // Conversions
+ case uint16_t(Op::I32WrapI64):
+ CHECK(EmitConversion<MWrapInt64ToInt32>(f, ValType::I64, ValType::I32));
+ case uint16_t(Op::I32TruncF32S):
+ case uint16_t(Op::I32TruncF32U):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
+ Op(op.b0) == Op::I32TruncF32U, false));
+ case uint16_t(Op::I32TruncF64S):
+ case uint16_t(Op::I32TruncF64U):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
+ Op(op.b0) == Op::I32TruncF64U, false));
+ case uint16_t(Op::I64ExtendI32S):
+ case uint16_t(Op::I64ExtendI32U):
+ CHECK(EmitExtendI32(f, Op(op.b0) == Op::I64ExtendI32U));
+ case uint16_t(Op::I64TruncF32S):
+ case uint16_t(Op::I64TruncF32U):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
+ Op(op.b0) == Op::I64TruncF32U, false));
+ case uint16_t(Op::I64TruncF64S):
+ case uint16_t(Op::I64TruncF64U):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
+ Op(op.b0) == Op::I64TruncF64U, false));
+ case uint16_t(Op::F32ConvertI32S):
+ CHECK(EmitConversion<MToFloat32>(f, ValType::I32, ValType::F32));
+ case uint16_t(Op::F32ConvertI32U):
+ CHECK(EmitConversion<MWasmUnsignedToFloat32>(f, ValType::I32,
+ ValType::F32));
+ case uint16_t(Op::F32ConvertI64S):
+ case uint16_t(Op::F32ConvertI64U):
+ CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32,
+ Op(op.b0) == Op::F32ConvertI64U));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK(EmitConversion<MToFloat32>(f, ValType::F64, ValType::F32));
+ case uint16_t(Op::F64ConvertI32S):
+ CHECK(EmitConversion<MToDouble>(f, ValType::I32, ValType::F64));
+ case uint16_t(Op::F64ConvertI32U):
+ CHECK(EmitConversion<MWasmUnsignedToDouble>(f, ValType::I32,
+ ValType::F64));
+ case uint16_t(Op::F64ConvertI64S):
+ case uint16_t(Op::F64ConvertI64U):
+ CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double,
+ Op(op.b0) == Op::F64ConvertI64U));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK(EmitConversion<MToDouble>(f, ValType::F32, ValType::F64));
+
+ // Reinterpretations
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK(EmitReinterpret(f, ValType::I32, ValType::F32, MIRType::Int32));
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK(EmitReinterpret(f, ValType::I64, ValType::F64, MIRType::Int64));
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK(EmitReinterpret(f, ValType::F32, ValType::I32, MIRType::Float32));
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK(EmitReinterpret(f, ValType::F64, ValType::I64, MIRType::Double));
+
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::RefEq):
+ if (!f.moduleEnv().gcEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitComparison(f, RefType::eq(), JSOp::Eq,
+ MCompare::Compare_RefOrNull));
+#endif
+ case uint16_t(Op::RefFunc):
+ CHECK(EmitRefFunc(f));
+ case uint16_t(Op::RefNull):
+ CHECK(EmitRefNull(f));
+ case uint16_t(Op::RefIsNull):
+ CHECK(EmitRefIsNull(f));
+
+ // Sign extensions
+ case uint16_t(Op::I32Extend8S):
+ CHECK(EmitSignExtend(f, 1, 4));
+ case uint16_t(Op::I32Extend16S):
+ CHECK(EmitSignExtend(f, 2, 4));
+ case uint16_t(Op::I64Extend8S):
+ CHECK(EmitSignExtend(f, 1, 8));
+ case uint16_t(Op::I64Extend16S):
+ CHECK(EmitSignExtend(f, 2, 8));
+ case uint16_t(Op::I64Extend32S):
+ CHECK(EmitSignExtend(f, 4, 8));
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint16_t(Op::RefAsNonNull):
+ if (!f.moduleEnv().functionReferencesEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitRefAsNonNull(f));
+ case uint16_t(Op::BrOnNull): {
+ if (!f.moduleEnv().functionReferencesEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitBrOnNull(f));
+ }
+ case uint16_t(Op::BrOnNonNull): {
+ if (!f.moduleEnv().functionReferencesEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitBrOnNonNull(f));
+ }
+ case uint16_t(Op::CallRef): {
+ if (!f.moduleEnv().functionReferencesEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitCallRef(f));
+ }
+#endif
+
+ // Gc operations
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::GcPrefix): {
+ if (!f.moduleEnv().gcEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew):
+ CHECK(EmitStructNew(f));
+ case uint32_t(GcOp::StructNewDefault):
+ CHECK(EmitStructNewDefault(f));
+ case uint32_t(GcOp::StructSet):
+ CHECK(EmitStructSet(f));
+ case uint32_t(GcOp::StructGet):
+ CHECK(EmitStructGet(f, FieldWideningOp::None));
+ case uint32_t(GcOp::StructGetS):
+ CHECK(EmitStructGet(f, FieldWideningOp::Signed));
+ case uint32_t(GcOp::StructGetU):
+ CHECK(EmitStructGet(f, FieldWideningOp::Unsigned));
+ case uint32_t(GcOp::ArrayNew):
+ CHECK(EmitArrayNew(f));
+ case uint32_t(GcOp::ArrayNewDefault):
+ CHECK(EmitArrayNewDefault(f));
+ case uint32_t(GcOp::ArrayNewFixed):
+ CHECK(EmitArrayNewFixed(f));
+ case uint32_t(GcOp::ArrayNewData):
+ CHECK(EmitArrayNewData(f));
+ case uint32_t(GcOp::ArrayInitFromElemStaticV5):
+ case uint32_t(GcOp::ArrayNewElem):
+ CHECK(EmitArrayNewElem(f));
+ case uint32_t(GcOp::ArraySet):
+ CHECK(EmitArraySet(f));
+ case uint32_t(GcOp::ArrayGet):
+ CHECK(EmitArrayGet(f, FieldWideningOp::None));
+ case uint32_t(GcOp::ArrayGetS):
+ CHECK(EmitArrayGet(f, FieldWideningOp::Signed));
+ case uint32_t(GcOp::ArrayGetU):
+ CHECK(EmitArrayGet(f, FieldWideningOp::Unsigned));
+ case uint32_t(GcOp::ArrayLenWithTypeIndex):
+ CHECK(EmitArrayLen(f, /*decodeIgnoredTypeIndex=*/true));
+ case uint32_t(GcOp::ArrayLen):
+ CHECK(EmitArrayLen(f, /*decodeIgnoredTypeIndex=*/false));
+ case uint32_t(GcOp::ArrayCopy):
+ CHECK(EmitArrayCopy(f));
+ case uint32_t(GcOp::RefTestV5):
+ CHECK(EmitRefTestV5(f));
+ case uint32_t(GcOp::RefCastV5):
+ CHECK(EmitRefCastV5(f));
+ case uint32_t(GcOp::BrOnCast):
+ CHECK(EmitBrOnCast(f));
+ case uint32_t(GcOp::BrOnCastV5):
+ CHECK(EmitBrOnCastCommonV5(f, /*onSuccess=*/true));
+ case uint32_t(GcOp::BrOnCastFailV5):
+ CHECK(EmitBrOnCastCommonV5(f, /*onSuccess=*/false));
+ case uint32_t(GcOp::BrOnCastHeapV5):
+ CHECK(
+ EmitBrOnCastHeapV5(f, /*onSuccess=*/true, /*nullable=*/false));
+ case uint32_t(GcOp::BrOnCastHeapNullV5):
+ CHECK(EmitBrOnCastHeapV5(f, /*onSuccess=*/true, /*nullable=*/true));
+ case uint32_t(GcOp::BrOnCastFailHeapV5):
+ CHECK(
+ EmitBrOnCastHeapV5(f, /*onSuccess=*/false, /*nullable=*/false));
+ case uint32_t(GcOp::BrOnCastFailHeapNullV5):
+ CHECK(
+ EmitBrOnCastHeapV5(f, /*onSuccess=*/false, /*nullable=*/true));
+ case uint32_t(GcOp::RefAsStructV5):
+ CHECK(EmitRefAsStructV5(f));
+ case uint32_t(GcOp::BrOnNonStructV5):
+ CHECK(EmitBrOnNonStructV5(f));
+ case uint32_t(GcOp::RefTest):
+ CHECK(EmitRefTest(f, /*nullable=*/false));
+ case uint32_t(GcOp::RefTestNull):
+ CHECK(EmitRefTest(f, /*nullable=*/true));
+ case uint32_t(GcOp::RefCast):
+ CHECK(EmitRefCast(f, /*nullable=*/false));
+ case uint32_t(GcOp::RefCastNull):
+ CHECK(EmitRefCast(f, /*nullable=*/true));
+        case uint32_t(GcOp::ExternInternalize):
+          CHECK(EmitExternInternalize(f));
+        case uint32_t(GcOp::ExternExternalize):
+          CHECK(EmitExternExternalize(f));
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ } // switch (op.b1)
+ break;
+ }
+#endif
+
+ // SIMD operations
+#ifdef ENABLE_WASM_SIMD
+ case uint16_t(Op::SimdPrefix): {
+ if (!f.moduleEnv().simdAvailable()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(SimdOp::V128Const):
+ CHECK(EmitConstSimd128(f));
+ case uint32_t(SimdOp::V128Load):
+ CHECK(EmitLoad(f, ValType::V128, Scalar::Simd128));
+ case uint32_t(SimdOp::V128Store):
+ CHECK(EmitStore(f, ValType::V128, Scalar::Simd128));
+ case uint32_t(SimdOp::V128And):
+ case uint32_t(SimdOp::V128Or):
+ case uint32_t(SimdOp::V128Xor):
+ case uint32_t(SimdOp::I8x16AvgrU):
+ case uint32_t(SimdOp::I16x8AvgrU):
+ case uint32_t(SimdOp::I8x16Add):
+ case uint32_t(SimdOp::I8x16AddSatS):
+ case uint32_t(SimdOp::I8x16AddSatU):
+ case uint32_t(SimdOp::I8x16MinS):
+ case uint32_t(SimdOp::I8x16MinU):
+ case uint32_t(SimdOp::I8x16MaxS):
+ case uint32_t(SimdOp::I8x16MaxU):
+ case uint32_t(SimdOp::I16x8Add):
+ case uint32_t(SimdOp::I16x8AddSatS):
+ case uint32_t(SimdOp::I16x8AddSatU):
+ case uint32_t(SimdOp::I16x8Mul):
+ case uint32_t(SimdOp::I16x8MinS):
+ case uint32_t(SimdOp::I16x8MinU):
+ case uint32_t(SimdOp::I16x8MaxS):
+ case uint32_t(SimdOp::I16x8MaxU):
+ case uint32_t(SimdOp::I32x4Add):
+ case uint32_t(SimdOp::I32x4Mul):
+ case uint32_t(SimdOp::I32x4MinS):
+ case uint32_t(SimdOp::I32x4MinU):
+ case uint32_t(SimdOp::I32x4MaxS):
+ case uint32_t(SimdOp::I32x4MaxU):
+ case uint32_t(SimdOp::I64x2Add):
+ case uint32_t(SimdOp::I64x2Mul):
+ case uint32_t(SimdOp::F32x4Add):
+ case uint32_t(SimdOp::F32x4Mul):
+ case uint32_t(SimdOp::F32x4Min):
+ case uint32_t(SimdOp::F32x4Max):
+ case uint32_t(SimdOp::F64x2Add):
+ case uint32_t(SimdOp::F64x2Mul):
+ case uint32_t(SimdOp::F64x2Min):
+ case uint32_t(SimdOp::F64x2Max):
+ case uint32_t(SimdOp::I8x16Eq):
+ case uint32_t(SimdOp::I8x16Ne):
+ case uint32_t(SimdOp::I16x8Eq):
+ case uint32_t(SimdOp::I16x8Ne):
+ case uint32_t(SimdOp::I32x4Eq):
+ case uint32_t(SimdOp::I32x4Ne):
+ case uint32_t(SimdOp::I64x2Eq):
+ case uint32_t(SimdOp::I64x2Ne):
+ case uint32_t(SimdOp::F32x4Eq):
+ case uint32_t(SimdOp::F32x4Ne):
+ case uint32_t(SimdOp::F64x2Eq):
+ case uint32_t(SimdOp::F64x2Ne):
+ case uint32_t(SimdOp::I32x4DotI16x8S):
+ case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
+ case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
+ case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
+ case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
+ case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
+ case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
+ case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
+ case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
+ case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
+ case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
+ case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
+ case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
+ case uint32_t(SimdOp::I16x8Q15MulrSatS):
+ CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
+ case uint32_t(SimdOp::V128AndNot):
+ case uint32_t(SimdOp::I8x16Sub):
+ case uint32_t(SimdOp::I8x16SubSatS):
+ case uint32_t(SimdOp::I8x16SubSatU):
+ case uint32_t(SimdOp::I16x8Sub):
+ case uint32_t(SimdOp::I16x8SubSatS):
+ case uint32_t(SimdOp::I16x8SubSatU):
+ case uint32_t(SimdOp::I32x4Sub):
+ case uint32_t(SimdOp::I64x2Sub):
+ case uint32_t(SimdOp::F32x4Sub):
+ case uint32_t(SimdOp::F32x4Div):
+ case uint32_t(SimdOp::F64x2Sub):
+ case uint32_t(SimdOp::F64x2Div):
+ case uint32_t(SimdOp::I8x16NarrowI16x8S):
+ case uint32_t(SimdOp::I8x16NarrowI16x8U):
+ case uint32_t(SimdOp::I16x8NarrowI32x4S):
+ case uint32_t(SimdOp::I16x8NarrowI32x4U):
+ case uint32_t(SimdOp::I8x16LtS):
+ case uint32_t(SimdOp::I8x16LtU):
+ case uint32_t(SimdOp::I8x16GtS):
+ case uint32_t(SimdOp::I8x16GtU):
+ case uint32_t(SimdOp::I8x16LeS):
+ case uint32_t(SimdOp::I8x16LeU):
+ case uint32_t(SimdOp::I8x16GeS):
+ case uint32_t(SimdOp::I8x16GeU):
+ case uint32_t(SimdOp::I16x8LtS):
+ case uint32_t(SimdOp::I16x8LtU):
+ case uint32_t(SimdOp::I16x8GtS):
+ case uint32_t(SimdOp::I16x8GtU):
+ case uint32_t(SimdOp::I16x8LeS):
+ case uint32_t(SimdOp::I16x8LeU):
+ case uint32_t(SimdOp::I16x8GeS):
+ case uint32_t(SimdOp::I16x8GeU):
+ case uint32_t(SimdOp::I32x4LtS):
+ case uint32_t(SimdOp::I32x4LtU):
+ case uint32_t(SimdOp::I32x4GtS):
+ case uint32_t(SimdOp::I32x4GtU):
+ case uint32_t(SimdOp::I32x4LeS):
+ case uint32_t(SimdOp::I32x4LeU):
+ case uint32_t(SimdOp::I32x4GeS):
+ case uint32_t(SimdOp::I32x4GeU):
+ case uint32_t(SimdOp::I64x2LtS):
+ case uint32_t(SimdOp::I64x2GtS):
+ case uint32_t(SimdOp::I64x2LeS):
+ case uint32_t(SimdOp::I64x2GeS):
+ case uint32_t(SimdOp::F32x4Lt):
+ case uint32_t(SimdOp::F32x4Gt):
+ case uint32_t(SimdOp::F32x4Le):
+ case uint32_t(SimdOp::F32x4Ge):
+ case uint32_t(SimdOp::F64x2Lt):
+ case uint32_t(SimdOp::F64x2Gt):
+ case uint32_t(SimdOp::F64x2Le):
+ case uint32_t(SimdOp::F64x2Ge):
+ case uint32_t(SimdOp::I8x16Swizzle):
+ case uint32_t(SimdOp::F32x4PMax):
+ case uint32_t(SimdOp::F32x4PMin):
+ case uint32_t(SimdOp::F64x2PMax):
+ case uint32_t(SimdOp::F64x2PMin):
+ CHECK(
+ EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16Splat):
+ case uint32_t(SimdOp::I16x8Splat):
+ case uint32_t(SimdOp::I32x4Splat):
+ CHECK(EmitSplatSimd128(f, ValType::I32, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I64x2Splat):
+ CHECK(EmitSplatSimd128(f, ValType::I64, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F32x4Splat):
+ CHECK(EmitSplatSimd128(f, ValType::F32, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F64x2Splat):
+ CHECK(EmitSplatSimd128(f, ValType::F64, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16Neg):
+ case uint32_t(SimdOp::I16x8Neg):
+ case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
+ case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
+ case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
+ case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
+ case uint32_t(SimdOp::I32x4Neg):
+ case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
+ case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
+ case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
+ case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
+ case uint32_t(SimdOp::I32x4TruncSatF32x4S):
+ case uint32_t(SimdOp::I32x4TruncSatF32x4U):
+ case uint32_t(SimdOp::I64x2Neg):
+ case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
+ case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
+ case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
+ case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
+ case uint32_t(SimdOp::F32x4Abs):
+ case uint32_t(SimdOp::F32x4Neg):
+ case uint32_t(SimdOp::F32x4Sqrt):
+ case uint32_t(SimdOp::F32x4ConvertI32x4S):
+ case uint32_t(SimdOp::F32x4ConvertI32x4U):
+ case uint32_t(SimdOp::F64x2Abs):
+ case uint32_t(SimdOp::F64x2Neg):
+ case uint32_t(SimdOp::F64x2Sqrt):
+ case uint32_t(SimdOp::V128Not):
+ case uint32_t(SimdOp::I8x16Popcnt):
+ case uint32_t(SimdOp::I8x16Abs):
+ case uint32_t(SimdOp::I16x8Abs):
+ case uint32_t(SimdOp::I32x4Abs):
+ case uint32_t(SimdOp::I64x2Abs):
+ case uint32_t(SimdOp::F32x4Ceil):
+ case uint32_t(SimdOp::F32x4Floor):
+ case uint32_t(SimdOp::F32x4Trunc):
+ case uint32_t(SimdOp::F32x4Nearest):
+ case uint32_t(SimdOp::F64x2Ceil):
+ case uint32_t(SimdOp::F64x2Floor):
+ case uint32_t(SimdOp::F64x2Trunc):
+ case uint32_t(SimdOp::F64x2Nearest):
+ case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
+ case uint32_t(SimdOp::F64x2PromoteLowF32x4):
+ case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
+ case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
+ case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
+ case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
+ case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
+ case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
+ case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
+ case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
+ CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::V128AnyTrue):
+ case uint32_t(SimdOp::I8x16AllTrue):
+ case uint32_t(SimdOp::I16x8AllTrue):
+ case uint32_t(SimdOp::I32x4AllTrue):
+ case uint32_t(SimdOp::I64x2AllTrue):
+ case uint32_t(SimdOp::I8x16Bitmask):
+ case uint32_t(SimdOp::I16x8Bitmask):
+ case uint32_t(SimdOp::I32x4Bitmask):
+ case uint32_t(SimdOp::I64x2Bitmask):
+ CHECK(EmitReduceSimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16Shl):
+ case uint32_t(SimdOp::I8x16ShrS):
+ case uint32_t(SimdOp::I8x16ShrU):
+ case uint32_t(SimdOp::I16x8Shl):
+ case uint32_t(SimdOp::I16x8ShrS):
+ case uint32_t(SimdOp::I16x8ShrU):
+ case uint32_t(SimdOp::I32x4Shl):
+ case uint32_t(SimdOp::I32x4ShrS):
+ case uint32_t(SimdOp::I32x4ShrU):
+ case uint32_t(SimdOp::I64x2Shl):
+ case uint32_t(SimdOp::I64x2ShrS):
+ case uint32_t(SimdOp::I64x2ShrU):
+ CHECK(EmitShiftSimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16ExtractLaneS):
+ case uint32_t(SimdOp::I8x16ExtractLaneU):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I16x8ExtractLaneS):
+ case uint32_t(SimdOp::I16x8ExtractLaneU):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I32x4ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I64x2ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F32x4ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F64x2ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I16x8ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I32x4ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I64x2ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F32x4ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F64x2ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::V128Bitselect):
+ CHECK(EmitTernarySimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16Shuffle):
+ CHECK(EmitShuffleSimd128(f));
+ case uint32_t(SimdOp::V128Load8Splat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Uint8, SimdOp::I8x16Splat));
+ case uint32_t(SimdOp::V128Load16Splat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Uint16, SimdOp::I16x8Splat));
+ case uint32_t(SimdOp::V128Load32Splat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Float32, SimdOp::I32x4Splat));
+ case uint32_t(SimdOp::V128Load64Splat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Float64, SimdOp::I64x2Splat));
+ case uint32_t(SimdOp::V128Load8x8S):
+ case uint32_t(SimdOp::V128Load8x8U):
+ case uint32_t(SimdOp::V128Load16x4S):
+ case uint32_t(SimdOp::V128Load16x4U):
+ case uint32_t(SimdOp::V128Load32x2S):
+ case uint32_t(SimdOp::V128Load32x2U):
+ CHECK(EmitLoadExtendSimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::V128Load32Zero):
+ CHECK(EmitLoadZeroSimd128(f, Scalar::Float32, 4));
+ case uint32_t(SimdOp::V128Load64Zero):
+ CHECK(EmitLoadZeroSimd128(f, Scalar::Float64, 8));
+ case uint32_t(SimdOp::V128Load8Lane):
+ CHECK(EmitLoadLaneSimd128(f, 1));
+ case uint32_t(SimdOp::V128Load16Lane):
+ CHECK(EmitLoadLaneSimd128(f, 2));
+ case uint32_t(SimdOp::V128Load32Lane):
+ CHECK(EmitLoadLaneSimd128(f, 4));
+ case uint32_t(SimdOp::V128Load64Lane):
+ CHECK(EmitLoadLaneSimd128(f, 8));
+ case uint32_t(SimdOp::V128Store8Lane):
+ CHECK(EmitStoreLaneSimd128(f, 1));
+ case uint32_t(SimdOp::V128Store16Lane):
+ CHECK(EmitStoreLaneSimd128(f, 2));
+ case uint32_t(SimdOp::V128Store32Lane):
+ CHECK(EmitStoreLaneSimd128(f, 4));
+ case uint32_t(SimdOp::V128Store64Lane):
+ CHECK(EmitStoreLaneSimd128(f, 8));
+# ifdef ENABLE_WASM_RELAXED_SIMD
+ case uint32_t(SimdOp::F32x4RelaxedFma):
+ case uint32_t(SimdOp::F32x4RelaxedFnma):
+ case uint32_t(SimdOp::F64x2RelaxedFma):
+ case uint32_t(SimdOp::F64x2RelaxedFnma):
+ case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
+ case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
+ case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
+ case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
+ case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS): {
+ if (!f.moduleEnv().v128RelaxedEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitTernarySimd128(f, SimdOp(op.b1)));
+ }
+ case uint32_t(SimdOp::F32x4RelaxedMin):
+ case uint32_t(SimdOp::F32x4RelaxedMax):
+ case uint32_t(SimdOp::F64x2RelaxedMin):
+ case uint32_t(SimdOp::F64x2RelaxedMax):
+ case uint32_t(SimdOp::I16x8RelaxedQ15MulrS): {
+ if (!f.moduleEnv().v128RelaxedEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
+ }
+ case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
+ case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
+ case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
+ case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero): {
+ if (!f.moduleEnv().v128RelaxedEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
+ }
+ case uint32_t(SimdOp::I8x16RelaxedSwizzle):
+ case uint32_t(SimdOp::I16x8DotI8x16I7x16S): {
+ if (!f.moduleEnv().v128RelaxedEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(
+ EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
+ }
+# endif
+
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ } // switch (op.b1)
+ break;
+ }
+#endif
+
+ // Miscellaneous operations
+ case uint16_t(Op::MiscPrefix): {
+ switch (op.b1) {
+ case uint32_t(MiscOp::I32TruncSatF32S):
+ case uint32_t(MiscOp::I32TruncSatF32U):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
+ MiscOp(op.b1) == MiscOp::I32TruncSatF32U, true));
+ case uint32_t(MiscOp::I32TruncSatF64S):
+ case uint32_t(MiscOp::I32TruncSatF64U):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
+ MiscOp(op.b1) == MiscOp::I32TruncSatF64U, true));
+ case uint32_t(MiscOp::I64TruncSatF32S):
+ case uint32_t(MiscOp::I64TruncSatF32U):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
+ MiscOp(op.b1) == MiscOp::I64TruncSatF32U, true));
+ case uint32_t(MiscOp::I64TruncSatF64S):
+ case uint32_t(MiscOp::I64TruncSatF64U):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
+ MiscOp(op.b1) == MiscOp::I64TruncSatF64U, true));
+ case uint32_t(MiscOp::MemoryCopy):
+ CHECK(EmitMemCopy(f));
+ case uint32_t(MiscOp::DataDrop):
+ CHECK(EmitDataOrElemDrop(f, /*isData=*/true));
+ case uint32_t(MiscOp::MemoryFill):
+ CHECK(EmitMemFill(f));
+ case uint32_t(MiscOp::MemoryInit):
+ CHECK(EmitMemOrTableInit(f, /*isMem=*/true));
+ case uint32_t(MiscOp::TableCopy):
+ CHECK(EmitTableCopy(f));
+ case uint32_t(MiscOp::ElemDrop):
+ CHECK(EmitDataOrElemDrop(f, /*isData=*/false));
+ case uint32_t(MiscOp::TableInit):
+ CHECK(EmitMemOrTableInit(f, /*isMem=*/false));
+ case uint32_t(MiscOp::TableFill):
+ CHECK(EmitTableFill(f));
+#ifdef ENABLE_WASM_MEMORY_CONTROL
+ case uint32_t(MiscOp::MemoryDiscard): {
+ if (!f.moduleEnv().memoryControlEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitMemDiscard(f));
+ }
+#endif
+ case uint32_t(MiscOp::TableGrow):
+ CHECK(EmitTableGrow(f));
+ case uint32_t(MiscOp::TableSize):
+ CHECK(EmitTableSize(f));
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ // Thread operations
+ case uint16_t(Op::ThreadPrefix): {
+ // Though thread ops can be used on nonshared memories, we make them
+ // unavailable if shared memory has been disabled in the prefs, for
+      // maximum predictability, safety, and consistency with JS.
+ if (f.moduleEnv().sharedMemoryEnabled() == Shareable::False) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(ThreadOp::Wake):
+ CHECK(EmitWake(f));
+
+ case uint32_t(ThreadOp::I32Wait):
+ CHECK(EmitWait(f, ValType::I32, 4));
+ case uint32_t(ThreadOp::I64Wait):
+ CHECK(EmitWait(f, ValType::I64, 8));
+ case uint32_t(ThreadOp::Fence):
+ CHECK(EmitFence(f));
+
+ case uint32_t(ThreadOp::I32AtomicLoad):
+ CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicLoad):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicLoad8U):
+ CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicLoad16U):
+ CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad8U):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicLoad16U):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad32U):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicStore):
+ CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicStore):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicStore8U):
+ CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicStore16U):
+ CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore8U):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicStore16U):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore32U):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicAdd):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchAddOp));
+
+ case uint32_t(ThreadOp::I32AtomicSub):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchSubOp));
+
+ case uint32_t(ThreadOp::I32AtomicAnd):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchAndOp));
+
+ case uint32_t(ThreadOp::I32AtomicOr):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr8U):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr8U):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchOrOp));
+
+ case uint32_t(ThreadOp::I32AtomicXor):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchXorOp));
+
+ case uint32_t(ThreadOp::I32AtomicXchg):
+ CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicXchg):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicXchg8U):
+ CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicXchg16U):
+ CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg8U):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicXchg16U):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg32U):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicCmpXchg):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint32));
+
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ // asm.js-specific operators
+ case uint16_t(Op::MozPrefix): {
+ if (op.b1 == uint32_t(MozOp::Intrinsic)) {
+ if (!f.moduleEnv().intrinsicsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitIntrinsic(f));
+ }
+
+ if (!f.moduleEnv().isAsmJS()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(MozOp::TeeGlobal):
+ CHECK(EmitTeeGlobal(f));
+ case uint32_t(MozOp::I32Min):
+ case uint32_t(MozOp::I32Max):
+ CHECK(EmitMinMax(f, ValType::I32, MIRType::Int32,
+ MozOp(op.b1) == MozOp::I32Max));
+ case uint32_t(MozOp::I32Neg):
+ CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::I32, MIRType::Int32));
+ case uint32_t(MozOp::I32BitNot):
+ CHECK(EmitBitNot(f, ValType::I32));
+ case uint32_t(MozOp::I32Abs):
+ CHECK(EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32));
+ case uint32_t(MozOp::F32TeeStoreF64):
+ CHECK(EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64));
+ case uint32_t(MozOp::F64TeeStoreF32):
+ CHECK(EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32));
+ case uint32_t(MozOp::I32TeeStore8):
+ CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int8));
+ case uint32_t(MozOp::I32TeeStore16):
+ CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int16));
+ case uint32_t(MozOp::I64TeeStore8):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int8));
+ case uint32_t(MozOp::I64TeeStore16):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int16));
+ case uint32_t(MozOp::I64TeeStore32):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int32));
+ case uint32_t(MozOp::I32TeeStore):
+ CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int32));
+ case uint32_t(MozOp::I64TeeStore):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int64));
+ case uint32_t(MozOp::F32TeeStore):
+ CHECK(EmitTeeStore(f, ValType::F32, Scalar::Float32));
+ case uint32_t(MozOp::F64TeeStore):
+ CHECK(EmitTeeStore(f, ValType::F64, Scalar::Float64));
+ case uint32_t(MozOp::F64Mod):
+ CHECK(EmitRem(f, ValType::F64, MIRType::Double,
+ /* isUnsigned = */ false));
+ case uint32_t(MozOp::F64SinNative):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigSinNativeD));
+ case uint32_t(MozOp::F64SinFdlibm):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigSinFdlibmD));
+ case uint32_t(MozOp::F64CosNative):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigCosNativeD));
+ case uint32_t(MozOp::F64CosFdlibm):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigCosFdlibmD));
+ case uint32_t(MozOp::F64TanNative):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigTanNativeD));
+ case uint32_t(MozOp::F64TanFdlibm):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigTanFdlibmD));
+ case uint32_t(MozOp::F64Asin):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigASinD));
+ case uint32_t(MozOp::F64Acos):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigACosD));
+ case uint32_t(MozOp::F64Atan):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigATanD));
+ case uint32_t(MozOp::F64Exp):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigExpD));
+ case uint32_t(MozOp::F64Log):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigLogD));
+ case uint32_t(MozOp::F64Pow):
+ CHECK(EmitBinaryMathBuiltinCall(f, SASigPowD));
+ case uint32_t(MozOp::F64Atan2):
+ CHECK(EmitBinaryMathBuiltinCall(f, SASigATan2D));
+ case uint32_t(MozOp::OldCallDirect):
+ CHECK(EmitCall(f, /* asmJSFuncDef = */ true));
+ case uint32_t(MozOp::OldCallIndirect):
+ CHECK(EmitCallIndirect(f, /* oldStyle = */ true));
+
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+
+#undef CHECK
+}
+
+bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo,
+ const FuncCompileInputVector& inputs,
+ CompiledCode* code, UniqueChars* error) {
+ MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
+ MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
+
+ TempAllocator alloc(&lifo);
+ JitContext jitContext;
+ MOZ_ASSERT(IsCompilingWasm());
+ WasmMacroAssembler masm(alloc, moduleEnv);
+#if defined(JS_CODEGEN_ARM64)
+ masm.SetStackPointer64(PseudoStackPointer64);
+#endif
+
+ // Swap in already-allocated empty vectors to avoid malloc/free.
+ MOZ_ASSERT(code->empty());
+ if (!code->swap(masm)) {
+ return false;
+ }
+
+ // Create a description of the stack layout created by GenerateTrapExit().
+ RegisterOffsets trapExitLayout;
+ size_t trapExitLayoutNumWords;
+ GenerateTrapExitRegisterOffsets(&trapExitLayout, &trapExitLayoutNumWords);
+
+ for (const FuncCompileInput& func : inputs) {
+ JitSpewCont(JitSpew_Codegen, "\n");
+ JitSpew(JitSpew_Codegen,
+ "# ================================"
+ "==================================");
+ JitSpew(JitSpew_Codegen, "# ==");
+ JitSpew(JitSpew_Codegen,
+ "# wasm::IonCompileFunctions: starting on function index %d",
+ (int)func.index);
+
+ Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+ // Build the local types vector.
+
+ const FuncType& funcType = *moduleEnv.funcs[func.index].type;
+ ValTypeVector locals;
+ if (!locals.appendAll(funcType.args())) {
+ return false;
+ }
+ if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
+ return false;
+ }
+
+ // Set up for Ion compilation.
+
+ const JitCompileOptions options;
+ MIRGraph graph(&alloc);
+ CompileInfo compileInfo(locals.length());
+ MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
+ IonOptimizations.get(OptimizationLevel::Wasm));
+ if (moduleEnv.usesMemory()) {
+ if (moduleEnv.memory->indexType() == IndexType::I32) {
+ mir.initMinWasmHeapLength(moduleEnv.memory->initialLength32());
+ } else {
+ mir.initMinWasmHeapLength(moduleEnv.memory->initialLength64());
+ }
+ }
+
+ // Build MIR graph
+ {
+ FunctionCompiler f(moduleEnv, d, func, locals, mir, masm.tryNotes());
+ if (!f.init()) {
+ return false;
+ }
+
+ if (!f.startBlock()) {
+ return false;
+ }
+
+ if (!EmitBodyExprs(f)) {
+ return false;
+ }
+
+ f.finish();
+ }
+
+ // Compile MIR graph
+ {
+ jit::SpewBeginWasmFunction(&mir, func.index);
+ jit::AutoSpewEndFunction spewEndFunction(&mir);
+
+ if (!OptimizeMIR(&mir)) {
+ return false;
+ }
+
+ LIRGraph* lir = GenerateLIR(&mir);
+ if (!lir) {
+ return false;
+ }
+
+ CodeGenerator codegen(&mir, lir, &masm);
+
+ BytecodeOffset prologueTrapOffset(func.lineOrBytecode);
+ FuncOffsets offsets;
+ ArgTypeVector args(funcType);
+ if (!codegen.generateWasm(CallIndirectId::forFunc(moduleEnv, func.index),
+ prologueTrapOffset, args, trapExitLayout,
+ trapExitLayoutNumWords, &offsets,
+ &code->stackMaps, &d)) {
+ return false;
+ }
+
+ if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode,
+ offsets)) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen,
+ "# wasm::IonCompileFunctions: completed function index %d",
+ (int)func.index);
+ JitSpew(JitSpew_Codegen, "# ==");
+ JitSpew(JitSpew_Codegen,
+ "# ================================"
+ "==================================");
+ JitSpewCont(JitSpew_Codegen, "\n");
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
+
+bool js::wasm::IonPlatformSupport() {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
+ defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_LOONG64) || \
+ defined(JS_CODEGEN_RISCV64)
+ return true;
+#else
+ return false;
+#endif
+}
diff --git a/js/src/wasm/WasmIonCompile.h b/js/src/wasm/WasmIonCompile.h
new file mode 100644
index 0000000000..f583cbad1f
--- /dev/null
+++ b/js/src/wasm/WasmIonCompile.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_ion_compile_h
+#define wasm_ion_compile_h
+
+#include "wasm/WasmGenerator.h"
+
+namespace js {
+namespace wasm {
+
+// Return whether IonCompileFunction() can generate code on the current device.
+// Usually you do *not* want this; you want IonAvailable().
+[[nodiscard]] bool IonPlatformSupport();
+
+// Generates very fast code at the expense of compilation time.
+[[nodiscard]] bool IonCompileFunctions(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo,
+ const FuncCompileInputVector& inputs,
+ CompiledCode* code, UniqueChars* error);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_ion_compile_h
diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
new file mode 100644
index 0000000000..75a37dc194
--- /dev/null
+++ b/js/src/wasm/WasmJS.cpp
@@ -0,0 +1,5524 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmJS.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/RangedPtr.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "jsapi.h"
+#include "jsexn.h"
+
+#include "ds/IdValuePair.h" // js::IdValuePair
+#include "frontend/FrontendContext.h" // AutoReportFrontendContext
+#include "gc/GCContext.h"
+#include "jit/AtomicOperations.h"
+#include "jit/FlushICache.h"
+#include "jit/JitContext.h"
+#include "jit/JitOptions.h"
+#include "jit/Simulator.h"
+#include "js/ForOfIterator.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Printf.h"
+#include "js/PropertyAndElement.h" // JS_DefineProperty, JS_GetProperty
+#include "js/PropertySpec.h" // JS_{PS,FN}{,_END}
+#include "js/Stack.h" // BuildStackString
+#include "js/StreamConsumer.h"
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/ErrorObject.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GlobalObject.h" // js::GlobalObject
+#include "vm/HelperThreadState.h" // js::PromiseHelperTask
+#include "vm/Interpreter.h"
+#include "vm/JSFunction.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/PromiseObject.h" // js::PromiseObject
+#include "vm/SharedArrayObject.h"
+#include "vm/StringType.h"
+#include "vm/Warnings.h" // js::WarnNumberASCII
+#include "vm/WellKnownAtom.h" // js_*_str
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmIntrinsic.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmMemory.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+#include "gc/GCContext-inl.h"
+#include "gc/StableCellHasher-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+/*
+ * [SMDOC] WebAssembly code rules (evolving)
+ *
+ * TlsContext.get() is only to be invoked from functions that have been invoked
+ * _directly_ by generated code as cold(!) Builtin calls, from code that is
+ * only used by signal handlers, or from helper functions that have been
+ * called _directly_ from a simulator. All other code shall pass in a
+ * JSContext* to functions that need it, or an Instance* since the context
+ * is available through the instance.
+ *
+ * Code that uses TlsContext.get() shall annotate each such call with the
+ * reason why the call is OK.
+ */
+
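+// Purely illustrative sketch of the annotation convention described above; the
+// call site and the stated reason are hypothetical, not taken from this file:
+//
+//   // OK to use TlsContext.get(): cold builtin invoked directly by generated
+//   // code, so no JSContext* is threaded through to this point.
+//   JSContext* cx = TlsContext.get();
+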
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::Nothing;
+using mozilla::RangedPtr;
+using mozilla::Span;
+
+// About the fuzzer intercession points: If fuzzing is enabled and only a
+// single compiler has been selected, then we disable features that are not
+// supported by that compiler. This is strictly a concession to the fuzzer
+// infrastructure.
+
+static inline bool IsFuzzingIon(JSContext* cx) {
+ return IsFuzzing() && !cx->options().wasmBaseline() &&
+ cx->options().wasmIon();
+}
+
+// These functions read flags and apply fuzzing intercession policies. Never go
+// directly to the flags in code below; always go via these accessors.
+
+static inline bool WasmThreadsFlag(JSContext* cx) {
+ return cx->realm() &&
+ cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled();
+}
+
+#define WASM_FEATURE(NAME, LOWER_NAME, COMPILE_PRED, COMPILER_PRED, FLAG_PRED, \
+ ...) \
+ static inline bool Wasm##NAME##Flag(JSContext* cx) { \
+ return (COMPILE_PRED) && (FLAG_PRED) && cx->options().wasm##NAME(); \
+ }
+JS_FOR_WASM_FEATURES(WASM_FEATURE, WASM_FEATURE, WASM_FEATURE);
+#undef WASM_FEATURE
+
+static inline bool WasmDebuggerActive(JSContext* cx) {
+ if (IsFuzzingIon(cx)) {
+ return false;
+ }
+ return cx->realm() && cx->realm()->debuggerObservesWasm();
+}
+
+/*
+ * [SMDOC] Compiler and feature selection; compiler and feature availability.
+ *
+ * In order to make the computation of whether a wasm feature or wasm compiler
+ * is available predictable, we have established some rules, and implemented
+ * those rules.
+ *
+ * Code elsewhere should use the predicates below to test for features and
+ * compilers; it should never try to compute feature or compiler availability
+ * in other ways.
+ *
+ * At the outset, there is a set of selected compilers C containing at most one
+ * baseline compiler [*] and at most one optimizing compiler [**], and a set of
+ * selected features F. These selections come from defaults and from overrides
+ * by command line switches in the shell and javascript.option.wasm_X in the
+ * browser. Defaults for both features and compilers may be platform specific,
+ * for example, some compilers may not be available on some platforms because
+ * they do not support the architecture at all or they do not support features
+ * that must be enabled by default on the platform.
+ *
+ * [*] Currently we have only one, "baseline" aka "Rabaldr", but other
+ * implementations have additional baseline translators, eg from wasm
+ * bytecode to an internal code processed by an interpreter.
+ *
+ * [**] Currently we have only one, "ion" aka "Baldr".
+ *
+ *
+ * Compiler availability:
+ *
+ * The set of features F induces a set of available compilers A: these are the
+ * compilers that all support all the features in F. (Some of these compilers
+ * may not be in the set C.)
+ *
+ * The sets C and A are intersected, yielding a set of enabled compilers E.
+ * Notably, the set E may be empty, in which case wasm is effectively disabled
+ * (though the WebAssembly object is still present in the global environment).
+ *
+ * An important consequence is that selecting a feature that is not supported by
+ * a particular compiler disables that compiler completely -- there is no notion
+ * of a compiler being available but suddenly failing when an unsupported
+ * feature is used by a program. If a compiler is available, it supports all
+ * the features that have been selected.
+ *
+ * Equally important, a feature cannot be enabled by default on a platform if
+ * the feature is not supported by all the compilers we wish to have enabled by
+ * default on the platform. We MUST disable by default any features on a
+ * platform that are not supported by all the compilers on the platform.
+ *
+ * In a shell build, the testing functions wasmCompilersPresent,
+ * wasmCompileMode, and wasmIonDisabledByFeatures can be used to probe compiler
+ * availability and the reasons for a compiler being unavailable.
+ *
+ *
+ * Feature availability:
+ *
+ * A feature is available if it is selected and there is at least one available
+ * compiler that implements it.
+ *
+ * For example, --wasm-gc selects the GC feature, and if Baseline is available
+ * then the feature is available.
+ *
+ * In a shell build, there are per-feature testing functions (of the form
+ * wasmFeatureEnabled) to probe whether specific features are available.
+ */
+
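+// Purely illustrative sketch of the rules above; the helper names below are
+// hypothetical and do not exist in this file. A compiler is enabled iff it is
+// both selected and able to support every selected feature (E = the
+// intersection of C and A):
+//
+//   bool CompilerEnabled(Compiler c, JSContext* cx) {
+//     return CompilerSelected(c, cx) &&           // c is in C
+//            SupportsAllSelectedFeatures(c, cx);  // c is in A
+//   }
+//
+// A selected feature is then available iff at least one enabled compiler
+// implements it.
+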
+// Compiler availability predicates. These must be kept in sync with the
+// feature predicates in the next section below.
+//
+// These can't call the feature predicates since the feature predicates call
+// back to these predicates. So there will be a small amount of duplicated
+// logic here, but as compilers reach feature parity that duplication will go
+// away.
+
+bool wasm::BaselineAvailable(JSContext* cx) {
+ if (!cx->options().wasmBaseline() || !BaselinePlatformSupport()) {
+ return false;
+ }
+ bool isDisabled = false;
+ MOZ_ALWAYS_TRUE(BaselineDisabledByFeatures(cx, &isDisabled));
+ return !isDisabled;
+}
+
+bool wasm::IonAvailable(JSContext* cx) {
+ if (!cx->options().wasmIon() || !IonPlatformSupport()) {
+ return false;
+ }
+ bool isDisabled = false;
+ MOZ_ALWAYS_TRUE(IonDisabledByFeatures(cx, &isDisabled));
+ return !isDisabled;
+}
+
+bool wasm::WasmCompilerForAsmJSAvailable(JSContext* cx) {
+ return IonAvailable(cx);
+}
+
+template <size_t ArrayLength>
+static inline bool Append(JSStringBuilder* reason, const char (&s)[ArrayLength],
+ char* sep) {
+ if ((*sep && !reason->append(*sep)) || !reason->append(s)) {
+ return false;
+ }
+ *sep = ',';
+ return true;
+}
+
+bool wasm::BaselineDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason) {
+ // Baseline cannot be used if we are testing serialization.
+ bool testSerialization = WasmTestSerializationFlag(cx);
+ if (reason) {
+ char sep = 0;
+ if (testSerialization && !Append(reason, "testSerialization", &sep)) {
+ return false;
+ }
+ }
+ *isDisabled = testSerialization;
+ return true;
+}
+
+bool wasm::IonDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason) {
+ // Ion has no debugging support.
+ bool debug = WasmDebuggerActive(cx);
+ if (reason) {
+ char sep = 0;
+ if (debug && !Append(reason, "debug", &sep)) {
+ return false;
+ }
+ }
+ *isDisabled = debug;
+ return true;
+}
+
+bool wasm::AnyCompilerAvailable(JSContext* cx) {
+ return wasm::BaselineAvailable(cx) || wasm::IonAvailable(cx);
+}
+
+// Feature predicates. These must be kept in sync with the predicates in the
+// section above.
+//
+// The meaning of these predicates is tricky: A predicate is true for a feature
+// if the feature is enabled and/or compiled-in *and* we have *at least one*
+// compiler that can support the feature. Subsequent compiler selection must
+// ensure that only compilers that actually support the feature are used.
+
+#define WASM_FEATURE(NAME, LOWER_NAME, COMPILE_PRED, COMPILER_PRED, FLAG_PRED, \
+ ...) \
+ bool wasm::NAME##Available(JSContext* cx) { \
+ return Wasm##NAME##Flag(cx) && (COMPILER_PRED); \
+ }
+JS_FOR_WASM_FEATURES(WASM_FEATURE, WASM_FEATURE, WASM_FEATURE)
+#undef WASM_FEATURE
+
+bool wasm::IsSimdPrivilegedContext(JSContext* cx) {
+ // This may be slightly more lenient than we want in an ideal world, but it
+ // remains safe.
+ return cx->realm() && cx->realm()->principals() &&
+ cx->realm()->principals()->isSystemOrAddonPrincipal();
+}
+
+bool wasm::SimdAvailable(JSContext* cx) {
+ return js::jit::JitSupportsWasmSimd();
+}
+
+bool wasm::ThreadsAvailable(JSContext* cx) {
+ return WasmThreadsFlag(cx) && AnyCompilerAvailable(cx);
+}
+
+bool wasm::HasPlatformSupport(JSContext* cx) {
+#if !MOZ_LITTLE_ENDIAN()
+ return false;
+#else
+
+ if (!HasJitBackend()) {
+ return false;
+ }
+
+ if (gc::SystemPageSize() > wasm::PageSize) {
+ return false;
+ }
+
+ if (!JitOptions.supportsUnalignedAccesses) {
+ return false;
+ }
+
+# ifndef __wasi__
+ // WASI doesn't support signals so we don't have this function.
+ if (!wasm::EnsureFullSignalHandlers(cx)) {
+ return false;
+ }
+# endif
+
+ if (!jit::JitSupportsAtomics()) {
+ return false;
+ }
+
+ // Wasm threads require 8-byte lock-free atomics.
+ if (!jit::AtomicOperations::isLockfree8()) {
+ return false;
+ }
+
+ // Test only whether the compilers are supported on the hardware, not whether
+ // they are enabled.
+ return BaselinePlatformSupport() || IonPlatformSupport();
+#endif
+}
+
+bool wasm::HasSupport(JSContext* cx) {
+ // If the general wasm pref is on, it's on for everything.
+ bool prefEnabled = cx->options().wasm();
+ // If the general pref is off, check trusted principals.
+ if (MOZ_UNLIKELY(!prefEnabled)) {
+ prefEnabled = cx->options().wasmForTrustedPrinciples() && cx->realm() &&
+ cx->realm()->principals() &&
+ cx->realm()->principals()->isSystemOrAddonPrincipal();
+ }
+ // Do not check for compiler availability, as that may be run-time variant.
+ // For HasSupport() we want a stable answer depending only on prefs.
+ return prefEnabled && HasPlatformSupport(cx);
+}
+
+bool wasm::StreamingCompilationAvailable(JSContext* cx) {
+ // This should match EnsureStreamSupport().
+ return HasSupport(cx) && AnyCompilerAvailable(cx) &&
+ cx->runtime()->offThreadPromiseState.ref().initialized() &&
+ CanUseExtraThreads() && cx->runtime()->consumeStreamCallback &&
+ cx->runtime()->reportStreamErrorCallback;
+}
+
+bool wasm::CodeCachingAvailable(JSContext* cx) {
+ // Fuzzilli breaks the out-of-process compilation mechanism,
+ // so we disable it permanently in those builds.
+#ifdef FUZZING_JS_FUZZILLI
+ return false;
+#else
+
+ // At the moment, we require Ion support for code caching. The main reason
+ // for this is that wasm::CompileAndSerialize() does not have access to
+ // information about which optimizing compiler it should use. See comments in
+ // CompileAndSerialize(), below.
+ return StreamingCompilationAvailable(cx) && IonAvailable(cx);
+#endif
+}
+
+// ============================================================================
+// Imports
+
+static bool ThrowBadImportArg(JSContext* cx) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_ARG);
+ return false;
+}
+
+static bool ThrowBadImportType(JSContext* cx, const CacheableName& field,
+ const char* str) {
+ UniqueChars fieldQuoted = field.toQuotedString(cx);
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_TYPE, fieldQuoted.get(), str);
+ return false;
+}
+
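+// Walks the module's declared imports, looks up each (module, field) pair on
+// `importObj`, and checks that every value found has the expected kind and
+// link type before accumulating it into `imports` for instantiation.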
+bool js::wasm::GetImports(JSContext* cx, const Module& module,
+ HandleObject importObj, ImportValues* imports) {
+ if (!module.imports().empty() && !importObj) {
+ return ThrowBadImportArg(cx);
+ }
+
+ const Metadata& metadata = module.metadata();
+
+ uint32_t tagIndex = 0;
+ const TagDescVector& tags = metadata.tags;
+ uint32_t globalIndex = 0;
+ const GlobalDescVector& globals = metadata.globals;
+ uint32_t tableIndex = 0;
+ const TableDescVector& tables = metadata.tables;
+ for (const Import& import : module.imports()) {
+ RootedId moduleName(cx);
+ if (!import.module.toPropertyKey(cx, &moduleName)) {
+ return false;
+ }
+ RootedId fieldName(cx);
+ if (!import.field.toPropertyKey(cx, &fieldName)) {
+ return false;
+ }
+
+ RootedValue v(cx);
+ if (!GetProperty(cx, importObj, importObj, moduleName, &v)) {
+ return false;
+ }
+
+ if (!v.isObject()) {
+ UniqueChars moduleQuoted = import.module.toQuotedString(cx);
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_FIELD, moduleQuoted.get());
+ return false;
+ }
+
+ RootedObject obj(cx, &v.toObject());
+ if (!GetProperty(cx, obj, obj, fieldName, &v)) {
+ return false;
+ }
+
+ switch (import.kind) {
+ case DefinitionKind::Function: {
+ // For now reject cross-compartment wrappers. These have more
+ // complicated realm semantics (we use nonCCWRealm in a few places) and
+ // may require unwrapping to test for specific function types.
+ if (!IsCallable(v) || IsCrossCompartmentWrapper(&v.toObject())) {
+ return ThrowBadImportType(cx, import.field, "Function");
+ }
+
+ if (!imports->funcs.append(&v.toObject())) {
+ return false;
+ }
+
+ break;
+ }
+ case DefinitionKind::Table: {
+ const uint32_t index = tableIndex++;
+ if (!v.isObject() || !v.toObject().is<WasmTableObject>()) {
+ return ThrowBadImportType(cx, import.field, "Table");
+ }
+
+ Rooted<WasmTableObject*> obj(cx, &v.toObject().as<WasmTableObject>());
+ if (obj->table().elemType() != tables[index].elemType) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_TBL_TYPE_LINK);
+ return false;
+ }
+
+ if (!imports->tables.append(obj)) {
+ return false;
+ }
+ break;
+ }
+ case DefinitionKind::Memory: {
+ if (!v.isObject() || !v.toObject().is<WasmMemoryObject>()) {
+ return ThrowBadImportType(cx, import.field, "Memory");
+ }
+
+ MOZ_ASSERT(!imports->memory);
+ imports->memory = &v.toObject().as<WasmMemoryObject>();
+ break;
+ }
+ case DefinitionKind::Tag: {
+ const uint32_t index = tagIndex++;
+ if (!v.isObject() || !v.toObject().is<WasmTagObject>()) {
+ return ThrowBadImportType(cx, import.field, "Tag");
+ }
+
+ Rooted<WasmTagObject*> obj(cx, &v.toObject().as<WasmTagObject>());
+
+ // Checks whether the signature of the imported exception object matches
+ // the signature declared in the exception import's TagDesc.
+ if (obj->resultType() != tags[index].type->resultType()) {
+ UniqueChars fieldQuoted = import.field.toQuotedString(cx);
+ UniqueChars moduleQuoted = import.module.toQuotedString(cx);
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_TAG_SIG, moduleQuoted.get(),
+ fieldQuoted.get());
+ return false;
+ }
+
+ if (!imports->tagObjs.append(obj)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ break;
+ }
+ case DefinitionKind::Global: {
+ const uint32_t index = globalIndex++;
+ const GlobalDesc& global = globals[index];
+ MOZ_ASSERT(global.importIndex() == index);
+
+ RootedVal val(cx);
+ if (v.isObject() && v.toObject().is<WasmGlobalObject>()) {
+ Rooted<WasmGlobalObject*> obj(cx,
+ &v.toObject().as<WasmGlobalObject>());
+
+ if (obj->isMutable() != global.isMutable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_GLOB_MUT_LINK);
+ return false;
+ }
+ if (obj->type() != global.type()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_GLOB_TYPE_LINK);
+ return false;
+ }
+
+ if (imports->globalObjs.length() <= index &&
+ !imports->globalObjs.resize(index + 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ imports->globalObjs[index] = obj;
+ val = obj->val();
+ } else {
+ if (!global.type().isRefType()) {
+ if (global.type() == ValType::I64 && !v.isBigInt()) {
+ return ThrowBadImportType(cx, import.field, "BigInt");
+ }
+ if (global.type() != ValType::I64 && !v.isNumber()) {
+ return ThrowBadImportType(cx, import.field, "Number");
+ }
+ } else {
+ if (!global.type().isExternRef() && !v.isObjectOrNull()) {
+ return ThrowBadImportType(cx, import.field,
+ "Object-or-null value required for "
+ "non-externref reference type");
+ }
+ }
+
+ if (global.isMutable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_GLOB_MUT_LINK);
+ return false;
+ }
+
+ if (!Val::fromJSValue(cx, global.type(), v, &val)) {
+ return false;
+ }
+ }
+
+ if (!imports->globalValues.append(val)) {
+ return false;
+ }
+
+ break;
+ }
+ }
+ }
+
+ MOZ_ASSERT(globalIndex == globals.length() ||
+ !globals[globalIndex].isImport());
+
+ return true;
+}
+
+static bool DescribeScriptedCaller(JSContext* cx, ScriptedCaller* caller,
+ const char* introducer) {
+ // Note: JS::DescribeScriptedCaller returns whether a scripted caller was
+ // found, not whether an error was thrown. This wrapper function converts
+ // back to the more ordinary false-if-error form.
+
+ JS::AutoFilename af;
+ if (JS::DescribeScriptedCaller(cx, &af, &caller->line)) {
+ caller->filename =
+ FormatIntroducedFilename(af.get(), caller->line, introducer);
+ if (!caller->filename) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static SharedCompileArgs InitCompileArgs(JSContext* cx,
+ const char* introducer) {
+ ScriptedCaller scriptedCaller;
+ if (!DescribeScriptedCaller(cx, &scriptedCaller, introducer)) {
+ return nullptr;
+ }
+
+ FeatureOptions options;
+ return CompileArgs::buildAndReport(cx, std::move(scriptedCaller), options);
+}
+
+// ============================================================================
+// Testing / Fuzzing support
+
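+// Synchronously compiles `code` and instantiates the resulting module against
+// `importObj`, returning the new instance object. This is the path used by
+// the testing and fuzzing support referred to above.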
+bool wasm::Eval(JSContext* cx, Handle<TypedArrayObject*> code,
+ HandleObject importObj,
+ MutableHandle<WasmInstanceObject*> instanceObj) {
+ if (!GlobalObject::ensureConstructor(cx, cx->global(), JSProto_WebAssembly)) {
+ return false;
+ }
+
+ MutableBytes bytecode = cx->new_<ShareableBytes>();
+ if (!bytecode) {
+ return false;
+ }
+
+ if (!bytecode->append((uint8_t*)code->dataPointerEither().unwrap(),
+ code->byteLength())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ SharedCompileArgs compileArgs = InitCompileArgs(cx, "wasm_eval");
+ if (!compileArgs) {
+ return false;
+ }
+
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ SharedModule module =
+ CompileBuffer(*compileArgs, *bytecode, &error, &warnings, nullptr);
+ if (!module) {
+ if (error) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_COMPILE_ERROR, error.get());
+ return false;
+ }
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ Rooted<ImportValues> imports(cx);
+ if (!GetImports(cx, *module, importObj, imports.address())) {
+ return false;
+ }
+
+ return module->instantiate(cx, imports.get(), nullptr, instanceObj);
+}
+
+struct MOZ_STACK_CLASS SerializeListener : JS::OptimizedEncodingListener {
+ // MOZ_STACK_CLASS means these can be nops.
+ MozExternalRefCountType MOZ_XPCOM_ABI AddRef() override { return 0; }
+ MozExternalRefCountType MOZ_XPCOM_ABI Release() override { return 0; }
+
+ DebugOnly<bool> called = false;
+ Bytes* serialized;
+ explicit SerializeListener(Bytes* serialized) : serialized(serialized) {}
+
+ void storeOptimizedEncoding(const uint8_t* bytes, size_t length) override {
+ MOZ_ASSERT(!called);
+ called = true;
+ if (serialized->resizeUninitialized(length)) {
+ memcpy(serialized->begin(), bytes, length);
+ }
+ }
+};
+
+bool wasm::CompileAndSerialize(JSContext* cx, const ShareableBytes& bytecode,
+ Bytes* serialized) {
+ // The caller must check that code caching is available
+ MOZ_ASSERT(CodeCachingAvailable(cx));
+
+ // Create and manually fill in compile args for code caching
+ MutableCompileArgs compileArgs = js_new<CompileArgs>(ScriptedCaller());
+ if (!compileArgs) {
+ return false;
+ }
+
+ // The caller has ensured CodeCachingAvailable(). Moreover, we want to ensure
+ // we go straight to tier-2 so that we synchronously call
+ // JS::OptimizedEncodingListener::storeOptimizedEncoding().
+ compileArgs->baselineEnabled = false;
+ compileArgs->forceTiering = false;
+
+  // We always pick Ion here, and we depend on CodeCachingAvailable() having
+  // determined that Ion is available; see the comments at
+  // CodeCachingAvailable(). To do better, we would need to pass information
+  // about which compiler should be used into CompileAndSerialize().
+ compileArgs->ionEnabled = true;
+
+ // Select features that are enabled. This is guaranteed to be consistent with
+  // our compiler selection, as code caching is only available if Ion is
+  // available, and Ion is only available if it is not disabled by the enabled
+  // features.
+ compileArgs->features = FeatureArgs::build(cx, FeatureOptions());
+
+ SerializeListener listener(serialized);
+
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ SharedModule module =
+ CompileBuffer(*compileArgs, bytecode, &error, &warnings, &listener);
+ if (!module) {
+ fprintf(stderr, "Compilation error: %s\n", error ? error.get() : "oom");
+ return false;
+ }
+
+ MOZ_ASSERT(module->code().hasTier(Tier::Serialized));
+ MOZ_ASSERT(listener.called);
+ return !listener.serialized->empty();
+}
+
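+// Reconstitutes a Module from a serialized encoding (such as the one produced
+// by CompileAndSerialize()) and wraps it in a new WebAssembly.Module object.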
+bool wasm::DeserializeModule(JSContext* cx, const Bytes& serialized,
+ MutableHandleObject moduleObj) {
+ MutableModule module =
+ Module::deserialize(serialized.begin(), serialized.length());
+ if (!module) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ moduleObj.set(module->createObject(cx));
+ return !!moduleObj;
+}
+
+// ============================================================================
+// Common functions
+
+// '[EnforceRange] unsigned long' types are coerced with
+// ConvertToInt(v, 32, 'unsigned')
+// defined in Web IDL Section 3.2.4.9.
+//
+// This just generalizes that to an arbitrary limit that is representable as an
+// integer in double form.
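+//
+// For example, EnforceRange(cx, v, "Memory", "initial size", UINT32_MAX, &x)
+// either yields an integral x in [0, UINT32_MAX] or reports
+// JSMSG_WASM_BAD_ENFORCE_RANGE.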
+
+static bool EnforceRange(JSContext* cx, HandleValue v, const char* kind,
+ const char* noun, uint64_t max, uint64_t* val) {
+ // Step 4.
+ double x;
+ if (!ToNumber(cx, v, &x)) {
+ return false;
+ }
+
+ // Step 5.
+ if (mozilla::IsNegativeZero(x)) {
+ x = 0.0;
+ }
+
+ // Step 6.1.
+ if (!std::isfinite(x)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ENFORCE_RANGE, kind, noun);
+ return false;
+ }
+
+ // Step 6.2.
+ x = JS::ToInteger(x);
+
+ // Step 6.3.
+ if (x < 0 || x > double(max)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ENFORCE_RANGE, kind, noun);
+ return false;
+ }
+
+ *val = uint64_t(x);
+ MOZ_ASSERT(double(*val) == x);
+ return true;
+}
+
+static bool EnforceRangeU32(JSContext* cx, HandleValue v, const char* kind,
+ const char* noun, uint32_t* u32) {
+ uint64_t u64 = 0;
+ if (!EnforceRange(cx, v, kind, noun, uint64_t(UINT32_MAX), &u64)) {
+ return false;
+ }
+ *u32 = uint32_t(u64);
+ return true;
+}
+
+static bool EnforceRangeU64(JSContext* cx, HandleValue v, const char* kind,
+ const char* noun, uint64_t* u64) {
+ // The max is Number.MAX_SAFE_INTEGER
+ return EnforceRange(cx, v, kind, noun, (1LL << 53) - 1, u64);
+}
+
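+// Reads the property `name` from `obj`. A missing or undefined property sets
+// `*found` to false; otherwise the value is range-checked against 2^range - 1
+// and stored in `*value`.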
+static bool GetLimit(JSContext* cx, HandleObject obj, const char* name,
+ const char* noun, const char* msg, uint32_t range,
+ bool* found, uint64_t* value) {
+ JSAtom* atom = Atomize(cx, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ RootedId id(cx, AtomToId(atom));
+
+ RootedValue val(cx);
+ if (!GetProperty(cx, obj, obj, id, &val)) {
+ return false;
+ }
+
+ if (val.isUndefined()) {
+ *found = false;
+ return true;
+ }
+ *found = true;
+  // The range could be made greater than 53 bits, but then the logic in
+  // EnforceRange would have to change to avoid precision loss.
+ MOZ_ASSERT(range < 54);
+ return EnforceRange(cx, val, noun, msg, (uint64_t(1) << range) - 1, value);
+}
+
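+// Parses a memory or table descriptor object: the optional index type
+// (memories only, when memory64 is enabled), the required "initial" size
+// ("minimum" is also accepted when type reflection is enabled), the optional
+// "maximum" size, and the optional "shared" flag (memories only).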
+static bool GetLimits(JSContext* cx, HandleObject obj, LimitsKind kind,
+ Limits* limits) {
+ limits->indexType = IndexType::I32;
+
+ // Memory limits may specify an alternate index type, and we need this to
+ // check the ranges for initial and maximum, so look for the index type first.
+ if (kind == LimitsKind::Memory) {
+#ifdef ENABLE_WASM_MEMORY64
+ // Get the index type field
+ JSAtom* indexTypeAtom = Atomize(cx, "index", strlen("index"));
+ if (!indexTypeAtom) {
+ return false;
+ }
+ RootedId indexTypeId(cx, AtomToId(indexTypeAtom));
+
+ RootedValue indexTypeVal(cx);
+ if (!GetProperty(cx, obj, obj, indexTypeId, &indexTypeVal)) {
+ return false;
+ }
+
+ // The index type has a default value
+ if (!indexTypeVal.isUndefined()) {
+ if (!ToIndexType(cx, indexTypeVal, &limits->indexType)) {
+ return false;
+ }
+
+ if (limits->indexType == IndexType::I64 && !Memory64Available(cx)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_MEM64_LINK);
+ return false;
+ }
+ }
+#endif
+ }
+
+ const char* noun = (kind == LimitsKind::Memory ? "Memory" : "Table");
+ // 2^48 is a valid value, so the range goes to 49 bits. Values above 2^48 are
+ // filtered later, just as values above 2^16 are filtered for mem32.
+ const uint32_t range = limits->indexType == IndexType::I32 ? 32 : 49;
+ uint64_t limit = 0;
+
+ bool haveInitial = false;
+ if (!GetLimit(cx, obj, "initial", noun, "initial size", range, &haveInitial,
+ &limit)) {
+ return false;
+ }
+ if (haveInitial) {
+ limits->initial = limit;
+ }
+
+ bool haveMinimum = false;
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ if (!GetLimit(cx, obj, "minimum", noun, "initial size", range, &haveMinimum,
+ &limit)) {
+ return false;
+ }
+ if (haveMinimum) {
+ limits->initial = limit;
+ }
+#endif
+
+ if (!(haveInitial || haveMinimum)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MISSING_REQUIRED, "initial");
+ return false;
+ }
+ if (haveInitial && haveMinimum) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_SUPPLY_ONLY_ONE, "minimum", "initial");
+ return false;
+ }
+
+ bool haveMaximum = false;
+ if (!GetLimit(cx, obj, "maximum", noun, "maximum size", range, &haveMaximum,
+ &limit)) {
+ return false;
+ }
+ if (haveMaximum) {
+ limits->maximum = Some(limit);
+ }
+
+ limits->shared = Shareable::False;
+
+ // Memory limits may be shared.
+ if (kind == LimitsKind::Memory) {
+ // Get the shared field
+ JSAtom* sharedAtom = Atomize(cx, "shared", strlen("shared"));
+ if (!sharedAtom) {
+ return false;
+ }
+ RootedId sharedId(cx, AtomToId(sharedAtom));
+
+ RootedValue sharedVal(cx);
+ if (!GetProperty(cx, obj, obj, sharedId, &sharedVal)) {
+ return false;
+ }
+
+ // shared's default value is false, which is already the value set above.
+ if (!sharedVal.isUndefined()) {
+ limits->shared =
+ ToBoolean(sharedVal) ? Shareable::True : Shareable::False;
+
+ if (limits->shared == Shareable::True) {
+ if (!haveMaximum) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MISSING_MAXIMUM, noun);
+ return false;
+ }
+
+ if (!cx->realm()
+ ->creationOptions()
+ .getSharedMemoryAndAtomicsEnabled()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_SHMEM_LINK);
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
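+// Validates parsed limits against the implementation-defined upper bound
+// `maximumField` and checks that the initial size does not exceed the
+// declared maximum.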
+static bool CheckLimits(JSContext* cx, uint64_t maximumField, LimitsKind kind,
+ Limits* limits) {
+ const char* noun = (kind == LimitsKind::Memory ? "Memory" : "Table");
+
+ if (limits->initial > maximumField) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_RANGE,
+ noun, "initial size");
+ return false;
+ }
+
+ if (limits->maximum.isSome() &&
+ (*limits->maximum > maximumField || limits->initial > *limits->maximum)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_RANGE,
+ noun, "maximum size");
+ return false;
+ }
+ return true;
+}
+
+template <class Class, const char* name>
+static JSObject* CreateWasmConstructor(JSContext* cx, JSProtoKey key) {
+ Rooted<JSAtom*> className(cx, Atomize(cx, name, strlen(name)));
+ if (!className) {
+ return nullptr;
+ }
+
+ return NewNativeConstructor(cx, Class::construct, 1, className);
+}
+
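+// Determines the prototype for a newly constructed WebAssembly object,
+// honoring the prototype of `new.target` (for subclassing) and falling back
+// to the built-in prototype for `key`.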
+static JSObject* GetWasmConstructorPrototype(JSContext* cx,
+ const CallArgs& callArgs,
+ JSProtoKey key) {
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, callArgs, key, &proto)) {
+ return nullptr;
+ }
+ if (!proto) {
+ proto = GlobalObject::getOrCreatePrototype(cx, key);
+ }
+ return proto;
+}
+
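+// Iterates `src` as a JS iterable, converting each element to a ValType via
+// ToValType() and appending the results to `dest`.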
+[[nodiscard]] static bool ParseValTypes(JSContext* cx, HandleValue src,
+ ValTypeVector& dest) {
+ JS::ForOfIterator iterator(cx);
+
+ if (!iterator.init(src, JS::ForOfIterator::ThrowOnNonIterable)) {
+ return false;
+ }
+
+ RootedValue nextParam(cx);
+ while (true) {
+ bool done;
+ if (!iterator.next(&nextParam, &done)) {
+ return false;
+ }
+ if (done) {
+ break;
+ }
+
+ ValType valType;
+ if (!ToValType(cx, nextParam, &valType) || !dest.append(valType)) {
+ return false;
+ }
+ }
+ return true;
+}
+
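+// The helpers below support type reflection: they convert internal wasm type
+// descriptors (function signatures and table, memory, global, and tag types)
+// into plain JS objects, e.g. for the "type" property attached by
+// WebAssembly.Module.imports() and exports() below.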
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+static JSString* UTF8CharsToString(JSContext* cx, const char* chars) {
+ return NewStringCopyUTF8Z(cx, JS::ConstUTF8CharsZ(chars, strlen(chars)));
+}
+
+[[nodiscard]] static JSObject* ValTypesToArray(JSContext* cx,
+ const ValTypeVector& valTypes) {
+ Rooted<ArrayObject*> arrayObj(cx, NewDenseEmptyArray(cx));
+ if (!arrayObj) {
+ return nullptr;
+ }
+ for (ValType valType : valTypes) {
+ RootedString type(cx,
+ UTF8CharsToString(cx, ToString(valType, nullptr).get()));
+ if (!type) {
+ return nullptr;
+ }
+ if (!NewbornArrayPush(cx, arrayObj, StringValue(type))) {
+ return nullptr;
+ }
+ }
+ return arrayObj;
+}
+
+static JSObject* FuncTypeToObject(JSContext* cx, const FuncType& type) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+
+ RootedObject parametersObj(cx, ValTypesToArray(cx, type.args()));
+ if (!parametersObj ||
+ !props.append(IdValuePair(NameToId(cx->names().parameters),
+ ObjectValue(*parametersObj)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ RootedObject resultsObj(cx, ValTypesToArray(cx, type.results()));
+ if (!resultsObj || !props.append(IdValuePair(NameToId(cx->names().results),
+ ObjectValue(*resultsObj)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+}
+
+static JSObject* TableTypeToObject(JSContext* cx, RefType type,
+ uint32_t initial, Maybe<uint32_t> maximum) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+
+ RootedString elementType(
+ cx, UTF8CharsToString(cx, ToString(type, nullptr).get()));
+ if (!elementType || !props.append(IdValuePair(NameToId(cx->names().element),
+ StringValue(elementType)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ if (maximum.isSome()) {
+ if (!props.append(IdValuePair(NameToId(cx->names().maximum),
+ NumberValue(maximum.value())))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+
+ if (!props.append(
+ IdValuePair(NameToId(cx->names().minimum), NumberValue(initial)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+}
+
+static JSObject* MemoryTypeToObject(JSContext* cx, bool shared,
+ wasm::IndexType indexType,
+ wasm::Pages minPages,
+ Maybe<wasm::Pages> maxPages) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ if (maxPages) {
+ double maxPagesNum;
+ if (indexType == IndexType::I32) {
+ maxPagesNum = double(mozilla::AssertedCast<uint32_t>(maxPages->value()));
+ } else {
+ // The maximum number of pages is 2^48.
+ maxPagesNum = double(maxPages->value());
+ }
+ if (!props.append(IdValuePair(NameToId(cx->names().maximum),
+ NumberValue(maxPagesNum)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ }
+
+ double minPagesNum;
+ if (indexType == IndexType::I32) {
+ minPagesNum = double(mozilla::AssertedCast<uint32_t>(minPages.value()));
+ } else {
+ minPagesNum = double(minPages.value());
+ }
+ if (!props.append(IdValuePair(NameToId(cx->names().minimum),
+ NumberValue(minPagesNum)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+# ifdef ENABLE_WASM_MEMORY64
+ RootedString it(
+ cx, JS_NewStringCopyZ(cx, indexType == IndexType::I32 ? "i32" : "i64"));
+ if (!it) {
+ return nullptr;
+ }
+ if (!props.append(
+ IdValuePair(NameToId(cx->names().index), StringValue(it)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+# endif
+
+ if (!props.append(
+ IdValuePair(NameToId(cx->names().shared), BooleanValue(shared)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+}
+
+static JSObject* GlobalTypeToObject(JSContext* cx, ValType type,
+ bool isMutable) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+
+ if (!props.append(IdValuePair(NameToId(cx->names().mutable_),
+ BooleanValue(isMutable)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ RootedString valueType(cx,
+ UTF8CharsToString(cx, ToString(type, nullptr).get()));
+ if (!valueType || !props.append(IdValuePair(NameToId(cx->names().value),
+ StringValue(valueType)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+}
+
+static JSObject* TagTypeToObject(JSContext* cx,
+ const wasm::ValTypeVector& params) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+
+ RootedObject parametersObj(cx, ValTypesToArray(cx, params));
+ if (!parametersObj ||
+ !props.append(IdValuePair(NameToId(cx->names().parameters),
+ ObjectValue(*parametersObj)))) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+}
+#endif // ENABLE_WASM_TYPE_REFLECTIONS
+
+// ============================================================================
+// WebAssembly.Module class and methods
+
+const JSClassOps WasmModuleObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmModuleObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass WasmModuleObject::class_ = {
+ "WebAssembly.Module",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmModuleObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmModuleObject::classOps_,
+ &WasmModuleObject::classSpec_,
+};
+
+const JSClass& WasmModuleObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmModuleName[] = "Module";
+
+const ClassSpec WasmModuleObject::classSpec_ = {
+ CreateWasmConstructor<WasmModuleObject, WasmModuleName>,
+ GenericCreatePrototype<WasmModuleObject>,
+ WasmModuleObject::static_methods,
+ nullptr,
+ WasmModuleObject::methods,
+ WasmModuleObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+const JSPropertySpec WasmModuleObject::properties[] = {
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Module", JSPROP_READONLY),
+ JS_PS_END};
+
+const JSFunctionSpec WasmModuleObject::methods[] = {JS_FS_END};
+
+const JSFunctionSpec WasmModuleObject::static_methods[] = {
+ JS_FN("imports", WasmModuleObject::imports, 1, JSPROP_ENUMERATE),
+ JS_FN("exports", WasmModuleObject::exports, 1, JSPROP_ENUMERATE),
+ JS_FN("customSections", WasmModuleObject::customSections, 2,
+ JSPROP_ENUMERATE),
+ JS_FS_END};
+
+/* static */
+void WasmModuleObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ const Module& module = obj->as<WasmModuleObject>().module();
+ obj->zone()->decJitMemory(module.codeLength(module.code().stableTier()));
+ gcx->release(obj, &module, module.gcMallocBytesExcludingCode(),
+ MemoryUse::WasmModule);
+}
+
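+// Unwraps `obj` if necessary and, if the result is a WebAssembly.Module
+// object, returns its underlying Module through `*module`.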
+static bool IsModuleObject(JSObject* obj, const Module** module) {
+ WasmModuleObject* mobj = obj->maybeUnwrapIf<WasmModuleObject>();
+ if (!mobj) {
+ return false;
+ }
+
+ *module = &mobj->module();
+ return true;
+}
+
+static bool GetModuleArg(JSContext* cx, CallArgs args, uint32_t numRequired,
+ const char* name, const Module** module) {
+ if (!args.requireAtLeast(cx, name, numRequired)) {
+ return false;
+ }
+
+ if (!args[0].isObject() || !IsModuleObject(&args[0].toObject(), module)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_MOD_ARG);
+ return false;
+ }
+
+ return true;
+}
+
+struct KindNames {
+ Rooted<PropertyName*> kind;
+ Rooted<PropertyName*> table;
+ Rooted<PropertyName*> memory;
+ Rooted<PropertyName*> tag;
+ Rooted<PropertyName*> type;
+
+ explicit KindNames(JSContext* cx)
+ : kind(cx), table(cx), memory(cx), tag(cx), type(cx) {}
+};
+
+static bool InitKindNames(JSContext* cx, KindNames* names) {
+ JSAtom* kind = Atomize(cx, "kind", strlen("kind"));
+ if (!kind) {
+ return false;
+ }
+ names->kind = kind->asPropertyName();
+
+ JSAtom* table = Atomize(cx, "table", strlen("table"));
+ if (!table) {
+ return false;
+ }
+ names->table = table->asPropertyName();
+
+ JSAtom* memory = Atomize(cx, "memory", strlen("memory"));
+ if (!memory) {
+ return false;
+ }
+ names->memory = memory->asPropertyName();
+
+ JSAtom* tag = Atomize(cx, "tag", strlen("tag"));
+ if (!tag) {
+ return false;
+ }
+ names->tag = tag->asPropertyName();
+
+ JSAtom* type = Atomize(cx, "type", strlen("type"));
+ if (!type) {
+ return false;
+ }
+ names->type = type->asPropertyName();
+
+ return true;
+}
+
+static JSString* KindToString(JSContext* cx, const KindNames& names,
+ DefinitionKind kind) {
+ switch (kind) {
+ case DefinitionKind::Function:
+ return cx->names().function;
+ case DefinitionKind::Table:
+ return names.table;
+ case DefinitionKind::Memory:
+ return names.memory;
+ case DefinitionKind::Global:
+ return cx->names().global;
+ case DefinitionKind::Tag:
+ return names.tag;
+ }
+
+ MOZ_CRASH("invalid kind");
+}
+
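+// Implements WebAssembly.Module.imports(module): returns an array of
+// { module, name, kind } descriptors, one per import, plus a "type" property
+// when type reflection is enabled.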
+/* static */
+bool WasmModuleObject::imports(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ const Module* module;
+ if (!GetModuleArg(cx, args, 1, "WebAssembly.Module.imports", &module)) {
+ return false;
+ }
+
+ KindNames names(cx);
+ if (!InitKindNames(cx, &names)) {
+ return false;
+ }
+
+ RootedValueVector elems(cx);
+ if (!elems.reserve(module->imports().length())) {
+ return false;
+ }
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ const Metadata& metadata = module->metadata();
+ const MetadataTier& metadataTier =
+ module->metadata(module->code().stableTier());
+
+ size_t numFuncImport = 0;
+ size_t numMemoryImport = 0;
+ size_t numGlobalImport = 0;
+ size_t numTableImport = 0;
+ size_t numTagImport = 0;
+#endif // ENABLE_WASM_TYPE_REFLECTIONS
+
+ for (const Import& import : module->imports()) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ if (!props.reserve(3)) {
+ return false;
+ }
+
+ JSString* moduleStr = import.module.toAtom(cx);
+ if (!moduleStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(cx->names().module), StringValue(moduleStr)));
+
+ JSString* nameStr = import.field.toAtom(cx);
+ if (!nameStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(cx->names().name), StringValue(nameStr)));
+
+ JSString* kindStr = KindToString(cx, names, import.kind);
+ if (!kindStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(names.kind), StringValue(kindStr)));
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ RootedObject typeObj(cx);
+ switch (import.kind) {
+ case DefinitionKind::Function: {
+ size_t funcIndex = numFuncImport++;
+ const FuncType& funcType =
+ metadata.getFuncImportType(metadataTier.funcImports[funcIndex]);
+ typeObj = FuncTypeToObject(cx, funcType);
+ break;
+ }
+ case DefinitionKind::Table: {
+ size_t tableIndex = numTableImport++;
+ const TableDesc& table = metadata.tables[tableIndex];
+ typeObj = TableTypeToObject(cx, table.elemType, table.initialLength,
+ table.maximumLength);
+ break;
+ }
+ case DefinitionKind::Memory: {
+ DebugOnly<size_t> memoryIndex = numMemoryImport++;
+ MOZ_ASSERT(memoryIndex == 0);
+ const MemoryDesc& memory = *metadata.memory;
+ typeObj =
+ MemoryTypeToObject(cx, memory.isShared(), memory.indexType(),
+ memory.initialPages(), memory.maximumPages());
+ break;
+ }
+ case DefinitionKind::Global: {
+ size_t globalIndex = numGlobalImport++;
+ const GlobalDesc& global = metadata.globals[globalIndex];
+ typeObj = GlobalTypeToObject(cx, global.type(), global.isMutable());
+ break;
+ }
+ case DefinitionKind::Tag: {
+ size_t tagIndex = numTagImport++;
+ const TagDesc& tag = metadata.tags[tagIndex];
+ typeObj = TagTypeToObject(cx, tag.type->argTypes_);
+ break;
+ }
+ }
+
+ if (!typeObj || !props.append(IdValuePair(NameToId(names.type),
+ ObjectValue(*typeObj)))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+#endif // ENABLE_WASM_TYPE_REFLECTIONS
+
+ JSObject* obj =
+ NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ if (!obj) {
+ return false;
+ }
+
+ elems.infallibleAppend(ObjectValue(*obj));
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr) {
+ return false;
+ }
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
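+// Implements WebAssembly.Module.exports(module): returns an array of
+// { name, kind } descriptors, one per export, plus a "type" property when
+// type reflection is enabled.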
+/* static */
+bool WasmModuleObject::exports(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ const Module* module;
+ if (!GetModuleArg(cx, args, 1, "WebAssembly.Module.exports", &module)) {
+ return false;
+ }
+
+ KindNames names(cx);
+ if (!InitKindNames(cx, &names)) {
+ return false;
+ }
+
+ RootedValueVector elems(cx);
+ if (!elems.reserve(module->exports().length())) {
+ return false;
+ }
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ const Metadata& metadata = module->metadata();
+ const MetadataTier& metadataTier =
+ module->metadata(module->code().stableTier());
+#endif // ENABLE_WASM_TYPE_REFLECTIONS
+
+ for (const Export& exp : module->exports()) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ if (!props.reserve(2)) {
+ return false;
+ }
+
+ JSString* nameStr = exp.fieldName().toAtom(cx);
+ if (!nameStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(cx->names().name), StringValue(nameStr)));
+
+ JSString* kindStr = KindToString(cx, names, exp.kind());
+ if (!kindStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(names.kind), StringValue(kindStr)));
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ RootedObject typeObj(cx);
+ switch (exp.kind()) {
+ case DefinitionKind::Function: {
+ const FuncExport& fe = metadataTier.lookupFuncExport(exp.funcIndex());
+ const FuncType& funcType = metadata.getFuncExportType(fe);
+ typeObj = FuncTypeToObject(cx, funcType);
+ break;
+ }
+ case DefinitionKind::Table: {
+ const TableDesc& table = metadata.tables[exp.tableIndex()];
+ typeObj = TableTypeToObject(cx, table.elemType, table.initialLength,
+ table.maximumLength);
+ break;
+ }
+ case DefinitionKind::Memory: {
+ const MemoryDesc& memory = *metadata.memory;
+ typeObj =
+ MemoryTypeToObject(cx, memory.isShared(), memory.indexType(),
+ memory.initialPages(), memory.maximumPages());
+ break;
+ }
+ case DefinitionKind::Global: {
+ const GlobalDesc& global = metadata.globals[exp.globalIndex()];
+ typeObj = GlobalTypeToObject(cx, global.type(), global.isMutable());
+ break;
+ }
+ case DefinitionKind::Tag: {
+ const TagDesc& tag = metadata.tags[exp.tagIndex()];
+ typeObj = TagTypeToObject(cx, tag.type->argTypes_);
+ break;
+ }
+ }
+
+ if (!typeObj || !props.append(IdValuePair(NameToId(names.type),
+ ObjectValue(*typeObj)))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+#endif // ENABLE_WASM_TYPE_REFLECTIONS
+
+ JSObject* obj =
+ NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ if (!obj) {
+ return false;
+ }
+
+ elems.infallibleAppend(ObjectValue(*obj));
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr) {
+ return false;
+ }
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
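+// Implements WebAssembly.Module.customSections(module, sectionName): returns
+// an array containing an ArrayBuffer copy of the payload of every custom
+// section whose name matches `sectionName` after UTF-8 encoding.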
+/* static */
+bool WasmModuleObject::customSections(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ const Module* module;
+ if (!GetModuleArg(cx, args, 2, "WebAssembly.Module.customSections",
+ &module)) {
+ return false;
+ }
+
+ Vector<char, 8> name(cx);
+ {
+ RootedString str(cx, ToString(cx, args.get(1)));
+ if (!str) {
+ return false;
+ }
+
+ Rooted<JSLinearString*> linear(cx, str->ensureLinear(cx));
+ if (!linear) {
+ return false;
+ }
+
+ if (!name.initLengthUninitialized(
+ JS::GetDeflatedUTF8StringLength(linear))) {
+ return false;
+ }
+
+ (void)JS::DeflateStringToUTF8Buffer(linear,
+ Span(name.begin(), name.length()));
+ }
+
+ RootedValueVector elems(cx);
+ RootedArrayBufferObject buf(cx);
+ for (const CustomSection& cs : module->customSections()) {
+ if (name.length() != cs.name.length()) {
+ continue;
+ }
+ if (memcmp(name.begin(), cs.name.begin(), name.length()) != 0) {
+ continue;
+ }
+
+ buf = ArrayBufferObject::createZeroed(cx, cs.payload->length());
+ if (!buf) {
+ return false;
+ }
+
+ memcpy(buf->dataPointer(), cs.payload->begin(), cs.payload->length());
+ if (!elems.append(ObjectValue(*buf))) {
+ return false;
+ }
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr) {
+ return false;
+ }
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
+/* static */
+WasmModuleObject* WasmModuleObject::create(JSContext* cx, const Module& module,
+ HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<WasmModuleObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+
+ // The pipeline state on some architectures may retain stale instructions
+ // even after we invalidate the instruction cache. There is no generally
+ // available method to broadcast this pipeline flush to all threads after
+ // we've compiled new code, so conservatively perform one here when we're
+ // receiving a module that may have been compiled from another thread.
+ //
+  // The cost of this flush is expected to be minimal enough that it is not
+  // worth optimizing away in the case where the module was compiled on this
+  // thread.
+ jit::FlushExecutionContext();
+
+ // This accounts for module allocation size (excluding code which is handled
+ // separately - see below). This assumes that the size of associated data
+ // doesn't change for the life of the WasmModuleObject. The size is counted
+ // once per WasmModuleObject referencing a Module.
+ InitReservedSlot(obj, MODULE_SLOT, const_cast<Module*>(&module),
+ module.gcMallocBytesExcludingCode(), MemoryUse::WasmModule);
+ module.AddRef();
+
+ // Bug 1569888: We account for the first tier here; the second tier, if
+ // different, also needs to be accounted for.
+ cx->zone()->incJitMemory(module.codeLength(module.code().stableTier()));
+ return obj;
+}
+
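+// Copies the contents of a buffer source (an ArrayBuffer or a view on one,
+// possibly wrapped cross-compartment) into a freshly allocated ShareableBytes.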
+static bool GetBufferSource(JSContext* cx, JSObject* obj, unsigned errorNumber,
+ MutableBytes* bytecode) {
+ *bytecode = cx->new_<ShareableBytes>();
+ if (!*bytecode) {
+ return false;
+ }
+
+ JSObject* unwrapped = CheckedUnwrapStatic(obj);
+
+ SharedMem<uint8_t*> dataPointer;
+ size_t byteLength;
+ if (!unwrapped || !IsBufferSource(unwrapped, &dataPointer, &byteLength)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
+ return false;
+ }
+
+ if (!(*bytecode)->append(dataPointer.unwrap(), byteLength)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+static bool ReportCompileWarnings(JSContext* cx,
+ const UniqueCharsVector& warnings) {
+ // Avoid spamming the console.
+ size_t numWarnings = std::min<size_t>(warnings.length(), 3);
+
+ for (size_t i = 0; i < numWarnings; i++) {
+ if (!WarnNumberASCII(cx, JSMSG_WASM_COMPILE_WARNING, warnings[i].get())) {
+ return false;
+ }
+ }
+
+ if (warnings.length() > numWarnings) {
+ if (!WarnNumberASCII(cx, JSMSG_WASM_COMPILE_WARNING,
+ "other warnings suppressed")) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* static */
+bool WasmModuleObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ Log(cx, "sync new Module() started");
+
+ if (!ThrowIfNotConstructing(cx, callArgs, "Module")) {
+ return false;
+ }
+
+ if (!cx->isRuntimeCodeGenEnabled(JS::RuntimeCode::WASM, nullptr)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CSP_BLOCKED_WASM, "WebAssembly.Module");
+ return false;
+ }
+
+ if (!callArgs.requireAtLeast(cx, "WebAssembly.Module", 1)) {
+ return false;
+ }
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_BUF_ARG);
+ return false;
+ }
+
+ MutableBytes bytecode;
+ if (!GetBufferSource(cx, &callArgs[0].toObject(), JSMSG_WASM_BAD_BUF_ARG,
+ &bytecode)) {
+ return false;
+ }
+
+ SharedCompileArgs compileArgs = InitCompileArgs(cx, "WebAssembly.Module");
+ if (!compileArgs) {
+ return false;
+ }
+
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ SharedModule module =
+ CompileBuffer(*compileArgs, *bytecode, &error, &warnings, nullptr);
+
+ if (!ReportCompileWarnings(cx, warnings)) {
+ return false;
+ }
+ if (!module) {
+ if (error) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_COMPILE_ERROR, error.get());
+ return false;
+ }
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ RootedObject proto(
+ cx, GetWasmConstructorPrototype(cx, callArgs, JSProto_WasmModule));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ RootedObject moduleObj(cx, WasmModuleObject::create(cx, *module, proto));
+ if (!moduleObj) {
+ return false;
+ }
+
+ Log(cx, "sync new Module() succeded");
+
+ callArgs.rval().setObject(*moduleObj);
+ return true;
+}
+
+const Module& WasmModuleObject::module() const {
+ MOZ_ASSERT(is<WasmModuleObject>());
+ return *(const Module*)getReservedSlot(MODULE_SLOT).toPrivate();
+}
+
+// ============================================================================
+// WebAssembly.Instance class and methods
+
+const JSClassOps WasmInstanceObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmInstanceObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ WasmInstanceObject::trace, // trace
+};
+
+const JSClass WasmInstanceObject::class_ = {
+ "WebAssembly.Instance",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmInstanceObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmInstanceObject::classOps_,
+ &WasmInstanceObject::classSpec_,
+};
+
+const JSClass& WasmInstanceObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmInstanceName[] = "Instance";
+
+const ClassSpec WasmInstanceObject::classSpec_ = {
+ CreateWasmConstructor<WasmInstanceObject, WasmInstanceName>,
+ GenericCreatePrototype<WasmInstanceObject>,
+ WasmInstanceObject::static_methods,
+ nullptr,
+ WasmInstanceObject::methods,
+ WasmInstanceObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+static bool IsInstance(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmInstanceObject>();
+}
+
+/* static */
+bool WasmInstanceObject::exportsGetterImpl(JSContext* cx,
+ const CallArgs& args) {
+ args.rval().setObject(
+ args.thisv().toObject().as<WasmInstanceObject>().exportsObj());
+ return true;
+}
+
+/* static */
+bool WasmInstanceObject::exportsGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsInstance, exportsGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmInstanceObject::properties[] = {
+ JS_PSG("exports", WasmInstanceObject::exportsGetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Instance", JSPROP_READONLY),
+ JS_PS_END};
+
+const JSFunctionSpec WasmInstanceObject::methods[] = {JS_FS_END};
+
+const JSFunctionSpec WasmInstanceObject::static_methods[] = {JS_FS_END};
+
+bool WasmInstanceObject::isNewborn() const {
+ MOZ_ASSERT(is<WasmInstanceObject>());
+ return getReservedSlot(INSTANCE_SLOT).isUndefined();
+}
+
+// WeakScopeMap maps from function index to js::Scope. The map is weak to
+// avoid keeping scope objects alive. The scopes are normally created
+// during debugging.
+//
+// This is defined here in order to avoid recursive dependency between
+// WasmJS.h and Scope.h.
+using WasmFunctionScopeMap =
+ JS::WeakCache<GCHashMap<uint32_t, WeakHeapPtr<WasmFunctionScope*>,
+ DefaultHasher<uint32_t>, CellAllocPolicy>>;
+class WasmInstanceObject::UnspecifiedScopeMap {
+ public:
+ WasmFunctionScopeMap& asWasmFunctionScopeMap() {
+ return *(WasmFunctionScopeMap*)this;
+ }
+};
+
+/* static */
+void WasmInstanceObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ WasmInstanceObject& instance = obj->as<WasmInstanceObject>();
+ gcx->delete_(obj, &instance.exports(), MemoryUse::WasmInstanceExports);
+ gcx->delete_(obj, &instance.scopes().asWasmFunctionScopeMap(),
+ MemoryUse::WasmInstanceScopes);
+ gcx->delete_(obj, &instance.indirectGlobals(),
+ MemoryUse::WasmInstanceGlobals);
+ if (!instance.isNewborn()) {
+ if (instance.instance().debugEnabled()) {
+ instance.instance().debug().finalize(gcx);
+ }
+ Instance::destroy(&instance.instance());
+ gcx->removeCellMemory(obj, sizeof(Instance),
+ MemoryUse::WasmInstanceInstance);
+ }
+}
+
+/* static */
+void WasmInstanceObject::trace(JSTracer* trc, JSObject* obj) {
+ WasmInstanceObject& instanceObj = obj->as<WasmInstanceObject>();
+ instanceObj.exports().trace(trc);
+ instanceObj.indirectGlobals().trace(trc);
+ if (!instanceObj.isNewborn()) {
+ instanceObj.instance().tracePrivate(trc);
+ }
+}
+
+/* static */
+WasmInstanceObject* WasmInstanceObject::create(
+ JSContext* cx, const SharedCode& code,
+ const DataSegmentVector& dataSegments,
+ const ElemSegmentVector& elemSegments, uint32_t instanceDataLength,
+ Handle<WasmMemoryObject*> memory, SharedTableVector&& tables,
+ const JSObjectVector& funcImports, const GlobalDescVector& globals,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ const WasmTagObjectVector& tagObjs, HandleObject proto,
+ UniqueDebugState maybeDebug) {
+ Rooted<UniquePtr<ExportMap>> exports(cx,
+ js::MakeUnique<ExportMap>(cx->zone()));
+ if (!exports) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ UniquePtr<WasmFunctionScopeMap> scopes =
+ js::MakeUnique<WasmFunctionScopeMap>(cx->zone(), cx->zone());
+ if (!scopes) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ // Note that `scopes` is a WeakCache, auto-linked into a sweep list on the
+ // Zone, and so does not require rooting.
+
+ uint32_t indirectGlobals = 0;
+
+ for (uint32_t i = 0; i < globalObjs.length(); i++) {
+ if (globalObjs[i] && globals[i].isIndirect()) {
+ indirectGlobals++;
+ }
+ }
+
+ Rooted<UniquePtr<GlobalObjectVector>> indirectGlobalObjs(
+ cx, js::MakeUnique<GlobalObjectVector>(cx->zone()));
+ if (!indirectGlobalObjs || !indirectGlobalObjs->resize(indirectGlobals)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ {
+ uint32_t next = 0;
+ for (uint32_t i = 0; i < globalObjs.length(); i++) {
+ if (globalObjs[i] && globals[i].isIndirect()) {
+ (*indirectGlobalObjs)[next++] = globalObjs[i];
+ }
+ }
+ }
+
+ Instance* instance = nullptr;
+ Rooted<WasmInstanceObject*> obj(cx);
+
+ {
+ // We must delay creating metadata for this object until after all its
+ // slots have been initialized. We must also create the metadata before
+ // calling Instance::init as that may allocate new objects.
+ AutoSetNewObjectMetadata metadata(cx);
+ obj = NewObjectWithGivenProto<WasmInstanceObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isTenured(), "assumed by WasmTableObject write barriers");
+
+ // Finalization assumes these slots are always initialized:
+ InitReservedSlot(obj, EXPORTS_SLOT, exports.release(),
+ MemoryUse::WasmInstanceExports);
+
+ InitReservedSlot(obj, SCOPES_SLOT, scopes.release(),
+ MemoryUse::WasmInstanceScopes);
+
+ InitReservedSlot(obj, GLOBALS_SLOT, indirectGlobalObjs.release(),
+ MemoryUse::WasmInstanceGlobals);
+
+ obj->initReservedSlot(INSTANCE_SCOPE_SLOT, UndefinedValue());
+
+ // The INSTANCE_SLOT may not be initialized if Instance allocation fails,
+ // leading to an observable "newborn" state in tracing/finalization.
+ MOZ_ASSERT(obj->isNewborn());
+
+ // Create this just before constructing Instance to avoid rooting hazards.
+ instance = Instance::create(cx, obj, code, instanceDataLength, memory,
+ std::move(tables), std::move(maybeDebug));
+ if (!instance) {
+ return nullptr;
+ }
+
+ InitReservedSlot(obj, INSTANCE_SLOT, instance,
+ MemoryUse::WasmInstanceInstance);
+ MOZ_ASSERT(!obj->isNewborn());
+ }
+
+ if (!instance->init(cx, funcImports, globalImportValues, globalObjs, tagObjs,
+ dataSegments, elemSegments)) {
+ return nullptr;
+ }
+
+ return obj;
+}
+
+void WasmInstanceObject::initExportsObj(JSObject& exportsObj) {
+ MOZ_ASSERT(getReservedSlot(EXPORTS_OBJ_SLOT).isUndefined());
+ setReservedSlot(EXPORTS_OBJ_SLOT, ObjectValue(exportsObj));
+}
+
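+// The second constructor argument is an optional import object: reject
+// non-object values, but leave `importObj` null when the argument is absent
+// or undefined.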
+static bool GetImportArg(JSContext* cx, CallArgs callArgs,
+ MutableHandleObject importObj) {
+ if (!callArgs.get(1).isUndefined()) {
+ if (!callArgs[1].isObject()) {
+ return ThrowBadImportArg(cx);
+ }
+ importObj.set(&callArgs[1].toObject());
+ }
+ return true;
+}
+
+/* static */
+bool WasmInstanceObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ Log(cx, "sync new Instance() started");
+
+ if (!ThrowIfNotConstructing(cx, args, "Instance")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Instance", 1)) {
+ return false;
+ }
+
+ const Module* module;
+ if (!args[0].isObject() || !IsModuleObject(&args[0].toObject(), &module)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_MOD_ARG);
+ return false;
+ }
+
+ RootedObject importObj(cx);
+ if (!GetImportArg(cx, args, &importObj)) {
+ return false;
+ }
+
+ RootedObject proto(
+ cx, GetWasmConstructorPrototype(cx, args, JSProto_WasmInstance));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ Rooted<ImportValues> imports(cx);
+ if (!GetImports(cx, *module, importObj, imports.address())) {
+ return false;
+ }
+
+ Rooted<WasmInstanceObject*> instanceObj(cx);
+ if (!module->instantiate(cx, imports.get(), proto, &instanceObj)) {
+ return false;
+ }
+
+ Log(cx, "sync new Instance() succeeded");
+
+ args.rval().setObject(*instanceObj);
+ return true;
+}
+
+Instance& WasmInstanceObject::instance() const {
+ MOZ_ASSERT(!isNewborn());
+ return *(Instance*)getReservedSlot(INSTANCE_SLOT).toPrivate();
+}
+
+JSObject& WasmInstanceObject::exportsObj() const {
+ return getReservedSlot(EXPORTS_OBJ_SLOT).toObject();
+}
+
+WasmInstanceObject::ExportMap& WasmInstanceObject::exports() const {
+ return *(ExportMap*)getReservedSlot(EXPORTS_SLOT).toPrivate();
+}
+
+WasmInstanceObject::UnspecifiedScopeMap& WasmInstanceObject::scopes() const {
+ return *(UnspecifiedScopeMap*)(getReservedSlot(SCOPES_SLOT).toPrivate());
+}
+
+WasmInstanceObject::GlobalObjectVector& WasmInstanceObject::indirectGlobals()
+ const {
+ return *(GlobalObjectVector*)getReservedSlot(GLOBALS_SLOT).toPrivate();
+}
+
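+// Native call target for exported wasm functions: recovers the instance and
+// function index from the callee and routes the call through
+// Instance::callExport().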
+static bool WasmCall(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ RootedFunction callee(cx, &args.callee().as<JSFunction>());
+
+ Instance& instance = ExportedFunctionToInstance(callee);
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(callee);
+ return instance.callExport(cx, funcIndex, args);
+}
+
+/*
+ * [SMDOC] Exported wasm functions and the jit-entry stubs
+ *
+ * ## The kinds of exported functions
+ *
+ * There are several kinds of exported wasm functions. /Explicitly/ exported
+ * functions are:
+ *
+ * - any wasm function exported via the export section
+ * - any asm.js export
+ * - the module start function
+ *
+ * There are also /implicitly/ exported functions: these are the functions
+ * whose indices in the module are referenced outside the code segment, e.g.,
+ * in element segments and in global initializers.
+ *
+ * ## Wasm functions as JSFunctions
+ *
+ * Any exported function can be manipulated by both JS and wasm code, and to
+ * both it is represented as a JSFunction. To JS, that means that the
+ * function can be called in the same way as any other JSFunction. To Wasm, it
+ * means that the function is a reference with the same representation as
+ * externref.
+ *
+ * However, the JSFunction object is created only when the function value is
+ * actually exposed to JS the first time. The creation is performed by
+ * getExportedFunction(), below, as follows:
+ *
+ * - a function exported via the export section (or from asm.js) is created
+ * when the export object is created, which happens at instantiation time.
+ *
+ * - a function implicitly exported via a table is created when the table
+ * element is read (by JS or wasm) and a function value is needed to
+ * represent that value. Functions stored in tables by initializers have a
+ * special representation that does not require the function object to be
+ * created.
+ *
+ * - a function implicitly exported via a global initializer is created when
+ * the global is initialized.
+ *
+ * - a function referenced from a ref.func instruction in code is created when
+ * that instruction is executed the first time.
+ *
+ * The JSFunction representing a wasm function never changes: every reference to
+ * the wasm function that exposes the JSFunction gets the same JSFunction. In
+ * particular, imported functions already have a JSFunction representation (from
+ * JS or from their home module), and will be exposed using that representation.
+ *
+ * The mapping from a wasm function to its JSFunction is instance-specific, and
+ * held in a hashmap in the instance. If a module is shared across multiple
+ * instances, possibly in multiple threads, each instance will have its own
+ * JSFunction representing the wasm function.
+ *
+ * ## Stubs -- interpreter, eager, lazy, provisional, and absent
+ *
+ * While a Wasm exported function is just a JSFunction, the internal wasm ABI is
+ * neither the C++ ABI nor the JS JIT ABI, so there needs to be an extra step
+ * when C++ or JS JIT code calls wasm code. For this, execution passes through
+ * a stub that is adapted to both the JS caller and the wasm callee.
+ *
+ * ### Interpreter stubs and jit-entry stubs
+ *
+ * When JS interpreted code calls a wasm function, we end up in
+ * Instance::callExport() to execute the call. This function must enter wasm,
+ * and to do this it uses a stub that is specific to the wasm function (see
+ * GenerateInterpEntry) that is callable with the C++ interpreter ABI and which
+ * will convert arguments as necessary and enter compiled wasm code.
+ *
+ * The interpreter stub is created eagerly, when the module is compiled.
+ *
+ * However, the interpreter call path is slow, and when JS jitted code calls
+ * wasm we want to do better. In this case, there is a different, optimized
+ * stub that is to be invoked, and it uses the JIT ABI. This is the jit-entry
+ * stub for the function. Jitted code will call a wasm function's jit-entry
+ * stub to invoke the function with the JIT ABI. The stub will adapt the call
+ * to the wasm ABI.
+ *
+ * Some jit-entry stubs are created eagerly and some are created lazily.
+ *
+ * ### Eager jit-entry stubs
+ *
+ * The explicitly exported functions have stubs created for them eagerly. Eager
+ * stubs are created with their tier when the module is compiled, see
+ * ModuleGenerator::finishCodeTier(), which calls wasm::GenerateStubs(), which
+ * generates stubs for functions with eager stubs.
+ *
+ * An eager stub for tier-1 is upgraded to tier-2 if the module tiers up, see
+ * below.
+ *
+ * ### Lazy jit-entry stubs
+ *
+ * Stubs are created lazily for all implicitly exported functions. These
+ * functions may flow out to JS, but will only need a stub if they are ever
+ * called from jitted code. (That's true for explicitly exported functions too,
+ * but for them the presumption is that they will be called.)
+ *
+ * Lazy stubs are created only when they are needed, and they are /doubly/ lazy,
+ * see getExportedFunction(), below: A function implicitly exported via a table
+ * or global may be manipulated eagerly by host code without actually being
+ * called (maybe ever), so we do not generate a lazy stub when the function
+ * object escapes to JS, but instead delay stub generation until the function is
+ * actually called.
+ *
+ * ### The provisional lazy jit-entry stub
+ *
+ * However, JS baseline compilation needs to have a stub to start with in order
+ * to allow it to attach CacheIR data to the call (or it deoptimizes the call as
+ * a C++ call). Thus when the JSFunction for the wasm export is retrieved by JS
+ * code, a /provisional/ lazy jit-entry stub is associated with the function.
+ * The stub will invoke the wasm function on the slow interpreter path via
+ * callExport - if the function is ever called - and will cause a fast jit-entry
+ * stub to be created at the time of the call. The provisional lazy stub is
+ * shared globally, it contains no function-specific or context-specific data.
+ *
+ * Thus, the final lazy jit-entry stubs are eventually created by
+ * Instance::callExport, when a call is routed through it on the slow path for
+ * any of the reasons given above.
+ *
+ * ### Absent jit-entry stubs
+ *
+ * Some functions never get jit-entry stubs. The predicate canHaveJitEntry()
+ * determines if a wasm function gets a stub, and it will deny this if the
+ * function's signature exposes non-JS-compatible types (such as v128) or if
+ * stub optimization has been disabled by a jit option. Calls to these
+ * functions will continue to go via callExport and use the slow interpreter
+ * stub.
+ *
+ * ## The jit-entry jump table
+ *
+ * The mapping from the exported function to its jit-entry stub is implemented
+ * by the jit-entry jump table in the JumpTables object (see WasmCode.h). The
+ * jit-entry jump table entry for a function holds a stub that the jit can call
+ * to perform fast calls.
+ *
+ * While there is a single contiguous jump table, it has two logical sections:
+ * one for eager stubs, and one for lazy stubs. These sections are initialized
+ * and updated separately, using logic that is specific to each section.
+ *
+ * The value of the table element for an eager stub is a pointer to the stub
+ * code in the current tier. The pointer is installed just after the creation
+ * of the stub, before any code in the module is executed. If the module later
+ * tiers up, the eager jit-entry stub for tier-1 code is replaced by one for
+ * tier-2 code, see the next section.
+ *
+ * Initially the value of the jump table element for a lazy stub is null.
+ *
+ * If the function is retrieved by JS (by getExportedFunction()) and is not
+ * barred from having a jit-entry, then the stub is upgraded to the shared
+ * provisional lazy jit-entry stub. This upgrade can race if the module is
+ * shared, so the update is atomic and is performed only if the entry is still
+ * null. Since the provisional lazy stub is shared, this is fine; if
+ * several threads try to upgrade at the same time, it is to the same shared
+ * value.
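+ *
+ * Conceptually (this is only a sketch; `jumpTable` and `provisionalLazyStub`
+ * are illustrative names, and the real update goes through
+ * setJitEntryIfNull() in getExportedFunction() below), the upgrade amounts to
+ * a single compare-and-swap on the function's jump-table entry:
+ *
+ *   if (jumpTable[funcIndex] == nullptr)          // checked atomically
+ *     jumpTable[funcIndex] = provisionalLazyStub; // same value on any thread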
+ *
+ * If the retrieved function is later invoked (via callExport()), the stub is
+ * upgraded to an actual jit-entry stub for the current code tier, again if the
+ * function is allowed to have a jit-entry. This is not racy -- though multiple
+ * threads can be trying to create a jit-entry stub at the same time, they do so
+ * under a lock and only the first to take the lock will be allowed to create a
+ * stub; the others will reuse the first-installed stub.
+ *
+ * If the module later tiers up, the lazy jit-entry stub for tier-1 code (if it
+ * exists) is replaced by one for tier-2 code, see the next section.
+ *
+ * (Note, the InterpEntry stub is never stored in the jit-entry table, as it
+ * uses the C++ ABI, not the JIT ABI. It is accessible through the
+ * FunctionEntry.)
+ *
+ * ### Interaction of the jit-entry jump table and tiering
+ *
+ * (For general info about tiering, see the comment in WasmCompile.cpp.)
+ *
+ * The jit-entry stub, whether eager or lazy, is specific to a code tier - a
+ * stub will invoke the code for its function for the tier. When we tier up,
+ * new jit-entry stubs must be created that reference tier-2 code, and must then
+ * be patched into the jit-entry table. The complication here is that, since
+ * the jump table is shared with its code between instances on multiple threads,
+ * tier-1 code is running on other threads and new tier-1 specific jit-entry
+ * stubs may be created concurrently with trying to create the tier-2 stubs on
+ * the thread that performs the tiering-up. Indeed, there may also be
+ * concurrent attempts to upgrade null jit-entries to the provisional lazy stub.
+ *
+ * Eager stubs:
+ *
+ * - Eager stubs for tier-2 code are patched in racily by Module::finishTier2()
+ * along with code pointers for tiering; nothing conflicts with these writes.
+ *
+ * Lazy stubs:
+ *
+ * - An upgrade from a null entry to a lazy provisional stub is atomic and can
+ * only happen if the entry is null, and it only happens in
+ * getExportedFunction(). No lazy provisional stub will be installed if
+ * there's another stub present.
+ *
+ * - The lazy tier-appropriate stub is installed by callExport() (really by
+ * EnsureEntryStubs()) during the first invocation of the exported function
+ * that reaches callExport(). That invocation must be from within JS, and so
+ * the jit-entry element can't be null, because a prior getExportedFunction()
+ * will have ensured that it is not: the lazy provisional stub will have been
+ * installed. Hence the installing of the lazy tier-appropriate stub does
+ * not race with the installing of the lazy provisional stub.
+ *
+ * - A lazy tier-1 stub is upgraded to a lazy tier-2 stub by
+ * Module::finishTier2(). The upgrade needs to ensure that all tier-1 stubs
+ * are upgraded, and that once the upgrade is finished, callExport() will
+ * only create tier-2 lazy stubs. (This upgrading does not upgrade lazy
+ * provisional stubs or absent stubs.)
+ *
+ * The locking protocol ensuring that all stubs are upgraded properly and
+ * that the system switches to creating tier-2 stubs is implemented in
+ * Module::finishTier2() and EnsureEntryStubs():
+ *
+ * There are two locks, one per code tier.
+ *
+ * EnsureEntryStubs() is attempting to create a tier-appropriate lazy stub,
+ * so it takes the lock for the current best tier, checks to see if there is
+ * a stub, and exits if there is. If the tier changed racily it takes the
+ * other lock too, since that is now the lock for the best tier. Then it
+ * creates the stub, installs it, and releases the locks. Thus at most one
+ * stub per tier can be created at a time.
+ *
+ * Module::finishTier2() takes both locks (tier-1 before tier-2), thus
+ * preventing EnsureEntryStubs() from creating stubs while stub upgrading is
+ * going on, and itself waiting until EnsureEntryStubs() is not active. Once
+ * it has both locks, it upgrades all lazy stubs and makes tier-2 the new
+ * best tier. Should EnsureEntryStubs subsequently enter, it will find that
+ * a stub already exists at tier-2 and will exit early.
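+ *
+ *  In rough pseudocode (illustrative helper names only; this mirrors the
+ *  prose above rather than the actual structure of the code):
+ *
+ *    EnsureEntryStubs(funcIndex):
+ *      lock(stubLock[bestTier()])
+ *      if haveLazyStub(funcIndex): return          // reuse existing stub
+ *      if bestTier() changed: lock(stubLock[bestTier()])
+ *      install(createLazyStub(funcIndex, bestTier()))
+ *      unlock all held locks
+ *
+ *    Module::finishTier2():
+ *      lock(stubLock[Tier1]); lock(stubLock[Tier2])
+ *      upgrade every existing lazy tier-1 stub to a tier-2 stub
+ *      make tier-2 the best tier
+ *      unlock(stubLock[Tier2]); unlock(stubLock[Tier1])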
+ *
+ * (It would seem that the locking protocol could be simplified a little by
+ * having only one lock, hanging off the Code object, or by unconditionally
+ * taking both locks in EnsureEntryStubs(). However, in some cases where we
+ * acquire a lock the Code object is not readily available, so plumbing would
+ * have to be added, and in EnsureEntryStubs(), there are sometimes not two code
+ * tiers.)
+ *
+ * ## Stub lifetimes and serialization
+ *
+ * Eager jit-entry stub code, along with stub code for import functions, is
+ * serialized along with the tier-2 code for the module.
+ *
+ * Lazy stub code and thunks for builtin functions (including the provisional
+ * lazy jit-entry stub) are never serialized.
+ */
+
+/* static */
+bool WasmInstanceObject::getExportedFunction(
+ JSContext* cx, Handle<WasmInstanceObject*> instanceObj, uint32_t funcIndex,
+ MutableHandleFunction fun) {
+ if (ExportMap::Ptr p = instanceObj->exports().lookup(funcIndex)) {
+ fun.set(p->value());
+ return true;
+ }
+
+ const Instance& instance = instanceObj->instance();
+ const FuncExport& funcExport =
+ instance.metadata(instance.code().bestTier()).lookupFuncExport(funcIndex);
+ const FuncType& funcType = instance.metadata().getFuncExportType(funcExport);
+ unsigned numArgs = funcType.args().length();
+
+ if (instance.isAsmJS()) {
+ // asm.js needs to act like a normal JS function which means having the
+ // name from the original source and being callable as a constructor.
+ Rooted<JSAtom*> name(cx, instance.getFuncDisplayAtom(cx, funcIndex));
+ if (!name) {
+ return false;
+ }
+ fun.set(NewNativeConstructor(cx, WasmCall, numArgs, name,
+ gc::AllocKind::FUNCTION_EXTENDED,
+ TenuredObject, FunctionFlags::ASMJS_CTOR));
+ if (!fun) {
+ return false;
+ }
+
+ // asm.js does not support jit entries.
+ fun->setWasmFuncIndex(funcIndex);
+ } else {
+ Rooted<JSAtom*> name(cx, NumberToAtom(cx, funcIndex));
+ if (!name) {
+ return false;
+ }
+ RootedObject proto(cx);
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ proto = GlobalObject::getOrCreatePrototype(cx, JSProto_WasmFunction);
+ if (!proto) {
+ return false;
+ }
+#endif
+ fun.set(NewFunctionWithProto(
+ cx, WasmCall, numArgs, FunctionFlags::WASM, nullptr, name, proto,
+ gc::AllocKind::FUNCTION_EXTENDED, TenuredObject));
+ if (!fun) {
+ return false;
+ }
+
+    // Some applications eagerly access all table elements, which currently
+ // triggers worst-case behavior for lazy stubs, since each will allocate a
+ // separate 4kb code page. Most eagerly-accessed functions are not called,
+ // so use a shared, provisional (and slow) lazy stub as JitEntry and wait
+ // until Instance::callExport() to create the fast entry stubs.
+ if (funcType.canHaveJitEntry()) {
+ if (!funcExport.hasEagerStubs()) {
+ if (!EnsureBuiltinThunksInitialized()) {
+ return false;
+ }
+ void* provisionalLazyJitEntryStub = ProvisionalLazyJitEntryStub();
+ MOZ_ASSERT(provisionalLazyJitEntryStub);
+ instance.code().setJitEntryIfNull(funcIndex,
+ provisionalLazyJitEntryStub);
+ }
+ fun->setWasmJitEntry(instance.code().getAddressOfJitEntry(funcIndex));
+ } else {
+ fun->setWasmFuncIndex(funcIndex);
+ }
+ }
+
+ fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT,
+ PrivateValue(const_cast<Instance*>(&instance)));
+
+ const CodeTier& codeTier =
+ instance.code().codeTier(instance.code().bestTier());
+ const CodeRange& codeRange = codeTier.metadata().codeRange(funcExport);
+
+ fun->setExtendedSlot(FunctionExtended::WASM_FUNC_UNCHECKED_ENTRY_SLOT,
+ PrivateValue(codeTier.segment().base() +
+ codeRange.funcUncheckedCallEntry()));
+
+ if (!instanceObj->exports().putNew(funcIndex, fun)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+const CodeRange& WasmInstanceObject::getExportedFunctionCodeRange(
+ JSFunction* fun, Tier tier) {
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(fun);
+ MOZ_ASSERT(exports().lookup(funcIndex)->value() == fun);
+ const MetadataTier& metadata = instance().metadata(tier);
+ return metadata.codeRange(metadata.lookupFuncExport(funcIndex));
+}
+
+/* static */
+WasmInstanceScope* WasmInstanceObject::getScope(
+ JSContext* cx, Handle<WasmInstanceObject*> instanceObj) {
+ if (!instanceObj->getReservedSlot(INSTANCE_SCOPE_SLOT).isUndefined()) {
+ return (WasmInstanceScope*)instanceObj->getReservedSlot(INSTANCE_SCOPE_SLOT)
+ .toGCThing();
+ }
+
+ Rooted<WasmInstanceScope*> instanceScope(
+ cx, WasmInstanceScope::create(cx, instanceObj));
+ if (!instanceScope) {
+ return nullptr;
+ }
+
+ instanceObj->setReservedSlot(INSTANCE_SCOPE_SLOT,
+ PrivateGCThingValue(instanceScope));
+
+ return instanceScope;
+}
+
+/* static */
+WasmFunctionScope* WasmInstanceObject::getFunctionScope(
+ JSContext* cx, Handle<WasmInstanceObject*> instanceObj,
+ uint32_t funcIndex) {
+ if (auto p =
+ instanceObj->scopes().asWasmFunctionScopeMap().lookup(funcIndex)) {
+ return p->value();
+ }
+
+ Rooted<WasmInstanceScope*> instanceScope(
+ cx, WasmInstanceObject::getScope(cx, instanceObj));
+ if (!instanceScope) {
+ return nullptr;
+ }
+
+ Rooted<WasmFunctionScope*> funcScope(
+ cx, WasmFunctionScope::create(cx, instanceScope, funcIndex));
+ if (!funcScope) {
+ return nullptr;
+ }
+
+ if (!instanceObj->scopes().asWasmFunctionScopeMap().putNew(funcIndex,
+ funcScope)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return funcScope;
+}
+
+bool wasm::IsWasmExportedFunction(JSFunction* fun) {
+ return fun->kind() == FunctionFlags::Wasm;
+}
+
+Instance& wasm::ExportedFunctionToInstance(JSFunction* fun) {
+ return fun->wasmInstance();
+}
+
+WasmInstanceObject* wasm::ExportedFunctionToInstanceObject(JSFunction* fun) {
+ return fun->wasmInstance().object();
+}
+
+uint32_t wasm::ExportedFunctionToFuncIndex(JSFunction* fun) {
+ return fun->wasmInstance().code().getFuncIndex(fun);
+}
+
+// ============================================================================
+// WebAssembly.Memory class and methods
+
+const JSClassOps WasmMemoryObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmMemoryObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass WasmMemoryObject::class_ = {
+ "WebAssembly.Memory",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmMemoryObject::classOps_, &WasmMemoryObject::classSpec_};
+
+const JSClass& WasmMemoryObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmMemoryName[] = "Memory";
+
+static JSObject* CreateWasmMemoryPrototype(JSContext* cx, JSProtoKey key) {
+ RootedObject proto(cx, GlobalObject::createBlankPrototype(
+ cx, cx->global(), &WasmMemoryObject::protoClass_));
+ if (!proto) {
+ return nullptr;
+ }
+ if (MemoryControlAvailable(cx)) {
+ if (!JS_DefineFunctions(cx, proto,
+ WasmMemoryObject::memoryControlMethods)) {
+ return nullptr;
+ }
+ }
+ return proto;
+}
+
+const ClassSpec WasmMemoryObject::classSpec_ = {
+ CreateWasmConstructor<WasmMemoryObject, WasmMemoryName>,
+ CreateWasmMemoryPrototype,
+ WasmMemoryObject::static_methods,
+ nullptr,
+ WasmMemoryObject::methods,
+ WasmMemoryObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+/* static */
+void WasmMemoryObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ WasmMemoryObject& memory = obj->as<WasmMemoryObject>();
+ if (memory.hasObservers()) {
+ gcx->delete_(obj, &memory.observers(), MemoryUse::WasmMemoryObservers);
+ }
+}
+
+/* static */
+WasmMemoryObject* WasmMemoryObject::create(
+ JSContext* cx, HandleArrayBufferObjectMaybeShared buffer, bool isHuge,
+ HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<WasmMemoryObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+
+ obj->initReservedSlot(BUFFER_SLOT, ObjectValue(*buffer));
+ obj->initReservedSlot(ISHUGE_SLOT, BooleanValue(isHuge));
+ MOZ_ASSERT(!obj->hasObservers());
+
+ return obj;
+}
+
+/* static */
+bool WasmMemoryObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Memory")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Memory", 1)) {
+ return false;
+ }
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "memory");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+ Limits limits;
+ if (!GetLimits(cx, obj, LimitsKind::Memory, &limits) ||
+ !CheckLimits(cx, MaxMemoryLimitField(limits.indexType),
+ LimitsKind::Memory, &limits)) {
+ return false;
+ }
+
+ if (Pages(limits.initial) > MaxMemoryPages(limits.indexType)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MEM_IMP_LIMIT);
+ return false;
+ }
+ MemoryDesc memory(limits);
+
+ RootedArrayBufferObjectMaybeShared buffer(cx);
+ if (!CreateWasmBuffer(cx, memory, &buffer)) {
+ return false;
+ }
+
+ RootedObject proto(cx,
+ GetWasmConstructorPrototype(cx, args, JSProto_WasmMemory));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ Rooted<WasmMemoryObject*> memoryObj(
+ cx, WasmMemoryObject::create(
+ cx, buffer, IsHugeMemoryEnabled(limits.indexType), proto));
+ if (!memoryObj) {
+ return false;
+ }
+
+ args.rval().setObject(*memoryObj);
+ return true;
+}
+
+static bool IsMemory(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmMemoryObject>();
+}
+
+/* static */
+bool WasmMemoryObject::bufferGetterImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmMemoryObject*> memoryObj(
+ cx, &args.thisv().toObject().as<WasmMemoryObject>());
+ RootedArrayBufferObjectMaybeShared buffer(cx, &memoryObj->buffer());
+
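+  // A shared memory's underlying raw buffer can be grown at any time (also by
+  // other agents). If it is now longer than the SharedArrayBuffer cached in
+  // BUFFER_SLOT, create a fresh, longer SharedArrayBuffer for the same raw
+  // buffer and cache it instead.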
+ if (memoryObj->isShared()) {
+ size_t memoryLength = memoryObj->volatileMemoryLength();
+ MOZ_ASSERT(memoryLength >= buffer->byteLength());
+
+ if (memoryLength > buffer->byteLength()) {
+ RootedSharedArrayBufferObject newBuffer(
+ cx, SharedArrayBufferObject::New(
+ cx, memoryObj->sharedArrayRawBuffer(), memoryLength));
+ if (!newBuffer) {
+ return false;
+ }
+ // OK to addReference after we try to allocate because the memoryObj
+ // keeps the rawBuffer alive.
+ if (!memoryObj->sharedArrayRawBuffer()->addReference()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_SAB_REFCNT_OFLO);
+ return false;
+ }
+ buffer = newBuffer;
+ memoryObj->setReservedSlot(BUFFER_SLOT, ObjectValue(*newBuffer));
+ }
+ }
+
+ args.rval().setObject(*buffer);
+ return true;
+}
+
+/* static */
+bool WasmMemoryObject::bufferGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, bufferGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmMemoryObject::properties[] = {
+ JS_PSG("buffer", WasmMemoryObject::bufferGetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Memory", JSPROP_READONLY),
+ JS_PS_END};
+
+/* static */
+bool WasmMemoryObject::growImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmMemoryObject*> memory(
+ cx, &args.thisv().toObject().as<WasmMemoryObject>());
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Memory.grow", 1)) {
+ return false;
+ }
+
+ uint32_t delta;
+ if (!EnforceRangeU32(cx, args.get(0), "Memory", "grow delta", &delta)) {
+ return false;
+ }
+
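+  // grow() returns the previous size of the memory in pages, or -1 on
+  // failure.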
+ uint32_t ret = grow(memory, delta, cx);
+
+ if (ret == uint32_t(-1)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GROW,
+ "memory");
+ return false;
+ }
+
+ args.rval().setInt32(int32_t(ret));
+ return true;
+}
+
+/* static */
+bool WasmMemoryObject::grow(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, growImpl>(cx, args);
+}
+
+/* static */
+bool WasmMemoryObject::discardImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmMemoryObject*> memory(
+ cx, &args.thisv().toObject().as<WasmMemoryObject>());
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Memory.discard", 2)) {
+ return false;
+ }
+
+ uint64_t byteOffset;
+ if (!EnforceRangeU64(cx, args.get(0), "Memory", "byte offset", &byteOffset)) {
+ return false;
+ }
+
+ uint64_t byteLen;
+ if (!EnforceRangeU64(cx, args.get(1), "Memory", "length", &byteLen)) {
+ return false;
+ }
+
+ if (byteOffset % wasm::PageSize != 0 || byteLen % wasm::PageSize != 0) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_UNALIGNED_ACCESS);
+ return false;
+ }
+
+ if (!wasm::MemoryBoundsCheck(byteOffset, byteLen,
+ memory->volatileMemoryLength())) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return false;
+ }
+
+ discard(memory, byteOffset, byteLen, cx);
+
+ args.rval().setUndefined();
+ return true;
+}
+
+/* static */
+bool WasmMemoryObject::discard(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, discardImpl>(cx, args);
+}
+
+const JSFunctionSpec WasmMemoryObject::methods[] = {
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ JS_FN("type", WasmMemoryObject::type, 0, JSPROP_ENUMERATE),
+#endif
+ JS_FN("grow", WasmMemoryObject::grow, 1, JSPROP_ENUMERATE), JS_FS_END};
+
+const JSFunctionSpec WasmMemoryObject::memoryControlMethods[] = {
+ JS_FN("discard", WasmMemoryObject::discard, 2, JSPROP_ENUMERATE),
+ JS_FS_END};
+
+const JSFunctionSpec WasmMemoryObject::static_methods[] = {JS_FS_END};
+
+ArrayBufferObjectMaybeShared& WasmMemoryObject::buffer() const {
+ return getReservedSlot(BUFFER_SLOT)
+ .toObject()
+ .as<ArrayBufferObjectMaybeShared>();
+}
+
+WasmSharedArrayRawBuffer* WasmMemoryObject::sharedArrayRawBuffer() const {
+ MOZ_ASSERT(isShared());
+ return buffer().as<SharedArrayBufferObject>().rawWasmBufferObject();
+}
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+bool WasmMemoryObject::typeImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmMemoryObject*> memoryObj(
+ cx, &args.thisv().toObject().as<WasmMemoryObject>());
+ RootedObject typeObj(
+ cx, MemoryTypeToObject(cx, memoryObj->isShared(), memoryObj->indexType(),
+ memoryObj->volatilePages(),
+ memoryObj->sourceMaxPages()));
+ if (!typeObj) {
+ return false;
+ }
+ args.rval().setObject(*typeObj);
+ return true;
+}
+
+bool WasmMemoryObject::type(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, typeImpl>(cx, args);
+}
+#endif
+
+size_t WasmMemoryObject::volatileMemoryLength() const {
+ if (isShared()) {
+ return sharedArrayRawBuffer()->volatileByteLength();
+ }
+ return buffer().byteLength();
+}
+
+wasm::Pages WasmMemoryObject::volatilePages() const {
+ if (isShared()) {
+ return sharedArrayRawBuffer()->volatileWasmPages();
+ }
+ return buffer().wasmPages();
+}
+
+wasm::Pages WasmMemoryObject::clampedMaxPages() const {
+ if (isShared()) {
+ return sharedArrayRawBuffer()->wasmClampedMaxPages();
+ }
+ return buffer().wasmClampedMaxPages();
+}
+
+Maybe<wasm::Pages> WasmMemoryObject::sourceMaxPages() const {
+ if (isShared()) {
+ return Some(sharedArrayRawBuffer()->wasmSourceMaxPages());
+ }
+ return buffer().wasmSourceMaxPages();
+}
+
+wasm::IndexType WasmMemoryObject::indexType() const {
+ if (isShared()) {
+ return sharedArrayRawBuffer()->wasmIndexType();
+ }
+ return buffer().wasmIndexType();
+}
+
+bool WasmMemoryObject::isShared() const {
+ return buffer().is<SharedArrayBufferObject>();
+}
+
+bool WasmMemoryObject::hasObservers() const {
+ return !getReservedSlot(OBSERVERS_SLOT).isUndefined();
+}
+
+WasmMemoryObject::InstanceSet& WasmMemoryObject::observers() const {
+ MOZ_ASSERT(hasObservers());
+ return *reinterpret_cast<InstanceSet*>(
+ getReservedSlot(OBSERVERS_SLOT).toPrivate());
+}
+
+WasmMemoryObject::InstanceSet* WasmMemoryObject::getOrCreateObservers(
+ JSContext* cx) {
+ if (!hasObservers()) {
+ auto observers = MakeUnique<InstanceSet>(cx->zone(), cx->zone());
+ if (!observers) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ InitReservedSlot(this, OBSERVERS_SLOT, observers.release(),
+ MemoryUse::WasmMemoryObservers);
+ }
+
+ return &observers();
+}
+
+bool WasmMemoryObject::isHuge() const {
+ return getReservedSlot(ISHUGE_SLOT).toBoolean();
+}
+
+bool WasmMemoryObject::movingGrowable() const {
+ return !isHuge() && !buffer().wasmSourceMaxPages();
+}
+
+size_t WasmMemoryObject::boundsCheckLimit() const {
+ if (!buffer().isWasm() || isHuge()) {
+ return buffer().byteLength();
+ }
+ size_t mappedSize = buffer().wasmMappedSize();
+#if !defined(JS_64BIT)
+  // See clamping performed in CreateSpecificWasmBuffer(). On builds without
+  // JS_64BIT we do not want to overflow a uint32_t. On builds with JS_64BIT,
+  // all constraints are implied by the largest accepted value for a memory's
+  // max field.
+ MOZ_ASSERT(mappedSize < UINT32_MAX);
+#endif
+ MOZ_ASSERT(mappedSize % wasm::PageSize == 0);
+ MOZ_ASSERT(mappedSize >= wasm::GuardSize);
+ MOZ_ASSERT(wasm::IsValidBoundsCheckImmediate(mappedSize - wasm::GuardSize));
+ size_t limit = mappedSize - wasm::GuardSize;
+ MOZ_ASSERT(limit <= MaxMemoryBoundsCheckLimit(indexType()));
+ return limit;
+}
+
+bool WasmMemoryObject::addMovingGrowObserver(JSContext* cx,
+ WasmInstanceObject* instance) {
+ MOZ_ASSERT(movingGrowable());
+
+ InstanceSet* observers = getOrCreateObservers(cx);
+ if (!observers) {
+ return false;
+ }
+
+ if (!observers->putNew(instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+/* static */
+uint64_t WasmMemoryObject::growShared(Handle<WasmMemoryObject*> memory,
+ uint64_t delta) {
+ WasmSharedArrayRawBuffer* rawBuf = memory->sharedArrayRawBuffer();
+ WasmSharedArrayRawBuffer::Lock lock(rawBuf);
+
+ Pages oldNumPages = rawBuf->volatileWasmPages();
+ Pages newPages = oldNumPages;
+ if (!newPages.checkedIncrement(Pages(delta))) {
+ return uint64_t(int64_t(-1));
+ }
+
+ if (!rawBuf->wasmGrowToPagesInPlace(lock, memory->indexType(), newPages)) {
+ return uint64_t(int64_t(-1));
+ }
+ // New buffer objects will be created lazily in all agents (including in
+ // this agent) by bufferGetterImpl, above, so no more work to do here.
+
+ return oldNumPages.value();
+}
+
+/* static */
+uint64_t WasmMemoryObject::grow(Handle<WasmMemoryObject*> memory,
+ uint64_t delta, JSContext* cx) {
+ if (memory->isShared()) {
+ return growShared(memory, delta);
+ }
+
+ RootedArrayBufferObject oldBuf(cx, &memory->buffer().as<ArrayBufferObject>());
+
+#if !defined(JS_64BIT)
+ // TODO (large ArrayBuffer): See more information at the definition of
+ // MaxMemoryBytes().
+ MOZ_ASSERT(MaxMemoryBytes(memory->indexType()) <= UINT32_MAX,
+ "Avoid 32-bit overflows");
+#endif
+
+ Pages oldNumPages = oldBuf->wasmPages();
+ Pages newPages = oldNumPages;
+ if (!newPages.checkedIncrement(Pages(delta))) {
+ return uint64_t(int64_t(-1));
+ }
+
+ RootedArrayBufferObject newBuf(cx);
+
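+  // A moving grow may transfer the contents to a new buffer at a different
+  // address (possible only when the memory has no declared maximum and is not
+  // huge); an in-place grow extends the existing allocation.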
+ if (memory->movingGrowable()) {
+ MOZ_ASSERT(!memory->isHuge());
+ if (!ArrayBufferObject::wasmMovingGrowToPages(memory->indexType(), newPages,
+ oldBuf, &newBuf, cx)) {
+ return uint64_t(int64_t(-1));
+ }
+ } else if (!ArrayBufferObject::wasmGrowToPagesInPlace(
+ memory->indexType(), newPages, oldBuf, &newBuf, cx)) {
+ return uint64_t(int64_t(-1));
+ }
+
+ memory->setReservedSlot(BUFFER_SLOT, ObjectValue(*newBuf));
+
+ // Only notify moving-grow-observers after the BUFFER_SLOT has been updated
+ // since observers will call buffer().
+ if (memory->hasObservers()) {
+ for (InstanceSet::Range r = memory->observers().all(); !r.empty();
+ r.popFront()) {
+ r.front()->instance().onMovingGrowMemory();
+ }
+ }
+
+ return oldNumPages.value();
+}
+
+/* static */
+void WasmMemoryObject::discard(Handle<WasmMemoryObject*> memory,
+ uint64_t byteOffset, uint64_t byteLen,
+ JSContext* cx) {
+ if (memory->isShared()) {
+ RootedSharedArrayBufferObject buf(
+ cx, &memory->buffer().as<SharedArrayBufferObject>());
+ SharedArrayBufferObject::wasmDiscard(buf, byteOffset, byteLen);
+ } else {
+ RootedArrayBufferObject buf(cx, &memory->buffer().as<ArrayBufferObject>());
+ ArrayBufferObject::wasmDiscard(buf, byteOffset, byteLen);
+ }
+}
+
+bool js::wasm::IsSharedWasmMemoryObject(JSObject* obj) {
+ WasmMemoryObject* mobj = obj->maybeUnwrapIf<WasmMemoryObject>();
+ return mobj && mobj->isShared();
+}
+
+// ============================================================================
+// WebAssembly.Table class and methods
+
+const JSClassOps WasmTableObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmTableObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ WasmTableObject::trace, // trace
+};
+
+const JSClass WasmTableObject::class_ = {
+ "WebAssembly.Table",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmTableObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmTableObject::classOps_, &WasmTableObject::classSpec_};
+
+const JSClass& WasmTableObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmTableName[] = "Table";
+
+const ClassSpec WasmTableObject::classSpec_ = {
+ CreateWasmConstructor<WasmTableObject, WasmTableName>,
+ GenericCreatePrototype<WasmTableObject>,
+ WasmTableObject::static_methods,
+ nullptr,
+ WasmTableObject::methods,
+ WasmTableObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+bool WasmTableObject::isNewborn() const {
+ MOZ_ASSERT(is<WasmTableObject>());
+ return getReservedSlot(TABLE_SLOT).isUndefined();
+}
+
+/* static */
+void WasmTableObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ WasmTableObject& tableObj = obj->as<WasmTableObject>();
+ if (!tableObj.isNewborn()) {
+ auto& table = tableObj.table();
+ gcx->release(obj, &table, table.gcMallocBytes(), MemoryUse::WasmTableTable);
+ }
+}
+
+/* static */
+void WasmTableObject::trace(JSTracer* trc, JSObject* obj) {
+ WasmTableObject& tableObj = obj->as<WasmTableObject>();
+ if (!tableObj.isNewborn()) {
+ tableObj.table().tracePrivate(trc);
+ }
+}
+
+// Return the JS value to use when a parameter to a function requiring a table
+// value is omitted. An implementation of [1].
+//
+// [1]
+// https://webassembly.github.io/reference-types/js-api/index.html#defaultvalue
+static Value RefTypeDefautValue(wasm::RefType tableType) {
+ return tableType.isExtern() ? UndefinedValue() : NullValue();
+}
+
+static bool CheckRefTypeValue(JSContext* cx, wasm::RefType type,
+ HandleValue value) {
+ RootedFunction fun(cx);
+ RootedAnyRef any(cx, AnyRef::null());
+
+ return CheckRefType(cx, type, value, &fun, &any);
+}
+
+/* static */
+WasmTableObject* WasmTableObject::create(JSContext* cx, uint32_t initialLength,
+ Maybe<uint32_t> maximumLength,
+ wasm::RefType tableType,
+ HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ Rooted<WasmTableObject*> obj(
+ cx, NewObjectWithGivenProto<WasmTableObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isNewborn());
+
+ TableDesc td(tableType, initialLength, maximumLength, Nothing(),
+ /*isAsmJS*/ false,
+ /*isImported=*/true, /*isExported=*/true);
+
+ SharedTable table = Table::create(cx, td, obj);
+ if (!table) {
+ return nullptr;
+ }
+
+ size_t size = table->gcMallocBytes();
+ InitReservedSlot(obj, TABLE_SLOT, table.forget().take(), size,
+ MemoryUse::WasmTableTable);
+
+ MOZ_ASSERT(!obj->isNewborn());
+ return obj;
+}
+
+/* static */
+bool WasmTableObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Table")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table", 1)) {
+ return false;
+ }
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "table");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+
+ JSAtom* elementAtom = Atomize(cx, "element", strlen("element"));
+ if (!elementAtom) {
+ return false;
+ }
+ RootedId elementId(cx, AtomToId(elementAtom));
+
+ RootedValue elementVal(cx);
+ if (!GetProperty(cx, obj, obj, elementId, &elementVal)) {
+ return false;
+ }
+
+ RefType tableType;
+ if (!ToRefType(cx, elementVal, &tableType)) {
+ return false;
+ }
+
+ Limits limits;
+ if (!GetLimits(cx, obj, LimitsKind::Table, &limits) ||
+ !CheckLimits(cx, MaxTableLimitField, LimitsKind::Table, &limits)) {
+ return false;
+ }
+
+  // Converting limits for a table only supports the i32 index type.
+ MOZ_ASSERT(limits.indexType == IndexType::I32);
+
+ if (limits.initial > MaxTableLength) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_TABLE_IMP_LIMIT);
+ return false;
+ }
+
+ RootedObject proto(cx,
+ GetWasmConstructorPrototype(cx, args, JSProto_WasmTable));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // The rest of the runtime expects table limits to be within a 32-bit range.
+ static_assert(MaxTableLimitField <= UINT32_MAX, "invariant");
+ uint32_t initialLength = uint32_t(limits.initial);
+ Maybe<uint32_t> maximumLength;
+ if (limits.maximum) {
+ maximumLength = Some(uint32_t(*limits.maximum));
+ }
+
+ Rooted<WasmTableObject*> table(
+ cx, WasmTableObject::create(cx, initialLength, maximumLength, tableType,
+ proto));
+ if (!table) {
+ return false;
+ }
+
+  // Initialize the table with the given value, or the element type's default.
+ RootedValue initValue(
+ cx, args.length() < 2 ? RefTypeDefautValue(tableType) : args[1]);
+ if (!CheckRefTypeValue(cx, tableType, initValue)) {
+ return false;
+ }
+
+ // Skip initializing the table if the fill value is null, as that is the
+ // default value.
+ if (!initValue.isNull() &&
+ !table->fillRange(cx, 0, initialLength, initValue)) {
+ return false;
+ }
+#ifdef DEBUG
+ // Assert that null is the default value of a new table.
+ if (initValue.isNull()) {
+ table->table().assertRangeNull(0, initialLength);
+ }
+ if (!tableType.isNullable()) {
+ table->table().assertRangeNotNull(0, initialLength);
+ }
+#endif
+
+ args.rval().setObject(*table);
+ return true;
+}
+
+static bool IsTable(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmTableObject>();
+}
+
+/* static */
+bool WasmTableObject::lengthGetterImpl(JSContext* cx, const CallArgs& args) {
+ args.rval().setNumber(
+ args.thisv().toObject().as<WasmTableObject>().table().length());
+ return true;
+}
+
+/* static */
+bool WasmTableObject::lengthGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, lengthGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmTableObject::properties[] = {
+ JS_PSG("length", WasmTableObject::lengthGetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Table", JSPROP_READONLY),
+ JS_PS_END};
+
+static bool ToTableIndex(JSContext* cx, HandleValue v, const Table& table,
+ const char* noun, uint32_t* index) {
+ if (!EnforceRangeU32(cx, v, "Table", noun, index)) {
+ return false;
+ }
+
+ if (*index >= table.length()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_RANGE, "Table", noun);
+ return false;
+ }
+
+ return true;
+}
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+/* static */
+bool WasmTableObject::typeImpl(JSContext* cx, const CallArgs& args) {
+ Table& table = args.thisv().toObject().as<WasmTableObject>().table();
+ RootedObject typeObj(cx, TableTypeToObject(cx, table.elemType(),
+ table.length(), table.maximum()));
+ if (!typeObj) {
+ return false;
+ }
+ args.rval().setObject(*typeObj);
+ return true;
+}
+
+/* static */
+bool WasmTableObject::type(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, typeImpl>(cx, args);
+}
+#endif
+
+/* static */
+bool WasmTableObject::getImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmTableObject*> tableObj(
+ cx, &args.thisv().toObject().as<WasmTableObject>());
+ const Table& table = tableObj->table();
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table.get", 1)) {
+ return false;
+ }
+
+ uint32_t index;
+ if (!ToTableIndex(cx, args.get(0), table, "get index", &index)) {
+ return false;
+ }
+
+ return table.getValue(cx, index, args.rval());
+}
+
+/* static */
+bool WasmTableObject::get(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, getImpl>(cx, args);
+}
+
+/* static */
+bool WasmTableObject::setImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmTableObject*> tableObj(
+ cx, &args.thisv().toObject().as<WasmTableObject>());
+ Table& table = tableObj->table();
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table.set", 1)) {
+ return false;
+ }
+
+ uint32_t index;
+ if (!ToTableIndex(cx, args.get(0), table, "set index", &index)) {
+ return false;
+ }
+
+ RootedValue fillValue(
+ cx, args.length() < 2 ? RefTypeDefautValue(table.elemType()) : args[1]);
+ if (!tableObj->fillRange(cx, index, 1, fillValue)) {
+ return false;
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
+
+/* static */
+bool WasmTableObject::set(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, setImpl>(cx, args);
+}
+
+/* static */
+bool WasmTableObject::growImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmTableObject*> tableObj(
+ cx, &args.thisv().toObject().as<WasmTableObject>());
+ Table& table = tableObj->table();
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table.grow", 1)) {
+ return false;
+ }
+
+ uint32_t delta;
+ if (!EnforceRangeU32(cx, args.get(0), "Table", "grow delta", &delta)) {
+ return false;
+ }
+
+ RootedValue fillValue(
+ cx, args.length() < 2 ? RefTypeDefautValue(table.elemType()) : args[1]);
+ if (!CheckRefTypeValue(cx, table.elemType(), fillValue)) {
+ return false;
+ }
+
+ uint32_t oldLength = table.grow(delta);
+
+ if (oldLength == uint32_t(-1)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GROW,
+ "table");
+ return false;
+ }
+
+ // Skip filling the grown range of the table if the fill value is null, as
+ // that is the default value.
+ if (!fillValue.isNull() &&
+ !tableObj->fillRange(cx, oldLength, delta, fillValue)) {
+ return false;
+ }
+#ifdef DEBUG
+ // Assert that null is the default value of the grown range.
+ if (fillValue.isNull()) {
+ table.assertRangeNull(oldLength, delta);
+ }
+ if (!table.elemType().isNullable()) {
+ table.assertRangeNotNull(oldLength, delta);
+ }
+#endif
+
+ args.rval().setInt32(int32_t(oldLength));
+ return true;
+}
+
+/* static */
+bool WasmTableObject::grow(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, growImpl>(cx, args);
+}
+
+const JSFunctionSpec WasmTableObject::methods[] = {
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ JS_FN("type", WasmTableObject::type, 0, JSPROP_ENUMERATE),
+#endif
+ JS_FN("get", WasmTableObject::get, 1, JSPROP_ENUMERATE),
+ JS_FN("set", WasmTableObject::set, 2, JSPROP_ENUMERATE),
+ JS_FN("grow", WasmTableObject::grow, 1, JSPROP_ENUMERATE), JS_FS_END};
+
+const JSFunctionSpec WasmTableObject::static_methods[] = {JS_FS_END};
+
+Table& WasmTableObject::table() const {
+ return *(Table*)getReservedSlot(TABLE_SLOT).toPrivate();
+}
+
+bool WasmTableObject::fillRange(JSContext* cx, uint32_t index, uint32_t length,
+ HandleValue value) const {
+ Table& tab = table();
+
+  // All consumers are required to either bounds check or be statically in
+  // bounds.
+ MOZ_ASSERT(uint64_t(index) + uint64_t(length) <= tab.length());
+
+ RootedFunction fun(cx);
+ RootedAnyRef any(cx, AnyRef::null());
+ if (!CheckRefType(cx, tab.elemType(), value, &fun, &any)) {
+ return false;
+ }
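+  // Write the checked value using the representation-appropriate primitive:
+  // tables hold either function references or anyref values.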
+ switch (tab.repr()) {
+ case TableRepr::Func:
+ MOZ_RELEASE_ASSERT(!tab.isAsmJS());
+ tab.fillFuncRef(index, length, FuncRef::fromJSFunction(fun), cx);
+ break;
+ case TableRepr::Ref:
+ tab.fillAnyRef(index, length, any);
+ break;
+ }
+ return true;
+}
+
+// ============================================================================
+// WebAssembly.Global class and methods
+
+const JSClassOps WasmGlobalObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmGlobalObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ WasmGlobalObject::trace, // trace
+};
+
+const JSClass WasmGlobalObject::class_ = {
+ "WebAssembly.Global",
+ JSCLASS_HAS_RESERVED_SLOTS(WasmGlobalObject::RESERVED_SLOTS) |
+ JSCLASS_BACKGROUND_FINALIZE,
+ &WasmGlobalObject::classOps_, &WasmGlobalObject::classSpec_};
+
+const JSClass& WasmGlobalObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmGlobalName[] = "Global";
+
+const ClassSpec WasmGlobalObject::classSpec_ = {
+ CreateWasmConstructor<WasmGlobalObject, WasmGlobalName>,
+ GenericCreatePrototype<WasmGlobalObject>,
+ WasmGlobalObject::static_methods,
+ nullptr,
+ WasmGlobalObject::methods,
+ WasmGlobalObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+/* static */
+void WasmGlobalObject::trace(JSTracer* trc, JSObject* obj) {
+ WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+ if (global->isNewborn()) {
+ // This can happen while we're allocating the object, in which case
+    // none of the object's slots have been defined yet. In particular,
+ // there's nothing to trace yet.
+ return;
+ }
+ global->val().get().trace(trc);
+}
+
+/* static */
+void WasmGlobalObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+ if (!global->isNewborn()) {
+ gcx->delete_(obj, &global->val(), MemoryUse::WasmGlobalCell);
+ }
+}
+
+/* static */
+WasmGlobalObject* WasmGlobalObject::create(JSContext* cx, HandleVal value,
+ bool isMutable, HandleObject proto) {
+ Rooted<WasmGlobalObject*> obj(
+ cx, NewObjectWithGivenProto<WasmGlobalObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isNewborn());
+ MOZ_ASSERT(obj->isTenured(), "assumed by global.set post barriers");
+
+ GCPtrVal* val = js_new<GCPtrVal>(Val());
+ if (!val) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ obj->initReservedSlot(MUTABLE_SLOT, JS::BooleanValue(isMutable));
+ InitReservedSlot(obj, VAL_SLOT, val, MemoryUse::WasmGlobalCell);
+
+ // It's simpler to initialize the cell after the object has been created,
+ // to avoid needing to root the cell before the object creation.
+ obj->val() = value.get();
+
+ MOZ_ASSERT(!obj->isNewborn());
+
+ return obj;
+}
+
+/* static */
+bool WasmGlobalObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Global")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Global", 1)) {
+ return false;
+ }
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "global");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+
+ // Extract properties in lexicographic order per spec.
+
+ RootedValue mutableVal(cx);
+ if (!JS_GetProperty(cx, obj, "mutable", &mutableVal)) {
+ return false;
+ }
+
+ RootedValue typeVal(cx);
+ if (!JS_GetProperty(cx, obj, "value", &typeVal)) {
+ return false;
+ }
+
+ ValType globalType;
+ if (!ToValType(cx, typeVal, &globalType)) {
+ return false;
+ }
+
+ if (!globalType.isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+
+ bool isMutable = ToBoolean(mutableVal);
+
+ // Extract the initial value, or provide a suitable default.
+ RootedVal globalVal(cx, globalType);
+
+ // Override with non-undefined value, if provided.
+ RootedValue valueVal(cx);
+ if (globalType.isRefType()) {
+ valueVal.set(args.length() < 2 ? RefTypeDefautValue(globalType.refType())
+ : args[1]);
+ if (!Val::fromJSValue(cx, globalType, valueVal, &globalVal)) {
+ return false;
+ }
+ } else {
+ valueVal.set(args.get(1));
+ if (!valueVal.isUndefined() &&
+ !Val::fromJSValue(cx, globalType, valueVal, &globalVal)) {
+ return false;
+ }
+ }
+
+ RootedObject proto(cx,
+ GetWasmConstructorPrototype(cx, args, JSProto_WasmGlobal));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ WasmGlobalObject* global =
+ WasmGlobalObject::create(cx, globalVal, isMutable, proto);
+ if (!global) {
+ return false;
+ }
+
+ args.rval().setObject(*global);
+ return true;
+}
+
+static bool IsGlobal(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmGlobalObject>();
+}
+
+/* static */
+bool WasmGlobalObject::valueGetterImpl(JSContext* cx, const CallArgs& args) {
+ const WasmGlobalObject& globalObj =
+ args.thisv().toObject().as<WasmGlobalObject>();
+ if (!globalObj.type().isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+ return globalObj.val().get().toJSValue(cx, args.rval());
+}
+
+/* static */
+bool WasmGlobalObject::valueGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsGlobal, valueGetterImpl>(cx, args);
+}
+
+/* static */
+bool WasmGlobalObject::valueSetterImpl(JSContext* cx, const CallArgs& args) {
+ if (!args.requireAtLeast(cx, "WebAssembly.Global setter", 1)) {
+ return false;
+ }
+
+ Rooted<WasmGlobalObject*> global(
+ cx, &args.thisv().toObject().as<WasmGlobalObject>());
+ if (!global->isMutable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_GLOBAL_IMMUTABLE);
+ return false;
+ }
+
+ RootedVal val(cx);
+ if (!Val::fromJSValue(cx, global->type(), args.get(0), &val)) {
+ return false;
+ }
+ global->val() = val.get();
+
+ args.rval().setUndefined();
+ return true;
+}
+
+/* static */
+bool WasmGlobalObject::valueSetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsGlobal, valueSetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmGlobalObject::properties[] = {
+ JS_PSGS("value", WasmGlobalObject::valueGetter,
+ WasmGlobalObject::valueSetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Global", JSPROP_READONLY),
+ JS_PS_END};
+
+const JSFunctionSpec WasmGlobalObject::methods[] = {
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ JS_FN("type", WasmGlobalObject::type, 0, JSPROP_ENUMERATE),
+#endif
+ JS_FN(js_valueOf_str, WasmGlobalObject::valueGetter, 0, JSPROP_ENUMERATE),
+ JS_FS_END};
+
+const JSFunctionSpec WasmGlobalObject::static_methods[] = {JS_FS_END};
+
+bool WasmGlobalObject::isMutable() const {
+ return getReservedSlot(MUTABLE_SLOT).toBoolean();
+}
+
+ValType WasmGlobalObject::type() const { return val().get().type(); }
+
+GCPtrVal& WasmGlobalObject::val() const {
+ return *reinterpret_cast<GCPtrVal*>(getReservedSlot(VAL_SLOT).toPrivate());
+}
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+/* static */
+bool WasmGlobalObject::typeImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmGlobalObject*> global(
+ cx, &args.thisv().toObject().as<WasmGlobalObject>());
+ RootedObject typeObj(
+ cx, GlobalTypeToObject(cx, global->type(), global->isMutable()));
+ if (!typeObj) {
+ return false;
+ }
+ args.rval().setObject(*typeObj);
+ return true;
+}
+
+/* static */
+bool WasmGlobalObject::type(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsGlobal, typeImpl>(cx, args);
+}
+#endif
+
+// ============================================================================
+// WebAssembly.Tag class and methods
+
+const JSClassOps WasmTagObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmTagObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass WasmTagObject::class_ = {
+ "WebAssembly.Tag",
+ JSCLASS_HAS_RESERVED_SLOTS(WasmTagObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmTagObject::classOps_, &WasmTagObject::classSpec_};
+
+const JSClass& WasmTagObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmTagName[] = "Tag";
+
+const ClassSpec WasmTagObject::classSpec_ = {
+ CreateWasmConstructor<WasmTagObject, WasmTagName>,
+ GenericCreatePrototype<WasmTagObject>,
+ WasmTagObject::static_methods,
+ nullptr,
+ WasmTagObject::methods,
+ WasmTagObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+/* static */
+void WasmTagObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ WasmTagObject& tagObj = obj->as<WasmTagObject>();
+ tagObj.tagType()->Release();
+}
+
+static bool IsTag(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmTagObject>();
+}
+
+bool WasmTagObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Tag")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Tag", 1)) {
+ return false;
+ }
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "tag");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+ RootedValue paramsVal(cx);
+ if (!JS_GetProperty(cx, obj, "parameters", &paramsVal)) {
+ return false;
+ }
+
+ ValTypeVector params;
+ if (!ParseValTypes(cx, paramsVal, params)) {
+ return false;
+ }
+ wasm::MutableTagType tagType = js_new<wasm::TagType>();
+ if (!tagType || !tagType->initialize(std::move(params))) {
+ return false;
+ }
+
+ RootedObject proto(cx,
+ GetWasmConstructorPrototype(cx, args, JSProto_WasmTag));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ Rooted<WasmTagObject*> tagObj(cx, WasmTagObject::create(cx, tagType, proto));
+ if (!tagObj) {
+ return false;
+ }
+
+ args.rval().setObject(*tagObj);
+ return true;
+}
+
+/* static */
+WasmTagObject* WasmTagObject::create(JSContext* cx,
+ const wasm::SharedTagType& tagType,
+ HandleObject proto) {
+ Rooted<WasmTagObject*> obj(cx,
+ NewObjectWithGivenProto<WasmTagObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ tagType.get()->AddRef();
+ obj->initReservedSlot(TYPE_SLOT, PrivateValue((void*)tagType.get()));
+
+ return obj;
+}
+
+const JSPropertySpec WasmTagObject::properties[] = {
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Tag", JSPROP_READONLY),
+ JS_PS_END};
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+/* static */
+bool WasmTagObject::typeImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmTagObject*> tag(cx, &args.thisv().toObject().as<WasmTagObject>());
+ RootedObject typeObj(cx, TagTypeToObject(cx, tag->valueTypes()));
+ if (!typeObj) {
+ return false;
+ }
+ args.rval().setObject(*typeObj);
+ return true;
+}
+
+/* static */
+bool WasmTagObject::type(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTag, typeImpl>(cx, args);
+}
+#endif
+
+const JSFunctionSpec WasmTagObject::methods[] = {
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ JS_FN("type", WasmTagObject::type, 0, JSPROP_ENUMERATE),
+#endif
+ JS_FS_END};
+
+const JSFunctionSpec WasmTagObject::static_methods[] = {JS_FS_END};
+
+const TagType* WasmTagObject::tagType() const {
+ return (const TagType*)getFixedSlot(TYPE_SLOT).toPrivate();
+}
+
+const wasm::ValTypeVector& WasmTagObject::valueTypes() const {
+ return tagType()->argTypes_;
+}
+
+wasm::ResultType WasmTagObject::resultType() const {
+ return wasm::ResultType::Vector(valueTypes());
+}
+
+// ============================================================================
+// WebAssembly.Exception class and methods
+
+const JSClassOps WasmExceptionObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmExceptionObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ WasmExceptionObject::trace, // trace
+};
+
+const JSClass WasmExceptionObject::class_ = {
+ "WebAssembly.Exception",
+ JSCLASS_HAS_RESERVED_SLOTS(WasmExceptionObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmExceptionObject::classOps_, &WasmExceptionObject::classSpec_};
+
+const JSClass& WasmExceptionObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmExceptionName[] = "Exception";
+
+const ClassSpec WasmExceptionObject::classSpec_ = {
+ CreateWasmConstructor<WasmExceptionObject, WasmExceptionName>,
+ GenericCreatePrototype<WasmExceptionObject>,
+ WasmExceptionObject::static_methods,
+ nullptr,
+ WasmExceptionObject::methods,
+ WasmExceptionObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+/* static */
+void WasmExceptionObject::finalize(JS::GCContext* gcx, JSObject* obj) {
+ WasmExceptionObject& exnObj = obj->as<WasmExceptionObject>();
+ if (exnObj.isNewborn()) {
+ return;
+ }
+ gcx->free_(obj, exnObj.typedMem(), exnObj.tagType()->size_,
+ MemoryUse::WasmExceptionData);
+ exnObj.tagType()->Release();
+}
+
+/* static */
+void WasmExceptionObject::trace(JSTracer* trc, JSObject* obj) {
+ WasmExceptionObject& exnObj = obj->as<WasmExceptionObject>();
+ if (exnObj.isNewborn()) {
+ return;
+ }
+
+ wasm::SharedTagType tag = exnObj.tagType();
+ const wasm::ValTypeVector& params = tag->argTypes_;
+ const wasm::TagOffsetVector& offsets = tag->argOffsets_;
+ uint8_t* typedMem = exnObj.typedMem();
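+  // Only reference-typed payload values hold GC pointers that need tracing;
+  // numeric payload values are skipped.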
+ for (size_t i = 0; i < params.length(); i++) {
+ ValType paramType = params[i];
+ if (paramType.isRefRepr()) {
+ GCPtr<JSObject*>* objectPtr =
+ reinterpret_cast<GCPtr<JSObject*>*>(typedMem + offsets[i]);
+ TraceNullableEdge(trc, objectPtr, "reference-obj");
+ }
+ }
+}
+
+static bool IsException(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmExceptionObject>();
+}
+
+struct ExceptionOptions {
+ bool traceStack;
+
+ ExceptionOptions() : traceStack(false) {}
+
+ [[nodiscard]] bool init(JSContext* cx, HandleValue val);
+};
+
+bool ExceptionOptions::init(JSContext* cx, HandleValue val) {
+ if (val.isNullOrUndefined()) {
+ return true;
+ }
+
+ if (!val.isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_OPTIONS);
+ return false;
+ }
+ RootedObject obj(cx, &val.toObject());
+
+ // Get `traceStack` and coerce to boolean
+ RootedValue traceStackVal(cx);
+ if (!JS_GetProperty(cx, obj, "traceStack", &traceStackVal)) {
+ return false;
+ }
+ traceStack = ToBoolean(traceStackVal);
+
+ return true;
+}
+
+bool WasmExceptionObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Exception")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Exception", 2)) {
+ return false;
+ }
+
+ if (!IsTag(args[0])) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_ARG);
+ return false;
+ }
+ Rooted<WasmTagObject*> exnTag(cx, &args[0].toObject().as<WasmTagObject>());
+
+ if (!args.get(1).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_PAYLOAD);
+ return false;
+ }
+
+ JS::ForOfIterator iterator(cx);
+ if (!iterator.init(args.get(1), JS::ForOfIterator::ThrowOnNonIterable)) {
+ return false;
+ }
+
+ // Get the optional 'options' parameter
+ ExceptionOptions options;
+ if (!options.init(cx, args.get(2))) {
+ return false;
+ }
+
+  // Capture the stack if requested
+ RootedObject stack(cx);
+ if (options.traceStack && !CaptureStack(cx, &stack)) {
+ return false;
+ }
+
+ RootedObject proto(
+ cx, GetWasmConstructorPrototype(cx, args, JSProto_WasmException));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ Rooted<WasmExceptionObject*> exnObj(
+ cx, WasmExceptionObject::create(cx, exnTag, stack, proto));
+ if (!exnObj) {
+ return false;
+ }
+
+ wasm::SharedTagType tagType = exnObj->tagType();
+ const wasm::ValTypeVector& params = tagType->argTypes_;
+ const wasm::TagOffsetVector& offsets = tagType->argOffsets_;
+
+ RootedValue nextArg(cx);
+ for (size_t i = 0; i < params.length(); i++) {
+ bool done;
+ if (!iterator.next(&nextArg, &done)) {
+ return false;
+ }
+ if (done) {
+ UniqueChars expected(JS_smprintf("%zu", params.length()));
+ UniqueChars got(JS_smprintf("%zu", i));
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_PAYLOAD_LEN, expected.get(),
+ got.get());
+ return false;
+ }
+
+ if (!exnObj->initValue(cx, offsets[i], params[i], nextArg)) {
+ return false;
+ }
+ }
+
+ args.rval().setObject(*exnObj);
+ return true;
+}
+
+/* static */
+WasmExceptionObject* WasmExceptionObject::create(JSContext* cx,
+ Handle<WasmTagObject*> tag,
+ HandleObject stack,
+ HandleObject proto) {
+ Rooted<WasmExceptionObject*> obj(
+ cx, NewObjectWithGivenProto<WasmExceptionObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+ const TagType* tagType = tag->tagType();
+
+ // Allocate the data buffer before initializing the object so that an OOM
+ // does not result in a partially constructed object.
+ uint8_t* data = (uint8_t*)js_calloc(tagType->size_);
+ if (!data) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isNewborn());
+ obj->initFixedSlot(TAG_SLOT, ObjectValue(*tag));
+ tagType->AddRef();
+ obj->initFixedSlot(TYPE_SLOT, PrivateValue((void*)tagType));
+ InitReservedSlot(obj, DATA_SLOT, data, tagType->size_,
+ MemoryUse::WasmExceptionData);
+ obj->initFixedSlot(STACK_SLOT, ObjectOrNullValue(stack));
+
+ MOZ_ASSERT(!obj->isNewborn());
+
+ return obj;
+}
+
+bool WasmExceptionObject::isNewborn() const {
+ MOZ_ASSERT(is<WasmExceptionObject>());
+ return getReservedSlot(DATA_SLOT).isUndefined();
+}
+
+const JSPropertySpec WasmExceptionObject::properties[] = {
+ JS_PSG("stack", WasmExceptionObject::getStack, 0),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Exception", JSPROP_READONLY),
+ JS_PS_END};
+
+/* static */
+bool WasmExceptionObject::isImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmExceptionObject*> exnObj(
+ cx, &args.thisv().toObject().as<WasmExceptionObject>());
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Exception.is", 1)) {
+ return false;
+ }
+
+ if (!IsTag(args[0])) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_ARG);
+ return false;
+ }
+
+ Rooted<WasmTagObject*> exnTag(cx,
+ &args.get(0).toObject().as<WasmTagObject>());
+ args.rval().setBoolean(exnTag.get() == &exnObj->tag());
+
+ return true;
+}
+
+/* static */
+bool WasmExceptionObject::isMethod(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsException, isImpl>(cx, args);
+}
+
+/* static */
+bool WasmExceptionObject::getArgImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmExceptionObject*> exnObj(
+ cx, &args.thisv().toObject().as<WasmExceptionObject>());
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Exception.getArg", 2)) {
+ return false;
+ }
+
+ if (!IsTag(args[0])) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_ARG);
+ return false;
+ }
+
+ Rooted<WasmTagObject*> exnTag(cx,
+ &args.get(0).toObject().as<WasmTagObject>());
+ if (exnTag.get() != &exnObj->tag()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_TAG);
+ return false;
+ }
+
+ uint32_t index;
+ if (!EnforceRangeU32(cx, args.get(1), "Exception", "getArg index", &index)) {
+ return false;
+ }
+
+ const wasm::ValTypeVector& params = exnTag->valueTypes();
+ if (index >= params.length()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_RANGE,
+ "Exception", "getArg index");
+ return false;
+ }
+
+ uint32_t offset = exnTag->tagType()->argOffsets_[index];
+ RootedValue result(cx);
+ if (!exnObj->loadValue(cx, offset, params[index], &result)) {
+ return false;
+ }
+ args.rval().set(result);
+ return true;
+}
+
+/* static */
+bool WasmExceptionObject::getArg(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsException, getArgImpl>(cx, args);
+}
+
+/* static */
+bool WasmExceptionObject::getStack_impl(JSContext* cx, const CallArgs& args) {
+ Rooted<WasmExceptionObject*> exnObj(
+ cx, &args.thisv().toObject().as<WasmExceptionObject>());
+ RootedObject savedFrameObj(cx, exnObj->stack());
+ if (!savedFrameObj) {
+ args.rval().setUndefined();
+ return true;
+ }
+ JSPrincipals* principals = exnObj->realm()->principals();
+ RootedString stackString(cx);
+ if (!BuildStackString(cx, principals, savedFrameObj, &stackString)) {
+ return false;
+ }
+ args.rval().setString(stackString);
+ return true;
+}
+
+/* static */
+bool WasmExceptionObject::getStack(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsException, getStack_impl>(cx, args);
+}
+
+JSObject* WasmExceptionObject::stack() const {
+ return getReservedSlot(STACK_SLOT).toObjectOrNull();
+}
+
+uint8_t* WasmExceptionObject::typedMem() const {
+ return (uint8_t*)getReservedSlot(DATA_SLOT).toPrivate();
+}
+
+bool WasmExceptionObject::loadValue(JSContext* cx, size_t offset,
+ wasm::ValType type, MutableHandleValue vp) {
+ if (!type.isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+ return ToJSValue(cx, typedMem() + offset, type, vp);
+}
+
+bool WasmExceptionObject::initValue(JSContext* cx, size_t offset,
+ wasm::ValType type, HandleValue value) {
+ if (!type.isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+
+ // Avoid rooting hazard of `this` being live across `fromJSValue`
+ // which may GC.
+ uint8_t* dest = typedMem() + offset;
+ RootedVal val(cx);
+ if (!Val::fromJSValue(cx, type, value, &val)) {
+ return false;
+ }
+ val.get().writeToHeapLocation(dest);
+ return true;
+}
+
+const JSFunctionSpec WasmExceptionObject::methods[] = {
+ JS_FN("is", WasmExceptionObject::isMethod, 1, JSPROP_ENUMERATE),
+ JS_FN("getArg", WasmExceptionObject::getArg, 2, JSPROP_ENUMERATE),
+ JS_FS_END};
+
+const JSFunctionSpec WasmExceptionObject::static_methods[] = {JS_FS_END};
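+
+// Illustrative JS-level usage of the methods above (a sketch; the tag
+// signature and payload value are hypothetical):
+//
+//   const tag = new WebAssembly.Tag({ parameters: ["i32"] });
+//   const exn = new WebAssembly.Exception(tag, [42]);
+//   exn.is(tag);        // true
+//   exn.getArg(tag, 0); // 42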
+
+const TagType* WasmExceptionObject::tagType() const {
+ return (const TagType*)getReservedSlot(TYPE_SLOT).toPrivate();
+}
+
+WasmTagObject& WasmExceptionObject::tag() const {
+ return getReservedSlot(TAG_SLOT).toObject().as<WasmTagObject>();
+}
+
+// ============================================================================
+// WebAssembly.Function and methods
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+static JSObject* CreateWasmFunctionPrototype(JSContext* cx, JSProtoKey key) {
+ // WasmFunction's prototype should inherit from JSFunction's prototype.
+ RootedObject jsProto(cx, &cx->global()->getFunctionPrototype());
+ return GlobalObject::createBlankPrototypeInheriting(cx, &PlainObject::class_,
+ jsProto);
+}
+
+[[nodiscard]] static bool IsWasmFunction(HandleValue v) {
+ if (!v.isObject()) {
+ return false;
+ }
+ if (!v.toObject().is<JSFunction>()) {
+ return false;
+ }
+ return v.toObject().as<JSFunction>().isWasm();
+}
+
+bool WasmFunctionTypeImpl(JSContext* cx, const CallArgs& args) {
+ RootedFunction function(cx, &args.thisv().toObject().as<JSFunction>());
+ Rooted<WasmInstanceObject*> instanceObj(
+ cx, ExportedFunctionToInstanceObject(function));
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(function);
+ Instance& instance = instanceObj->instance();
+ const FuncExport& fe =
+ instance.metadata(instance.code().bestTier()).lookupFuncExport(funcIndex);
+ const FuncType& funcType = instance.metadata().getFuncExportType(fe);
+ RootedObject typeObj(cx, FuncTypeToObject(cx, funcType));
+ if (!typeObj) {
+ return false;
+ }
+ args.rval().setObject(*typeObj);
+ return true;
+}
+
+bool WasmFunctionType(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsWasmFunction, WasmFunctionTypeImpl>(cx, args);
+}
+
+JSFunction* WasmFunctionCreate(JSContext* cx, HandleFunction func,
+ wasm::ValTypeVector&& params,
+ wasm::ValTypeVector&& results,
+ HandleObject proto) {
+ MOZ_RELEASE_ASSERT(!IsWasmExportedFunction(func));
+
+ // We want to import the function to a wasm module and then export it again so
+ // that it behaves exactly like a normal wasm function and can be used like
+ // one in wasm tables. We synthesize such a module below, instantiate it, and
+ // then return the exported function as the result.
+ FeatureOptions options;
+ ScriptedCaller scriptedCaller;
+ SharedCompileArgs compileArgs =
+ CompileArgs::buildAndReport(cx, std::move(scriptedCaller), options);
+ if (!compileArgs) {
+ return nullptr;
+ }
+
+ ModuleEnvironment moduleEnv(compileArgs->features);
+ CompilerEnvironment compilerEnv(CompileMode::Once, Tier::Optimized,
+ DebugEnabled::False);
+ compilerEnv.computeParameters();
+
+ if (!moduleEnv.init()) {
+ return nullptr;
+ }
+
+ FuncType funcType = FuncType(std::move(params), std::move(results));
+ if (!moduleEnv.types->addType(std::move(funcType))) {
+ return nullptr;
+ }
+
+ // Add an (import (func ...))
+ FuncDesc funcDesc = FuncDesc(&(*moduleEnv.types)[0].funcType(), 0);
+ if (!moduleEnv.funcs.append(funcDesc)) {
+ return nullptr;
+ }
+ moduleEnv.numFuncImports = 1;
+
+ // Add an (export (func 0))
+ moduleEnv.declareFuncExported(0, /* eager */ true, /* canRefFunc */ true);
+
+ // We will be looking up and using the function in the future by index so the
+ // name doesn't matter.
+ CacheableName fieldName;
+ if (!moduleEnv.exports.emplaceBack(std::move(fieldName), 0,
+ DefinitionKind::Function)) {
+ return nullptr;
+ }
+
+ ModuleGenerator mg(*compileArgs, &moduleEnv, &compilerEnv, nullptr, nullptr,
+ nullptr);
+ if (!mg.init(nullptr)) {
+ return nullptr;
+ }
+ // We're not compiling any function definitions.
+ if (!mg.finishFuncDefs()) {
+ return nullptr;
+ }
+ SharedBytes shareableBytes = js_new<ShareableBytes>();
+ if (!shareableBytes) {
+ return nullptr;
+ }
+ SharedModule module = mg.finishModule(*shareableBytes);
+ if (!module) {
+ return nullptr;
+ }
+
+ // Instantiate the module.
+ Rooted<ImportValues> imports(cx);
+ if (!imports.get().funcs.append(func)) {
+ return nullptr;
+ }
+ Rooted<WasmInstanceObject*> instance(cx);
+ if (!module->instantiate(cx, imports.get(), nullptr, &instance)) {
+ MOZ_ASSERT(cx->isThrowingOutOfMemory());
+ return nullptr;
+ }
+
+ // Get the exported function which wraps the JS function to return.
+ RootedFunction wasmFunc(cx);
+ if (!instance->getExportedFunction(cx, instance, 0, &wasmFunc)) {
+ return nullptr;
+ }
+ return wasmFunc;
+}
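+
+// Roughly, the module synthesized above corresponds to the following text
+// format (a sketch; the $imported label and the empty import/export names are
+// illustrative, and the signature is whatever `params`/`results` describe):
+//
+//   (module
+//     (func $imported (import "" "") (param ...) (result ...))
+//     (export "" (func $imported)))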
+
+bool WasmFunctionConstruct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "WebAssembly.Function")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Function", 2)) {
+ return false;
+ }
+
+ if (!args[0].isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "function");
+ return false;
+ }
+ RootedObject typeObj(cx, &args[0].toObject());
+
+ // Extract properties in lexicographic order per spec.
+
+ RootedValue parametersVal(cx);
+ if (!JS_GetProperty(cx, typeObj, "parameters", &parametersVal)) {
+ return false;
+ }
+
+ ValTypeVector params;
+ if (!ParseValTypes(cx, parametersVal, params)) {
+ return false;
+ }
+
+ RootedValue resultsVal(cx);
+ if (!JS_GetProperty(cx, typeObj, "results", &resultsVal)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ if (!ParseValTypes(cx, resultsVal, results)) {
+ return false;
+ }
+
+ // Get the target function
+
+ if (!args[1].isObject() || !args[1].toObject().is<JSFunction>() ||
+ IsWasmFunction(args[1])) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_FUNCTION_VALUE);
+ return false;
+ }
+ RootedFunction func(cx, &args[1].toObject().as<JSFunction>());
+
+ RootedObject proto(
+ cx, GetWasmConstructorPrototype(cx, args, JSProto_WasmFunction));
+ if (!proto) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ RootedFunction wasmFunc(cx, WasmFunctionCreate(cx, func, std::move(params),
+ std::move(results), proto));
+ if (!wasmFunc) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ args.rval().setObject(*wasmFunc);
+
+ return true;
+}
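+
+// Illustrative JS-level usage of the constructor above (type reflection
+// proposal; only built when ENABLE_WASM_TYPE_REFLECTIONS is defined):
+//
+//   const add = new WebAssembly.Function(
+//       { parameters: ["i32", "i32"], results: ["i32"] },
+//       (a, b) => a + b);
+//   add.type();  // { parameters: ["i32", "i32"], results: ["i32"] }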
+
+static constexpr char WasmFunctionName[] = "Function";
+
+static JSObject* CreateWasmFunctionConstructor(JSContext* cx, JSProtoKey key) {
+ RootedObject proto(cx, &cx->global()->getFunctionConstructor());
+
+ Rooted<JSAtom*> className(
+ cx, Atomize(cx, WasmFunctionName, strlen(WasmFunctionName)));
+ if (!className) {
+ return nullptr;
+ }
+ return NewFunctionWithProto(cx, WasmFunctionConstruct, 1,
+ FunctionFlags::NATIVE_CTOR, nullptr, className,
+ proto, gc::AllocKind::FUNCTION, TenuredObject);
+}
+
+const JSFunctionSpec WasmFunctionMethods[] = {
+ JS_FN("type", WasmFunctionType, 0, 0), JS_FS_END};
+
+const ClassSpec WasmFunctionClassSpec = {CreateWasmFunctionConstructor,
+ CreateWasmFunctionPrototype,
+ nullptr,
+ nullptr,
+ WasmFunctionMethods,
+ nullptr,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+const JSClass js::WasmFunctionClass = {
+ "WebAssembly.Function", 0, JS_NULL_CLASS_OPS, &WasmFunctionClassSpec};
+
+#endif
+
+// ============================================================================
+// WebAssembly class and static methods
+
+static bool WebAssembly_toSource(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setString(cx->names().WebAssembly);
+ return true;
+}
+
+static bool RejectWithPendingException(JSContext* cx,
+ Handle<PromiseObject*> promise) {
+ if (!cx->isExceptionPending()) {
+ return false;
+ }
+
+ RootedValue rejectionValue(cx);
+ if (!GetAndClearException(cx, &rejectionValue)) {
+ return false;
+ }
+
+ return PromiseObject::reject(cx, promise, rejectionValue);
+}
+
+static bool Reject(JSContext* cx, const CompileArgs& args,
+ Handle<PromiseObject*> promise, const UniqueChars& error) {
+ if (!error) {
+ ReportOutOfMemory(cx);
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedObject stack(cx, promise->allocationSite());
+ RootedString fileName(cx);
+ if (const char* filename = args.scriptedCaller.filename.get()) {
+ fileName =
+ JS_NewStringCopyUTF8N(cx, JS::UTF8Chars(filename, strlen(filename)));
+ } else {
+ fileName = JS_GetEmptyString(cx);
+ }
+ if (!fileName) {
+ return false;
+ }
+
+ unsigned line = args.scriptedCaller.line;
+
+ // Ideally we'd report a JSMSG_WASM_COMPILE_ERROR here, but there's no easy
+ // way to create an ErrorObject for an arbitrary error code with multiple
+ // replacements.
+ UniqueChars str(JS_smprintf("wasm validation error: %s", error.get()));
+ if (!str) {
+ return false;
+ }
+
+ size_t len = strlen(str.get());
+ RootedString message(cx, NewStringCopyN<CanGC>(cx, str.get(), len));
+ if (!message) {
+ return false;
+ }
+
+ // There's no error |cause| available here.
+ auto cause = JS::NothingHandleValue;
+
+ RootedObject errorObj(
+ cx, ErrorObject::create(cx, JSEXN_WASMCOMPILEERROR, stack, fileName, 0,
+ line, 0, nullptr, message, cause));
+ if (!errorObj) {
+ return false;
+ }
+
+ RootedValue rejectionValue(cx, ObjectValue(*errorObj));
+ return PromiseObject::reject(cx, promise, rejectionValue);
+}
+
+static void LogAsync(JSContext* cx, const char* funcName,
+ const Module& module) {
+ Log(cx, "async %s succeeded%s", funcName,
+ module.loggingDeserialized() ? " (loaded from cache)" : "");
+}
+
+enum class Ret { Pair, Instance };
+
+class AsyncInstantiateTask : public OffThreadPromiseTask {
+ SharedModule module_;
+ PersistentRooted<ImportValues> imports_;
+ Ret ret_;
+
+ public:
+ AsyncInstantiateTask(JSContext* cx, const Module& module, Ret ret,
+ Handle<PromiseObject*> promise)
+ : OffThreadPromiseTask(cx, promise),
+ module_(&module),
+ imports_(cx),
+ ret_(ret) {}
+
+ ImportValues& imports() { return imports_.get(); }
+
+ bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
+ RootedObject instanceProto(
+ cx, &cx->global()->getPrototype(JSProto_WasmInstance));
+
+ Rooted<WasmInstanceObject*> instanceObj(cx);
+ if (!module_->instantiate(cx, imports_.get(), instanceProto,
+ &instanceObj)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedValue resolutionValue(cx);
+ if (ret_ == Ret::Instance) {
+ resolutionValue = ObjectValue(*instanceObj);
+ } else {
+ RootedObject resultObj(cx, JS_NewPlainObject(cx));
+ if (!resultObj) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedObject moduleProto(cx,
+ &cx->global()->getPrototype(JSProto_WasmModule));
+ RootedObject moduleObj(
+ cx, WasmModuleObject::create(cx, *module_, moduleProto));
+ if (!moduleObj) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedValue val(cx, ObjectValue(*moduleObj));
+ if (!JS_DefineProperty(cx, resultObj, "module", val, JSPROP_ENUMERATE)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ val = ObjectValue(*instanceObj);
+ if (!JS_DefineProperty(cx, resultObj, "instance", val,
+ JSPROP_ENUMERATE)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ resolutionValue = ObjectValue(*resultObj);
+ }
+
+ if (!PromiseObject::resolve(cx, promise, resolutionValue)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ LogAsync(cx, "instantiate", *module_);
+ return true;
+ }
+};
+
+static bool AsyncInstantiate(JSContext* cx, const Module& module,
+ HandleObject importObj, Ret ret,
+ Handle<PromiseObject*> promise) {
+ auto task = js::MakeUnique<AsyncInstantiateTask>(cx, module, ret, promise);
+ if (!task || !task->init(cx)) {
+ return false;
+ }
+
+ if (!GetImports(cx, module, importObj, &task->imports())) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ task.release()->dispatchResolveAndDestroy();
+ return true;
+}
+
+static bool ResolveCompile(JSContext* cx, const Module& module,
+ Handle<PromiseObject*> promise) {
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmModule));
+ RootedObject moduleObj(cx, WasmModuleObject::create(cx, module, proto));
+ if (!moduleObj) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedValue resolutionValue(cx, ObjectValue(*moduleObj));
+ if (!PromiseObject::resolve(cx, promise, resolutionValue)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ LogAsync(cx, "compile", module);
+ return true;
+}
+
+struct CompileBufferTask : PromiseHelperTask {
+ MutableBytes bytecode;
+ SharedCompileArgs compileArgs;
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ SharedModule module;
+ bool instantiate;
+ PersistentRootedObject importObj;
+
+ CompileBufferTask(JSContext* cx, Handle<PromiseObject*> promise,
+ HandleObject importObj)
+ : PromiseHelperTask(cx, promise),
+ instantiate(true),
+ importObj(cx, importObj) {}
+
+ CompileBufferTask(JSContext* cx, Handle<PromiseObject*> promise)
+ : PromiseHelperTask(cx, promise), instantiate(false) {}
+
+ bool init(JSContext* cx, const char* introducer) {
+ compileArgs = InitCompileArgs(cx, introducer);
+ if (!compileArgs) {
+ return false;
+ }
+ return PromiseHelperTask::init(cx);
+ }
+
+ void execute() override {
+ module = CompileBuffer(*compileArgs, *bytecode, &error, &warnings, nullptr);
+ }
+
+ bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
+ if (!ReportCompileWarnings(cx, warnings)) {
+ return false;
+ }
+ if (!module) {
+ return Reject(cx, *compileArgs, promise, error);
+ }
+ if (instantiate) {
+ return AsyncInstantiate(cx, *module, importObj, Ret::Pair, promise);
+ }
+ return ResolveCompile(cx, *module, promise);
+ }
+};
+
+static bool RejectWithPendingException(JSContext* cx,
+ Handle<PromiseObject*> promise,
+ CallArgs& callArgs) {
+ if (!RejectWithPendingException(cx, promise)) {
+ return false;
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool EnsurePromiseSupport(JSContext* cx) {
+ if (!cx->runtime()->offThreadPromiseState.ref().initialized()) {
+ JS_ReportErrorASCII(
+ cx, "WebAssembly Promise APIs not supported in this runtime.");
+ return false;
+ }
+ return true;
+}
+
+static bool GetBufferSource(JSContext* cx, CallArgs callArgs, const char* name,
+ MutableBytes* bytecode) {
+ if (!callArgs.requireAtLeast(cx, name, 1)) {
+ return false;
+ }
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_BUF_ARG);
+ return false;
+ }
+
+ return GetBufferSource(cx, &callArgs[0].toObject(), JSMSG_WASM_BAD_BUF_ARG,
+ bytecode);
+}
+
+static bool WebAssembly_compile(JSContext* cx, unsigned argc, Value* vp) {
+ if (!EnsurePromiseSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async compile() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ if (!cx->isRuntimeCodeGenEnabled(JS::RuntimeCode::WASM, nullptr)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CSP_BLOCKED_WASM, "WebAssembly.compile");
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ auto task = cx->make_unique<CompileBufferTask>(cx, promise);
+ if (!task || !task->init(cx, "WebAssembly.compile")) {
+ return false;
+ }
+
+ if (!GetBufferSource(cx, callArgs, "WebAssembly.compile", &task->bytecode)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ if (!StartOffThreadPromiseHelperTask(cx, std::move(task))) {
+ return false;
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool GetInstantiateArgs(JSContext* cx, CallArgs callArgs,
+ MutableHandleObject firstArg,
+ MutableHandleObject importObj) {
+ if (!callArgs.requireAtLeast(cx, "WebAssembly.instantiate", 1)) {
+ return false;
+ }
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_BUF_MOD_ARG);
+ return false;
+ }
+
+ firstArg.set(&callArgs[0].toObject());
+
+ return GetImportArg(cx, callArgs, importObj);
+}
+
+static bool WebAssembly_instantiate(JSContext* cx, unsigned argc, Value* vp) {
+ if (!EnsurePromiseSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async instantiate() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ RootedObject firstArg(cx);
+ RootedObject importObj(cx);
+ if (!GetInstantiateArgs(cx, callArgs, &firstArg, &importObj)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ const Module* module;
+ if (IsModuleObject(firstArg, &module)) {
+ if (!AsyncInstantiate(cx, *module, importObj, Ret::Instance, promise)) {
+ return false;
+ }
+ } else {
+ if (!cx->isRuntimeCodeGenEnabled(JS::RuntimeCode::WASM, nullptr)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CSP_BLOCKED_WASM,
+ "WebAssembly.instantiate");
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ auto task = cx->make_unique<CompileBufferTask>(cx, promise, importObj);
+ if (!task || !task->init(cx, "WebAssembly.instantiate")) {
+ return false;
+ }
+
+ if (!GetBufferSource(cx, firstArg, JSMSG_WASM_BAD_BUF_MOD_ARG,
+ &task->bytecode)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ if (!StartOffThreadPromiseHelperTask(cx, std::move(task))) {
+ return false;
+ }
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
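+
+// Illustrative JS-level usage of the two instantiate() paths handled above
+// (`bytes`, `module`, and `imports` are hypothetical):
+//
+//   WebAssembly.instantiate(bytes, imports)        // BufferSource path,
+//       .then(({ module, instance }) => { ... });  // resolves to a pair
+//   WebAssembly.instantiate(module, imports)       // Module path,
+//       .then(instance => { ... });                // resolves to an Instance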
+
+static bool WebAssembly_validate(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ MutableBytes bytecode;
+ if (!GetBufferSource(cx, callArgs, "WebAssembly.validate", &bytecode)) {
+ return false;
+ }
+
+ FeatureOptions options;
+ UniqueChars error;
+ bool validated = Validate(cx, *bytecode, options, &error);
+
+ // If the reason for validation failure was OOM (signalled by null error
+ // message), report out-of-memory so that validate's return is always
+ // correct.
+ if (!validated && !error) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (error) {
+ MOZ_ASSERT(!validated);
+ Log(cx, "validate() failed with: %s", error.get());
+ }
+
+ callArgs.rval().setBoolean(validated);
+ return true;
+}
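+
+// Illustrative JS-level usage (`bytes` is a hypothetical BufferSource holding
+// a wasm binary):
+//
+//   WebAssembly.validate(bytes);  // true or false; validation failures do
+//                                 // not throw, only OOM or a bad argument do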
+
+static bool EnsureStreamSupport(JSContext* cx) {
+ // This should match wasm::StreamingCompilationAvailable().
+
+ if (!EnsurePromiseSupport(cx)) {
+ return false;
+ }
+
+ if (!CanUseExtraThreads()) {
+ JS_ReportErrorASCII(
+ cx, "WebAssembly.compileStreaming not supported with --no-threads");
+ return false;
+ }
+
+ if (!cx->runtime()->consumeStreamCallback) {
+ JS_ReportErrorASCII(cx,
+ "WebAssembly streaming not supported in this runtime");
+ return false;
+ }
+
+ return true;
+}
+
+// This value is chosen and asserted to be disjoint from any host error code.
+static const size_t StreamOOMCode = 0;
+
+static bool RejectWithStreamErrorNumber(JSContext* cx, size_t errorCode,
+ Handle<PromiseObject*> promise) {
+ if (errorCode == StreamOOMCode) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ cx->runtime()->reportStreamErrorCallback(cx, errorCode);
+ return RejectWithPendingException(cx, promise);
+}
+
+class CompileStreamTask : public PromiseHelperTask, public JS::StreamConsumer {
+ // The stream progresses monotonically through these states; the helper
+ // thread wait()s for streamState_ to reach Closed.
+ enum StreamState { Env, Code, Tail, Closed };
+ ExclusiveWaitableData<StreamState> streamState_;
+
+ // Immutable:
+ const bool instantiate_;
+ const PersistentRootedObject importObj_;
+
+ // Immutable after noteResponseURLs() which is called at most once before
+ // first call on stream thread:
+ const MutableCompileArgs compileArgs_;
+
+ // Immutable after Env state:
+ Bytes envBytes_;
+ SectionRange codeSection_;
+
+ // The code section vector is resized once during the Env state and filled
+ // in chunk by chunk during the Code state, updating the end-pointer after
+ // each chunk:
+ Bytes codeBytes_;
+ uint8_t* codeBytesEnd_;
+ ExclusiveBytesPtr exclusiveCodeBytesEnd_;
+
+ // Immutable after Tail state:
+ Bytes tailBytes_;
+ ExclusiveStreamEndData exclusiveStreamEnd_;
+
+ // Written once before Closed state and read in Closed state on main thread:
+ SharedModule module_;
+ Maybe<size_t> streamError_;
+ UniqueChars compileError_;
+ UniqueCharsVector warnings_;
+
+ // Set on stream thread and read racily on helper thread to abort compilation:
+ Atomic<bool> streamFailed_;
+
+ // Called on some thread before consumeChunk(), streamEnd(), or
+ // streamError():
+
+ void noteResponseURLs(const char* url, const char* sourceMapUrl) override {
+ if (url) {
+ compileArgs_->scriptedCaller.filename = DuplicateString(url);
+ compileArgs_->scriptedCaller.filenameIsURL = true;
+ }
+ if (sourceMapUrl) {
+ compileArgs_->sourceMapURL = DuplicateString(sourceMapUrl);
+ }
+ }
+
+ // Called on a stream thread:
+
+ // Until StartOffThreadPromiseHelperTask succeeds, we are responsible for
+ // dispatching ourselves back to the JS thread.
+ //
+ // Warning: After this function returns, 'this' can be deleted at any time, so
+ // the caller must immediately return from the stream callback.
+ void setClosedAndDestroyBeforeHelperThreadStarted() {
+ streamState_.lock().get() = Closed;
+ dispatchResolveAndDestroy();
+ }
+
+ // See setClosedAndDestroyBeforeHelperThreadStarted() comment.
+ bool rejectAndDestroyBeforeHelperThreadStarted(size_t errorNumber) {
+ MOZ_ASSERT(streamState_.lock() == Env);
+ MOZ_ASSERT(!streamError_);
+ streamError_ = Some(errorNumber);
+ setClosedAndDestroyBeforeHelperThreadStarted();
+ return false;
+ }
+
+ // Once StartOffThreadPromiseHelperTask succeeds, the helper thread will
+ // dispatchResolveAndDestroy() after execute() returns, but execute()
+ // wait()s for state to be Closed.
+ //
+ // Warning: After this function returns, 'this' can be deleted at any time, so
+ // the caller must immediately return from the stream callback.
+ void setClosedAndDestroyAfterHelperThreadStarted() {
+ auto streamState = streamState_.lock();
+ MOZ_ASSERT(streamState != Closed);
+ streamState.get() = Closed;
+ streamState.notify_one(/* stream closed */);
+ }
+
+ // See setClosedAndDestroyAfterHelperThreadStarted() comment.
+ bool rejectAndDestroyAfterHelperThreadStarted(size_t errorNumber) {
+ MOZ_ASSERT(!streamError_);
+ streamError_ = Some(errorNumber);
+ streamFailed_ = true;
+ exclusiveCodeBytesEnd_.lock().notify_one();
+ exclusiveStreamEnd_.lock().notify_one();
+ setClosedAndDestroyAfterHelperThreadStarted();
+ return false;
+ }
+
+ bool consumeChunk(const uint8_t* begin, size_t length) override {
+ switch (streamState_.lock().get()) {
+ case Env: {
+ if (!envBytes_.append(begin, length)) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ if (!StartsCodeSection(envBytes_.begin(), envBytes_.end(),
+ &codeSection_)) {
+ return true;
+ }
+
+ uint32_t extraBytes = envBytes_.length() - codeSection_.start;
+ if (extraBytes) {
+ envBytes_.shrinkTo(codeSection_.start);
+ }
+
+ if (codeSection_.size > MaxCodeSectionBytes) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ if (!codeBytes_.resize(codeSection_.size)) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ codeBytesEnd_ = codeBytes_.begin();
+ exclusiveCodeBytesEnd_.lock().get() = codeBytesEnd_;
+
+ if (!StartOffThreadPromiseHelperTask(this)) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ // Set the state to Code iff StartOffThreadPromiseHelperTask()
+ // succeeds so that the state tells us whether we are before or
+ // after the helper thread started.
+ streamState_.lock().get() = Code;
+
+ if (extraBytes) {
+ return consumeChunk(begin + length - extraBytes, extraBytes);
+ }
+
+ return true;
+ }
+ case Code: {
+ size_t copyLength =
+ std::min<size_t>(length, codeBytes_.end() - codeBytesEnd_);
+ memcpy(codeBytesEnd_, begin, copyLength);
+ codeBytesEnd_ += copyLength;
+
+ {
+ auto codeStreamEnd = exclusiveCodeBytesEnd_.lock();
+ codeStreamEnd.get() = codeBytesEnd_;
+ codeStreamEnd.notify_one();
+ }
+
+ if (codeBytesEnd_ != codeBytes_.end()) {
+ return true;
+ }
+
+ streamState_.lock().get() = Tail;
+
+ if (uint32_t extraBytes = length - copyLength) {
+ return consumeChunk(begin + copyLength, extraBytes);
+ }
+
+ return true;
+ }
+ case Tail: {
+ if (!tailBytes_.append(begin, length)) {
+ return rejectAndDestroyAfterHelperThreadStarted(StreamOOMCode);
+ }
+
+ return true;
+ }
+ case Closed:
+ MOZ_CRASH("consumeChunk() in Closed state");
+ }
+ MOZ_CRASH("unreachable");
+ }
+
+ void streamEnd(JS::OptimizedEncodingListener* tier2Listener) override {
+ switch (streamState_.lock().get()) {
+ case Env: {
+ SharedBytes bytecode = js_new<ShareableBytes>(std::move(envBytes_));
+ if (!bytecode) {
+ rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ return;
+ }
+ module_ = CompileBuffer(*compileArgs_, *bytecode, &compileError_,
+ &warnings_, nullptr);
+ setClosedAndDestroyBeforeHelperThreadStarted();
+ return;
+ }
+ case Code:
+ case Tail:
+ // Unlock exclusiveStreamEnd_ before locking streamState_.
+ {
+ auto streamEnd = exclusiveStreamEnd_.lock();
+ MOZ_ASSERT(!streamEnd->reached);
+ streamEnd->reached = true;
+ streamEnd->tailBytes = &tailBytes_;
+ streamEnd->tier2Listener = tier2Listener;
+ streamEnd.notify_one();
+ }
+ setClosedAndDestroyAfterHelperThreadStarted();
+ return;
+ case Closed:
+ MOZ_CRASH("streamEnd() in Closed state");
+ }
+ }
+
+ void streamError(size_t errorCode) override {
+ MOZ_ASSERT(errorCode != StreamOOMCode);
+ switch (streamState_.lock().get()) {
+ case Env:
+ rejectAndDestroyBeforeHelperThreadStarted(errorCode);
+ return;
+ case Tail:
+ case Code:
+ rejectAndDestroyAfterHelperThreadStarted(errorCode);
+ return;
+ case Closed:
+ MOZ_CRASH("streamError() in Closed state");
+ }
+ }
+
+ void consumeOptimizedEncoding(const uint8_t* begin, size_t length) override {
+ module_ = Module::deserialize(begin, length);
+
+ MOZ_ASSERT(streamState_.lock().get() == Env);
+ setClosedAndDestroyBeforeHelperThreadStarted();
+ }
+
+ // Called on a helper thread:
+
+ void execute() override {
+ module_ = CompileStreaming(*compileArgs_, envBytes_, codeBytes_,
+ exclusiveCodeBytesEnd_, exclusiveStreamEnd_,
+ streamFailed_, &compileError_, &warnings_);
+
+ // When execute() returns, the CompileStreamTask will be dispatched
+ // back to its JS thread to call resolve() and then be destroyed. We
+ // can't let this happen until the stream has been closed lest
+ // consumeChunk() or streamEnd() be called on a dead object.
+ auto streamState = streamState_.lock();
+ while (streamState != Closed) {
+ streamState.wait(/* stream closed */);
+ }
+ }
+
+ // Called on a JS thread after streaming compilation completes/errors:
+
+ bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
+ MOZ_ASSERT(streamState_.lock() == Closed);
+
+ if (!ReportCompileWarnings(cx, warnings_)) {
+ return false;
+ }
+ if (module_) {
+ MOZ_ASSERT(!streamFailed_ && !streamError_ && !compileError_);
+ if (instantiate_) {
+ return AsyncInstantiate(cx, *module_, importObj_, Ret::Pair, promise);
+ }
+ return ResolveCompile(cx, *module_, promise);
+ }
+
+ if (streamError_) {
+ return RejectWithStreamErrorNumber(cx, *streamError_, promise);
+ }
+
+ return Reject(cx, *compileArgs_, promise, compileError_);
+ }
+
+ public:
+ CompileStreamTask(JSContext* cx, Handle<PromiseObject*> promise,
+ CompileArgs& compileArgs, bool instantiate,
+ HandleObject importObj)
+ : PromiseHelperTask(cx, promise),
+ streamState_(mutexid::WasmStreamStatus, Env),
+ instantiate_(instantiate),
+ importObj_(cx, importObj),
+ compileArgs_(&compileArgs),
+ codeSection_{},
+ codeBytesEnd_(nullptr),
+ exclusiveCodeBytesEnd_(mutexid::WasmCodeBytesEnd, nullptr),
+ exclusiveStreamEnd_(mutexid::WasmStreamEnd),
+ streamFailed_(false) {
+ MOZ_ASSERT_IF(importObj_, instantiate_);
+ }
+};
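+
+// To summarize the flow above (a paraphrase of the per-member comments): the
+// stream thread moves streamState_ monotonically from Env toward Closed, the
+// helper thread's execute() blocks until Closed, and every error path reaches
+// Closed via one of the rejectAndDestroy*() helpers.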
+
+// A short-lived object that captures the arguments of a
+// WebAssembly.{compileStreaming,instantiateStreaming} while waiting for
+// the Promise<Response> to resolve to a (hopefully) Response.
+class ResolveResponseClosure : public NativeObject {
+ static const unsigned COMPILE_ARGS_SLOT = 0;
+ static const unsigned PROMISE_OBJ_SLOT = 1;
+ static const unsigned INSTANTIATE_SLOT = 2;
+ static const unsigned IMPORT_OBJ_SLOT = 3;
+ static const JSClassOps classOps_;
+
+ static void finalize(JS::GCContext* gcx, JSObject* obj) {
+ auto& closure = obj->as<ResolveResponseClosure>();
+ gcx->release(obj, &closure.compileArgs(),
+ MemoryUse::WasmResolveResponseClosure);
+ }
+
+ public:
+ static const unsigned RESERVED_SLOTS = 4;
+ static const JSClass class_;
+
+ static ResolveResponseClosure* create(JSContext* cx, const CompileArgs& args,
+ HandleObject promise, bool instantiate,
+ HandleObject importObj) {
+ MOZ_ASSERT_IF(importObj, instantiate);
+
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<ResolveResponseClosure>(cx, nullptr);
+ if (!obj) {
+ return nullptr;
+ }
+
+ args.AddRef();
+ InitReservedSlot(obj, COMPILE_ARGS_SLOT, const_cast<CompileArgs*>(&args),
+ MemoryUse::WasmResolveResponseClosure);
+ obj->setReservedSlot(PROMISE_OBJ_SLOT, ObjectValue(*promise));
+ obj->setReservedSlot(INSTANTIATE_SLOT, BooleanValue(instantiate));
+ obj->setReservedSlot(IMPORT_OBJ_SLOT, ObjectOrNullValue(importObj));
+ return obj;
+ }
+
+ CompileArgs& compileArgs() const {
+ return *(CompileArgs*)getReservedSlot(COMPILE_ARGS_SLOT).toPrivate();
+ }
+ PromiseObject& promise() const {
+ return getReservedSlot(PROMISE_OBJ_SLOT).toObject().as<PromiseObject>();
+ }
+ bool instantiate() const {
+ return getReservedSlot(INSTANTIATE_SLOT).toBoolean();
+ }
+ JSObject* importObj() const {
+ return getReservedSlot(IMPORT_OBJ_SLOT).toObjectOrNull();
+ }
+};
+
+const JSClassOps ResolveResponseClosure::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ ResolveResponseClosure::finalize, // finalize
+ nullptr, // call
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass ResolveResponseClosure::class_ = {
+ "WebAssembly ResolveResponseClosure",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(ResolveResponseClosure::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &ResolveResponseClosure::classOps_,
+};
+
+static ResolveResponseClosure* ToResolveResponseClosure(CallArgs args) {
+ return &args.callee()
+ .as<JSFunction>()
+ .getExtendedSlot(0)
+ .toObject()
+ .as<ResolveResponseClosure>();
+}
+
+static bool RejectWithErrorNumber(JSContext* cx, uint32_t errorNumber,
+ Handle<PromiseObject*> promise) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
+ return RejectWithPendingException(cx, promise);
+}
+
+static bool ResolveResponse_OnFulfilled(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ Rooted<ResolveResponseClosure*> closure(cx,
+ ToResolveResponseClosure(callArgs));
+ Rooted<PromiseObject*> promise(cx, &closure->promise());
+ CompileArgs& compileArgs = closure->compileArgs();
+ bool instantiate = closure->instantiate();
+ Rooted<JSObject*> importObj(cx, closure->importObj());
+
+ auto task = cx->make_unique<CompileStreamTask>(cx, promise, compileArgs,
+ instantiate, importObj);
+ if (!task || !task->init(cx)) {
+ return false;
+ }
+
+ if (!callArgs.get(0).isObject()) {
+ return RejectWithErrorNumber(cx, JSMSG_WASM_BAD_RESPONSE_VALUE, promise);
+ }
+
+ RootedObject response(cx, &callArgs.get(0).toObject());
+ if (!cx->runtime()->consumeStreamCallback(cx, response, JS::MimeType::Wasm,
+ task.get())) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ (void)task.release();
+
+ callArgs.rval().setUndefined();
+ return true;
+}
+
+static bool ResolveResponse_OnRejected(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ Rooted<ResolveResponseClosure*> closure(cx, ToResolveResponseClosure(args));
+ Rooted<PromiseObject*> promise(cx, &closure->promise());
+
+ if (!PromiseObject::reject(cx, promise, args.get(0))) {
+ return false;
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
+
+static bool ResolveResponse(JSContext* cx, CallArgs callArgs,
+ Handle<PromiseObject*> promise,
+ bool instantiate = false,
+ HandleObject importObj = nullptr) {
+ MOZ_ASSERT_IF(importObj, instantiate);
+
+ const char* introducer = instantiate ? "WebAssembly.instantiateStreaming"
+ : "WebAssembly.compileStreaming";
+
+ SharedCompileArgs compileArgs = InitCompileArgs(cx, introducer);
+ if (!compileArgs) {
+ return false;
+ }
+
+ RootedObject closure(
+ cx, ResolveResponseClosure::create(cx, *compileArgs, promise, instantiate,
+ importObj));
+ if (!closure) {
+ return false;
+ }
+
+ RootedFunction onResolved(
+ cx, NewNativeFunction(cx, ResolveResponse_OnFulfilled, 1, nullptr,
+ gc::AllocKind::FUNCTION_EXTENDED, GenericObject));
+ if (!onResolved) {
+ return false;
+ }
+
+ RootedFunction onRejected(
+ cx, NewNativeFunction(cx, ResolveResponse_OnRejected, 1, nullptr,
+ gc::AllocKind::FUNCTION_EXTENDED, GenericObject));
+ if (!onRejected) {
+ return false;
+ }
+
+ onResolved->setExtendedSlot(0, ObjectValue(*closure));
+ onRejected->setExtendedSlot(0, ObjectValue(*closure));
+
+ RootedObject resolve(cx,
+ PromiseObject::unforgeableResolve(cx, callArgs.get(0)));
+ if (!resolve) {
+ return false;
+ }
+
+ return JS::AddPromiseReactions(cx, resolve, onResolved, onRejected);
+}
+
+static bool WebAssembly_compileStreaming(JSContext* cx, unsigned argc,
+ Value* vp) {
+ if (!EnsureStreamSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async compileStreaming() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ if (!cx->isRuntimeCodeGenEnabled(JS::RuntimeCode::WASM, nullptr)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CSP_BLOCKED_WASM,
+ "WebAssembly.compileStreaming");
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ if (!ResolveResponse(cx, callArgs, promise)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool WebAssembly_instantiateStreaming(JSContext* cx, unsigned argc,
+ Value* vp) {
+ if (!EnsureStreamSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async instantiateStreaming() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ if (!cx->isRuntimeCodeGenEnabled(JS::RuntimeCode::WASM, nullptr)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CSP_BLOCKED_WASM,
+ "WebAssembly.instantiateStreaming");
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ RootedObject firstArg(cx);
+ RootedObject importObj(cx);
+ if (!GetInstantiateArgs(cx, callArgs, &firstArg, &importObj)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ if (!ResolveResponse(cx, callArgs, promise, true, importObj)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
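+
+// Illustrative JS-level usage of the streaming entry points above (`url` and
+// `imports` are hypothetical):
+//
+//   WebAssembly.compileStreaming(fetch(url))
+//       .then(module => { ... });
+//   WebAssembly.instantiateStreaming(fetch(url), imports)
+//       .then(({ module, instance }) => { ... });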
+
+#ifdef ENABLE_WASM_MOZ_INTGEMM
+
+static bool WebAssembly_mozIntGemm(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ Rooted<WasmModuleObject*> module(cx);
+ wasm::IntrinsicId ids[] = {
+ wasm::IntrinsicId::I8PrepareB,
+ wasm::IntrinsicId::I8PrepareBFromTransposed,
+ wasm::IntrinsicId::I8PrepareBFromQuantizedTransposed,
+ wasm::IntrinsicId::I8PrepareA,
+ wasm::IntrinsicId::I8PrepareBias,
+ wasm::IntrinsicId::I8MultiplyAndAddBias,
+ wasm::IntrinsicId::I8SelectColumnsOfB};
+ if (!wasm::CompileIntrinsicModule(cx, ids, Shareable::False, &module)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ args.rval().set(ObjectValue(*module.get()));
+ return true;
+}
+
+static const JSFunctionSpec WebAssembly_mozIntGemm_methods[] = {
+ JS_FN("mozIntGemm", WebAssembly_mozIntGemm, 0, JSPROP_ENUMERATE),
+ JS_FS_END};
+
+#endif // ENABLE_WASM_MOZ_INTGEMM
+
+static const JSFunctionSpec WebAssembly_static_methods[] = {
+ JS_FN(js_toSource_str, WebAssembly_toSource, 0, 0),
+ JS_FN("compile", WebAssembly_compile, 1, JSPROP_ENUMERATE),
+ JS_FN("instantiate", WebAssembly_instantiate, 1, JSPROP_ENUMERATE),
+ JS_FN("validate", WebAssembly_validate, 1, JSPROP_ENUMERATE),
+ JS_FN("compileStreaming", WebAssembly_compileStreaming, 1,
+ JSPROP_ENUMERATE),
+ JS_FN("instantiateStreaming", WebAssembly_instantiateStreaming, 1,
+ JSPROP_ENUMERATE),
+ JS_FS_END};
+
+static const JSPropertySpec WebAssembly_static_properties[] = {
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly", JSPROP_READONLY), JS_PS_END};
+
+static JSObject* CreateWebAssemblyObject(JSContext* cx, JSProtoKey key) {
+ MOZ_RELEASE_ASSERT(HasSupport(cx));
+
+ RootedObject proto(cx, &cx->global()->getObjectPrototype());
+ return NewTenuredObjectWithGivenProto(cx, &WasmNamespaceObject::class_,
+ proto);
+}
+
+struct NameAndProtoKey {
+ const char* const name;
+ JSProtoKey key;
+};
+
+static bool WebAssemblyDefineConstructor(JSContext* cx,
+ Handle<WasmNamespaceObject*> wasm,
+ NameAndProtoKey entry,
+ MutableHandleValue ctorValue,
+ MutableHandleId id) {
+ JSObject* ctor = GlobalObject::getOrCreateConstructor(cx, entry.key);
+ if (!ctor) {
+ return false;
+ }
+ ctorValue.setObject(*ctor);
+
+ JSAtom* className = Atomize(cx, entry.name, strlen(entry.name));
+ if (!className) {
+ return false;
+ }
+ id.set(AtomToId(className));
+
+ return DefineDataProperty(cx, wasm, id, ctorValue, 0);
+}
+
+static bool WebAssemblyClassFinish(JSContext* cx, HandleObject object,
+ HandleObject proto) {
+ Handle<WasmNamespaceObject*> wasm = object.as<WasmNamespaceObject>();
+
+ constexpr NameAndProtoKey entries[] = {
+ {"Module", JSProto_WasmModule},
+ {"Instance", JSProto_WasmInstance},
+ {"Memory", JSProto_WasmMemory},
+ {"Table", JSProto_WasmTable},
+ {"Global", JSProto_WasmGlobal},
+ {"CompileError", GetExceptionProtoKey(JSEXN_WASMCOMPILEERROR)},
+ {"LinkError", GetExceptionProtoKey(JSEXN_WASMLINKERROR)},
+ {"RuntimeError", GetExceptionProtoKey(JSEXN_WASMRUNTIMEERROR)},
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ {"Function", JSProto_WasmFunction},
+#endif
+ };
+ RootedValue ctorValue(cx);
+ RootedId id(cx);
+ for (const auto& entry : entries) {
+ if (!WebAssemblyDefineConstructor(cx, wasm, entry, &ctorValue, &id)) {
+ return false;
+ }
+ }
+
+ if (ExceptionsAvailable(cx)) {
+ constexpr NameAndProtoKey exceptionEntries[] = {
+ {"Tag", JSProto_WasmTag},
+ {"Exception", JSProto_WasmException},
+ };
+ for (const auto& entry : exceptionEntries) {
+ if (!WebAssemblyDefineConstructor(cx, wasm, entry, &ctorValue, &id)) {
+ return false;
+ }
+ }
+ }
+
+#ifdef ENABLE_WASM_MOZ_INTGEMM
+ if (MozIntGemmAvailable(cx) &&
+ !JS_DefineFunctions(cx, wasm, WebAssembly_mozIntGemm_methods)) {
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+static const ClassSpec WebAssemblyClassSpec = {
+ CreateWebAssemblyObject, nullptr, WebAssembly_static_methods,
+ WebAssembly_static_properties, nullptr, nullptr,
+ WebAssemblyClassFinish};
+
+const JSClass js::WasmNamespaceObject::class_ = {
+ js_WebAssembly_str, JSCLASS_HAS_CACHED_PROTO(JSProto_WebAssembly),
+ JS_NULL_CLASS_OPS, &WebAssemblyClassSpec};
+
+// Sundry
diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
new file mode 100644
index 0000000000..53de0f2962
--- /dev/null
+++ b/js/src/wasm/WasmJS.h
@@ -0,0 +1,580 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_js_h
+#define wasm_js_h
+
+#include "mozilla/HashTable.h" // DefaultHasher
+#include "mozilla/Maybe.h" // mozilla::Maybe
+
+#include <stdint.h> // int32_t, int64_t, uint32_t
+
+#include "gc/Barrier.h" // HeapPtr
+#include "gc/ZoneAllocator.h" // ZoneAllocPolicy
+#include "js/AllocPolicy.h" // SystemAllocPolicy
+#include "js/Class.h" // JSClassOps, ClassSpec
+#include "js/GCHashTable.h" // GCHashMap, GCHashSet
+#include "js/GCVector.h" // GCVector
+#include "js/PropertySpec.h" // JSPropertySpec, JSFunctionSpec
+#include "js/RootingAPI.h" // StableCellHasher
+#include "js/SweepingAPI.h" // JS::WeakCache
+#include "js/TypeDecls.h" // HandleValue, HandleObject, MutableHandleObject, MutableHandleFunction
+#include "js/Vector.h" // JS::Vector
+#include "js/WasmFeatures.h"
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/NativeObject.h" // NativeObject
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmException.h"
+#include "wasm/WasmExprType.h"
+#include "wasm/WasmMemory.h"
+#include "wasm/WasmModuleTypes.h"
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmValType.h"
+#include "wasm/WasmValue.h"
+
+class JSObject;
+class JSTracer;
+struct JSContext;
+
+namespace JS {
+class CallArgs;
+class Value;
+} // namespace JS
+
+namespace js {
+
+class ArrayBufferObject;
+class ArrayBufferObjectMaybeShared;
+class JSStringBuilder;
+class TypedArrayObject;
+class WasmFunctionScope;
+class WasmInstanceScope;
+class WasmSharedArrayRawBuffer;
+
+namespace wasm {
+
+struct ImportValues;
+
+// Return whether WebAssembly can in principle be compiled on this platform (ie
+// combination of hardware and OS), assuming at least one of the compilers that
+// supports the platform is not disabled by other settings.
+//
+// This predicate must be checked and must be true to call any of the top-level
+// wasm eval/compile methods.
+
+bool HasPlatformSupport(JSContext* cx);
+
+// Return whether WebAssembly is supported on this platform. This determines
+// whether the WebAssembly object is exposed to JS in this context / realm.
+//
+// It does *not* guarantee that a compiler is actually available; that has to be
+// checked separately, as it is sometimes run-time variant, depending on whether
+// a debugger has been created or not.
+
+bool HasSupport(JSContext* cx);
+
+// Predicates for compiler availability.
+//
+// These two predicates together select zero or one baseline compiler and zero
+// or one optimizing compiler, based on: what's compiled into the executable,
+// what's supported on the current platform, what's selected by options, and the
+// current run-time environment. As it is possible for the computed values to
+// change (when a value changes in about:config or the debugger pane is shown or
+// hidden), it is inadvisable to cache these values in such a way that they
+// could become invalid. Generally it is cheap always to recompute them.
+
+bool BaselineAvailable(JSContext* cx);
+bool IonAvailable(JSContext* cx);
+
+// Test whether at least one of the above compilers is available.
+
+bool AnyCompilerAvailable(JSContext* cx);
+
+// Asm.JS is translated to wasm and then compiled using the wasm optimizing
+// compiler; test whether this compiler is available.
+
+bool WasmCompilerForAsmJSAvailable(JSContext* cx);
+
+// Predicates for white-box compiler disablement testing.
+//
+// These predicates determine whether the optimizing compilers were disabled by
+// features that are enabled at compile-time or run-time. They do not consider
+// the hardware platform, nor whether other compilers are enabled.
+//
+// If `reason` is not null then it is populated with a string that describes
+// the specific features that disable the compiler.
+//
+// Returns false on OOM (which happens only when a reason is requested),
+// otherwise true, with the result in `*isDisabled` and optionally the reason in
+// `*reason`.
+
+bool BaselineDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason = nullptr);
+bool IonDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason = nullptr);
+
+// Predicates for feature availability.
+//
+// The following predicates check whether particular wasm features are enabled,
+// and for each, whether at least one compiler is (currently) available that
+// supports the feature.
+
+// Streaming compilation.
+bool StreamingCompilationAvailable(JSContext* cx);
+
+// Caching of optimized code. Implies both streaming compilation and an
+// optimizing compiler tier.
+bool CodeCachingAvailable(JSContext* cx);
+
+// Shared memory and atomics.
+bool ThreadsAvailable(JSContext* cx);
+
+#define WASM_FEATURE(NAME, ...) bool NAME##Available(JSContext* cx);
+JS_FOR_WASM_FEATURES(WASM_FEATURE, WASM_FEATURE, WASM_FEATURE)
+#undef WASM_FEATURE
+
+// SIMD operations.
+bool SimdAvailable(JSContext* cx);
+
+// Privileged content that can access experimental intrinsics
+bool IsSimdPrivilegedContext(JSContext* cx);
+
+#if defined(ENABLE_WASM_SIMD) && defined(DEBUG)
+// Report the result of a Simd simplification to the testing infrastructure.
+void ReportSimdAnalysis(const char* data);
+#endif
+
+// Returns true if WebAssembly as configured by compile-time flags and run-time
+// options can support try/catch, throw, rethrow, and branch_on_exn (evolving).
+bool ExceptionsAvailable(JSContext* cx);
+
+// Compiles the given binary wasm module, provided as a typed array view of
+// its bytes, and links the module's imports with the given import object.
+
+[[nodiscard]] bool Eval(JSContext* cx, Handle<TypedArrayObject*> code,
+ HandleObject importObj,
+ MutableHandle<WasmInstanceObject*> instanceObj);
+
+// Extracts the various imports from the given import object into the given
+// ImportValues structure while checking the imports against the given module.
+// The resulting structure can be passed to WasmModule::instantiate.
+
+struct ImportValues;
+[[nodiscard]] bool GetImports(JSContext* cx, const Module& module,
+ HandleObject importObj, ImportValues* imports);
+
+// For testing cross-process (de)serialization, this pair of functions are
+// responsible for, in the child process, compiling the given wasm bytecode
+// to a wasm::Module that is serialized into the given byte array, and, in
+// the parent process, deserializing the given byte array into a
+// WebAssembly.Module object.
+
+[[nodiscard]] bool CompileAndSerialize(JSContext* cx,
+ const ShareableBytes& bytecode,
+ Bytes* serialized);
+
+[[nodiscard]] bool DeserializeModule(JSContext* cx, const Bytes& serialized,
+ MutableHandleObject module);
+
+// A WebAssembly "Exported Function" is the spec name for the JS function
+// objects created to wrap wasm functions. This predicate returns false
+// for asm.js functions which are semantically just normal JS functions
+// (even if they are implemented via wasm under the hood). The accessor
+// functions for extracting the instance and func-index of a wasm function
+// can be used for both wasm and asm.js, however.
+
+bool IsWasmExportedFunction(JSFunction* fun);
+
+Instance& ExportedFunctionToInstance(JSFunction* fun);
+WasmInstanceObject* ExportedFunctionToInstanceObject(JSFunction* fun);
+uint32_t ExportedFunctionToFuncIndex(JSFunction* fun);
+
+bool IsSharedWasmMemoryObject(JSObject* obj);
+
+} // namespace wasm
+
+// The class of WebAssembly.Module. Each WasmModuleObject owns a
+// wasm::Module. These objects are used both as content-facing JS objects and as
+// internal implementation details of asm.js.
+
+class WasmModuleObject : public NativeObject {
+ static const unsigned MODULE_SLOT = 0;
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static bool imports(JSContext* cx, unsigned argc, Value* vp);
+ static bool exports(JSContext* cx, unsigned argc, Value* vp);
+ static bool customSections(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmModuleObject* create(JSContext* cx, const wasm::Module& module,
+ HandleObject proto);
+ const wasm::Module& module() const;
+};
+
+// The class of WebAssembly.Global. This wraps a storage location, and there is
+// a per-agent one-to-one relationship between the WasmGlobalObject and the
+// storage location (the Cell) it wraps: if a module re-exports an imported
+// global, the imported and exported WasmGlobalObjects are the same, and if a
+// module exports a global twice, the two exported WasmGlobalObjects are the
+// same.
+
+// TODO/AnyRef-boxing: With boxed immediates and strings, JSObject* is no longer
+// the most appropriate representation for Cell::anyref.
+STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+
+class WasmGlobalObject : public NativeObject {
+ static const unsigned MUTABLE_SLOT = 0;
+ static const unsigned VAL_SLOT = 1;
+
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+
+ static bool typeImpl(JSContext* cx, const CallArgs& args);
+ static bool type(JSContext* cx, unsigned argc, Value* vp);
+
+ static bool valueGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool valueGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool valueSetterImpl(JSContext* cx, const CallArgs& args);
+ static bool valueSetter(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmGlobalObject* create(JSContext* cx, wasm::HandleVal value,
+ bool isMutable, HandleObject proto);
+ bool isNewborn() { return getReservedSlot(VAL_SLOT).isUndefined(); }
+
+ bool isMutable() const;
+ wasm::ValType type() const;
+ wasm::GCPtrVal& val() const;
+};
+
+// The class of WebAssembly.Instance. Each WasmInstanceObject owns a
+// wasm::Instance. These objects are used both as content-facing JS objects and
+// as internal implementation details of asm.js.
+
+class WasmInstanceObject : public NativeObject {
+ static const unsigned INSTANCE_SLOT = 0;
+ static const unsigned EXPORTS_OBJ_SLOT = 1;
+ static const unsigned EXPORTS_SLOT = 2;
+ static const unsigned SCOPES_SLOT = 3;
+ static const unsigned INSTANCE_SCOPE_SLOT = 4;
+ static const unsigned GLOBALS_SLOT = 5;
+
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static bool exportsGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool exportsGetter(JSContext* cx, unsigned argc, Value* vp);
+ bool isNewborn() const;
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+
+ // ExportMap maps from function index to exported function object.
+ // This allows the instance to lazily create exported function
+ // objects on demand (instead of up-front for all table elements) while
+ // correctly preserving observable function object identity.
+ using ExportMap = GCHashMap<uint32_t, HeapPtr<JSFunction*>,
+ DefaultHasher<uint32_t>, CellAllocPolicy>;
+ ExportMap& exports() const;
+
+ // See the definition inside WasmJS.cpp.
+ class UnspecifiedScopeMap;
+ UnspecifiedScopeMap& scopes() const;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 6;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmInstanceObject* create(
+ JSContext* cx, const RefPtr<const wasm::Code>& code,
+ const wasm::DataSegmentVector& dataSegments,
+ const wasm::ElemSegmentVector& elemSegments, uint32_t instanceDataLength,
+ Handle<WasmMemoryObject*> memory,
+ Vector<RefPtr<wasm::Table>, 0, SystemAllocPolicy>&& tables,
+ const JSObjectVector& funcImports, const wasm::GlobalDescVector& globals,
+ const wasm::ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ const WasmTagObjectVector& tagObjs, HandleObject proto,
+ UniquePtr<wasm::DebugState> maybeDebug);
+ void initExportsObj(JSObject& exportsObj);
+
+ wasm::Instance& instance() const;
+ JSObject& exportsObj() const;
+
+ [[nodiscard]] static bool getExportedFunction(
+ JSContext* cx, Handle<WasmInstanceObject*> instanceObj,
+ uint32_t funcIndex, MutableHandleFunction fun);
+
+ const wasm::CodeRange& getExportedFunctionCodeRange(JSFunction* fun,
+ wasm::Tier tier);
+
+ static WasmInstanceScope* getScope(JSContext* cx,
+ Handle<WasmInstanceObject*> instanceObj);
+ static WasmFunctionScope* getFunctionScope(
+ JSContext* cx, Handle<WasmInstanceObject*> instanceObj,
+ uint32_t funcIndex);
+
+ using GlobalObjectVector =
+ GCVector<HeapPtr<WasmGlobalObject*>, 0, CellAllocPolicy>;
+ GlobalObjectVector& indirectGlobals() const;
+};
+
+// The class of WebAssembly.Memory. A WasmMemoryObject references an ArrayBuffer
+// or SharedArrayBuffer object which owns the actual memory.
+
+class WasmMemoryObject : public NativeObject {
+ static const unsigned BUFFER_SLOT = 0;
+ static const unsigned OBSERVERS_SLOT = 1;
+ static const unsigned ISHUGE_SLOT = 2;
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static bool bufferGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool bufferGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool typeImpl(JSContext* cx, const CallArgs& args);
+ static bool type(JSContext* cx, unsigned argc, Value* vp);
+ static bool growImpl(JSContext* cx, const CallArgs& args);
+ static bool grow(JSContext* cx, unsigned argc, Value* vp);
+ static bool discardImpl(JSContext* cx, const CallArgs& args);
+ static bool discard(JSContext* cx, unsigned argc, Value* vp);
+ static uint64_t growShared(Handle<WasmMemoryObject*> memory, uint64_t delta);
+
+ using InstanceSet = JS::WeakCache<GCHashSet<
+ WeakHeapPtr<WasmInstanceObject*>,
+ StableCellHasher<WeakHeapPtr<WasmInstanceObject*>>, CellAllocPolicy>>;
+ bool hasObservers() const;
+ InstanceSet& observers() const;
+ InstanceSet* getOrCreateObservers(JSContext* cx);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 3;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec memoryControlMethods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmMemoryObject* create(JSContext* cx,
+ Handle<ArrayBufferObjectMaybeShared*> buffer,
+ bool isHuge, HandleObject proto);
+
+ // `buffer()` always returns the current buffer object. If the buffer
+ // represents shared memory then `buffer().byteLength()` never changes, and
+ // in particular it may be a smaller value than that returned from
+ // `volatileMemoryLength()` below.
+ //
+ // Generally, you do not want to call `buffer().byteLength()`, but to call
+ // `volatileMemoryLength()`, instead.
+ ArrayBufferObjectMaybeShared& buffer() const;
+
+ // The current length of the memory in bytes. In the case of shared memory,
+ // the length can change at any time. Also note that this will acquire a lock
+ // for shared memory, so do not call this from a signal handler.
+ size_t volatileMemoryLength() const;
+
+ // The current length of the memory in pages. See the comment for
+ // `volatileMemoryLength` for details on why this is 'volatile'.
+ wasm::Pages volatilePages() const;
+
+ // The maximum length of the memory in pages. This is not 'volatile' in
+ // contrast to the current length, as it cannot change for shared memories.
+ wasm::Pages clampedMaxPages() const;
+ mozilla::Maybe<wasm::Pages> sourceMaxPages() const;
+
+ wasm::IndexType indexType() const;
+ bool isShared() const;
+ bool isHuge() const;
+ bool movingGrowable() const;
+ size_t boundsCheckLimit() const;
+
+ // If isShared() is true then obtain the underlying buffer object.
+ WasmSharedArrayRawBuffer* sharedArrayRawBuffer() const;
+
+ bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
+ static uint64_t grow(Handle<WasmMemoryObject*> memory, uint64_t delta,
+ JSContext* cx);
+ static void discard(Handle<WasmMemoryObject*> memory, uint64_t byteOffset,
+ uint64_t len, JSContext* cx);
+};
+
+// The class of WebAssembly.Table. A WasmTableObject holds a refcount on a
+// wasm::Table, allowing a Table to be shared between multiple Instances
+// (eventually between multiple threads).
+
+class WasmTableObject : public NativeObject {
+ static const unsigned TABLE_SLOT = 0;
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ bool isNewborn() const;
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+ static bool lengthGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool lengthGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool typeImpl(JSContext* cx, const CallArgs& args);
+ static bool type(JSContext* cx, unsigned argc, Value* vp);
+ static bool getImpl(JSContext* cx, const CallArgs& args);
+ static bool get(JSContext* cx, unsigned argc, Value* vp);
+ static bool setImpl(JSContext* cx, const CallArgs& args);
+ static bool set(JSContext* cx, unsigned argc, Value* vp);
+ static bool growImpl(JSContext* cx, const CallArgs& args);
+ static bool grow(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ // Note that, after creation, a WasmTableObject's table() is not initialized
+ // and must be initialized before use.
+
+ static WasmTableObject* create(JSContext* cx, uint32_t initialLength,
+ mozilla::Maybe<uint32_t> maximumLength,
+ wasm::RefType tableType, HandleObject proto);
+ wasm::Table& table() const;
+
+ // Perform the standard `ToWebAssemblyValue` coercion on `value` and fill the
+ // range [index, index + length) in the table. Callers are required to ensure
+ // the range is within bounds. Returns false if the coercion failed.
+ bool fillRange(JSContext* cx, uint32_t index, uint32_t length,
+ HandleValue value) const;
+};
+
+// The class of WebAssembly.Tag. This class is used to track exception tag
+// types for exports and imports.
+
+class WasmTagObject : public NativeObject {
+ static const unsigned TYPE_SLOT = 0;
+
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ static bool typeImpl(JSContext* cx, const CallArgs& args);
+ static bool type(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmTagObject* create(JSContext* cx,
+ const wasm::SharedTagType& tagType,
+ HandleObject proto);
+
+ const wasm::TagType* tagType() const;
+ const wasm::ValTypeVector& valueTypes() const;
+ wasm::ResultType resultType() const;
+};
+
+// The class of WebAssembly.Exception. This class is used to represent
+// exceptions thrown from Wasm in JS. (It is also used as the internal
+// representation for exceptions in Wasm.)
+
+class WasmExceptionObject : public NativeObject {
+ static const unsigned TAG_SLOT = 0;
+ static const unsigned TYPE_SLOT = 1;
+ static const unsigned DATA_SLOT = 2;
+ static const unsigned STACK_SLOT = 3;
+
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void trace(JSTracer* trc, JSObject* obj);
+ static void finalize(JS::GCContext* gcx, JSObject* obj);
+ // Named isMethod instead of is to avoid name conflict.
+ static bool isMethod(JSContext* cx, unsigned argc, Value* vp);
+ static bool isImpl(JSContext* cx, const CallArgs& args);
+ static bool getArg(JSContext* cx, unsigned argc, Value* vp);
+ static bool getArgImpl(JSContext* cx, const CallArgs& args);
+ static bool getStack(JSContext* cx, unsigned argc, Value* vp);
+ static bool getStack_impl(JSContext* cx, const CallArgs& args);
+
+ uint8_t* typedMem() const;
+ [[nodiscard]] bool loadValue(JSContext* cx, size_t offset, wasm::ValType type,
+ MutableHandleValue vp);
+ [[nodiscard]] bool initValue(JSContext* cx, size_t offset, wasm::ValType type,
+ HandleValue value);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 4;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmExceptionObject* create(JSContext* cx, Handle<WasmTagObject*> tag,
+ HandleObject stack, HandleObject proto);
+ bool isNewborn() const;
+
+ JSObject* stack() const;
+ const wasm::TagType* tagType() const;
+ WasmTagObject& tag() const;
+
+ static size_t offsetOfData() {
+ return NativeObject::getFixedSlotOffset(DATA_SLOT);
+ }
+};
+
+// The class of the WebAssembly global namespace object.
+
+class WasmNamespaceObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ private:
+ static const ClassSpec classSpec_;
+};
+
+extern const JSClass WasmFunctionClass;
+
+} // namespace js
+
+#endif // wasm_js_h
diff --git a/js/src/wasm/WasmLog.cpp b/js/src/wasm/WasmLog.cpp
new file mode 100644
index 0000000000..34ef219ce1
--- /dev/null
+++ b/js/src/wasm/WasmLog.cpp
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmLog.h"
+
+#include <stdio.h>
+
+#include "jit/JitOptions.h"
+#include "js/Printf.h"
+#include "js/Utility.h"
+#include "vm/JSContext.h"
+#include "vm/Warnings.h"
+
+using namespace js;
+using namespace js::wasm;
+
+void wasm::Log(JSContext* cx, const char* fmt, ...) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+
+ if (!cx->options().wasmVerbose()) {
+ return;
+ }
+
+ va_list args;
+ va_start(args, fmt);
+
+ if (UniqueChars chars = JS_vsmprintf(fmt, args)) {
+ WarnNumberASCII(cx, JSMSG_WASM_VERBOSE, chars.get());
+ if (cx->isExceptionPending()) {
+ cx->clearPendingException();
+ }
+ }
+
+ va_end(args);
+}
+
+void wasm::LogOffThread(const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+}
+
+#ifdef WASM_CODEGEN_DEBUG
+bool wasm::IsCodegenDebugEnabled(DebugChannel channel) {
+ switch (channel) {
+ case DebugChannel::Function:
+ return jit::JitOptions.enableWasmFuncCallSpew;
+ case DebugChannel::Import:
+ return jit::JitOptions.enableWasmImportCallSpew;
+ }
+ return false;
+}
+#endif
+
+void wasm::DebugCodegen(DebugChannel channel, const char* fmt, ...) {
+#ifdef WASM_CODEGEN_DEBUG
+ if (!IsCodegenDebugEnabled(channel)) {
+ return;
+ }
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+#endif
+}
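+
+// Editor's note: an illustrative call site (not part of the upstream patch).
+// A typical use from compiler code would look like
+//
+//   DebugCodegen(DebugChannel::Function, "wasm-function[%d] entry\n", idx);
+//
+// where `idx` is a hypothetical function index. The call prints to stderr
+// only when WASM_CODEGEN_DEBUG is defined and the corresponding JitOptions
+// spew flag is enabled; otherwise it is a no-op.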
diff --git a/js/src/wasm/WasmLog.h b/js/src/wasm/WasmLog.h
new file mode 100644
index 0000000000..7ee5a5fc20
--- /dev/null
+++ b/js/src/wasm/WasmLog.h
@@ -0,0 +1,51 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_log_h
+#define wasm_log_h
+
+#include "js/TypeDecls.h"
+
+namespace js {
+namespace wasm {
+
+// Verbose logging support.
+
+extern void Log(JSContext* cx, const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+extern void LogOffThread(const char* fmt, ...) MOZ_FORMAT_PRINTF(1, 2);
+
+// Codegen debug support.
+
+enum class DebugChannel {
+ Function,
+ Import,
+};
+
+#ifdef WASM_CODEGEN_DEBUG
+bool IsCodegenDebugEnabled(DebugChannel channel);
+#endif
+
+void DebugCodegen(DebugChannel channel, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(2, 3);
+
+using PrintCallback = void (*)(const char*);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_log_h
diff --git a/js/src/wasm/WasmMemory.cpp b/js/src/wasm/WasmMemory.cpp
new file mode 100644
index 0000000000..b5566921e0
--- /dev/null
+++ b/js/src/wasm/WasmMemory.cpp
@@ -0,0 +1,385 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmMemory.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "js/Conversions.h"
+#include "js/ErrorReport.h"
+#include "vm/ArrayBufferObject.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmProcess.h"
+
+using mozilla::IsPowerOfTwo;
+
+using namespace js;
+using namespace js::wasm;
+
+const char* wasm::ToString(IndexType indexType) {
+ switch (indexType) {
+ case IndexType::I32:
+ return "i32";
+ case IndexType::I64:
+ return "i64";
+ default:
+ MOZ_CRASH();
+ }
+}
+
+bool wasm::ToIndexType(JSContext* cx, HandleValue value, IndexType* indexType) {
+ RootedString typeStr(cx, ToString(cx, value));
+ if (!typeStr) {
+ return false;
+ }
+
+ Rooted<JSLinearString*> typeLinearStr(cx, typeStr->ensureLinear(cx));
+ if (!typeLinearStr) {
+ return false;
+ }
+
+ if (StringEqualsLiteral(typeLinearStr, "i32")) {
+ *indexType = IndexType::I32;
+ } else if (StringEqualsLiteral(typeLinearStr, "i64")) {
+ *indexType = IndexType::I64;
+ } else {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_STRING_IDX_TYPE);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * [SMDOC] Linear memory addresses and bounds checking
+ *
+ * (Also see "WASM Linear Memory structure" in vm/ArrayBufferObject.cpp)
+ *
+ *
+ * Memory addresses, bounds check avoidance, and the huge memory trick.
+ *
+ * A memory address in an access instruction has three components, the "memory
+ * base", the "pointer", and the "offset". The "memory base" - the HeapReg on
+ * most platforms and a value loaded from the instance on x86 - is a native
+ * pointer that points to the start of the linear memory array; we'll ignore the
+ * memory base in the following. The "pointer" is the i32 or i64 index supplied
+ * by the program as a separate value argument to the access instruction; it is
+ * usually variable but can be constant. The "offset" is a constant encoded in
+ * the access instruction.
+ *
+ * The "effective address" (EA) is the non-overflowed sum of the pointer and the
+ * offset (if the sum overflows the program traps); the pointer, offset, and EA
+ * all have the same type, i32 or i64.
+ *
+ * An access has an "access size", which is the number of bytes that are
+ * accessed - currently up to 16 (for V128). The highest-addressed byte to be
+ * accessed by an access is thus the byte at (pointer+offset+access_size-1),
+ * where offset+access_size-1 is compile-time evaluable.
+ *
+ * Bounds checking ensures that the entire access is in bounds, ie, that the
+ * highest-addressed byte is at an offset in the linear memory below that of the
+ * memory's current byteLength.
+ *
+ * To avoid performing an addition with overflow check and a compare-and-branch
+ * bounds check for every memory access, we use some tricks:
+ *
+ * - An access-protected guard region of size R at the end of each memory is
+ * used to trap accesses to out-of-bounds offsets in the range
+ * 0..R-access_size. Thus the offset and the access size need not be added
+ * into the pointer before the bounds check, saving the add and overflow
+ * check. The offset is added into the pointer without an overflow check
+ * either directly before the access or in the access instruction itself
+ * (depending on the ISA). The pointer must still be explicitly
+ * bounds-checked.
+ *
+ * - On 64-bit systems where we determine there is plenty of virtual memory
+ * space (and ideally we determine that the VM system uses overcommit), a
+ * 32-bit memory is implemented as a 4GB + R reservation, where the memory
+ * from the current heap length through the end of the reservation is
+ * access-protected. The protected area R allows offsets up to R-access_size
+ * to be encoded in the access instruction. The pointer need not be bounds
+ * checked explicitly, since it has only a 4GB range and thus points into the
+ * 4GB part of the reservation. The offset can be added into the pointer
+ * (using 64-bit arithmetic) either directly before the access or in the
+ * access instruction.
+ *
+ * The value of R differs in the two situations; in the first case it tends to
+ * be small, currently 64KB; in the second case it is large, currently 2GB+64KB.
+ * The difference is due to explicit bounds checking tending to be used on
+ * 32-bit systems where memory and address space are scarce, while the implicit
+ * bounds check is used only on 64-bit systems after ensuring that sufficient
+ * address space is available in the process. (2GB is really overkill, and
+ * there's nothing magic about it; we could use something much smaller.)
+ *
+ * The implicit bounds checking strategy with the large reservation is known
+ * below and elsewhere as the "huge memory trick" or just "huge memory".
+ *
+ * All memories in a process use the same strategy, selected at process startup.
+ * The immediate reason for that is that the machine code embeds the strategy
+ * it's been compiled with, and may later be exposed to memories originating
+ * from different modules or directly from JS. If the memories did not all use
+ * the same strategy, and the same strategy as the code, linking would fail or
+ * we would have to recompile the code.
+ *
+ *
+ * The boundsCheckLimit.
+ *
+ * The bounds check limit that is stored in the instance is always valid and is
+ * always a 64-bit datum, and it is always correct to load it and use it as a
+ * 64-bit value. However, in situations where the upper 32 bits are known to be
+ * zero, it is also correct to load just the low 32 bits from the address of the
+ * limit (which is always little-endian when a JIT is enabled), and use that
+ * value as the limit.
+ *
+ * On x86 and arm32 (and on any other 32-bit platform, should there ever be
+ * one), there is explicit bounds checking and the heap, whether memory32 or
+ * memory64, is limited to 2GB; the bounds check limit can be treated as a
+ * 32-bit quantity.
+ *
+ * On all 64-bit platforms, we may use explicit bounds checking or the huge
+ * memory trick for memory32, but must always use explicit bounds checking for
+ * memory64. If the heap does not have a known maximum size or the known
+ * maximum is greater than or equal to 4GB, then the bounds check limit must be
+ * treated as a 64-bit quantity; otherwise it can be treated as a 32-bit
+ * quantity.
+ *
+ * On x64 and arm64 with Baseline and Ion, we allow 32-bit memories up to 4GB,
+ * and 64-bit memories can be larger.
+ *
+ * On mips64, memories are limited to 2GB, for now.
+ *
+ * Asm.js memories are limited to 2GB even on 64-bit platforms, and we can
+ * always assume a 32-bit bounds check limit for asm.js.
+ *
+ *
+ * Constant pointers.
+ *
+ * If the pointer is constant then the EA can be computed at compile time, and
+ * if the EA is below the initial memory size then the bounds check can be
+ * elided.
+ *
+ *
+ * Alignment checks.
+ *
+ * On all platforms, some accesses (currently atomics) require an alignment
+ * check: the EA must be naturally aligned for the datum being accessed.
+ * However, we do not need to compute the EA properly, we care only about the
+ * low bits - a cheap, overflowing add is fine, and if the offset is known
+ * to be aligned, only the pointer need be checked.
+ */
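+
+// Editor's note: what follows is a standalone, editorial sketch of the
+// addressing scheme documented above; it is not part of the upstream patch.
+// The names in this namespace are hypothetical, and the constant restates a
+// value used by the surrounding code (64KB guard region for explicitly
+// checked memories) so the sketch can be read in isolation.
+namespace editor_addressing_sketch {
+
+constexpr uint64_t kGuardRegion = 65536;  // "R" for explicitly checked memories
+
+// Explicit bounds checking: only the pointer is compared against the bounds
+// check limit. If the check passes, the access proceeds; an access whose tail
+// crosses the limit lands in the guard region and traps in hardware, which is
+// why the offset and access size need not be folded into the comparison.
+inline bool PassesExplicitBoundsCheck(uint64_t ptr, uint64_t boundsCheckLimit,
+ uint64_t offset, uint64_t accessSize) {
+ // Offsets too large for the guard region to absorb need a full check that
+ // folds them in; this sketch conservatively rejects them.
+ if (offset + accessSize > kGuardRegion) {
+ return false;
+ }
+ return ptr < boundsCheckLimit;
+}
+
+// Huge memory: a 32-bit memory reserves 4GB plus a large guard region, so any
+// i32 pointer plus a bounded offset stays inside the reservation; no explicit
+// check is emitted and out-of-bounds effective addresses fault in the guard.
+inline uint64_t HugeMemoryEffectiveAddress(uint32_t ptr, uint64_t offset) {
+ return uint64_t(ptr) + offset;  // 64-bit add; cannot overflow for an i32 ptr
+}
+
+}  // namespace editor_addressing_sketch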
+
+// Bounds checks always compare the base of the memory access with the bounds
+// check limit. If the memory access is unaligned, this means that, even if the
+// bounds check succeeds, a few bytes of the access can extend past the end of
+// memory. To guard against this, extra space is included in the guard region to
+// catch the overflow. MaxMemoryAccessSize is a conservative approximation of
+// the maximum guard space needed to catch all unaligned overflows.
+//
+// Also see "Linear memory addresses and bounds checking" above.
+
+static const unsigned MaxMemoryAccessSize = LitVal::sizeofLargestValue();
+
+// All plausible targets must be able to do at least IEEE754 double
+// loads/stores, hence the lower limit of 8. Some Intel processors support
+// AVX-512 loads/stores, hence the upper limit of 64.
+static_assert(MaxMemoryAccessSize >= 8, "MaxMemoryAccessSize too low");
+static_assert(MaxMemoryAccessSize <= 64, "MaxMemoryAccessSize too high");
+static_assert((MaxMemoryAccessSize & (MaxMemoryAccessSize - 1)) == 0,
+ "MaxMemoryAccessSize is not a power of two");
+
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+
+static_assert(MaxMemoryAccessSize <= HugeUnalignedGuardPage,
+ "rounded up to static page size");
+static_assert(HugeOffsetGuardLimit < UINT32_MAX,
+ "checking for overflow against OffsetGuardLimit is enough.");
+
+// We have only tested huge memory on x64 and arm64.
+# if !(defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64))
+# error "Not an expected configuration"
+# endif
+
+#endif
+
+// On !WASM_SUPPORTS_HUGE_MEMORY platforms:
+// - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
+// original ArrayBuffer allocation which has no guard region at all.
+// - For WebAssembly memories, an additional GuardSize is mapped after the
+// accessible region of the memory to catch folded (base+offset) accesses
+// where `offset < OffsetGuardLimit` as well as the overflow from unaligned
+// accesses, as described above for MaxMemoryAccessSize.
+
+static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
+
+static_assert(MaxMemoryAccessSize < GuardSize,
+ "Guard page handles partial out-of-bounds");
+static_assert(OffsetGuardLimit < UINT32_MAX,
+ "checking for overflow against OffsetGuardLimit is enough.");
+
+size_t wasm::GetMaxOffsetGuardLimit(bool hugeMemory) {
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+ return hugeMemory ? HugeOffsetGuardLimit : OffsetGuardLimit;
+#else
+ return OffsetGuardLimit;
+#endif
+}
+
+// Assert that our minimum offset guard limit covers our inline
+// memory.copy/fill optimizations.
+static const size_t MinOffsetGuardLimit = OffsetGuardLimit;
+static_assert(MaxInlineMemoryCopyLength < MinOffsetGuardLimit, "precondition");
+static_assert(MaxInlineMemoryFillLength < MinOffsetGuardLimit, "precondition");
+
+#ifdef JS_64BIT
+wasm::Pages wasm::MaxMemoryPages(IndexType t) {
+ MOZ_ASSERT_IF(t == IndexType::I64, !IsHugeMemoryEnabled(t));
+ size_t desired = MaxMemoryLimitField(t);
+ constexpr size_t actual = ArrayBufferObject::MaxByteLength / PageSize;
+ return wasm::Pages(std::min(desired, actual));
+}
+
+size_t wasm::MaxMemoryBoundsCheckLimit(IndexType t) {
+ return MaxMemoryPages(t).byteLength();
+}
+
+#else
+// On 32-bit systems, the heap limit must be representable in the nonnegative
+// range of an int32_t, which means the maximum heap size as observed by wasm
+// code is one wasm page less than 2GB.
+wasm::Pages wasm::MaxMemoryPages(IndexType t) {
+ static_assert(ArrayBufferObject::MaxByteLength >= INT32_MAX / PageSize);
+ return wasm::Pages(INT32_MAX / PageSize);
+}
+
+// The max bounds check limit can be larger than the MaxMemoryPages because it
+// is really MaxMemoryPages rounded up to the next valid bounds check immediate,
+// see ComputeMappedSize().
+size_t wasm::MaxMemoryBoundsCheckLimit(IndexType t) {
+ size_t boundsCheckLimit = size_t(INT32_MAX) + 1;
+ MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));
+ return boundsCheckLimit;
+}
+#endif
+
+// Because ARM has a fixed-width instruction encoding, it can only express a
+// limited subset of immediates in a single instruction.
+
+static const uint64_t HighestValidARMImmediate = 0xff000000;
+
+// Heap length on ARM should fit in an ARM immediate. We approximate the set
+// of valid ARM immediates with the predicate:
+// 2^n for n in [16, 24)
+// or
+// 2^24 * n for n >= 1.
+bool wasm::IsValidARMImmediate(uint32_t i) {
+ bool valid = (IsPowerOfTwo(i) || (i & 0x00ffffff) == 0);
+
+ MOZ_ASSERT_IF(valid, i % PageSize == 0);
+
+ return valid;
+}
+
+uint64_t wasm::RoundUpToNextValidARMImmediate(uint64_t i) {
+ MOZ_ASSERT(i <= HighestValidARMImmediate);
+ static_assert(HighestValidARMImmediate == 0xff000000,
+ "algorithm relies on specific constant");
+
+ if (i <= 16 * 1024 * 1024) {
+ i = i ? mozilla::RoundUpPow2(i) : 0;
+ } else {
+ i = (i + 0x00ffffff) & ~0x00ffffff;
+ }
+
+ MOZ_ASSERT(IsValidARMImmediate(i));
+
+ return i;
+}
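+
+// Editor's note (illustrative, not part of the upstream patch): a few sample
+// values for the rounding above, following the predicate documented before
+// IsValidARMImmediate():
+//
+//   RoundUpToNextValidARMImmediate(0)          == 0
+//   RoundUpToNextValidARMImmediate(100000)     == 131072      (2^17)
+//   RoundUpToNextValidARMImmediate(0x01000001) == 0x02000000  (2 * 2^24)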
+
+Pages wasm::ClampedMaxPages(IndexType t, Pages initialPages,
+ const Maybe<Pages>& sourceMaxPages,
+ bool useHugeMemory) {
+ Pages clampedMaxPages;
+
+ if (sourceMaxPages.isSome()) {
+ // There is a specified maximum; clamp it to the implementation limit on
+ // maximum pages.
+ clampedMaxPages = std::min(*sourceMaxPages, wasm::MaxMemoryPages(t));
+
+#ifndef JS_64BIT
+ static_assert(sizeof(uintptr_t) == 4, "assuming not 64 bit implies 32 bit");
+
+ // On 32-bit platforms, prevent applications that specify a large max (like
+ // MaxMemoryPages()) from unintentionally OOMing the browser: they just want
+ // "a lot of memory". Maintain the invariant that initialPages <=
+ // clampedMaxPages.
+ static const uint64_t OneGib = 1 << 30;
+ static const Pages OneGibPages = Pages(OneGib >> wasm::PageBits);
+ static_assert(HighestValidARMImmediate > OneGib,
+ "computing mapped size on ARM requires clamped max size");
+
+ Pages clampedPages = std::max(OneGibPages, initialPages);
+ clampedMaxPages = std::min(clampedPages, clampedMaxPages);
+#endif
+ } else {
+ // There is no specified maximum; fill it in with the implementation
+ // limit on maximum pages.
+ clampedMaxPages = wasm::MaxMemoryPages(t);
+ }
+
+ // Double-check our invariants
+ MOZ_RELEASE_ASSERT(sourceMaxPages.isNothing() ||
+ clampedMaxPages <= *sourceMaxPages);
+ MOZ_RELEASE_ASSERT(clampedMaxPages <= wasm::MaxMemoryPages(t));
+ MOZ_RELEASE_ASSERT(initialPages <= clampedMaxPages);
+
+ return clampedMaxPages;
+}
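+
+// Editor's note (illustrative, not part of the upstream patch): on a 32-bit
+// platform, a memory declared with initial 2 pages and maximum 65536 pages is
+// first clamped to min(65536, MaxMemoryPages()) = 32767 pages, and the 1GiB
+// clamp above then yields min(max(16384, 2), 32767) = 16384 pages.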
+
+size_t wasm::ComputeMappedSize(wasm::Pages clampedMaxPages) {
+ // Caller is responsible to ensure that clampedMaxPages has been clamped to
+ // implementation limits.
+ size_t maxSize = clampedMaxPages.byteLength();
+
+ // It is the bounds-check limit, not the mapped size, that gets baked into
+ // code. Thus round up the maxSize to the next valid immediate value
+ // *before* adding in the guard page.
+ //
+ // Also see "Wasm Linear Memory Structure" in vm/ArrayBufferObject.cpp.
+ uint64_t boundsCheckLimit = RoundUpToNextValidBoundsCheckImmediate(maxSize);
+ MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));
+
+ MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
+ return boundsCheckLimit + GuardSize;
+}
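+
+// Editor's note (illustrative, not part of the upstream patch): for a clamped
+// maximum of 300 pages (19,660,800 bytes), most platforms use that byte length
+// directly as the bounds check limit and map 19,660,800 + GuardSize bytes; on
+// ARM the length is first rounded up to the next valid immediate, 0x02000000
+// (32MiB), before the guard page is added.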
+
+bool wasm::IsValidBoundsCheckImmediate(uint32_t i) {
+#ifdef JS_CODEGEN_ARM
+ return IsValidARMImmediate(i);
+#else
+ return true;
+#endif
+}
+
+uint64_t wasm::RoundUpToNextValidBoundsCheckImmediate(uint64_t i) {
+#ifdef JS_CODEGEN_ARM
+ return RoundUpToNextValidARMImmediate(i);
+#else
+ return i;
+#endif
+}
diff --git a/js/src/wasm/WasmMemory.h b/js/src/wasm/WasmMemory.h
new file mode 100644
index 0000000000..ea2c61aa38
--- /dev/null
+++ b/js/src/wasm/WasmMemory.h
@@ -0,0 +1,226 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_memory_h
+#define wasm_memory_h
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+
+#include <stdint.h>
+
+#include "js/Value.h"
+#include "vm/NativeObject.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmValType.h"
+
+namespace js {
+namespace wasm {
+
+// Limits are parameterized by an IndexType which is used to index the
+// underlying resource (either a Memory or a Table). Tables are restricted to
+// I32, while memories may use I64 when memory64 is enabled.
+
+enum class IndexType : uint8_t { I32, I64 };
+
+inline ValType ToValType(IndexType it) {
+ return it == IndexType::I64 ? ValType::I64 : ValType::I32;
+}
+
+extern bool ToIndexType(JSContext* cx, HandleValue value, IndexType* indexType);
+
+extern const char* ToString(IndexType indexType);
+
+// Pages is a typed unit representing a multiple of wasm::PageSize. We
+// generally use pages as the unit of length when representing linear memory
+// lengths so as to avoid overflow when the specified initial or maximum pages
+// would overflow the native word size.
+//
+// Modules may specify pages up to 2^48 inclusive and so Pages is 64-bit on all
+// platforms.
+//
+// We represent byte lengths using the native word size, as it is assumed that
+// consumers of this API will only need byte lengths once it is time to
+// allocate memory, at which point the pages will be checked against the
+// implementation limits `MaxMemoryPages()` and will then be guaranteed to
+// fit in a native word.
+struct Pages {
+ private:
+ // Pages are specified by limit fields, which in general may be up to 2^48,
+ // so we must use uint64_t here.
+ uint64_t value_;
+
+ public:
+ constexpr Pages() : value_(0) {}
+ constexpr explicit Pages(uint64_t value) : value_(value) {}
+
+ // Get the wrapped page value. Only use this if you must; prefer to use or
+ // add new APIs on Pages.
+ uint64_t value() const { return value_; }
+
+ // Converts from a byte length to pages, assuming that the length is an
+ // exact multiple of the page size.
+ static Pages fromByteLengthExact(size_t byteLength) {
+ MOZ_ASSERT(byteLength % PageSize == 0);
+ return Pages(byteLength / PageSize);
+ }
+
+ // Return whether the page length can be converted to a byte length in the
+ // native word size without overflowing.
+ bool hasByteLength() const {
+ mozilla::CheckedInt<size_t> length(value_);
+ length *= PageSize;
+ return length.isValid();
+ }
+
+ // Converts from pages to a byte length in the native word size. Users must
+ // check for overflow (see hasByteLength()), or otherwise be assured that
+ // overflow cannot happen.
+ size_t byteLength() const {
+ mozilla::CheckedInt<size_t> length(value_);
+ length *= PageSize;
+ return length.value();
+ }
+
+ // Increment this pages by delta and return whether the resulting value
+ // did not overflow. If there is no overflow, then this is set to the
+ // resulting value.
+ bool checkedIncrement(Pages delta) {
+ mozilla::CheckedInt<uint64_t> newValue = value_;
+ newValue += delta.value_;
+ if (!newValue.isValid()) {
+ return false;
+ }
+ value_ = newValue.value();
+ return true;
+ }
+
+ // Implement pass-through comparison operators so that Pages can be compared.
+
+ bool operator==(Pages other) const { return value_ == other.value_; }
+ bool operator!=(Pages other) const { return value_ != other.value_; }
+ bool operator<=(Pages other) const { return value_ <= other.value_; }
+ bool operator<(Pages other) const { return value_ < other.value_; }
+ bool operator>=(Pages other) const { return value_ >= other.value_; }
+ bool operator>(Pages other) const { return value_ > other.value_; }
+};
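+
+// Editor's note: an illustrative usage sketch of the Pages type above; it is
+// not part of the upstream patch and GrowByPages is a hypothetical helper.
+// It shows the intended pattern: do arithmetic in page units, then check
+// hasByteLength() before converting to a native byte length.
+inline bool GrowByPages(Pages current, Pages delta, size_t* newByteLength) {
+ if (!current.checkedIncrement(delta)) {
+ return false;  // page count overflowed uint64_t
+ }
+ if (!current.hasByteLength()) {
+ return false;  // byte length would not fit in the native word size
+ }
+ *newByteLength = current.byteLength();  // exact multiple of PageSize
+ return true;
+}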
+
+// The largest number of pages the application can request.
+extern Pages MaxMemoryPages(IndexType t);
+
+// The byte value of MaxMemoryPages(t).
+static inline size_t MaxMemoryBytes(IndexType t) {
+ return MaxMemoryPages(t).byteLength();
+}
+
+// A value at least as large as MaxMemoryBytes(t) representing the largest valid
+// bounds check limit on the system. (It can be larger than MaxMemoryBytes()
+// because bounds check limits are rounded up to fit formal requirements on some
+// platforms. Also see ComputeMappedSize().)
+extern size_t MaxMemoryBoundsCheckLimit(IndexType t);
+
+static inline uint64_t MaxMemoryLimitField(IndexType indexType) {
+ return indexType == IndexType::I32 ? MaxMemory32LimitField
+ : MaxMemory64LimitField;
+}
+
+// Compute the 'clamped' maximum size of a memory. See
+// 'WASM Linear Memory structure' in ArrayBufferObject.cpp for background.
+extern Pages ClampedMaxPages(IndexType t, Pages initialPages,
+ const mozilla::Maybe<Pages>& sourceMaxPages,
+ bool useHugeMemory);
+
+// For a given WebAssembly/asm.js 'clamped' max pages, return the number of
+// bytes to map which will necessarily be a multiple of the system page size and
+// greater than clampedMaxPages in bytes. See "Wasm Linear Memory Structure" in
+// vm/ArrayBufferObject.cpp.
+extern size_t ComputeMappedSize(Pages clampedMaxPages);
+
+extern size_t GetMaxOffsetGuardLimit(bool hugeMemory);
+
+// Return whether the given immediate satisfies the constraints of the platform.
+extern bool IsValidBoundsCheckImmediate(uint32_t i);
+
+// Return whether the given immediate is valid on arm.
+extern bool IsValidARMImmediate(uint32_t i);
+
+// Return the next higher valid immediate that satisfies the constraints of the
+// platform.
+extern uint64_t RoundUpToNextValidBoundsCheckImmediate(uint64_t i);
+
+// Return the next higher valid immediate for arm.
+extern uint64_t RoundUpToNextValidARMImmediate(uint64_t i);
+
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+// On WASM_SUPPORTS_HUGE_MEMORY platforms, every asm.js or WebAssembly 32-bit
+// memory unconditionally allocates a huge region of virtual memory of size
+// wasm::HugeMappedSize. This allows all memory resizing to work without
+// reallocation and provides enough guard space for most offsets to be folded
+// into memory accesses. See "Linear memory addresses and bounds checking" in
+// wasm/WasmMemory.cpp for more information.
+
+// Reserve 4GiB to support any i32 index.
+static const uint64_t HugeIndexRange = uint64_t(UINT32_MAX) + 1;
+// Reserve 32MiB to support most offset immediates. Any immediate that is over
+// this will require a bounds check to be emitted. 32MiB was chosen to
+// generously cover the max offset immediate, 20MiB, found in a corpus of wasm
+// modules.
+static const uint64_t HugeOffsetGuardLimit = 1 << 25;
+// Reserve a wasm page (64KiB) to support slop on unaligned accesses.
+static const uint64_t HugeUnalignedGuardPage = PageSize;
+
+// Compute the total memory reservation.
+static const uint64_t HugeMappedSize =
+ HugeIndexRange + HugeOffsetGuardLimit + HugeUnalignedGuardPage;
+
+// Try to keep the memory reservation aligned to the wasm page size. This
+// ensures that it's aligned to the system page size.
+static_assert(HugeMappedSize % PageSize == 0);
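+
+// Editor's note (illustrative, not part of the upstream patch): with the
+// constants above, HugeMappedSize is 4GiB + 32MiB + 64KiB = 4,328,587,264
+// bytes of reserved virtual address space per 32-bit memory.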
+
+#endif
+
+// The size of the guard page for non huge-memories.
+static const size_t GuardSize = PageSize;
+
+// The size of the guard region that contains the NULL pointer. Reserve the
+// smallest page size found on typical hardware so that accesses near a NULL
+// pointer, e.g. operations on a structure's fields, are caught.
+static const size_t NullPtrGuardSize = 4096;
+
+// Check if a range of wasm memory is within bounds, specified as byte offset
+// and length (using 32-bit indices). Omits one check by converting from
+// uint32_t to uint64_t, at which point overflow cannot occur.
+static inline bool MemoryBoundsCheck(uint32_t offset, uint32_t len,
+ size_t memLen) {
+ uint64_t offsetLimit = uint64_t(offset) + uint64_t(len);
+ return offsetLimit <= memLen;
+}
+
+// Check if a range of wasm memory is within bounds, specified as byte offset
+// and length (using 64-bit indices).
+static inline bool MemoryBoundsCheck(uint64_t offset, uint64_t len,
+ size_t memLen) {
+ uint64_t offsetLimit = offset + len;
+ bool didOverflow = offsetLimit < offset;
+ bool tooLong = memLen < offsetLimit;
+ return !didOverflow && !tooLong;
+}
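+
+// Editor's note (illustrative, not part of the upstream patch): the explicit
+// overflow check matters only in the 64-bit form. For example, offset ==
+// UINT64_MAX and len == 2 wrap to offsetLimit == 1, which could otherwise pass
+// the length test; in the 32-bit form the widened sum is at most 2^33 - 2 and
+// cannot wrap.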
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_memory_h
diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
new file mode 100644
index 0000000000..b30c810e0f
--- /dev/null
+++ b/js/src/wasm/WasmModule.cpp
@@ -0,0 +1,1134 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmModule.h"
+
+#include <chrono>
+
+#include "jit/FlushICache.h" // for FlushExecutionContextForAllThreads
+#include "js/BuildId.h" // JS::BuildIdCharVector
+#include "js/experimental/TypedData.h" // JS_NewUint8Array
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Printf.h" // JS_smprintf
+#include "js/PropertyAndElement.h" // JS_DefineProperty, JS_DefinePropertyById
+#include "js/StreamConsumer.h"
+#include "threading/LockGuard.h"
+#include "threading/Thread.h"
+#include "vm/HelperThreadState.h" // Tier2GeneratorTask
+#include "vm/PlainObject.h" // js::PlainObject
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmUtility.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSAtom-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+static UniqueChars Tier2ResultsContext(const ScriptedCaller& scriptedCaller) {
+ return scriptedCaller.filename
+ ? JS_smprintf("%s:%d", scriptedCaller.filename.get(),
+ scriptedCaller.line)
+ : UniqueChars();
+}
+
+static void ReportTier2ResultsOffThread(bool success,
+ const ScriptedCaller& scriptedCaller,
+ const UniqueChars& error,
+ const UniqueCharsVector& warnings) {
+ // Get context to describe this tier-2 task.
+ UniqueChars context = Tier2ResultsContext(scriptedCaller);
+ const char* contextString = context ? context.get() : "unknown";
+
+ // Display the main error, if any.
+ if (!success) {
+ const char* errorString = error ? error.get() : "out of memory";
+ LogOffThread("'%s': wasm tier-2 failed with '%s'.\n", contextString,
+ errorString);
+ }
+
+ // Display warnings as a follow-up, avoiding spamming the console.
+ size_t numWarnings = std::min<size_t>(warnings.length(), 3);
+
+ for (size_t i = 0; i < numWarnings; i++) {
+ LogOffThread("'%s': wasm tier-2 warning: '%s'.\n", contextString,
+ warnings[i].get());
+ }
+ if (warnings.length() > numWarnings) {
+ LogOffThread("'%s': other warnings suppressed.\n", contextString);
+ }
+}
+
+class Module::Tier2GeneratorTaskImpl : public Tier2GeneratorTask {
+ SharedCompileArgs compileArgs_;
+ SharedBytes bytecode_;
+ SharedModule module_;
+ Atomic<bool> cancelled_;
+
+ public:
+ Tier2GeneratorTaskImpl(const CompileArgs& compileArgs,
+ const ShareableBytes& bytecode, Module& module)
+ : compileArgs_(&compileArgs),
+ bytecode_(&bytecode),
+ module_(&module),
+ cancelled_(false) {}
+
+ ~Tier2GeneratorTaskImpl() override {
+ module_->tier2Listener_ = nullptr;
+ module_->testingTier2Active_ = false;
+ }
+
+ void cancel() override { cancelled_ = true; }
+
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override {
+ {
+ AutoUnlockHelperThreadState unlock(locked);
+
+ // Compile tier-2 and report any warning/errors as long as it's not a
+ // cancellation. Encountering a warning/error during compilation and
+ // being cancelled may race with each other, but the only observable race
+ // should be being cancelled after a warning/error is set, and that's
+ // okay.
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ bool success = CompileTier2(*compileArgs_, bytecode_->bytes, *module_,
+ &error, &warnings, &cancelled_);
+ if (!cancelled_) {
+ // We could try to dispatch a runnable to the thread that started this
+ // compilation, so as to report the warning/error using a JSContext*.
+ // For now we just report to stderr.
+ ReportTier2ResultsOffThread(success, compileArgs_->scriptedCaller,
+ error, warnings);
+ }
+ }
+
+ // During shutdown the main thread will wait for any ongoing (cancelled)
+ // tier-2 generation to shut down normally. To do so, it waits on the
+ // HelperThreadState's condition variable for the count of finished
+ // generators to rise.
+ HelperThreadState().incWasmTier2GeneratorsFinished(locked);
+
+ // The task is finished, release it.
+ js_delete(this);
+ }
+
+ ThreadType threadType() override {
+ return ThreadType::THREAD_TYPE_WASM_GENERATOR_TIER2;
+ }
+};
+
+Module::~Module() {
+ // Note: Modules can be destroyed on any thread.
+ MOZ_ASSERT(!tier2Listener_);
+ MOZ_ASSERT(!testingTier2Active_);
+}
+
+void Module::startTier2(const CompileArgs& args, const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* listener) {
+ MOZ_ASSERT(!testingTier2Active_);
+
+ auto task = MakeUnique<Tier2GeneratorTaskImpl>(args, bytecode, *this);
+ if (!task) {
+ return;
+ }
+
+ // These will be cleared asynchronously by ~Tier2GeneratorTaskImpl() if not
+ // sooner by finishTier2().
+ tier2Listener_ = listener;
+ testingTier2Active_ = true;
+
+ StartOffThreadWasmTier2Generator(std::move(task));
+}
+
+bool Module::finishTier2(const LinkData& linkData2,
+ UniqueCodeTier code2) const {
+ MOZ_ASSERT(code().bestTier() == Tier::Baseline &&
+ code2->tier() == Tier::Optimized);
+
+ // Install the data in the data structures. They will not be visible
+ // until commitTier2().
+
+ const CodeTier* borrowedTier2;
+ if (!code().setAndBorrowTier2(std::move(code2), linkData2, &borrowedTier2)) {
+ return false;
+ }
+
+ // Before we can make tier-2 live, we need to compile tier2 versions of any
+ // extant tier1 lazy stubs (otherwise, tiering would break the assumption
+ // that any extant exported wasm function has had a lazy entry stub already
+ // compiled for it).
+ //
+ // Also see doc block for stubs in WasmJS.cpp.
+ {
+ // We need to prevent generation of new tier-1 stubs until we've committed
+ // the new tier-2 stubs; otherwise an entry stub created for tier-1 in the
+ // meantime might never get a matching tier-2 stub.
+
+ const MetadataTier& metadataTier1 = metadata(Tier::Baseline);
+
+ auto stubs1 = code().codeTier(Tier::Baseline).lazyStubs().readLock();
+ auto stubs2 = borrowedTier2->lazyStubs().writeLock();
+
+ MOZ_ASSERT(stubs2->entryStubsEmpty());
+
+ Uint32Vector funcExportIndices;
+ for (size_t i = 0; i < metadataTier1.funcExports.length(); i++) {
+ const FuncExport& fe = metadataTier1.funcExports[i];
+ if (fe.hasEagerStubs()) {
+ continue;
+ }
+ if (!stubs1->hasEntryStub(fe.funcIndex())) {
+ continue;
+ }
+ if (!funcExportIndices.emplaceBack(i)) {
+ return false;
+ }
+ }
+
+ Maybe<size_t> stub2Index;
+ if (!stubs2->createTier2(funcExportIndices, metadata(), *borrowedTier2,
+ &stub2Index)) {
+ return false;
+ }
+
+ // Initializing the code above will have flushed the icache for all cores.
+ // However, there could still be stale data in the execution pipeline of
+ // other cores on some platforms. Force an execution context flush on all
+ // threads to fix this before we commit the code.
+ //
+ // This is safe due to the check in `PlatformCanTier` in WasmCompile.cpp
+ jit::FlushExecutionContextForAllThreads();
+
+ // Now that we can't fail or otherwise abort tier2, make it live.
+
+ MOZ_ASSERT(!code().hasTier2());
+ code().commitTier2();
+
+ stubs2->setJitEntries(stub2Index, code());
+ }
+
+ // And we update the jump vectors with pointers to tier-2 functions and eager
+ // stubs. Callers will continue to invoke tier-1 code until, at some point,
+ // they start invoking tier-2 code instead. This is benign.
+
+ uint8_t* base = code().segment(Tier::Optimized).base();
+ for (const CodeRange& cr : metadata(Tier::Optimized).codeRanges) {
+ // These are racy writes that we just want to be visible, atomically,
+ // eventually. All hardware we care about will do this right. But
+ // we depend on the compiler not splitting the stores hidden inside the
+ // set*Entry functions.
+ if (cr.isFunction()) {
+ code().setTieringEntry(cr.funcIndex(), base + cr.funcTierEntry());
+ } else if (cr.isJitEntry()) {
+ code().setJitEntry(cr.funcIndex(), base + cr.begin());
+ }
+ }
+
+ // Tier-2 is done; let everyone know. For testing purposes, tier-2 remains
+ // marked active until the optimized encoding has been stored below, so that
+ // wasmHasTier2CompilationCompleted() only returns true after tier-2 has been
+ // fully cached.
+
+ if (tier2Listener_) {
+ Bytes bytes;
+ if (serialize(linkData2, &bytes)) {
+ tier2Listener_->storeOptimizedEncoding(bytes.begin(), bytes.length());
+ }
+ tier2Listener_ = nullptr;
+ }
+ testingTier2Active_ = false;
+
+ return true;
+}
+
+void Module::testingBlockOnTier2Complete() const {
+ while (testingTier2Active_) {
+ ThisThread::SleepMilliseconds(1);
+ }
+}
+
+/* virtual */
+JSObject* Module::createObject(JSContext* cx) const {
+ if (!GlobalObject::ensureConstructor(cx, cx->global(), JSProto_WebAssembly)) {
+ return nullptr;
+ }
+
+ if (!cx->isRuntimeCodeGenEnabled(JS::RuntimeCode::WASM, nullptr)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_CSP_BLOCKED_WASM, "WebAssembly.Module");
+ return nullptr;
+ }
+
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmModule));
+ return WasmModuleObject::create(cx, *this, proto);
+}
+
+/* virtual */
+JSObject* Module::createObjectForAsmJS(JSContext* cx) const {
+ // Use nullptr to get the default object prototype. These objects are never
+ // exposed to script for asm.js.
+ return WasmModuleObject::create(cx, *this, nullptr);
+}
+
+bool wasm::GetOptimizedEncodingBuildId(JS::BuildIdCharVector* buildId) {
+ // From a JS API perspective, the "build id" covers everything that can
+ // cause machine code to become invalid, so include both the actual build-id
+ // and cpu-id.
+
+ if (!GetBuildId || !GetBuildId(buildId)) {
+ return false;
+ }
+
+ uint32_t cpu = ObservedCPUFeatures();
+
+ if (!buildId->reserve(buildId->length() +
+ 13 /* "()" + 8 nibbles + "m[+-][+-]" */)) {
+ return false;
+ }
+
+ buildId->infallibleAppend('(');
+ while (cpu) {
+ buildId->infallibleAppend('0' + (cpu & 0xf));
+ cpu >>= 4;
+ }
+ buildId->infallibleAppend(')');
+
+ buildId->infallibleAppend('m');
+ buildId->infallibleAppend(wasm::IsHugeMemoryEnabled(IndexType::I32) ? '+'
+ : '-');
+ buildId->infallibleAppend(wasm::IsHugeMemoryEnabled(IndexType::I64) ? '+'
+ : '-');
+
+ return true;
+}
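+
+// Editor's note (illustrative, not part of the upstream patch): for
+// ObservedCPUFeatures() == 0x1234 the loop above appends "(4321)" (nibbles in
+// low-to-high order, each encoded as '0' + nibble), followed by 'm' and a '+'
+// or '-' for huge-memory support of 32-bit and then 64-bit memories.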
+
+/* virtual */
+void Module::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const {
+ code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+ *data += mallocSizeOf(this) +
+ SizeOfVectorExcludingThis(imports_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(exports_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(dataSegments_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(elemSegments_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(customSections_, mallocSizeOf);
+}
+
+// Extracts the machine code as a JS object. The result has a "code" property
+// (a Uint8Array) and a "segments" property (an array of objects). Each object
+// contains offsets into the "code" array and basic information about a code
+// segment/function body.
+bool Module::extractCode(JSContext* cx, Tier tier,
+ MutableHandleValue vp) const {
+ Rooted<PlainObject*> result(cx, NewPlainObject(cx));
+ if (!result) {
+ return false;
+ }
+
+ // This function is only used for testing purposes so we can simply
+ // block on tiered compilation to complete.
+ testingBlockOnTier2Complete();
+
+ if (!code_->hasTier(tier)) {
+ vp.setNull();
+ return true;
+ }
+
+ const ModuleSegment& moduleSegment = code_->segment(tier);
+ RootedObject code(cx, JS_NewUint8Array(cx, moduleSegment.length()));
+ if (!code) {
+ return false;
+ }
+
+ memcpy(code->as<TypedArrayObject>().dataPointerUnshared(),
+ moduleSegment.base(), moduleSegment.length());
+
+ RootedValue value(cx, ObjectValue(*code));
+ if (!JS_DefineProperty(cx, result, "code", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ RootedObject segments(cx, NewDenseEmptyArray(cx));
+ if (!segments) {
+ return false;
+ }
+
+ for (const CodeRange& p : metadata(tier).codeRanges) {
+ RootedObject segment(cx, NewPlainObjectWithProto(cx, nullptr));
+ if (!segment) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.begin());
+ if (!JS_DefineProperty(cx, segment, "begin", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.end());
+ if (!JS_DefineProperty(cx, segment, "end", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.kind());
+ if (!JS_DefineProperty(cx, segment, "kind", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ if (p.isFunction()) {
+ value.setNumber((uint32_t)p.funcIndex());
+ if (!JS_DefineProperty(cx, segment, "funcIndex", value,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.funcUncheckedCallEntry());
+ if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.end());
+ if (!JS_DefineProperty(cx, segment, "funcBodyEnd", value,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+ }
+
+ if (!NewbornArrayPush(cx, segments, ObjectValue(*segment))) {
+ return false;
+ }
+ }
+
+ value.setObject(*segments);
+ if (!JS_DefineProperty(cx, result, "segments", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ vp.setObject(*result);
+ return true;
+}
+
+#ifdef DEBUG
+static bool AllSegmentsArePassive(const DataSegmentVector& vec) {
+ for (const DataSegment* seg : vec) {
+ if (seg->active()) {
+ return false;
+ }
+ }
+ return true;
+}
+#endif
+
+bool Module::initSegments(JSContext* cx,
+ Handle<WasmInstanceObject*> instanceObj,
+ Handle<WasmMemoryObject*> memoryObj) const {
+ MOZ_ASSERT_IF(!memoryObj, AllSegmentsArePassive(dataSegments_));
+
+ Instance& instance = instanceObj->instance();
+ const SharedTableVector& tables = instance.tables();
+
+ // Write data/elem segments into memories/tables.
+
+ for (const ElemSegment* seg : elemSegments_) {
+ if (seg->active()) {
+ RootedVal offsetVal(cx);
+ if (!seg->offset().evaluate(cx, instanceObj, &offsetVal)) {
+ return false; // OOM
+ }
+ uint32_t offset = offsetVal.get().i32();
+ uint32_t count = seg->length();
+
+ uint32_t tableLength = tables[seg->tableIndex]->length();
+ if (offset > tableLength || tableLength - offset < count) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return false;
+ }
+
+ if (!instance.initElems(seg->tableIndex, *seg, offset, 0, count)) {
+ return false; // OOM
+ }
+ }
+ }
+
+ if (memoryObj) {
+ size_t memoryLength = memoryObj->volatileMemoryLength();
+ uint8_t* memoryBase =
+ memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
+
+ for (const DataSegment* seg : dataSegments_) {
+ if (!seg->active()) {
+ continue;
+ }
+
+ RootedVal offsetVal(cx);
+ if (!seg->offset().evaluate(cx, instanceObj, &offsetVal)) {
+ return false; // OOM
+ }
+ uint64_t offset = memoryObj->indexType() == IndexType::I32
+ ? offsetVal.get().i32()
+ : offsetVal.get().i64();
+ uint32_t count = seg->bytes.length();
+
+ if (offset > memoryLength || memoryLength - offset < count) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return false;
+ }
+ memcpy(memoryBase + uintptr_t(offset), seg->bytes.begin(), count);
+ }
+ }
+
+ return true;
+}
+
+static const Import& FindImportFunction(const ImportVector& imports,
+ uint32_t funcImportIndex) {
+ for (const Import& import : imports) {
+ if (import.kind != DefinitionKind::Function) {
+ continue;
+ }
+ if (funcImportIndex == 0) {
+ return import;
+ }
+ funcImportIndex--;
+ }
+ MOZ_CRASH("ran out of imports");
+}
+
+bool Module::instantiateFunctions(JSContext* cx,
+ const JSObjectVector& funcImports) const {
+#ifdef DEBUG
+ for (auto t : code().tiers()) {
+ MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
+ }
+#endif
+
+ if (metadata().isAsmJS()) {
+ return true;
+ }
+
+ Tier tier = code().stableTier();
+
+ for (size_t i = 0; i < metadata(tier).funcImports.length(); i++) {
+ if (!funcImports[i]->is<JSFunction>()) {
+ continue;
+ }
+
+ JSFunction* f = &funcImports[i]->as<JSFunction>();
+ if (!IsWasmExportedFunction(f)) {
+ continue;
+ }
+
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(f);
+ Instance& instance = ExportedFunctionToInstance(f);
+ Tier otherTier = instance.code().stableTier();
+
+ const FuncType& exportFuncType = instance.metadata().getFuncExportType(
+ instance.metadata(otherTier).lookupFuncExport(funcIndex));
+ const FuncType& importFuncType =
+ metadata().getFuncImportType(metadata(tier).funcImports[i]);
+
+ if (!FuncType::strictlyEquals(exportFuncType, importFuncType)) {
+ const Import& import = FindImportFunction(imports_, i);
+ UniqueChars importModuleName = import.module.toQuotedString(cx);
+ UniqueChars importFieldName = import.field.toQuotedString(cx);
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_SIG,
+ importModuleName.get(), importFieldName.get());
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename T>
+static bool CheckLimits(JSContext* cx, T declaredMin,
+ const Maybe<T>& declaredMax, T defaultMax,
+ T actualLength, const Maybe<T>& actualMax, bool isAsmJS,
+ const char* kind) {
+ if (isAsmJS) {
+ MOZ_ASSERT(actualLength >= declaredMin);
+ MOZ_ASSERT(!declaredMax);
+ MOZ_ASSERT(actualLength == actualMax.value());
+ return true;
+ }
+
+ if (actualLength < declaredMin ||
+ actualLength > declaredMax.valueOr(defaultMax)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMP_SIZE, kind);
+ return false;
+ }
+
+ if ((actualMax && declaredMax && *actualMax > *declaredMax) ||
+ (!actualMax && declaredMax)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMP_MAX, kind);
+ return false;
+ }
+
+ return true;
+}
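+
+// Editor's note (illustrative, not part of the upstream patch): importing a
+// memory with 4 current pages and a maximum of 10 into a module that declares
+// (min 2, max 8) passes the size check (2 <= 4 <= 8) but fails the maximum
+// check, since 10 > 8; an import that has no maximum at all likewise fails
+// when the module declares one.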
+
+static bool CheckSharing(JSContext* cx, bool declaredShared, bool isShared) {
+ if (isShared &&
+ !cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_SHMEM_LINK);
+ return false;
+ }
+
+ if (declaredShared && !isShared) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_IMP_SHARED_REQD);
+ return false;
+ }
+
+ if (!declaredShared && isShared) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_IMP_SHARED_BANNED);
+ return false;
+ }
+
+ return true;
+}
+
+// asm.js module instantiation supplies its own buffer, but for wasm, create and
+// initialize the buffer if one is requested. Either way, the buffer is wrapped
+// in a WebAssembly.Memory object which is what the Instance stores.
+bool Module::instantiateMemory(JSContext* cx,
+ MutableHandle<WasmMemoryObject*> memory) const {
+ if (!metadata().usesMemory()) {
+ MOZ_ASSERT(!memory);
+ MOZ_ASSERT(AllSegmentsArePassive(dataSegments_));
+ return true;
+ }
+
+ MemoryDesc desc = *metadata().memory;
+ if (memory) {
+ MOZ_ASSERT_IF(metadata().isAsmJS(), memory->buffer().isPreparedForAsmJS());
+ MOZ_ASSERT_IF(!metadata().isAsmJS(), memory->buffer().isWasm());
+
+ if (memory->indexType() != desc.indexType()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMP_INDEX,
+ ToString(memory->indexType()));
+ return false;
+ }
+
+ if (!CheckLimits(cx, desc.initialPages(), desc.maximumPages(),
+ /* defaultMax */ MaxMemoryPages(desc.indexType()),
+ /* actualLength */
+ memory->volatilePages(), memory->sourceMaxPages(),
+ metadata().isAsmJS(), "Memory")) {
+ return false;
+ }
+
+ if (!CheckSharing(cx, desc.isShared(), memory->isShared())) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(!metadata().isAsmJS());
+
+ if (desc.initialPages() > MaxMemoryPages(desc.indexType())) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MEM_IMP_LIMIT);
+ return false;
+ }
+
+ RootedArrayBufferObjectMaybeShared buffer(cx);
+ if (!CreateWasmBuffer(cx, desc, &buffer)) {
+ return false;
+ }
+
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmMemory));
+ memory.set(WasmMemoryObject::create(
+ cx, buffer, IsHugeMemoryEnabled(desc.indexType()), proto));
+ if (!memory) {
+ return false;
+ }
+ }
+
+ MOZ_RELEASE_ASSERT(memory->isHuge() == metadata().omitsBoundsChecks);
+
+ return true;
+}
+
+bool Module::instantiateTags(JSContext* cx,
+ WasmTagObjectVector& tagObjs) const {
+ size_t tagLength = metadata().tags.length();
+ if (tagLength == 0) {
+ return true;
+ }
+ size_t importedTagsLength = tagObjs.length();
+ if (tagObjs.length() <= tagLength && !tagObjs.resize(tagLength)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ uint32_t tagIndex = 0;
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmTag));
+ for (const TagDesc& desc : metadata().tags) {
+ if (tagIndex >= importedTagsLength) {
+ Rooted<WasmTagObject*> tagObj(
+ cx, WasmTagObject::create(cx, desc.type, proto));
+ if (!tagObj) {
+ return false;
+ }
+ tagObjs[tagIndex] = tagObj;
+ }
+ tagIndex++;
+ }
+ return true;
+}
+
+bool Module::instantiateImportedTable(JSContext* cx, const TableDesc& td,
+ Handle<WasmTableObject*> tableObj,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const {
+ MOZ_ASSERT(tableObj);
+ MOZ_ASSERT(!metadata().isAsmJS());
+
+ Table& table = tableObj->table();
+ if (!CheckLimits(cx, td.initialLength, td.maximumLength,
+                   /* defaultMax */ MaxTableLimitField,
+ /* actualLength */ table.length(), table.maximum(),
+ metadata().isAsmJS(), "Table")) {
+ return false;
+ }
+
+ if (!tables->append(&table)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!tableObjs->append(tableObj)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool Module::instantiateLocalTable(JSContext* cx, const TableDesc& td,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const {
+ if (td.initialLength > MaxTableLength) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_TABLE_IMP_LIMIT);
+ return false;
+ }
+
+ SharedTable table;
+ Rooted<WasmTableObject*> tableObj(cx);
+ if (td.isExported) {
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmTable));
+ tableObj.set(WasmTableObject::create(cx, td.initialLength, td.maximumLength,
+ td.elemType, proto));
+ if (!tableObj) {
+ return false;
+ }
+ table = &tableObj->table();
+ } else {
+ table = Table::create(cx, td, /* Handle<WasmTableObject*> = */ nullptr);
+ if (!table) {
+ return false;
+ }
+ }
+
+  // Note that we append a null pointer for non-exported local tables.
+ if (!tableObjs->append(tableObj.get())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!tables->emplaceBack(table)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool Module::instantiateTables(JSContext* cx,
+ const WasmTableObjectVector& tableImports,
+ MutableHandle<WasmTableObjectVector> tableObjs,
+ SharedTableVector* tables) const {
+ uint32_t tableIndex = 0;
+ for (const TableDesc& td : metadata().tables) {
+ if (tableIndex < tableImports.length()) {
+ Rooted<WasmTableObject*> tableObj(cx, tableImports[tableIndex]);
+ if (!instantiateImportedTable(cx, td, tableObj, &tableObjs.get(),
+ tables)) {
+ return false;
+ }
+ } else {
+ if (!instantiateLocalTable(cx, td, &tableObjs.get(), tables)) {
+ return false;
+ }
+ }
+ tableIndex++;
+ }
+ return true;
+}
+
+static bool EnsureExportedGlobalObject(JSContext* cx,
+ const ValVector& globalImportValues,
+ size_t globalIndex,
+ const GlobalDesc& global,
+ WasmGlobalObjectVector& globalObjs) {
+ if (globalIndex < globalObjs.length() && globalObjs[globalIndex]) {
+ return true;
+ }
+
+ RootedVal val(cx);
+ if (global.kind() == GlobalKind::Import) {
+ // If this is an import, then this must be a constant global that was
+ // provided without a global object. We must initialize it with the
+ // provided value while we still can differentiate this case.
+ MOZ_ASSERT(!global.isMutable());
+ val.set(Val(globalImportValues[globalIndex]));
+ } else {
+ // If this is not an import, then the initial value will be set by
+ // Instance::init() for indirect globals or else by CreateExportObject().
+ // In either case, we initialize with a default value here.
+ val.set(Val(global.type()));
+ }
+
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmGlobal));
+ Rooted<WasmGlobalObject*> go(
+ cx, WasmGlobalObject::create(cx, val, global.isMutable(), proto));
+ if (!go) {
+ return false;
+ }
+
+ if (globalObjs.length() <= globalIndex &&
+ !globalObjs.resize(globalIndex + 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ globalObjs[globalIndex] = go;
+ return true;
+}
+
+bool Module::instantiateGlobals(JSContext* cx,
+ const ValVector& globalImportValues,
+ WasmGlobalObjectVector& globalObjs) const {
+  // If there are exported globals that aren't in globalObjs - because they
+  // originate in this module or because they were immutable imports that came
+  // in as primitive values - then we must create cells for them in globalObjs
+  // here, as WasmInstanceObject::create() and CreateExportObject() will need
+  // the cells to exist.
+
+ const GlobalDescVector& globals = metadata().globals;
+
+ for (const Export& exp : exports_) {
+ if (exp.kind() != DefinitionKind::Global) {
+ continue;
+ }
+ unsigned globalIndex = exp.globalIndex();
+ const GlobalDesc& global = globals[globalIndex];
+ if (!EnsureExportedGlobalObject(cx, globalImportValues, globalIndex, global,
+ globalObjs)) {
+ return false;
+ }
+ }
+
+ // Imported globals that are not re-exported may also have received only a
+ // primitive value; these globals are always immutable. Assert that we do
+ // not need to create any additional Global objects for such imports.
+
+#ifdef DEBUG
+ size_t numGlobalImports = 0;
+ for (const Import& import : imports_) {
+ if (import.kind != DefinitionKind::Global) {
+ continue;
+ }
+ size_t globalIndex = numGlobalImports++;
+ const GlobalDesc& global = globals[globalIndex];
+ MOZ_ASSERT(global.importIndex() == globalIndex);
+ MOZ_ASSERT_IF(global.isIndirect(),
+ globalIndex < globalObjs.length() || globalObjs[globalIndex]);
+ }
+ MOZ_ASSERT_IF(!metadata().isAsmJS(),
+ numGlobalImports == globals.length() ||
+ !globals[numGlobalImports].isImport());
+#endif
+ return true;
+}
+
+static bool GetFunctionExport(JSContext* cx,
+ Handle<WasmInstanceObject*> instanceObj,
+ const JSObjectVector& funcImports,
+ uint32_t funcIndex, MutableHandleFunction func) {
+ if (funcIndex < funcImports.length() &&
+ funcImports[funcIndex]->is<JSFunction>()) {
+ JSFunction* f = &funcImports[funcIndex]->as<JSFunction>();
+ if (IsWasmExportedFunction(f)) {
+ func.set(f);
+ return true;
+ }
+ }
+
+ return instanceObj->getExportedFunction(cx, instanceObj, funcIndex, func);
+}
+
+static bool GetGlobalExport(JSContext* cx,
+ Handle<WasmInstanceObject*> instanceObj,
+ const JSObjectVector& funcImports,
+ const GlobalDesc& global, uint32_t globalIndex,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ MutableHandleValue val) {
+ // A global object for this index is guaranteed to exist by
+ // instantiateGlobals.
+ Rooted<WasmGlobalObject*> globalObj(cx, globalObjs[globalIndex]);
+ val.setObject(*globalObj);
+
+  // We are responsible for setting the initial value of the global object here
+  // if it's not imported or indirect. Imported global objects have their
+  // initial value set by their defining module, or by
+  // EnsureExportedGlobalObject when a constant value is provided as an import.
+  // Indirect exported globals that are not imported are initialized in
+  // Instance::init.
+ if (global.isIndirect() || global.isImport()) {
+ return true;
+ }
+
+ // This must be an exported immutable global defined in this module. The
+ // instance either has compiled the value into the code or has its own copy
+ // in its global data area. Either way, we must initialize the global object
+ // with the same initial value.
+ MOZ_ASSERT(!global.isMutable());
+ MOZ_ASSERT(!global.isImport());
+ RootedVal globalVal(cx);
+ MOZ_RELEASE_ASSERT(!global.isImport());
+ const InitExpr& init = global.initExpr();
+ if (!init.evaluate(cx, instanceObj, &globalVal)) {
+ return false;
+ }
+ globalObj->val() = globalVal;
+ return true;
+}
+
+static bool CreateExportObject(
+ JSContext* cx, Handle<WasmInstanceObject*> instanceObj,
+ const JSObjectVector& funcImports, const WasmTableObjectVector& tableObjs,
+ Handle<WasmMemoryObject*> memoryObj, const WasmTagObjectVector& tagObjs,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs, const ExportVector& exports) {
+ const Instance& instance = instanceObj->instance();
+ const Metadata& metadata = instance.metadata();
+ const GlobalDescVector& globals = metadata.globals;
+
+ if (metadata.isAsmJS() && exports.length() == 1 &&
+ exports[0].fieldName().isEmpty()) {
+ RootedFunction func(cx);
+ if (!GetFunctionExport(cx, instanceObj, funcImports, exports[0].funcIndex(),
+ &func)) {
+ return false;
+ }
+ instanceObj->initExportsObj(*func.get());
+ return true;
+ }
+
+ RootedObject exportObj(cx);
+ uint8_t propertyAttr = JSPROP_ENUMERATE;
+
+ if (metadata.isAsmJS()) {
+ exportObj = NewPlainObject(cx);
+ } else {
+ exportObj = NewPlainObjectWithProto(cx, nullptr);
+ propertyAttr |= JSPROP_READONLY | JSPROP_PERMANENT;
+ }
+ if (!exportObj) {
+ return false;
+ }
+
+ for (const Export& exp : exports) {
+ JSAtom* atom = exp.fieldName().toAtom(cx);
+ if (!atom) {
+ return false;
+ }
+
+ RootedId id(cx, AtomToId(atom));
+ RootedValue val(cx);
+ switch (exp.kind()) {
+ case DefinitionKind::Function: {
+ RootedFunction func(cx);
+ if (!GetFunctionExport(cx, instanceObj, funcImports, exp.funcIndex(),
+ &func)) {
+ return false;
+ }
+ val = ObjectValue(*func);
+ break;
+ }
+ case DefinitionKind::Table: {
+ val = ObjectValue(*tableObjs[exp.tableIndex()]);
+ break;
+ }
+ case DefinitionKind::Memory: {
+ val = ObjectValue(*memoryObj);
+ break;
+ }
+ case DefinitionKind::Global: {
+ const GlobalDesc& global = globals[exp.globalIndex()];
+ if (!GetGlobalExport(cx, instanceObj, funcImports, global,
+ exp.globalIndex(), globalImportValues, globalObjs,
+ &val)) {
+ return false;
+ }
+ break;
+ }
+ case DefinitionKind::Tag: {
+ val = ObjectValue(*tagObjs[exp.tagIndex()]);
+ break;
+ }
+ }
+
+ if (!JS_DefinePropertyById(cx, exportObj, id, val, propertyAttr)) {
+ return false;
+ }
+ }
+
+ if (!metadata.isAsmJS()) {
+ if (!PreventExtensions(cx, exportObj)) {
+ return false;
+ }
+ }
+
+ instanceObj->initExportsObj(*exportObj);
+ return true;
+}
+
+bool Module::instantiate(JSContext* cx, ImportValues& imports,
+ HandleObject instanceProto,
+ MutableHandle<WasmInstanceObject*> instance) const {
+ MOZ_RELEASE_ASSERT(cx->wasm().haveSignalHandlers);
+
+ if (!instantiateFunctions(cx, imports.funcs)) {
+ return false;
+ }
+
+ Rooted<WasmMemoryObject*> memory(cx, imports.memory);
+ if (!instantiateMemory(cx, &memory)) {
+ return false;
+ }
+
+  // Note that the following will extend imports.tagObjs with tag objects for
+  // the local (non-imported) tags of the module, so that every tag declared or
+  // imported by the module ends up with a corresponding WasmTagObject.
+
+ if (!instantiateTags(cx, imports.tagObjs)) {
+ return false;
+ }
+
+ // Note that tableObjs is sparse: it will be null in slots that contain
+ // tables that are neither exported nor imported.
+
+ Rooted<WasmTableObjectVector> tableObjs(cx);
+ SharedTableVector tables;
+ if (!instantiateTables(cx, imports.tables, &tableObjs, &tables)) {
+ return false;
+ }
+
+ if (!instantiateGlobals(cx, imports.globalValues, imports.globalObjs)) {
+ return false;
+ }
+
+ UniqueDebugState maybeDebug;
+ if (metadata().debugEnabled) {
+ maybeDebug = cx->make_unique<DebugState>(*code_, *this);
+ if (!maybeDebug) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ instance.set(WasmInstanceObject::create(
+ cx, code_, dataSegments_, elemSegments_, metadata().instanceDataLength,
+ memory, std::move(tables), imports.funcs, metadata().globals,
+ imports.globalValues, imports.globalObjs, imports.tagObjs, instanceProto,
+ std::move(maybeDebug)));
+ if (!instance) {
+ return false;
+ }
+
+ if (!CreateExportObject(cx, instance, imports.funcs, tableObjs.get(), memory,
+ imports.tagObjs, imports.globalValues,
+ imports.globalObjs, exports_)) {
+ return false;
+ }
+
+ // Register the instance with the Realm so that it can find out about global
+ // events like profiling being enabled in the realm. Registration does not
+ // require a fully-initialized instance and must precede initSegments as the
+ // final pre-requisite for a live instance.
+
+ if (!cx->realm()->wasm.registerInstance(cx, instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Perform initialization as the final step after the instance is fully
+ // constructed since this can make the instance live to content (even if the
+ // start function fails).
+
+ if (!initSegments(cx, instance, memory)) {
+ return false;
+ }
+
+  // Now that the instance is fully live and initialized, call the start
+  // function. Note that failure may cause instantiation to throw, but the
+  // instance may still be live via edges created by initSegments or the start
+  // function.
+
+ if (metadata().startFuncIndex) {
+ FixedInvokeArgs<0> args(cx);
+ if (!instance->instance().callExport(cx, *metadata().startFuncIndex,
+ args)) {
+ return false;
+ }
+ }
+
+ JSUseCounter useCounter =
+ metadata().isAsmJS() ? JSUseCounter::ASMJS : JSUseCounter::WASM;
+ cx->runtime()->setUseCounter(instance, useCounter);
+
+ if (cx->options().testWasmAwaitTier2()) {
+ testingBlockOnTier2Complete();
+ }
+
+ return true;
+}
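+
+// Illustrative sketch of a caller driving Module::instantiate(). This is not
+// part of the module itself: the import-harvesting step is elided and the
+// helper name InstantiateWithImports is invented for the example.
+//
+//   static bool InstantiateWithImports(
+//       JSContext* cx, const Module& module,
+//       MutableHandle<WasmInstanceObject*> instanceObj) {
+//     Rooted<ImportValues> imports(cx);
+//     // ... populate imports.funcs / memory / tables / globalValues /
+//     //     globalObjs / tagObjs from the JS import object here ...
+//     RootedObject proto(cx,
+//                        &cx->global()->getPrototype(JSProto_WasmInstance));
+//     return module.instantiate(cx, imports.get(), proto, instanceObj);
+//   }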
diff --git a/js/src/wasm/WasmModule.h b/js/src/wasm/WasmModule.h
new file mode 100644
index 0000000000..e070f749c9
--- /dev/null
+++ b/js/src/wasm/WasmModule.h
@@ -0,0 +1,225 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_module_h
+#define wasm_module_h
+
+#include "js/WasmModule.h"
+#include "js/BuildId.h"
+
+#include "wasm/WasmCode.h"
+#include "wasm/WasmException.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmTable.h"
+
+namespace JS {
+class OptimizedEncodingListener;
+}
+
+namespace js {
+namespace wasm {
+
+struct CompileArgs;
+
+// In the context of wasm, the OptimizedEncodingListener specifically is
+// listening for the completion of tier-2.
+
+using Tier2Listener = RefPtr<JS::OptimizedEncodingListener>;
+
+// A struct containing the typed, imported values that are harvested from the
+// import object and passed to Module::instantiate(). This struct must be
+// stored in a (Persistent)Rooted, not in the heap due to its use of TraceRoot()
+// and complete lack of barriers.
+
+struct ImportValues {
+ JSObjectVector funcs;
+ WasmTableObjectVector tables;
+ WasmMemoryObject* memory;
+ WasmTagObjectVector tagObjs;
+ WasmGlobalObjectVector globalObjs;
+ ValVector globalValues;
+
+ ImportValues() : memory(nullptr) {}
+
+ void trace(JSTracer* trc) {
+ funcs.trace(trc);
+ tables.trace(trc);
+ if (memory) {
+ TraceRoot(trc, &memory, "import values memory");
+ }
+ tagObjs.trace(trc);
+ globalObjs.trace(trc);
+ globalValues.trace(trc);
+ }
+};
+
+// Module represents a compiled wasm module and primarily provides three
+// operations: instantiation, tiered compilation, and serialization. A Module
+// can be instantiated any number of times to produce new Instance objects. A
+// Module can have a single tier-2 task initiated to augment its code with a
+// higher tier. A Module can have its optimized code serialized at any point
+// where the LinkData is also available, which is primarily (1) at the end of
+// module generation and (2) at the end of tier-2 compilation.
+//
+// Fully linked-and-instantiated code (represented by SharedCode and its owned
+// ModuleSegment) can be shared between instances.
+
+class Module : public JS::WasmModule {
+ const SharedCode code_;
+ const ImportVector imports_;
+ const ExportVector exports_;
+ const DataSegmentVector dataSegments_;
+ const ElemSegmentVector elemSegments_;
+ const CustomSectionVector customSections_;
+
+ // This field is only meaningful when code_->metadata().debugEnabled.
+
+ const SharedBytes debugBytecode_;
+
+ // This field is set during tier-2 compilation and cleared on success or
+ // failure. These happen on different threads and are serialized by the
+ // control flow of helper tasks.
+
+ mutable Tier2Listener tier2Listener_;
+
+ // This flag is used for logging (and testing) purposes to indicate
+ // whether the module was deserialized (from a cache).
+
+ const bool loggingDeserialized_;
+
+ // This flag is only used for testing purposes and is cleared on success or
+ // failure. The field is racily polled from various threads.
+
+ mutable Atomic<bool> testingTier2Active_;
+
+ // Cached malloc allocation size for GC memory tracking.
+
+ size_t gcMallocBytesExcludingCode_;
+
+ bool instantiateFunctions(JSContext* cx,
+ const JSObjectVector& funcImports) const;
+ bool instantiateMemory(JSContext* cx,
+ MutableHandle<WasmMemoryObject*> memory) const;
+ bool instantiateTags(JSContext* cx, WasmTagObjectVector& tagObjs) const;
+ bool instantiateImportedTable(JSContext* cx, const TableDesc& td,
+ Handle<WasmTableObject*> table,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const;
+ bool instantiateLocalTable(JSContext* cx, const TableDesc& td,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const;
+ bool instantiateTables(JSContext* cx,
+ const WasmTableObjectVector& tableImports,
+ MutableHandle<WasmTableObjectVector> tableObjs,
+ SharedTableVector* tables) const;
+ bool instantiateGlobals(JSContext* cx, const ValVector& globalImportValues,
+ WasmGlobalObjectVector& globalObjs) const;
+ bool initSegments(JSContext* cx, Handle<WasmInstanceObject*> instance,
+ Handle<WasmMemoryObject*> memory) const;
+
+ class Tier2GeneratorTaskImpl;
+
+ public:
+ Module(const Code& code, ImportVector&& imports, ExportVector&& exports,
+ DataSegmentVector&& dataSegments, ElemSegmentVector&& elemSegments,
+ CustomSectionVector&& customSections,
+ const ShareableBytes* debugBytecode = nullptr,
+ bool loggingDeserialized = false)
+ : code_(&code),
+ imports_(std::move(imports)),
+ exports_(std::move(exports)),
+ dataSegments_(std::move(dataSegments)),
+ elemSegments_(std::move(elemSegments)),
+ customSections_(std::move(customSections)),
+ debugBytecode_(debugBytecode),
+ loggingDeserialized_(loggingDeserialized),
+ testingTier2Active_(false) {
+ initGCMallocBytesExcludingCode();
+ }
+ ~Module() override;
+
+ const Code& code() const { return *code_; }
+ const ModuleSegment& moduleSegment(Tier t) const { return code_->segment(t); }
+ const Metadata& metadata() const { return code_->metadata(); }
+ const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
+ const ImportVector& imports() const { return imports_; }
+ const ExportVector& exports() const { return exports_; }
+ const CustomSectionVector& customSections() const { return customSections_; }
+ const Bytes& debugBytecode() const { return debugBytecode_->bytes; }
+ uint32_t codeLength(Tier t) const { return code_->segment(t).length(); }
+
+ // Instantiate this module with the given imports:
+
+ bool instantiate(JSContext* cx, ImportValues& imports,
+ HandleObject instanceProto,
+ MutableHandle<WasmInstanceObject*> instanceObj) const;
+
+  // Tier-2 compilation may be initiated at most once after the Module is
+  // constructed. When tier-2 compilation completes, ModuleGenerator calls
+ // finishTier2() from a helper thread, passing tier-variant data which will
+ // be installed and made visible.
+
+ void startTier2(const CompileArgs& args, const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* listener);
+ bool finishTier2(const LinkData& linkData2, UniqueCodeTier code2) const;
+
+ void testingBlockOnTier2Complete() const;
+ bool testingTier2Active() const { return testingTier2Active_; }
+
+ // Code caching support.
+
+ [[nodiscard]] bool serialize(const LinkData& linkData, Bytes* bytes) const;
+ static RefPtr<Module> deserialize(const uint8_t* begin, size_t size);
+ bool loggingDeserialized() const { return loggingDeserialized_; }
+
+ // JS API and JS::WasmModule implementation:
+
+ JSObject* createObject(JSContext* cx) const override;
+ JSObject* createObjectForAsmJS(JSContext* cx) const override;
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code, size_t* data) const;
+
+ // GC malloc memory tracking:
+
+ void initGCMallocBytesExcludingCode();
+ size_t gcMallocBytesExcludingCode() const {
+ return gcMallocBytesExcludingCode_;
+ }
+
+ // Generated code analysis support:
+
+ bool extractCode(JSContext* cx, Tier tier, MutableHandleValue vp) const;
+
+ WASM_DECLARE_FRIEND_SERIALIZE_ARGS(Module, const wasm::LinkData& linkData);
+};
+
+using MutableModule = RefPtr<Module>;
+using SharedModule = RefPtr<const Module>;
+
+// JS API implementations:
+
+[[nodiscard]] bool GetOptimizedEncodingBuildId(JS::BuildIdCharVector* buildId);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_module_h
diff --git a/js/src/wasm/WasmModuleTypes.cpp b/js/src/wasm/WasmModuleTypes.cpp
new file mode 100644
index 0000000000..9823d7ddbc
--- /dev/null
+++ b/js/src/wasm/WasmModuleTypes.cpp
@@ -0,0 +1,171 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmModuleTypes.h"
+
+#include "mozilla/Range.h"
+
+#include "vm/JSAtom.h"
+#include "vm/MallocProvider.h"
+#include "wasm/WasmUtility.h"
+
+#include "vm/JSAtom-inl.h"
+
+using namespace js;
+using namespace js::wasm;
+
+/* static */
+CacheableName CacheableName::fromUTF8Chars(UniqueChars&& utf8Chars) {
+ size_t length = strlen(utf8Chars.get());
+ UTF8Bytes bytes;
+ bytes.replaceRawBuffer(utf8Chars.release(), length, length + 1);
+ return CacheableName(std::move(bytes));
+}
+
+/* static */
+bool CacheableName::fromUTF8Chars(const char* utf8Chars, CacheableName* name) {
+ size_t utf8CharsLen = strlen(utf8Chars);
+ UTF8Bytes bytes;
+ if (!bytes.resizeUninitialized(utf8CharsLen)) {
+ return false;
+ }
+ memcpy(bytes.begin(), utf8Chars, utf8CharsLen);
+ *name = CacheableName(std::move(bytes));
+ return true;
+}
+
+JSAtom* CacheableName::toAtom(JSContext* cx) const {
+ return AtomizeUTF8Chars(cx, begin(), length());
+}
+
+bool CacheableName::toPropertyKey(JSContext* cx,
+ MutableHandleId propertyKey) const {
+ JSAtom* atom = toAtom(cx);
+ if (!atom) {
+ return false;
+ }
+ propertyKey.set(AtomToId(atom));
+ return true;
+}
+
+UniqueChars CacheableName::toQuotedString(JSContext* cx) const {
+ RootedString atom(cx, toAtom(cx));
+ if (!atom) {
+ return nullptr;
+ }
+ return QuoteString(cx, atom.get());
+}
+
+size_t CacheableName::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return bytes_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return module.sizeOfExcludingThis(mallocSizeOf) +
+ field.sizeOfExcludingThis(mallocSizeOf);
+}
+
+Export::Export(CacheableName&& fieldName, uint32_t index, DefinitionKind kind)
+ : fieldName_(std::move(fieldName)) {
+ pod.kind_ = kind;
+ pod.index_ = index;
+}
+
+Export::Export(CacheableName&& fieldName, DefinitionKind kind)
+ : fieldName_(std::move(fieldName)) {
+ pod.kind_ = kind;
+ pod.index_ = 0;
+}
+
+uint32_t Export::funcIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Function);
+ return pod.index_;
+}
+
+uint32_t Export::globalIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Global);
+ return pod.index_;
+}
+
+uint32_t Export::tagIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Tag);
+ return pod.index_;
+}
+
+uint32_t Export::tableIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Table);
+ return pod.index_;
+}
+
+size_t Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return fieldName_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t GlobalDesc::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return initial_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+bool TagType::initialize(ValTypeVector&& argTypes) {
+ MOZ_ASSERT(argTypes_.empty() && argOffsets_.empty() && size_ == 0);
+
+ argTypes_ = std::move(argTypes);
+ if (!argOffsets_.resize(argTypes_.length())) {
+ return false;
+ }
+
+ StructLayout layout;
+ for (size_t i = 0; i < argTypes_.length(); i++) {
+ CheckedInt32 offset = layout.addField(FieldType(argTypes_[i].packed()));
+ if (!offset.isValid()) {
+ return false;
+ }
+ argOffsets_[i] = offset.value();
+ }
+
+ CheckedInt32 size = layout.close();
+ if (!size.isValid()) {
+ return false;
+ }
+ this->size_ = size.value();
+
+ return true;
+}
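+
+// Dependency-free sketch of the sequential layout computed above. The real
+// StructLayout (defined elsewhere in the wasm sources) also handles packed
+// field types and overflow via CheckedInt32; this simplified version assumes
+// plain scalar fields whose alignment equals their size.
+//
+//   struct SketchLayout {
+//     std::vector<uint32_t> offsets;
+//     uint32_t size = 0;
+//   };
+//
+//   static SketchLayout LayoutScalars(const std::vector<uint32_t>& sizes) {
+//     SketchLayout out;
+//     uint32_t maxAlign = 1;
+//     for (uint32_t sz : sizes) {
+//       maxAlign = std::max(maxAlign, sz);
+//       out.size = (out.size + sz - 1) / sz * sz;  // align up to the field
+//       out.offsets.push_back(out.size);           // addField() -> offset
+//       out.size += sz;
+//     }
+//     // close(): pad the total size out to the largest alignment.
+//     out.size = (out.size + maxAlign - 1) / maxAlign * maxAlign;
+//     return out;
+//   }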
+
+size_t TagType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return argTypes_.sizeOfExcludingThis(mallocSizeOf) +
+ argOffsets_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t TagDesc::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return type->sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t ElemSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return SizeOfMaybeExcludingThis(offsetIfActive, mallocSizeOf) +
+ elemFuncIndices.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t DataSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return SizeOfMaybeExcludingThis(offsetIfActive, mallocSizeOf) +
+ bytes.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t CustomSection::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return name.sizeOfExcludingThis(mallocSizeOf) + sizeof(*payload) +
+ payload->sizeOfExcludingThis(mallocSizeOf);
+}
diff --git a/js/src/wasm/WasmModuleTypes.h b/js/src/wasm/WasmModuleTypes.h
new file mode 100644
index 0000000000..9d018bdc01
--- /dev/null
+++ b/js/src/wasm/WasmModuleTypes.h
@@ -0,0 +1,632 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_module_types_h
+#define wasm_module_types_h
+
+#include "mozilla/RefPtr.h"
+#include "mozilla/Span.h"
+
+#include "js/AllocPolicy.h"
+#include "js/RefCounted.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+#include "wasm/WasmCompileArgs.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmExprType.h"
+#include "wasm/WasmInitExpr.h"
+#include "wasm/WasmMemory.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmShareable.h"
+#include "wasm/WasmTypeDecls.h"
+#include "wasm/WasmValType.h"
+#include "wasm/WasmValue.h"
+
+namespace js {
+namespace wasm {
+
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Span;
+
+class FuncType;
+
+// A Module can either be asm.js or wasm.
+
+enum ModuleKind { Wasm, AsmJS };
+
+// CacheableChars is used to cacheably store UniqueChars.
+
+struct CacheableChars : UniqueChars {
+ CacheableChars() = default;
+ explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
+ MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs)
+ : UniqueChars(std::move(rhs)) {}
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+using CacheableCharsVector = Vector<CacheableChars, 0, SystemAllocPolicy>;
+
+// CacheableName is used to cacheably store a UTF-8 string that may contain
+// null terminators in sequence.
+
+struct CacheableName {
+ private:
+ UTF8Bytes bytes_;
+
+ const char* begin() const { return (const char*)bytes_.begin(); }
+ size_t length() const { return bytes_.length(); }
+
+ public:
+ CacheableName() = default;
+ MOZ_IMPLICIT CacheableName(UTF8Bytes&& rhs) : bytes_(std::move(rhs)) {}
+
+ bool isEmpty() const { return bytes_.length() == 0; }
+
+ Span<char> utf8Bytes() { return Span<char>(bytes_); }
+ Span<const char> utf8Bytes() const { return Span<const char>(bytes_); }
+
+ static CacheableName fromUTF8Chars(UniqueChars&& utf8Chars);
+ [[nodiscard]] static bool fromUTF8Chars(const char* utf8Chars,
+ CacheableName* name);
+
+ [[nodiscard]] JSAtom* toAtom(JSContext* cx) const;
+ [[nodiscard]] bool toPropertyKey(JSContext* cx,
+ MutableHandleId propertyKey) const;
+ [[nodiscard]] UniqueChars toQuotedString(JSContext* cx) const;
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(CacheableName);
+};
+
+using CacheableNameVector = Vector<CacheableName, 0, SystemAllocPolicy>;
+
+// A hash policy for names.
+struct NameHasher {
+ using Key = Span<const char>;
+ using Lookup = Span<const char>;
+
+ static HashNumber hash(const Lookup& aLookup) {
+ return mozilla::HashString(aLookup.data(), aLookup.Length());
+ }
+
+ static bool match(const Key& aKey, const Lookup& aLookup) {
+ return aKey == aLookup;
+ }
+};
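+
+// Example use of the policy (hypothetical alias, not declared anywhere in the
+// tree): a map from a name span to, say, a function index could be written as
+//
+//   using NameToIndexMap =
+//       HashMap<Span<const char>, uint32_t, NameHasher, SystemAllocPolicy>;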
+
+// Import describes a single wasm import. An ImportVector describes all
+// of a single module's imports.
+//
+// ImportVector is built incrementally by ModuleGenerator and then stored
+// immutably by Module.
+
+struct Import {
+ CacheableName module;
+ CacheableName field;
+ DefinitionKind kind;
+
+ Import() = default;
+ Import(CacheableName&& module, CacheableName&& field, DefinitionKind kind)
+ : module(std::move(module)), field(std::move(field)), kind(kind) {}
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+using ImportVector = Vector<Import, 0, SystemAllocPolicy>;
+
+// Export describes the export of a definition in a Module to a field in the
+// export object. The Export stores the index of the exported item in the
+// appropriate type-specific module data structure (function table, global
+// table, table table, and - eventually - memory table).
+//
+// Note a single definition can be exported by multiple Exports in the
+// ExportVector.
+//
+// ExportVector is built incrementally by ModuleGenerator and then stored
+// immutably by Module.
+
+class Export {
+ public:
+ struct CacheablePod {
+ DefinitionKind kind_;
+ uint32_t index_;
+
+ WASM_CHECK_CACHEABLE_POD(kind_, index_);
+ };
+
+ private:
+ CacheableName fieldName_;
+ CacheablePod pod;
+
+ public:
+ Export() = default;
+ explicit Export(CacheableName&& fieldName, uint32_t index,
+ DefinitionKind kind);
+ explicit Export(CacheableName&& fieldName, DefinitionKind kind);
+
+ const CacheableName& fieldName() const { return fieldName_; }
+
+ DefinitionKind kind() const { return pod.kind_; }
+ uint32_t funcIndex() const;
+ uint32_t tagIndex() const;
+ uint32_t globalIndex() const;
+ uint32_t tableIndex() const;
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(Export);
+};
+
+WASM_DECLARE_CACHEABLE_POD(Export::CacheablePod);
+
+using ExportVector = Vector<Export, 0, SystemAllocPolicy>;
+
+// FuncFlags provides metadata for a function definition.
+
+enum class FuncFlags : uint8_t {
+ None = 0x0,
+  // The function may be accessible by JS and needs thunks generated for it.
+ // See `[SMDOC] Exported wasm functions and the jit-entry stubs` in
+ // WasmJS.cpp for more information.
+ Exported = 0x1,
+ // The function should have thunks generated upon instantiation, not upon
+ // first call. May only be set if `Exported` is set.
+ Eager = 0x2,
+ // The function can be the target of a ref.func instruction in the code
+ // section. May only be set if `Exported` is set.
+ CanRefFunc = 0x4,
+};
+
+// A FuncDesc describes a single function definition.
+
+struct FuncDesc {
+ const FuncType* type;
+ // Bit pack to keep this struct small on 32-bit systems
+ uint32_t typeIndex : 24;
+ FuncFlags flags : 8;
+
+ // Assert that the bit packing scheme is viable
+ static_assert(MaxTypes <= (1 << 24) - 1);
+ static_assert(sizeof(FuncFlags) == sizeof(uint8_t));
+
+ FuncDesc() = default;
+ FuncDesc(const FuncType* type, uint32_t typeIndex)
+ : type(type), typeIndex(typeIndex), flags(FuncFlags::None) {}
+
+ bool isExported() const {
+ return uint8_t(flags) & uint8_t(FuncFlags::Exported);
+ }
+ bool isEager() const { return uint8_t(flags) & uint8_t(FuncFlags::Eager); }
+ bool canRefFunc() const {
+ return uint8_t(flags) & uint8_t(FuncFlags::CanRefFunc);
+ }
+};
+
+using FuncDescVector = Vector<FuncDesc, 0, SystemAllocPolicy>;
+
+// A GlobalDesc describes a single global variable.
+//
+// wasm can import and export mutable and immutable globals.
+//
+// asm.js can import mutable and immutable globals, but a mutable global has a
+// location that is private to the module, and its initial value is copied into
+// that cell from the environment. asm.js cannot export globals.
+
+enum class GlobalKind { Import, Constant, Variable };
+
+class GlobalDesc {
+ GlobalKind kind_;
+ // Stores the value type of this global for all kinds, and the initializer
+ // expression when `constant` or `variable`.
+ InitExpr initial_;
+ // Metadata for the global when `variable` or `import`.
+ unsigned offset_;
+ bool isMutable_;
+ bool isWasm_;
+ bool isExport_;
+ // Metadata for the global when `import`.
+ uint32_t importIndex_;
+
+ // Private, as they have unusual semantics.
+
+ bool isExport() const { return !isConstant() && isExport_; }
+ bool isWasm() const { return !isConstant() && isWasm_; }
+
+ public:
+ GlobalDesc() = default;
+
+ explicit GlobalDesc(InitExpr&& initial, bool isMutable,
+ ModuleKind kind = ModuleKind::Wasm)
+ : kind_((isMutable || !initial.isLiteral()) ? GlobalKind::Variable
+ : GlobalKind::Constant) {
+ initial_ = std::move(initial);
+ if (isVariable()) {
+ isMutable_ = isMutable;
+ isWasm_ = kind == Wasm;
+ isExport_ = false;
+ offset_ = UINT32_MAX;
+ }
+ }
+
+ explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex,
+ ModuleKind kind = ModuleKind::Wasm)
+ : kind_(GlobalKind::Import) {
+ initial_ = InitExpr(LitVal(type));
+ importIndex_ = importIndex;
+ isMutable_ = isMutable;
+ isWasm_ = kind == Wasm;
+ isExport_ = false;
+ offset_ = UINT32_MAX;
+ }
+
+ void setOffset(unsigned offset) {
+ MOZ_ASSERT(!isConstant());
+ MOZ_ASSERT(offset_ == UINT32_MAX);
+ offset_ = offset;
+ }
+ unsigned offset() const {
+ MOZ_ASSERT(!isConstant());
+ MOZ_ASSERT(offset_ != UINT32_MAX);
+ return offset_;
+ }
+
+ void setIsExport() {
+ if (!isConstant()) {
+ isExport_ = true;
+ }
+ }
+
+ GlobalKind kind() const { return kind_; }
+ bool isVariable() const { return kind_ == GlobalKind::Variable; }
+ bool isConstant() const { return kind_ == GlobalKind::Constant; }
+ bool isImport() const { return kind_ == GlobalKind::Import; }
+
+ bool isMutable() const { return !isConstant() && isMutable_; }
+ const InitExpr& initExpr() const {
+ MOZ_ASSERT(!isImport());
+ return initial_;
+ }
+ uint32_t importIndex() const {
+ MOZ_ASSERT(isImport());
+ return importIndex_;
+ }
+
+ LitVal constantValue() const { return initial_.literal(); }
+
+ // If isIndirect() is true then storage for the value is not in the
+ // instance's global area, but in a WasmGlobalObject::Cell hanging off a
+ // WasmGlobalObject; the global area contains a pointer to the Cell.
+ //
+ // We don't want to indirect unless we must, so only mutable, exposed
+ // globals are indirected - in all other cases we copy values into and out
+ // of their module.
+ //
+ // Note that isIndirect() isn't equivalent to getting a WasmGlobalObject:
+ // an immutable exported global will still get an object, but will not be
+ // indirect.
+ bool isIndirect() const {
+ return isMutable() && isWasm() && (isImport() || isExport());
+ }
+
+ ValType type() const { return initial_.type(); }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(GlobalDesc);
+};
+
+using GlobalDescVector = Vector<GlobalDesc, 0, SystemAllocPolicy>;
+
+// A TagDesc represents fresh per-instance tags that are used for the
+// exception handling proposal and potentially other future proposals.
+
+// The TagOffsetVector represents the offsets in the layout of the
+// data buffer stored in a Wasm exception.
+using TagOffsetVector = Vector<uint32_t, 2, SystemAllocPolicy>;
+
+struct TagType : AtomicRefCounted<TagType> {
+ ValTypeVector argTypes_;
+ TagOffsetVector argOffsets_;
+ uint32_t size_;
+
+ TagType() : size_(0) {}
+
+ ResultType resultType() const { return ResultType::Vector(argTypes_); }
+
+ [[nodiscard]] bool initialize(ValTypeVector&& argTypes);
+
+ [[nodiscard]] bool clone(const TagType& src) {
+ MOZ_ASSERT(argTypes_.empty() && argOffsets_.empty() && size_ == 0);
+ if (!argTypes_.appendAll(src.argTypes_) ||
+ !argOffsets_.appendAll(src.argOffsets_)) {
+ return false;
+ }
+ size_ = src.size_;
+ return true;
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+using MutableTagType = RefPtr<TagType>;
+using SharedTagType = RefPtr<const TagType>;
+
+struct TagDesc {
+ TagKind kind;
+ SharedTagType type;
+ bool isExport;
+
+ TagDesc() : isExport(false) {}
+ TagDesc(TagKind kind, const SharedTagType& type, bool isExport = false)
+ : kind(kind), type(type), isExport(isExport) {}
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+using TagDescVector = Vector<TagDesc, 0, SystemAllocPolicy>;
+
+// When an ElemSegment is "passive" it is shared between a wasm::Module and its
+// wasm::Instances. To allow each segment to be released as soon as the last
+// Instance elem.drops it and the Module is destroyed, each ElemSegment is
+// individually atomically ref-counted.
+
+struct ElemSegment : AtomicRefCounted<ElemSegment> {
+ enum class Kind {
+ Active,
+ Passive,
+ Declared,
+ };
+
+ Kind kind;
+ uint32_t tableIndex;
+ RefType elemType;
+ Maybe<InitExpr> offsetIfActive;
+ Uint32Vector elemFuncIndices; // Element may be NullFuncIndex
+
+ bool active() const { return kind == Kind::Active; }
+
+ const InitExpr& offset() const { return *offsetIfActive; }
+
+ size_t length() const { return elemFuncIndices.length(); }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+// NullFuncIndex represents the case when an element segment (of type funcref)
+// contains a null element.
+constexpr uint32_t NullFuncIndex = UINT32_MAX;
+static_assert(NullFuncIndex > MaxFuncs, "Invariant");
+
+using MutableElemSegment = RefPtr<ElemSegment>;
+using SharedElemSegment = RefPtr<const ElemSegment>;
+using ElemSegmentVector = Vector<SharedElemSegment, 0, SystemAllocPolicy>;
+
+// DataSegmentEnv holds the initial results of decoding a data segment from the
+// bytecode and is stored in the ModuleEnvironment during compilation. When
+// compilation completes, (non-Env) DataSegments are created and stored in
+// the wasm::Module which contain copies of the data segment payload. This
+// allows non-compilation uses of wasm validation to avoid expensive copies.
+//
+// When a DataSegment is "passive" it is shared between a wasm::Module and its
+// wasm::Instances. To allow each segment to be released as soon as the last
+// Instance data.drops it and the Module is destroyed, each DataSegment is
+// individually atomically ref-counted.
+
+struct DataSegmentEnv {
+ Maybe<InitExpr> offsetIfActive;
+ uint32_t bytecodeOffset;
+ uint32_t length;
+};
+
+using DataSegmentEnvVector = Vector<DataSegmentEnv, 0, SystemAllocPolicy>;
+
+struct DataSegment : AtomicRefCounted<DataSegment> {
+ Maybe<InitExpr> offsetIfActive;
+ Bytes bytes;
+
+ DataSegment() = default;
+
+ bool active() const { return !!offsetIfActive; }
+
+ const InitExpr& offset() const { return *offsetIfActive; }
+
+ [[nodiscard]] bool init(const ShareableBytes& bytecode,
+ const DataSegmentEnv& src) {
+ if (src.offsetIfActive) {
+ offsetIfActive.emplace();
+ if (!offsetIfActive->clone(*src.offsetIfActive)) {
+ return false;
+ }
+ }
+ return bytes.append(bytecode.begin() + src.bytecodeOffset, src.length);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+using MutableDataSegment = RefPtr<DataSegment>;
+using SharedDataSegment = RefPtr<const DataSegment>;
+using DataSegmentVector = Vector<SharedDataSegment, 0, SystemAllocPolicy>;
+
+// The CustomSection(Env) structs are like DataSegment(Env): CustomSectionEnv is
+// stored in the ModuleEnvironment and CustomSection holds a copy of the payload
+// and is stored in the wasm::Module.
+
+struct CustomSectionEnv {
+ uint32_t nameOffset;
+ uint32_t nameLength;
+ uint32_t payloadOffset;
+ uint32_t payloadLength;
+};
+
+using CustomSectionEnvVector = Vector<CustomSectionEnv, 0, SystemAllocPolicy>;
+
+struct CustomSection {
+ Bytes name;
+ SharedBytes payload;
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+using CustomSectionVector = Vector<CustomSection, 0, SystemAllocPolicy>;
+
+// A Name represents a string of utf8 chars embedded within the name custom
+// section. The offset of a name is expressed relative to the beginning of the
+// name section's payload so that Names can be stored in wasm::Code, which only
+// holds the name section's bytes, not the whole bytecode.
+
+struct Name {
+ // All fields are treated as cacheable POD:
+ uint32_t offsetInNamePayload;
+ uint32_t length;
+
+ WASM_CHECK_CACHEABLE_POD(offsetInNamePayload, length);
+
+ Name() : offsetInNamePayload(UINT32_MAX), length(0) {}
+};
+
+WASM_DECLARE_CACHEABLE_POD(Name);
+
+using NameVector = Vector<Name, 0, SystemAllocPolicy>;
+
+// The kind of limits to decode or convert from JS.
+
+enum class LimitsKind {
+ Memory,
+ Table,
+};
+
+// Represents the resizable limits of memories and tables.
+
+struct Limits {
+ // `indexType` will always be I32 for tables, but may be I64 for memories
+ // when memory64 is enabled.
+ IndexType indexType;
+
+ // The initial and maximum limit. The unit is pages for memories and elements
+ // for tables.
+ uint64_t initial;
+ Maybe<uint64_t> maximum;
+
+ // `shared` is Shareable::False for tables but may be Shareable::True for
+ // memories.
+ Shareable shared;
+
+ WASM_CHECK_CACHEABLE_POD(indexType, initial, maximum, shared);
+
+ Limits() = default;
+ explicit Limits(uint64_t initial, const Maybe<uint64_t>& maximum = Nothing(),
+ Shareable shared = Shareable::False)
+ : indexType(IndexType::I32),
+ initial(initial),
+ maximum(maximum),
+ shared(shared) {}
+};
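+
+// Worked example (illustrative): a memory declared as `(memory 1 10 shared)`
+// decodes to indexType = IndexType::I32, initial = 1, maximum = Some(10),
+// shared = Shareable::True, while a table declared as `(table 2 funcref)`
+// gets initial = 2 and maximum = Nothing().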
+
+WASM_DECLARE_CACHEABLE_POD(Limits);
+
+// MemoryDesc describes a memory.
+
+struct MemoryDesc {
+ Limits limits;
+
+ WASM_CHECK_CACHEABLE_POD(limits);
+
+ bool isShared() const { return limits.shared == Shareable::True; }
+
+ // Whether a backing store for this memory may move when grown.
+ bool canMovingGrow() const { return limits.maximum.isNothing(); }
+
+ // Whether the bounds check limit (see the doc comment in
+  // ArrayBufferObject.cpp regarding linear memory structure) is guaranteed to
+  // fit in 32 bits.
+ bool boundsCheckLimitIs32Bits() const {
+ return limits.maximum.isSome() &&
+ limits.maximum.value() < (0x100000000 / PageSize);
+ }
+
+ IndexType indexType() const { return limits.indexType; }
+
+ // The initial length of this memory in pages.
+ Pages initialPages() const { return Pages(limits.initial); }
+
+ // The maximum length of this memory in pages.
+ Maybe<Pages> maximumPages() const {
+ return limits.maximum.map([](uint64_t x) { return Pages(x); });
+ }
+
+ // The initial length of this memory in bytes. Only valid for memory32.
+ uint64_t initialLength32() const {
+ MOZ_ASSERT(indexType() == IndexType::I32);
+ // See static_assert after MemoryDesc for why this is safe.
+ return limits.initial * PageSize;
+ }
+
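+  // The initial length of this memory in bytes. Only valid for memory64.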
+ uint64_t initialLength64() const {
+ MOZ_ASSERT(indexType() == IndexType::I64);
+ return limits.initial * PageSize;
+ }
+
+ MemoryDesc() = default;
+ explicit MemoryDesc(Limits limits) : limits(limits) {}
+};
+
+WASM_DECLARE_CACHEABLE_POD(MemoryDesc);
+
+// We don't need to worry about overflow with a Memory32 field when
+// using a uint64_t.
+static_assert(MaxMemory32LimitField <= UINT64_MAX / PageSize);
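+
+// Worked example of the boundsCheckLimitIs32Bits() arithmetic above, assuming
+// the usual 64 KiB wasm page: 0x100000000 / 65536 == 65536 pages, so a
+// declared maximum of at most 65535 pages keeps maximum * PageSize strictly
+// below 2^32, i.e. representable in 32 bits.
+static_assert(uint64_t(65535) * 65536 < (uint64_t(1) << 32));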
+
+// TableDesc describes a table as well as the offset of the table's base pointer
+// in global memory.
+//
+// A TableDesc contains the element type and whether the table is for asm.js,
+// which determines the table representation.
+// - ExternRef: a wasm anyref word (wasm::AnyRef)
+// - FuncRef: a two-word FunctionTableElem (wasm indirect call ABI)
+// - FuncRef (if `isAsmJS`): a two-word FunctionTableElem (asm.js ABI)
+// Eventually there should be a single unified AnyRef representation.
+
+struct TableDesc {
+ RefType elemType;
+ bool isImported;
+ bool isExported;
+ bool isAsmJS;
+ uint32_t initialLength;
+ Maybe<uint32_t> maximumLength;
+ Maybe<InitExpr> initExpr;
+
+ TableDesc() = default;
+ TableDesc(RefType elemType, uint32_t initialLength,
+ Maybe<uint32_t> maximumLength, Maybe<InitExpr>&& initExpr,
+ bool isAsmJS, bool isImported = false, bool isExported = false)
+ : elemType(elemType),
+ isImported(isImported),
+ isExported(isExported),
+ isAsmJS(isAsmJS),
+ initialLength(initialLength),
+ maximumLength(maximumLength),
+ initExpr(std::move(initExpr)) {}
+};
+
+using TableDescVector = Vector<TableDesc, 0, SystemAllocPolicy>;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_module_types_h
diff --git a/js/src/wasm/WasmOpIter.cpp b/js/src/wasm/WasmOpIter.cpp
new file mode 100644
index 0000000000..91e2e17a70
--- /dev/null
+++ b/js/src/wasm/WasmOpIter.cpp
@@ -0,0 +1,863 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmOpIter.h"
+
+#include "jit/AtomicOp.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+#ifdef ENABLE_WASM_GC
+# ifndef ENABLE_WASM_FUNCTION_REFERENCES
+# error "GC types require the function-references feature"
+# endif
+#endif
+
+#ifdef DEBUG
+
+# ifdef ENABLE_WASM_FUNCTION_REFERENCES
+# define WASM_FUNCTION_REFERENCES_OP(code) return code
+# else
+# define WASM_FUNCTION_REFERENCES_OP(code) break
+# endif
+# ifdef ENABLE_WASM_GC
+# define WASM_GC_OP(code) return code
+# else
+# define WASM_GC_OP(code) break
+# endif
+# ifdef ENABLE_WASM_SIMD
+# define WASM_SIMD_OP(code) return code
+# else
+# define WASM_SIMD_OP(code) break
+# endif
+
+OpKind wasm::Classify(OpBytes op) {
+ switch (Op(op.b0)) {
+ case Op::Block:
+ return OpKind::Block;
+ case Op::Loop:
+ return OpKind::Loop;
+ case Op::Unreachable:
+ return OpKind::Unreachable;
+ case Op::Drop:
+ return OpKind::Drop;
+ case Op::I32Const:
+ return OpKind::I32;
+ case Op::I64Const:
+ return OpKind::I64;
+ case Op::F32Const:
+ return OpKind::F32;
+ case Op::F64Const:
+ return OpKind::F64;
+ case Op::Br:
+ return OpKind::Br;
+ case Op::BrIf:
+ return OpKind::BrIf;
+ case Op::BrTable:
+ return OpKind::BrTable;
+ case Op::Nop:
+ return OpKind::Nop;
+ case Op::I32Clz:
+ case Op::I32Ctz:
+ case Op::I32Popcnt:
+ case Op::I64Clz:
+ case Op::I64Ctz:
+ case Op::I64Popcnt:
+ case Op::F32Abs:
+ case Op::F32Neg:
+ case Op::F32Ceil:
+ case Op::F32Floor:
+ case Op::F32Trunc:
+ case Op::F32Nearest:
+ case Op::F32Sqrt:
+ case Op::F64Abs:
+ case Op::F64Neg:
+ case Op::F64Ceil:
+ case Op::F64Floor:
+ case Op::F64Trunc:
+ case Op::F64Nearest:
+ case Op::F64Sqrt:
+ return OpKind::Unary;
+ case Op::I32Add:
+ case Op::I32Sub:
+ case Op::I32Mul:
+ case Op::I32DivS:
+ case Op::I32DivU:
+ case Op::I32RemS:
+ case Op::I32RemU:
+ case Op::I32And:
+ case Op::I32Or:
+ case Op::I32Xor:
+ case Op::I32Shl:
+ case Op::I32ShrS:
+ case Op::I32ShrU:
+ case Op::I32Rotl:
+ case Op::I32Rotr:
+ case Op::I64Add:
+ case Op::I64Sub:
+ case Op::I64Mul:
+ case Op::I64DivS:
+ case Op::I64DivU:
+ case Op::I64RemS:
+ case Op::I64RemU:
+ case Op::I64And:
+ case Op::I64Or:
+ case Op::I64Xor:
+ case Op::I64Shl:
+ case Op::I64ShrS:
+ case Op::I64ShrU:
+ case Op::I64Rotl:
+ case Op::I64Rotr:
+ case Op::F32Add:
+ case Op::F32Sub:
+ case Op::F32Mul:
+ case Op::F32Div:
+ case Op::F32Min:
+ case Op::F32Max:
+ case Op::F32CopySign:
+ case Op::F64Add:
+ case Op::F64Sub:
+ case Op::F64Mul:
+ case Op::F64Div:
+ case Op::F64Min:
+ case Op::F64Max:
+ case Op::F64CopySign:
+ return OpKind::Binary;
+ case Op::I32Eq:
+ case Op::I32Ne:
+ case Op::I32LtS:
+ case Op::I32LtU:
+ case Op::I32LeS:
+ case Op::I32LeU:
+ case Op::I32GtS:
+ case Op::I32GtU:
+ case Op::I32GeS:
+ case Op::I32GeU:
+ case Op::I64Eq:
+ case Op::I64Ne:
+ case Op::I64LtS:
+ case Op::I64LtU:
+ case Op::I64LeS:
+ case Op::I64LeU:
+ case Op::I64GtS:
+ case Op::I64GtU:
+ case Op::I64GeS:
+ case Op::I64GeU:
+ case Op::F32Eq:
+ case Op::F32Ne:
+ case Op::F32Lt:
+ case Op::F32Le:
+ case Op::F32Gt:
+ case Op::F32Ge:
+ case Op::F64Eq:
+ case Op::F64Ne:
+ case Op::F64Lt:
+ case Op::F64Le:
+ case Op::F64Gt:
+ case Op::F64Ge:
+ return OpKind::Comparison;
+ case Op::I32Eqz:
+ case Op::I32WrapI64:
+ case Op::I32TruncF32S:
+ case Op::I32TruncF32U:
+ case Op::I32ReinterpretF32:
+ case Op::I32TruncF64S:
+ case Op::I32TruncF64U:
+ case Op::I64ExtendI32S:
+ case Op::I64ExtendI32U:
+ case Op::I64TruncF32S:
+ case Op::I64TruncF32U:
+ case Op::I64TruncF64S:
+ case Op::I64TruncF64U:
+ case Op::I64ReinterpretF64:
+ case Op::I64Eqz:
+ case Op::F32ConvertI32S:
+ case Op::F32ConvertI32U:
+ case Op::F32ReinterpretI32:
+ case Op::F32ConvertI64S:
+ case Op::F32ConvertI64U:
+ case Op::F32DemoteF64:
+ case Op::F64ConvertI32S:
+ case Op::F64ConvertI32U:
+ case Op::F64ConvertI64S:
+ case Op::F64ConvertI64U:
+ case Op::F64ReinterpretI64:
+ case Op::F64PromoteF32:
+ case Op::I32Extend8S:
+ case Op::I32Extend16S:
+ case Op::I64Extend8S:
+ case Op::I64Extend16S:
+ case Op::I64Extend32S:
+ return OpKind::Conversion;
+ case Op::I32Load8S:
+ case Op::I32Load8U:
+ case Op::I32Load16S:
+ case Op::I32Load16U:
+ case Op::I64Load8S:
+ case Op::I64Load8U:
+ case Op::I64Load16S:
+ case Op::I64Load16U:
+ case Op::I64Load32S:
+ case Op::I64Load32U:
+ case Op::I32Load:
+ case Op::I64Load:
+ case Op::F32Load:
+ case Op::F64Load:
+ return OpKind::Load;
+ case Op::I32Store8:
+ case Op::I32Store16:
+ case Op::I64Store8:
+ case Op::I64Store16:
+ case Op::I64Store32:
+ case Op::I32Store:
+ case Op::I64Store:
+ case Op::F32Store:
+ case Op::F64Store:
+ return OpKind::Store;
+ case Op::SelectNumeric:
+ case Op::SelectTyped:
+ return OpKind::Select;
+ case Op::LocalGet:
+ return OpKind::GetLocal;
+ case Op::LocalSet:
+ return OpKind::SetLocal;
+ case Op::LocalTee:
+ return OpKind::TeeLocal;
+ case Op::GlobalGet:
+ return OpKind::GetGlobal;
+ case Op::GlobalSet:
+ return OpKind::SetGlobal;
+ case Op::TableGet:
+ return OpKind::TableGet;
+ case Op::TableSet:
+ return OpKind::TableSet;
+ case Op::Call:
+ return OpKind::Call;
+ case Op::CallIndirect:
+ return OpKind::CallIndirect;
+ case Op::CallRef:
+ WASM_FUNCTION_REFERENCES_OP(OpKind::CallRef);
+ case Op::Return:
+ case Op::Limit:
+ // Accept Limit, for use in decoding the end of a function after the body.
+ return OpKind::Return;
+ case Op::If:
+ return OpKind::If;
+ case Op::Else:
+ return OpKind::Else;
+ case Op::End:
+ return OpKind::End;
+ case Op::Catch:
+ return OpKind::Catch;
+ case Op::CatchAll:
+ return OpKind::CatchAll;
+ case Op::Delegate:
+ return OpKind::Delegate;
+ case Op::Throw:
+ return OpKind::Throw;
+ case Op::Rethrow:
+ return OpKind::Rethrow;
+ case Op::Try:
+ return OpKind::Try;
+ case Op::MemorySize:
+ return OpKind::MemorySize;
+ case Op::MemoryGrow:
+ return OpKind::MemoryGrow;
+ case Op::RefNull:
+ return OpKind::RefNull;
+ case Op::RefIsNull:
+ return OpKind::Conversion;
+ case Op::RefFunc:
+ return OpKind::RefFunc;
+ case Op::RefAsNonNull:
+ WASM_FUNCTION_REFERENCES_OP(OpKind::RefAsNonNull);
+ case Op::BrOnNull:
+ WASM_FUNCTION_REFERENCES_OP(OpKind::BrOnNull);
+ case Op::BrOnNonNull:
+ WASM_FUNCTION_REFERENCES_OP(OpKind::BrOnNonNull);
+ case Op::RefEq:
+ WASM_GC_OP(OpKind::Comparison);
+ case Op::GcPrefix: {
+ switch (GcOp(op.b1)) {
+ case GcOp::Limit:
+ // Reject Limit for GcPrefix encoding
+ break;
+ case GcOp::StructNew:
+ WASM_GC_OP(OpKind::StructNew);
+ case GcOp::StructNewDefault:
+ WASM_GC_OP(OpKind::StructNewDefault);
+ case GcOp::StructGet:
+ case GcOp::StructGetS:
+ case GcOp::StructGetU:
+ WASM_GC_OP(OpKind::StructGet);
+ case GcOp::StructSet:
+ WASM_GC_OP(OpKind::StructSet);
+ case GcOp::ArrayNew:
+ WASM_GC_OP(OpKind::ArrayNew);
+ case GcOp::ArrayNewFixed:
+ WASM_GC_OP(OpKind::ArrayNewFixed);
+ case GcOp::ArrayNewDefault:
+ WASM_GC_OP(OpKind::ArrayNewDefault);
+ case GcOp::ArrayNewData:
+ case GcOp::ArrayInitFromElemStaticV5:
+ case GcOp::ArrayNewElem:
+ WASM_GC_OP(OpKind::ArrayNewData);
+ case GcOp::ArrayGet:
+ case GcOp::ArrayGetS:
+ case GcOp::ArrayGetU:
+ WASM_GC_OP(OpKind::ArrayGet);
+ case GcOp::ArraySet:
+ WASM_GC_OP(OpKind::ArraySet);
+ case GcOp::ArrayLenWithTypeIndex:
+ case GcOp::ArrayLen:
+ WASM_GC_OP(OpKind::ArrayLen);
+ case GcOp::ArrayCopy:
+ WASM_GC_OP(OpKind::ArrayCopy);
+ case GcOp::RefTestV5:
+ WASM_GC_OP(OpKind::RefTestV5);
+ case GcOp::RefCastV5:
+ WASM_GC_OP(OpKind::RefCastV5);
+ case GcOp::RefTest:
+ case GcOp::RefTestNull:
+ WASM_GC_OP(OpKind::RefTest);
+ case GcOp::RefCast:
+ case GcOp::RefCastNull:
+ WASM_GC_OP(OpKind::RefCast);
+ case GcOp::BrOnCast:
+ WASM_GC_OP(OpKind::BrOnCast);
+ case GcOp::BrOnCastV5:
+ case GcOp::BrOnCastHeapV5:
+ case GcOp::BrOnCastHeapNullV5:
+ WASM_GC_OP(OpKind::BrOnCastV5);
+ case GcOp::BrOnCastFailV5:
+ case GcOp::BrOnCastFailHeapV5:
+ case GcOp::BrOnCastFailHeapNullV5:
+ WASM_GC_OP(OpKind::BrOnCastFailV5);
+ case GcOp::RefAsStructV5:
+ WASM_GC_OP(OpKind::Conversion);
+ case GcOp::BrOnNonStructV5:
+ WASM_GC_OP(OpKind::BrOnNonStructV5);
+ case GcOp::ExternInternalize:
+ WASM_GC_OP(OpKind::RefConversion);
+ case GcOp::ExternExternalize:
+ WASM_GC_OP(OpKind::RefConversion);
+ }
+ break;
+ }
+ case Op::SimdPrefix: {
+ switch (SimdOp(op.b1)) {
+ case SimdOp::MozPMADDUBSW:
+ case SimdOp::Limit:
+ // Reject Limit and reserved codes for SimdPrefix encoding
+ break;
+ case SimdOp::I8x16ExtractLaneS:
+ case SimdOp::I8x16ExtractLaneU:
+ case SimdOp::I16x8ExtractLaneS:
+ case SimdOp::I16x8ExtractLaneU:
+ case SimdOp::I32x4ExtractLane:
+ case SimdOp::I64x2ExtractLane:
+ case SimdOp::F32x4ExtractLane:
+ case SimdOp::F64x2ExtractLane:
+ WASM_SIMD_OP(OpKind::ExtractLane);
+ case SimdOp::I8x16Splat:
+ case SimdOp::I16x8Splat:
+ case SimdOp::I32x4Splat:
+ case SimdOp::I64x2Splat:
+ case SimdOp::F32x4Splat:
+ case SimdOp::F64x2Splat:
+ case SimdOp::V128AnyTrue:
+ case SimdOp::I8x16AllTrue:
+ case SimdOp::I16x8AllTrue:
+ case SimdOp::I32x4AllTrue:
+ case SimdOp::I64x2AllTrue:
+ case SimdOp::I8x16Bitmask:
+ case SimdOp::I16x8Bitmask:
+ case SimdOp::I32x4Bitmask:
+ case SimdOp::I64x2Bitmask:
+ WASM_SIMD_OP(OpKind::Conversion);
+ case SimdOp::I8x16ReplaceLane:
+ case SimdOp::I16x8ReplaceLane:
+ case SimdOp::I32x4ReplaceLane:
+ case SimdOp::I64x2ReplaceLane:
+ case SimdOp::F32x4ReplaceLane:
+ case SimdOp::F64x2ReplaceLane:
+ WASM_SIMD_OP(OpKind::ReplaceLane);
+ case SimdOp::I8x16Eq:
+ case SimdOp::I8x16Ne:
+ case SimdOp::I8x16LtS:
+ case SimdOp::I8x16LtU:
+ case SimdOp::I8x16GtS:
+ case SimdOp::I8x16GtU:
+ case SimdOp::I8x16LeS:
+ case SimdOp::I8x16LeU:
+ case SimdOp::I8x16GeS:
+ case SimdOp::I8x16GeU:
+ case SimdOp::I16x8Eq:
+ case SimdOp::I16x8Ne:
+ case SimdOp::I16x8LtS:
+ case SimdOp::I16x8LtU:
+ case SimdOp::I16x8GtS:
+ case SimdOp::I16x8GtU:
+ case SimdOp::I16x8LeS:
+ case SimdOp::I16x8LeU:
+ case SimdOp::I16x8GeS:
+ case SimdOp::I16x8GeU:
+ case SimdOp::I32x4Eq:
+ case SimdOp::I32x4Ne:
+ case SimdOp::I32x4LtS:
+ case SimdOp::I32x4LtU:
+ case SimdOp::I32x4GtS:
+ case SimdOp::I32x4GtU:
+ case SimdOp::I32x4LeS:
+ case SimdOp::I32x4LeU:
+ case SimdOp::I32x4GeS:
+ case SimdOp::I32x4GeU:
+ case SimdOp::I64x2Eq:
+ case SimdOp::I64x2Ne:
+ case SimdOp::I64x2LtS:
+ case SimdOp::I64x2GtS:
+ case SimdOp::I64x2LeS:
+ case SimdOp::I64x2GeS:
+ case SimdOp::F32x4Eq:
+ case SimdOp::F32x4Ne:
+ case SimdOp::F32x4Lt:
+ case SimdOp::F32x4Gt:
+ case SimdOp::F32x4Le:
+ case SimdOp::F32x4Ge:
+ case SimdOp::F64x2Eq:
+ case SimdOp::F64x2Ne:
+ case SimdOp::F64x2Lt:
+ case SimdOp::F64x2Gt:
+ case SimdOp::F64x2Le:
+ case SimdOp::F64x2Ge:
+ case SimdOp::V128And:
+ case SimdOp::V128Or:
+ case SimdOp::V128Xor:
+ case SimdOp::V128AndNot:
+ case SimdOp::I8x16AvgrU:
+ case SimdOp::I16x8AvgrU:
+ case SimdOp::I8x16Add:
+ case SimdOp::I8x16AddSatS:
+ case SimdOp::I8x16AddSatU:
+ case SimdOp::I8x16Sub:
+ case SimdOp::I8x16SubSatS:
+ case SimdOp::I8x16SubSatU:
+ case SimdOp::I8x16MinS:
+ case SimdOp::I8x16MaxS:
+ case SimdOp::I8x16MinU:
+ case SimdOp::I8x16MaxU:
+ case SimdOp::I16x8Add:
+ case SimdOp::I16x8AddSatS:
+ case SimdOp::I16x8AddSatU:
+ case SimdOp::I16x8Sub:
+ case SimdOp::I16x8SubSatS:
+ case SimdOp::I16x8SubSatU:
+ case SimdOp::I16x8Mul:
+ case SimdOp::I16x8MinS:
+ case SimdOp::I16x8MaxS:
+ case SimdOp::I16x8MinU:
+ case SimdOp::I16x8MaxU:
+ case SimdOp::I32x4Add:
+ case SimdOp::I32x4Sub:
+ case SimdOp::I32x4Mul:
+ case SimdOp::I32x4MinS:
+ case SimdOp::I32x4MaxS:
+ case SimdOp::I32x4MinU:
+ case SimdOp::I32x4MaxU:
+ case SimdOp::I64x2Add:
+ case SimdOp::I64x2Sub:
+ case SimdOp::I64x2Mul:
+ case SimdOp::F32x4Add:
+ case SimdOp::F32x4Sub:
+ case SimdOp::F32x4Mul:
+ case SimdOp::F32x4Div:
+ case SimdOp::F32x4Min:
+ case SimdOp::F32x4Max:
+ case SimdOp::F64x2Add:
+ case SimdOp::F64x2Sub:
+ case SimdOp::F64x2Mul:
+ case SimdOp::F64x2Div:
+ case SimdOp::F64x2Min:
+ case SimdOp::F64x2Max:
+ case SimdOp::I8x16NarrowI16x8S:
+ case SimdOp::I8x16NarrowI16x8U:
+ case SimdOp::I16x8NarrowI32x4S:
+ case SimdOp::I16x8NarrowI32x4U:
+ case SimdOp::I8x16Swizzle:
+ case SimdOp::F32x4PMin:
+ case SimdOp::F32x4PMax:
+ case SimdOp::F64x2PMin:
+ case SimdOp::F64x2PMax:
+ case SimdOp::I32x4DotI16x8S:
+ case SimdOp::I16x8ExtmulLowI8x16S:
+ case SimdOp::I16x8ExtmulHighI8x16S:
+ case SimdOp::I16x8ExtmulLowI8x16U:
+ case SimdOp::I16x8ExtmulHighI8x16U:
+ case SimdOp::I32x4ExtmulLowI16x8S:
+ case SimdOp::I32x4ExtmulHighI16x8S:
+ case SimdOp::I32x4ExtmulLowI16x8U:
+ case SimdOp::I32x4ExtmulHighI16x8U:
+ case SimdOp::I64x2ExtmulLowI32x4S:
+ case SimdOp::I64x2ExtmulHighI32x4S:
+ case SimdOp::I64x2ExtmulLowI32x4U:
+ case SimdOp::I64x2ExtmulHighI32x4U:
+ case SimdOp::I16x8Q15MulrSatS:
+ case SimdOp::F32x4RelaxedMin:
+ case SimdOp::F32x4RelaxedMax:
+ case SimdOp::F64x2RelaxedMin:
+ case SimdOp::F64x2RelaxedMax:
+ case SimdOp::I8x16RelaxedSwizzle:
+ case SimdOp::I16x8RelaxedQ15MulrS:
+ case SimdOp::I16x8DotI8x16I7x16S:
+ WASM_SIMD_OP(OpKind::Binary);
+ case SimdOp::I8x16Neg:
+ case SimdOp::I16x8Neg:
+ case SimdOp::I16x8ExtendLowI8x16S:
+ case SimdOp::I16x8ExtendHighI8x16S:
+ case SimdOp::I16x8ExtendLowI8x16U:
+ case SimdOp::I16x8ExtendHighI8x16U:
+ case SimdOp::I32x4Neg:
+ case SimdOp::I32x4ExtendLowI16x8S:
+ case SimdOp::I32x4ExtendHighI16x8S:
+ case SimdOp::I32x4ExtendLowI16x8U:
+ case SimdOp::I32x4ExtendHighI16x8U:
+ case SimdOp::I32x4TruncSatF32x4S:
+ case SimdOp::I32x4TruncSatF32x4U:
+ case SimdOp::I64x2Neg:
+ case SimdOp::I64x2ExtendLowI32x4S:
+ case SimdOp::I64x2ExtendHighI32x4S:
+ case SimdOp::I64x2ExtendLowI32x4U:
+ case SimdOp::I64x2ExtendHighI32x4U:
+ case SimdOp::F32x4Abs:
+ case SimdOp::F32x4Neg:
+ case SimdOp::F32x4Sqrt:
+ case SimdOp::F32x4ConvertI32x4S:
+ case SimdOp::F32x4ConvertI32x4U:
+ case SimdOp::F64x2Abs:
+ case SimdOp::F64x2Neg:
+ case SimdOp::F64x2Sqrt:
+ case SimdOp::V128Not:
+ case SimdOp::I8x16Popcnt:
+ case SimdOp::I8x16Abs:
+ case SimdOp::I16x8Abs:
+ case SimdOp::I32x4Abs:
+ case SimdOp::I64x2Abs:
+ case SimdOp::F32x4Ceil:
+ case SimdOp::F32x4Floor:
+ case SimdOp::F32x4Trunc:
+ case SimdOp::F32x4Nearest:
+ case SimdOp::F64x2Ceil:
+ case SimdOp::F64x2Floor:
+ case SimdOp::F64x2Trunc:
+ case SimdOp::F64x2Nearest:
+ case SimdOp::F32x4DemoteF64x2Zero:
+ case SimdOp::F64x2PromoteLowF32x4:
+ case SimdOp::F64x2ConvertLowI32x4S:
+ case SimdOp::F64x2ConvertLowI32x4U:
+ case SimdOp::I32x4TruncSatF64x2SZero:
+ case SimdOp::I32x4TruncSatF64x2UZero:
+ case SimdOp::I16x8ExtaddPairwiseI8x16S:
+ case SimdOp::I16x8ExtaddPairwiseI8x16U:
+ case SimdOp::I32x4ExtaddPairwiseI16x8S:
+ case SimdOp::I32x4ExtaddPairwiseI16x8U:
+ case SimdOp::I32x4RelaxedTruncF32x4S:
+ case SimdOp::I32x4RelaxedTruncF32x4U:
+ case SimdOp::I32x4RelaxedTruncF64x2SZero:
+ case SimdOp::I32x4RelaxedTruncF64x2UZero:
+ WASM_SIMD_OP(OpKind::Unary);
+ case SimdOp::I8x16Shl:
+ case SimdOp::I8x16ShrS:
+ case SimdOp::I8x16ShrU:
+ case SimdOp::I16x8Shl:
+ case SimdOp::I16x8ShrS:
+ case SimdOp::I16x8ShrU:
+ case SimdOp::I32x4Shl:
+ case SimdOp::I32x4ShrS:
+ case SimdOp::I32x4ShrU:
+ case SimdOp::I64x2Shl:
+ case SimdOp::I64x2ShrS:
+ case SimdOp::I64x2ShrU:
+ WASM_SIMD_OP(OpKind::VectorShift);
+ case SimdOp::V128Bitselect:
+ WASM_SIMD_OP(OpKind::Ternary);
+ case SimdOp::I8x16Shuffle:
+ WASM_SIMD_OP(OpKind::VectorShuffle);
+ case SimdOp::V128Const:
+ WASM_SIMD_OP(OpKind::V128);
+ case SimdOp::V128Load:
+ case SimdOp::V128Load8Splat:
+ case SimdOp::V128Load16Splat:
+ case SimdOp::V128Load32Splat:
+ case SimdOp::V128Load64Splat:
+ case SimdOp::V128Load8x8S:
+ case SimdOp::V128Load8x8U:
+ case SimdOp::V128Load16x4S:
+ case SimdOp::V128Load16x4U:
+ case SimdOp::V128Load32x2S:
+ case SimdOp::V128Load32x2U:
+ case SimdOp::V128Load32Zero:
+ case SimdOp::V128Load64Zero:
+ WASM_SIMD_OP(OpKind::Load);
+ case SimdOp::V128Store:
+ WASM_SIMD_OP(OpKind::Store);
+ case SimdOp::V128Load8Lane:
+ case SimdOp::V128Load16Lane:
+ case SimdOp::V128Load32Lane:
+ case SimdOp::V128Load64Lane:
+ WASM_SIMD_OP(OpKind::LoadLane);
+ case SimdOp::V128Store8Lane:
+ case SimdOp::V128Store16Lane:
+ case SimdOp::V128Store32Lane:
+ case SimdOp::V128Store64Lane:
+ WASM_SIMD_OP(OpKind::StoreLane);
+ case SimdOp::F32x4RelaxedFma:
+ case SimdOp::F32x4RelaxedFnma:
+ case SimdOp::F64x2RelaxedFma:
+ case SimdOp::F64x2RelaxedFnma:
+ case SimdOp::I8x16RelaxedLaneSelect:
+ case SimdOp::I16x8RelaxedLaneSelect:
+ case SimdOp::I32x4RelaxedLaneSelect:
+ case SimdOp::I64x2RelaxedLaneSelect:
+ case SimdOp::I32x4DotI8x16I7x16AddS:
+ WASM_SIMD_OP(OpKind::Ternary);
+ }
+ break;
+ }
+ case Op::MiscPrefix: {
+ switch (MiscOp(op.b1)) {
+ case MiscOp::Limit:
+ // Reject Limit for MiscPrefix encoding
+ break;
+ case MiscOp::I32TruncSatF32S:
+ case MiscOp::I32TruncSatF32U:
+ case MiscOp::I32TruncSatF64S:
+ case MiscOp::I32TruncSatF64U:
+ case MiscOp::I64TruncSatF32S:
+ case MiscOp::I64TruncSatF32U:
+ case MiscOp::I64TruncSatF64S:
+ case MiscOp::I64TruncSatF64U:
+ return OpKind::Conversion;
+ case MiscOp::MemoryCopy:
+ case MiscOp::TableCopy:
+ return OpKind::MemOrTableCopy;
+ case MiscOp::DataDrop:
+ case MiscOp::ElemDrop:
+ return OpKind::DataOrElemDrop;
+ case MiscOp::MemoryFill:
+ return OpKind::MemFill;
+ case MiscOp::MemoryInit:
+ case MiscOp::TableInit:
+ return OpKind::MemOrTableInit;
+ case MiscOp::TableFill:
+ return OpKind::TableFill;
+ case MiscOp::MemoryDiscard:
+ return OpKind::MemDiscard;
+ case MiscOp::TableGrow:
+ return OpKind::TableGrow;
+ case MiscOp::TableSize:
+ return OpKind::TableSize;
+ }
+ break;
+ }
+ case Op::ThreadPrefix: {
+ switch (ThreadOp(op.b1)) {
+ case ThreadOp::Limit:
+ // Reject Limit for ThreadPrefix encoding
+ break;
+ case ThreadOp::Wake:
+ return OpKind::Wake;
+ case ThreadOp::I32Wait:
+ case ThreadOp::I64Wait:
+ return OpKind::Wait;
+ case ThreadOp::Fence:
+ return OpKind::Fence;
+ case ThreadOp::I32AtomicLoad:
+ case ThreadOp::I64AtomicLoad:
+ case ThreadOp::I32AtomicLoad8U:
+ case ThreadOp::I32AtomicLoad16U:
+ case ThreadOp::I64AtomicLoad8U:
+ case ThreadOp::I64AtomicLoad16U:
+ case ThreadOp::I64AtomicLoad32U:
+ return OpKind::AtomicLoad;
+ case ThreadOp::I32AtomicStore:
+ case ThreadOp::I64AtomicStore:
+ case ThreadOp::I32AtomicStore8U:
+ case ThreadOp::I32AtomicStore16U:
+ case ThreadOp::I64AtomicStore8U:
+ case ThreadOp::I64AtomicStore16U:
+ case ThreadOp::I64AtomicStore32U:
+ return OpKind::AtomicStore;
+ case ThreadOp::I32AtomicAdd:
+ case ThreadOp::I64AtomicAdd:
+ case ThreadOp::I32AtomicAdd8U:
+ case ThreadOp::I32AtomicAdd16U:
+ case ThreadOp::I64AtomicAdd8U:
+ case ThreadOp::I64AtomicAdd16U:
+ case ThreadOp::I64AtomicAdd32U:
+ case ThreadOp::I32AtomicSub:
+ case ThreadOp::I64AtomicSub:
+ case ThreadOp::I32AtomicSub8U:
+ case ThreadOp::I32AtomicSub16U:
+ case ThreadOp::I64AtomicSub8U:
+ case ThreadOp::I64AtomicSub16U:
+ case ThreadOp::I64AtomicSub32U:
+ case ThreadOp::I32AtomicAnd:
+ case ThreadOp::I64AtomicAnd:
+ case ThreadOp::I32AtomicAnd8U:
+ case ThreadOp::I32AtomicAnd16U:
+ case ThreadOp::I64AtomicAnd8U:
+ case ThreadOp::I64AtomicAnd16U:
+ case ThreadOp::I64AtomicAnd32U:
+ case ThreadOp::I32AtomicOr:
+ case ThreadOp::I64AtomicOr:
+ case ThreadOp::I32AtomicOr8U:
+ case ThreadOp::I32AtomicOr16U:
+ case ThreadOp::I64AtomicOr8U:
+ case ThreadOp::I64AtomicOr16U:
+ case ThreadOp::I64AtomicOr32U:
+ case ThreadOp::I32AtomicXor:
+ case ThreadOp::I64AtomicXor:
+ case ThreadOp::I32AtomicXor8U:
+ case ThreadOp::I32AtomicXor16U:
+ case ThreadOp::I64AtomicXor8U:
+ case ThreadOp::I64AtomicXor16U:
+ case ThreadOp::I64AtomicXor32U:
+ case ThreadOp::I32AtomicXchg:
+ case ThreadOp::I64AtomicXchg:
+ case ThreadOp::I32AtomicXchg8U:
+ case ThreadOp::I32AtomicXchg16U:
+ case ThreadOp::I64AtomicXchg8U:
+ case ThreadOp::I64AtomicXchg16U:
+ case ThreadOp::I64AtomicXchg32U:
+ return OpKind::AtomicBinOp;
+ case ThreadOp::I32AtomicCmpXchg:
+ case ThreadOp::I64AtomicCmpXchg:
+ case ThreadOp::I32AtomicCmpXchg8U:
+ case ThreadOp::I32AtomicCmpXchg16U:
+ case ThreadOp::I64AtomicCmpXchg8U:
+ case ThreadOp::I64AtomicCmpXchg16U:
+ case ThreadOp::I64AtomicCmpXchg32U:
+ return OpKind::AtomicCompareExchange;
+ default:
+ break;
+ }
+ break;
+ }
+ case Op::MozPrefix: {
+ switch (MozOp(op.b1)) {
+ case MozOp::Limit:
+ // Reject Limit for the MozPrefix encoding
+ break;
+ case MozOp::TeeGlobal:
+ return OpKind::TeeGlobal;
+ case MozOp::I32BitNot:
+ case MozOp::I32Abs:
+ case MozOp::I32Neg:
+ return OpKind::Unary;
+ case MozOp::I32Min:
+ case MozOp::I32Max:
+ case MozOp::F64Mod:
+ case MozOp::F64Pow:
+ case MozOp::F64Atan2:
+ return OpKind::Binary;
+ case MozOp::F64SinNative:
+ case MozOp::F64SinFdlibm:
+ case MozOp::F64CosNative:
+ case MozOp::F64CosFdlibm:
+ case MozOp::F64TanNative:
+ case MozOp::F64TanFdlibm:
+ case MozOp::F64Asin:
+ case MozOp::F64Acos:
+ case MozOp::F64Atan:
+ case MozOp::F64Exp:
+ case MozOp::F64Log:
+ return OpKind::Unary;
+ case MozOp::I32TeeStore8:
+ case MozOp::I32TeeStore16:
+ case MozOp::I64TeeStore8:
+ case MozOp::I64TeeStore16:
+ case MozOp::I64TeeStore32:
+ case MozOp::I32TeeStore:
+ case MozOp::I64TeeStore:
+ case MozOp::F32TeeStore:
+ case MozOp::F64TeeStore:
+ case MozOp::F32TeeStoreF64:
+ case MozOp::F64TeeStoreF32:
+ return OpKind::TeeStore;
+ case MozOp::OldCallDirect:
+ return OpKind::OldCallDirect;
+ case MozOp::OldCallIndirect:
+ return OpKind::OldCallIndirect;
+ case MozOp::Intrinsic:
+ return OpKind::Intrinsic;
+ }
+ break;
+ }
+ case Op::FirstPrefix:
+ break;
+ }
+ MOZ_CRASH("unimplemented opcode");
+}
+
+# undef WASM_GC_OP
+# undef WASM_REF_OP
+
+#endif // DEBUG
+
+bool UnsetLocalsState::init(const ValTypeVector& locals, size_t numParams) {
+ MOZ_ASSERT(setLocalsStack_.empty());
+
+  // Find the index of the first non-defaultable local and the total count of
+  // non-defaultable locals.
+ size_t firstNonDefaultable = UINT32_MAX;
+ size_t countNonDefaultable = 0;
+ for (size_t i = numParams; i < locals.length(); i++) {
+ if (!locals[i].isDefaultable()) {
+ firstNonDefaultable = std::min(i, firstNonDefaultable);
+ countNonDefaultable++;
+ }
+ }
+ firstNonDefaultLocal_ = firstNonDefaultable;
+ if (countNonDefaultable == 0) {
+    // No non-defaultable locals to track; nothing further to allocate.
+ MOZ_ASSERT(firstNonDefaultable == UINT32_MAX);
+ return true;
+ }
+
+  // setLocalsStack_ cannot be deeper than the number of non-defaultable
+  // locals.
+ if (!setLocalsStack_.reserve(countNonDefaultable)) {
+ return false;
+ }
+
+ // Allocate a bitmap for locals starting at the first non-defaultable local.
+ size_t bitmapSize =
+ ((locals.length() - firstNonDefaultable) + (WordBits - 1)) / WordBits;
+ if (!unsetLocals_.resize(bitmapSize)) {
+ return false;
+ }
+ memset(unsetLocals_.begin(), 0, bitmapSize * WordSize);
+ for (size_t i = firstNonDefaultable; i < locals.length(); i++) {
+ if (!locals[i].isDefaultable()) {
+ size_t localUnsetIndex = i - firstNonDefaultable;
+ unsetLocals_[localUnsetIndex / WordBits] |=
+ 1 << (localUnsetIndex % WordBits);
+ }
+ }
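+  // As a worked example (with hypothetical locals): given WordBits == 32,
+  // numParams == 1, and locals [i32, (ref $t), i64, (ref $u)], the two
+  // non-nullable references are the non-defaultable locals, so
+  // firstNonDefaultable == 1, countNonDefaultable == 2, bitmapSize == 1, and
+  // the loop above sets bits 0 and 2 of unsetLocals_[0].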
+ return true;
+}
diff --git a/js/src/wasm/WasmOpIter.h b/js/src/wasm/WasmOpIter.h
new file mode 100644
index 0000000000..c175921dc9
--- /dev/null
+++ b/js/src/wasm/WasmOpIter.h
@@ -0,0 +1,4239 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_op_iter_h
+#define wasm_op_iter_h
+
+#include "mozilla/CompactPair.h"
+#include "mozilla/Poison.h"
+
+#include <type_traits>
+
+#include "js/Printf.h"
+#include "wasm/WasmIntrinsic.h"
+#include "wasm/WasmUtility.h"
+#include "wasm/WasmValidate.h"
+
+namespace js {
+namespace wasm {
+
+// The kind of a control-flow stack item.
+enum class LabelKind : uint8_t {
+ Body,
+ Block,
+ Loop,
+ Then,
+ Else,
+ Try,
+ Catch,
+ CatchAll,
+};
+
+// The type of values on the operand stack during validation. This is either a
+// ValType or the special type "Bottom".
+
+class StackType {
+ PackedTypeCode tc_;
+
+ explicit StackType(PackedTypeCode tc) : tc_(tc) {}
+
+ public:
+ StackType() : tc_(PackedTypeCode::invalid()) {}
+
+ explicit StackType(const ValType& t) : tc_(t.packed()) {
+ MOZ_ASSERT(tc_.isValid());
+ MOZ_ASSERT(!isStackBottom());
+ }
+
+ static StackType bottom() {
+ return StackType(PackedTypeCode::pack(TypeCode::Limit));
+ }
+
+ bool isStackBottom() const {
+ MOZ_ASSERT(tc_.isValid());
+ return tc_.typeCode() == TypeCode::Limit;
+ }
+
+ // Returns whether this input is nullable when interpreted as an operand.
+ // When the type is bottom for unreachable code, this returns false as that
+ // is the most permissive option.
+ bool isNullableAsOperand() const {
+ MOZ_ASSERT(tc_.isValid());
+ return isStackBottom() ? false : tc_.isNullable();
+ }
+
+ ValType valType() const {
+ MOZ_ASSERT(tc_.isValid());
+ MOZ_ASSERT(!isStackBottom());
+ return ValType(tc_);
+ }
+
+ ValType valTypeOr(ValType ifBottom) const {
+ MOZ_ASSERT(tc_.isValid());
+ if (isStackBottom()) {
+ return ifBottom;
+ }
+ return valType();
+ }
+
+ ValType asNonNullable() const {
+ MOZ_ASSERT(tc_.isValid());
+ MOZ_ASSERT(!isStackBottom());
+ return ValType(tc_.withIsNullable(false));
+ }
+
+ bool isValidForUntypedSelect() const {
+ MOZ_ASSERT(tc_.isValid());
+ if (isStackBottom()) {
+ return true;
+ }
+ switch (valType().kind()) {
+ case ValType::I32:
+ case ValType::F32:
+ case ValType::I64:
+ case ValType::F64:
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+#endif
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool operator==(const StackType& that) const {
+ MOZ_ASSERT(tc_.isValid() && that.tc_.isValid());
+ return tc_ == that.tc_;
+ }
+
+ bool operator!=(const StackType& that) const {
+ MOZ_ASSERT(tc_.isValid() && that.tc_.isValid());
+ return tc_ != that.tc_;
+ }
+};
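+
+// Note on the bottom type: values popped while validating unreachable code
+// (for example, after `unreachable` or `br`) may carry StackType::bottom(),
+// which type-checks against any expected type below. Callers that need a
+// concrete ValType should use valTypeOr() or handle the bottom case
+// explicitly.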
+
+#ifdef DEBUG
+// Families of opcodes that share a signature and validation logic.
+enum class OpKind {
+ Block,
+ Loop,
+ Unreachable,
+ Drop,
+ I32,
+ I64,
+ F32,
+ F64,
+ V128,
+ Br,
+ BrIf,
+ BrTable,
+ Nop,
+ Unary,
+ Binary,
+ Ternary,
+ Comparison,
+ Conversion,
+ Load,
+ Store,
+ TeeStore,
+ MemorySize,
+ MemoryGrow,
+ Select,
+ GetLocal,
+ SetLocal,
+ TeeLocal,
+ GetGlobal,
+ SetGlobal,
+ TeeGlobal,
+ Call,
+ CallIndirect,
+# ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ CallRef,
+# endif
+ OldCallDirect,
+ OldCallIndirect,
+ Return,
+ If,
+ Else,
+ End,
+ Wait,
+ Wake,
+ Fence,
+ AtomicLoad,
+ AtomicStore,
+ AtomicBinOp,
+ AtomicCompareExchange,
+ MemOrTableCopy,
+ DataOrElemDrop,
+ MemFill,
+ MemOrTableInit,
+ TableFill,
+ MemDiscard,
+ TableGet,
+ TableGrow,
+ TableSet,
+ TableSize,
+ RefNull,
+ RefFunc,
+ RefAsNonNull,
+ BrOnNull,
+ BrOnNonNull,
+ StructNew,
+ StructNewDefault,
+ StructGet,
+ StructSet,
+ ArrayNew,
+ ArrayNewFixed,
+ ArrayNewDefault,
+ ArrayNewData,
+ ArrayNewElem,
+ ArrayGet,
+ ArraySet,
+ ArrayLen,
+ ArrayCopy,
+ RefTestV5,
+ RefCastV5,
+ BrOnCastV5,
+ BrOnCastFailV5,
+ BrOnNonStructV5,
+ RefTest,
+ RefCast,
+ BrOnCast,
+ RefConversion,
+# ifdef ENABLE_WASM_SIMD
+ ExtractLane,
+ ReplaceLane,
+ LoadLane,
+ StoreLane,
+ VectorShift,
+ VectorShuffle,
+# endif
+ Catch,
+ CatchAll,
+ Delegate,
+ Throw,
+ Rethrow,
+ Try,
+ Intrinsic,
+};
+
+// Return the OpKind for a given Op. This is used for sanity-checking that
+// API users use the correct read function for a given Op.
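+//
+// For example, OpIter<Policy>::readReturn() below asserts that
+// Classify(op_) == OpKind::Return, so calling the wrong read* method for the
+// opcode just read trips an assertion in DEBUG builds.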
+OpKind Classify(OpBytes op);
+#endif
+
+// Common fields for linear memory access.
+template <typename Value>
+struct LinearMemoryAddress {
+ Value base;
+ uint64_t offset;
+ uint32_t align;
+
+ LinearMemoryAddress() : offset(0), align(0) {}
+ LinearMemoryAddress(Value base, uint64_t offset, uint32_t align)
+ : base(base), offset(offset), align(align) {}
+};
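+
+// For a plain memory access such as `(i32.load offset=8)`, `base` is the
+// address operand popped from the value stack, `offset` is the instruction's
+// immediate, and `align` records the declared alignment, which the read
+// methods below validate against the access size.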
+
+template <typename ControlItem>
+class ControlStackEntry {
+ // Use a pair to optimize away empty ControlItem.
+ mozilla::CompactPair<BlockType, ControlItem> typeAndItem_;
+
+ // The "base" of a control stack entry is valueStack_.length() minus
+ // type().params().length(), i.e., the size of the value stack "below"
+ // this block.
+ uint32_t valueStackBase_;
+ bool polymorphicBase_;
+
+ LabelKind kind_;
+
+ public:
+ ControlStackEntry(LabelKind kind, BlockType type, uint32_t valueStackBase)
+ : typeAndItem_(type, ControlItem()),
+ valueStackBase_(valueStackBase),
+ polymorphicBase_(false),
+ kind_(kind) {
+ MOZ_ASSERT(type != BlockType());
+ }
+
+ LabelKind kind() const { return kind_; }
+ BlockType type() const { return typeAndItem_.first(); }
+ ResultType resultType() const { return type().results(); }
+ ResultType branchTargetType() const {
+ return kind_ == LabelKind::Loop ? type().params() : type().results();
+ }
+ uint32_t valueStackBase() const { return valueStackBase_; }
+ ControlItem& controlItem() { return typeAndItem_.second(); }
+ void setPolymorphicBase() { polymorphicBase_ = true; }
+ bool polymorphicBase() const { return polymorphicBase_; }
+
+ void switchToElse() {
+ MOZ_ASSERT(kind() == LabelKind::Then);
+ kind_ = LabelKind::Else;
+ polymorphicBase_ = false;
+ }
+
+ void switchToCatch() {
+ MOZ_ASSERT(kind() == LabelKind::Try || kind() == LabelKind::Catch);
+ kind_ = LabelKind::Catch;
+ polymorphicBase_ = false;
+ }
+
+ void switchToCatchAll() {
+ MOZ_ASSERT(kind() == LabelKind::Try || kind() == LabelKind::Catch);
+ kind_ = LabelKind::CatchAll;
+ polymorphicBase_ = false;
+ }
+};
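+
+// Note the asymmetry in branchTargetType() above: a branch to a `loop` label
+// jumps back to the start of the loop and therefore expects the loop's
+// parameter types, while a branch to any other label leaves the construct and
+// expects its result types.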
+
+// Tracks the state of non-defaultable locals. Every time such a local is
+// initialized, the stack records at which depth and which local was set.
+// At the end of a block, the "unset" state is rolled back to what it was
+// before the block started.
+//
+// It is very likely that only a few functions will have non-defaultable
+// locals, and that very few of their locals will be non-defaultable. This
+// class is optimized to be fast for that common case.
+class UnsetLocalsState {
+ struct SetLocalEntry {
+ uint32_t depth;
+ uint32_t localUnsetIndex;
+ SetLocalEntry(uint32_t depth_, uint32_t localUnsetIndex_)
+ : depth(depth_), localUnsetIndex(localUnsetIndex_) {}
+ };
+ using SetLocalsStack = Vector<SetLocalEntry, 16, SystemAllocPolicy>;
+ using UnsetLocals = Vector<uint32_t, 16, SystemAllocPolicy>;
+
+ static constexpr size_t WordSize = 4;
+ static constexpr size_t WordBits = WordSize * 8;
+
+ // Bit array of "unset" function locals. Stores only unset states of the
+ // locals that are declared after the first non-defaultable local.
+ UnsetLocals unsetLocals_;
+ // Stack of "set" operations. Contains pair where the first field is a depth,
+ // and the second field is local id (offset by firstNonDefaultLocal_).
+ SetLocalsStack setLocalsStack_;
+ uint32_t firstNonDefaultLocal_;
+
+ public:
+ UnsetLocalsState() : firstNonDefaultLocal_(UINT32_MAX) {}
+
+ [[nodiscard]] bool init(const ValTypeVector& locals, size_t numParams);
+
+ inline bool isUnset(uint32_t id) const {
+ if (MOZ_LIKELY(id < firstNonDefaultLocal_)) {
+ return false;
+ }
+ uint32_t localUnsetIndex = id - firstNonDefaultLocal_;
+ return unsetLocals_[localUnsetIndex / WordBits] &
+ (1 << (localUnsetIndex % WordBits));
+ }
+
+ inline void set(uint32_t id, uint32_t depth) {
+ MOZ_ASSERT(isUnset(id));
+ MOZ_ASSERT(id >= firstNonDefaultLocal_ &&
+ (id - firstNonDefaultLocal_) / WordBits < unsetLocals_.length());
+ uint32_t localUnsetIndex = id - firstNonDefaultLocal_;
+ unsetLocals_[localUnsetIndex / WordBits] ^= 1
+ << (localUnsetIndex % WordBits);
+    // setLocalsStack_ was reserved upfront in UnsetLocalsState::init, and a
+    // SetLocalEntry is pushed only once per local, so this append cannot fail.
+ setLocalsStack_.infallibleEmplaceBack(depth, localUnsetIndex);
+ }
+
+ inline void resetToBlock(uint32_t controlDepth) {
+ while (MOZ_UNLIKELY(setLocalsStack_.length() > 0) &&
+ setLocalsStack_.back().depth > controlDepth) {
+ uint32_t localUnsetIndex = setLocalsStack_.back().localUnsetIndex;
+ MOZ_ASSERT(!(unsetLocals_[localUnsetIndex / WordBits] &
+ (1 << (localUnsetIndex % WordBits))));
+ unsetLocals_[localUnsetIndex / WordBits] |=
+ 1 << (localUnsetIndex % WordBits);
+ setLocalsStack_.popBack();
+ }
+ }
+
+  bool empty() const { return setLocalsStack_.empty(); }
+};
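+
+// A rough sketch of how OpIter uses this state (see the read* methods below):
+// local.get of a local for which isUnset() is true is a validation error;
+// local.set and local.tee call set() the first time a non-defaultable local
+// is written; and popping a control entry calls resetToBlock() so that locals
+// initialized inside the block count as unset again outside of it.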
+
+template <typename Value>
+class TypeAndValueT {
+ // Use a Pair to optimize away empty Value.
+ mozilla::CompactPair<StackType, Value> tv_;
+
+ public:
+ TypeAndValueT() : tv_(StackType::bottom(), Value()) {}
+ explicit TypeAndValueT(StackType type) : tv_(type, Value()) {}
+ explicit TypeAndValueT(ValType type) : tv_(StackType(type), Value()) {}
+ TypeAndValueT(StackType type, Value value) : tv_(type, value) {}
+ TypeAndValueT(ValType type, Value value) : tv_(StackType(type), value) {}
+ StackType type() const { return tv_.first(); }
+ void setType(StackType type) { tv_.first() = type; }
+ Value value() const { return tv_.second(); }
+ void setValue(Value value) { tv_.second() = value; }
+};
+
+// An iterator over the bytes of a function body. It performs validation
+// and unpacks the data into a usable form.
+//
+// The MOZ_STACK_CLASS attribute here is because of the use of DebugOnly.
+// There's otherwise nothing inherent in this class which would require
+// it to be used on the stack.
+template <typename Policy>
+class MOZ_STACK_CLASS OpIter : private Policy {
+ public:
+ using Value = typename Policy::Value;
+ using ValueVector = typename Policy::ValueVector;
+ using TypeAndValue = TypeAndValueT<Value>;
+ using TypeAndValueStack = Vector<TypeAndValue, 32, SystemAllocPolicy>;
+ using ControlItem = typename Policy::ControlItem;
+ using Control = ControlStackEntry<ControlItem>;
+ using ControlStack = Vector<Control, 16, SystemAllocPolicy>;
+
+ enum Kind {
+ Func,
+ InitExpr,
+ };
+
+ private:
+ Kind kind_;
+ Decoder& d_;
+ const ModuleEnvironment& env_;
+
+ TypeAndValueStack valueStack_;
+ TypeAndValueStack elseParamStack_;
+ ControlStack controlStack_;
+ UnsetLocalsState unsetLocals_;
+ // The exclusive max index of a global that can be accessed by global.get in
+ // this expression. When GC is enabled, this is any previously defined
+ // global. Otherwise this is always set to zero, and only imported immutable
+ // globals are allowed.
+ uint32_t maxInitializedGlobalsIndexPlus1_;
+
+#ifdef DEBUG
+ OpBytes op_;
+#endif
+ size_t offsetOfLastReadOp_;
+
+ [[nodiscard]] bool readFixedU8(uint8_t* out) { return d_.readFixedU8(out); }
+ [[nodiscard]] bool readFixedU32(uint32_t* out) {
+ return d_.readFixedU32(out);
+ }
+ [[nodiscard]] bool readVarS32(int32_t* out) { return d_.readVarS32(out); }
+ [[nodiscard]] bool readVarU32(uint32_t* out) { return d_.readVarU32(out); }
+ [[nodiscard]] bool readVarS64(int64_t* out) { return d_.readVarS64(out); }
+ [[nodiscard]] bool readVarU64(uint64_t* out) { return d_.readVarU64(out); }
+ [[nodiscard]] bool readFixedF32(float* out) { return d_.readFixedF32(out); }
+ [[nodiscard]] bool readFixedF64(double* out) { return d_.readFixedF64(out); }
+
+ [[nodiscard]] bool readMemOrTableIndex(bool isMem, uint32_t* index);
+ [[nodiscard]] bool readLinearMemoryAddress(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readLinearMemoryAddressAligned(
+ uint32_t byteSize, LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readBlockType(BlockType* type);
+ [[nodiscard]] bool readGcTypeIndex(uint32_t* typeIndex);
+ [[nodiscard]] bool readStructTypeIndex(uint32_t* typeIndex);
+ [[nodiscard]] bool readArrayTypeIndex(uint32_t* typeIndex);
+ [[nodiscard]] bool readFuncTypeIndex(uint32_t* typeIndex);
+ [[nodiscard]] bool readFieldIndex(uint32_t* fieldIndex,
+ const StructType& structType);
+
+ [[nodiscard]] bool popCallArgs(const ValTypeVector& expectedTypes,
+ ValueVector* values);
+
+ [[nodiscard]] bool failEmptyStack();
+ [[nodiscard]] bool popStackType(StackType* type, Value* value);
+ [[nodiscard]] bool popWithType(ValType expected, Value* value,
+ StackType* stackType);
+ [[nodiscard]] bool popWithType(ValType expected, Value* value);
+ [[nodiscard]] bool popWithType(ResultType expected, ValueVector* values);
+ template <typename ValTypeSpanT>
+ [[nodiscard]] bool popWithTypes(ValTypeSpanT expected, ValueVector* values);
+ [[nodiscard]] bool popWithRefType(Value* value, StackType* type);
+ // Check that the top of the value stack has type `expected`, bearing in
+ // mind that it may be a block type, hence involving multiple values.
+ //
+ // If the block's stack contains polymorphic values at its base (because we
+ // are in unreachable code) then suitable extra values are inserted into the
+ // value stack, as controlled by `rewriteStackTypes`: if this is true,
+ // polymorphic values have their types created/updated from `expected`. If
+ // it is false, such values are left as `StackType::bottom()`.
+ //
+ // If `values` is non-null, it is filled in with Value components of the
+ // relevant stack entries, including those of any new entries created.
+ [[nodiscard]] bool checkTopTypeMatches(ResultType expected,
+ ValueVector* values,
+ bool rewriteStackTypes);
+
+ [[nodiscard]] bool pushControl(LabelKind kind, BlockType type);
+ [[nodiscard]] bool checkStackAtEndOfBlock(ResultType* type,
+ ValueVector* values);
+ [[nodiscard]] bool getControl(uint32_t relativeDepth, Control** controlEntry);
+ [[nodiscard]] bool checkBranchValueAndPush(uint32_t relativeDepth,
+ ResultType* type,
+ ValueVector* values);
+ [[nodiscard]] bool checkBrTableEntryAndPush(uint32_t* relativeDepth,
+ ResultType prevBranchType,
+ ResultType* branchType,
+ ValueVector* branchValues);
+
+ [[nodiscard]] bool push(StackType t) { return valueStack_.emplaceBack(t); }
+ [[nodiscard]] bool push(ValType t) { return valueStack_.emplaceBack(t); }
+ [[nodiscard]] bool push(TypeAndValue tv) { return valueStack_.append(tv); }
+ [[nodiscard]] bool push(ResultType t) {
+ for (size_t i = 0; i < t.length(); i++) {
+ if (!push(t[i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+ void infalliblePush(StackType t) { valueStack_.infallibleEmplaceBack(t); }
+ void infalliblePush(ValType t) {
+ valueStack_.infallibleEmplaceBack(StackType(t));
+ }
+ void infalliblePush(TypeAndValue tv) { valueStack_.infallibleAppend(tv); }
+
+ void afterUnconditionalBranch() {
+ valueStack_.shrinkTo(controlStack_.back().valueStackBase());
+ controlStack_.back().setPolymorphicBase();
+ }
+
+ inline bool checkIsSubtypeOf(FieldType actual, FieldType expected);
+
+ inline bool checkIsSubtypeOf(RefType actual, RefType expected) {
+ return checkIsSubtypeOf(ValType(actual).fieldType(),
+ ValType(expected).fieldType());
+ }
+ inline bool checkIsSubtypeOf(ValType actual, ValType expected) {
+ return checkIsSubtypeOf(actual.fieldType(), expected.fieldType());
+ }
+
+ inline bool checkIsSubtypeOf(ResultType params, ResultType results);
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ inline bool checkIsSubtypeOf(uint32_t actualTypeIndex,
+ uint32_t expectedTypeIndex);
+#endif
+
+ public:
+#ifdef DEBUG
+ explicit OpIter(const ModuleEnvironment& env, Decoder& decoder,
+ Kind kind = OpIter::Func)
+ : kind_(kind),
+ d_(decoder),
+ env_(env),
+ maxInitializedGlobalsIndexPlus1_(0),
+ op_(OpBytes(Op::Limit)),
+ offsetOfLastReadOp_(0) {}
+#else
+ explicit OpIter(const ModuleEnvironment& env, Decoder& decoder,
+ Kind kind = OpIter::Func)
+ : kind_(kind),
+ d_(decoder),
+ env_(env),
+ maxInitializedGlobalsIndexPlus1_(0),
+ offsetOfLastReadOp_(0) {}
+#endif
+
+ // Return the decoding byte offset.
+ uint32_t currentOffset() const { return d_.currentOffset(); }
+
+ // Return the offset within the entire module of the last-read op.
+ size_t lastOpcodeOffset() const {
+ return offsetOfLastReadOp_ ? offsetOfLastReadOp_ : d_.currentOffset();
+ }
+
+  // Return a BytecodeOffset describing where the current op should be
+  // reported to have trapped or made a call.
+ BytecodeOffset bytecodeOffset() const {
+ return BytecodeOffset(lastOpcodeOffset());
+ }
+
+ // Test whether the iterator has reached the end of the buffer.
+ bool done() const { return d_.done(); }
+
+ // Return a pointer to the end of the buffer being decoded by this iterator.
+ const uint8_t* end() const { return d_.end(); }
+
+ // Report a general failure.
+ [[nodiscard]] bool fail(const char* msg) MOZ_COLD;
+
+  // Report a general failure with a context.
+ [[nodiscard]] bool fail_ctx(const char* fmt, const char* context) MOZ_COLD;
+
+ // Report an unrecognized opcode.
+ [[nodiscard]] bool unrecognizedOpcode(const OpBytes* expr) MOZ_COLD;
+
+ // Return whether the innermost block has a polymorphic base of its stack.
+ // Ideally this accessor would be removed; consider using something else.
+ bool currentBlockHasPolymorphicBase() const {
+ return !controlStack_.empty() && controlStack_.back().polymorphicBase();
+ }
+
+ // ------------------------------------------------------------------------
+ // Decoding and validation interface.
+
+ // Initialization and termination
+
+ [[nodiscard]] bool startFunction(uint32_t funcIndex,
+ const ValTypeVector& locals);
+ [[nodiscard]] bool endFunction(const uint8_t* bodyEnd);
+
+ [[nodiscard]] bool startInitExpr(ValType expected,
+ uint32_t maxInitializedGlobalsIndexPlus1);
+ [[nodiscard]] bool endInitExpr();
+
+ // Value and reference types
+
+ [[nodiscard]] bool readValType(ValType* type);
+ [[nodiscard]] bool readHeapType(bool nullable, RefType* type);
+
+ // Instructions
+
+ [[nodiscard]] bool readOp(OpBytes* op);
+ [[nodiscard]] bool readReturn(ValueVector* values);
+ [[nodiscard]] bool readBlock(ResultType* paramType);
+ [[nodiscard]] bool readLoop(ResultType* paramType);
+ [[nodiscard]] bool readIf(ResultType* paramType, Value* condition);
+ [[nodiscard]] bool readElse(ResultType* paramType, ResultType* resultType,
+ ValueVector* thenResults);
+ [[nodiscard]] bool readEnd(LabelKind* kind, ResultType* type,
+ ValueVector* results,
+ ValueVector* resultsForEmptyElse);
+ void popEnd();
+ [[nodiscard]] bool readBr(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values);
+ [[nodiscard]] bool readBrIf(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values, Value* condition);
+ [[nodiscard]] bool readBrTable(Uint32Vector* depths, uint32_t* defaultDepth,
+ ResultType* defaultBranchType,
+ ValueVector* branchValues, Value* index);
+ [[nodiscard]] bool readTry(ResultType* type);
+ [[nodiscard]] bool readCatch(LabelKind* kind, uint32_t* tagIndex,
+ ResultType* paramType, ResultType* resultType,
+ ValueVector* tryResults);
+ [[nodiscard]] bool readCatchAll(LabelKind* kind, ResultType* paramType,
+ ResultType* resultType,
+ ValueVector* tryResults);
+ [[nodiscard]] bool readDelegate(uint32_t* relativeDepth,
+ ResultType* resultType,
+ ValueVector* tryResults);
+ void popDelegate();
+ [[nodiscard]] bool readThrow(uint32_t* tagIndex, ValueVector* argValues);
+ [[nodiscard]] bool readRethrow(uint32_t* relativeDepth);
+ [[nodiscard]] bool readUnreachable();
+ [[nodiscard]] bool readDrop();
+ [[nodiscard]] bool readUnary(ValType operandType, Value* input);
+ [[nodiscard]] bool readConversion(ValType operandType, ValType resultType,
+ Value* input);
+ [[nodiscard]] bool readBinary(ValType operandType, Value* lhs, Value* rhs);
+ [[nodiscard]] bool readComparison(ValType operandType, Value* lhs,
+ Value* rhs);
+ [[nodiscard]] bool readTernary(ValType operandType, Value* v0, Value* v1,
+ Value* v2);
+ [[nodiscard]] bool readLoad(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr, Value* value);
+ [[nodiscard]] bool readTeeStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ Value* value);
+ [[nodiscard]] bool readNop();
+ [[nodiscard]] bool readMemorySize();
+ [[nodiscard]] bool readMemoryGrow(Value* input);
+ [[nodiscard]] bool readSelect(bool typed, StackType* type, Value* trueValue,
+ Value* falseValue, Value* condition);
+ [[nodiscard]] bool readGetLocal(const ValTypeVector& locals, uint32_t* id);
+ [[nodiscard]] bool readSetLocal(const ValTypeVector& locals, uint32_t* id,
+ Value* value);
+ [[nodiscard]] bool readTeeLocal(const ValTypeVector& locals, uint32_t* id,
+ Value* value);
+ [[nodiscard]] bool readGetGlobal(uint32_t* id);
+ [[nodiscard]] bool readSetGlobal(uint32_t* id, Value* value);
+ [[nodiscard]] bool readTeeGlobal(uint32_t* id, Value* value);
+ [[nodiscard]] bool readI32Const(int32_t* i32);
+ [[nodiscard]] bool readI64Const(int64_t* i64);
+ [[nodiscard]] bool readF32Const(float* f32);
+ [[nodiscard]] bool readF64Const(double* f64);
+ [[nodiscard]] bool readRefFunc(uint32_t* funcIndex);
+ [[nodiscard]] bool readRefNull(RefType* type);
+ [[nodiscard]] bool readRefIsNull(Value* input);
+ [[nodiscard]] bool readRefAsNonNull(Value* input);
+ [[nodiscard]] bool readBrOnNull(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values, Value* condition);
+ [[nodiscard]] bool readBrOnNonNull(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values, Value* condition);
+ [[nodiscard]] bool readCall(uint32_t* funcTypeIndex, ValueVector* argValues);
+ [[nodiscard]] bool readCallIndirect(uint32_t* funcTypeIndex,
+ uint32_t* tableIndex, Value* callee,
+ ValueVector* argValues);
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ [[nodiscard]] bool readCallRef(const FuncType** funcType, Value* callee,
+ ValueVector* argValues);
+#endif
+ [[nodiscard]] bool readOldCallDirect(uint32_t numFuncImports,
+ uint32_t* funcTypeIndex,
+ ValueVector* argValues);
+ [[nodiscard]] bool readOldCallIndirect(uint32_t* funcTypeIndex, Value* callee,
+ ValueVector* argValues);
+ [[nodiscard]] bool readWake(LinearMemoryAddress<Value>* addr, Value* count);
+ [[nodiscard]] bool readWait(LinearMemoryAddress<Value>* addr,
+ ValType valueType, uint32_t byteSize,
+ Value* value, Value* timeout);
+ [[nodiscard]] bool readFence();
+ [[nodiscard]] bool readAtomicLoad(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize);
+ [[nodiscard]] bool readAtomicStore(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* value);
+ [[nodiscard]] bool readAtomicRMW(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* value);
+ [[nodiscard]] bool readAtomicCmpXchg(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* oldValue, Value* newValue);
+ [[nodiscard]] bool readMemOrTableCopy(bool isMem,
+ uint32_t* dstMemOrTableIndex,
+ Value* dst,
+ uint32_t* srcMemOrTableIndex,
+ Value* src, Value* len);
+ [[nodiscard]] bool readDataOrElemDrop(bool isData, uint32_t* segIndex);
+ [[nodiscard]] bool readMemFill(Value* start, Value* val, Value* len);
+ [[nodiscard]] bool readMemOrTableInit(bool isMem, uint32_t* segIndex,
+ uint32_t* dstTableIndex, Value* dst,
+ Value* src, Value* len);
+ [[nodiscard]] bool readTableFill(uint32_t* tableIndex, Value* start,
+ Value* val, Value* len);
+ [[nodiscard]] bool readMemDiscard(Value* start, Value* len);
+ [[nodiscard]] bool readTableGet(uint32_t* tableIndex, Value* index);
+ [[nodiscard]] bool readTableGrow(uint32_t* tableIndex, Value* initValue,
+ Value* delta);
+ [[nodiscard]] bool readTableSet(uint32_t* tableIndex, Value* index,
+ Value* value);
+
+ [[nodiscard]] bool readTableSize(uint32_t* tableIndex);
+
+#ifdef ENABLE_WASM_GC
+ [[nodiscard]] bool readStructNew(uint32_t* typeIndex, ValueVector* argValues);
+ [[nodiscard]] bool readStructNewDefault(uint32_t* typeIndex);
+ [[nodiscard]] bool readStructGet(uint32_t* typeIndex, uint32_t* fieldIndex,
+ FieldWideningOp wideningOp, Value* ptr);
+ [[nodiscard]] bool readStructSet(uint32_t* typeIndex, uint32_t* fieldIndex,
+ Value* ptr, Value* val);
+ [[nodiscard]] bool readArrayNew(uint32_t* typeIndex, Value* numElements,
+ Value* argValue);
+ [[nodiscard]] bool readArrayNewFixed(uint32_t* typeIndex,
+ uint32_t* numElements,
+ ValueVector* values);
+ [[nodiscard]] bool readArrayNewDefault(uint32_t* typeIndex,
+ Value* numElements);
+ [[nodiscard]] bool readArrayNewData(uint32_t* typeIndex, uint32_t* segIndex,
+ Value* offset, Value* numElements);
+ [[nodiscard]] bool readArrayNewElem(uint32_t* typeIndex, uint32_t* segIndex,
+ Value* offset, Value* numElements);
+ [[nodiscard]] bool readArrayGet(uint32_t* typeIndex,
+ FieldWideningOp wideningOp, Value* index,
+ Value* ptr);
+ [[nodiscard]] bool readArraySet(uint32_t* typeIndex, Value* val, Value* index,
+ Value* ptr);
+ [[nodiscard]] bool readArrayLen(bool decodeIgnoredTypeIndex, Value* ptr);
+ [[nodiscard]] bool readArrayCopy(int32_t* elemSize, bool* elemsAreRefTyped,
+ Value* dstArray, Value* dstIndex,
+ Value* srcArray, Value* srcIndex,
+ Value* numElements);
+ [[nodiscard]] bool readRefTestV5(RefType* sourceType, uint32_t* typeIndex,
+ Value* ref);
+ [[nodiscard]] bool readRefCastV5(RefType* sourceType, uint32_t* typeIndex,
+ Value* ref);
+ [[nodiscard]] bool readBrOnCastV5(uint32_t* labelRelativeDepth,
+ RefType* sourceType,
+ uint32_t* castTypeIndex,
+ ResultType* labelType, ValueVector* values);
+ [[nodiscard]] bool readBrOnCastHeapV5(bool nullable,
+ uint32_t* labelRelativeDepth,
+ RefType* sourceType, RefType* destType,
+ ResultType* labelType,
+ ValueVector* values);
+ [[nodiscard]] bool readBrOnCastFailV5(uint32_t* labelRelativeDepth,
+ RefType* sourceType,
+ uint32_t* castTypeIndex,
+ ResultType* labelType,
+ ValueVector* values);
+ [[nodiscard]] bool readBrOnCastFailHeapV5(
+ bool nullable, uint32_t* labelRelativeDepth, RefType* sourceType,
+ RefType* destType, ResultType* labelType, ValueVector* values);
+ [[nodiscard]] bool readRefTest(bool nullable, RefType* sourceType,
+ RefType* destType, Value* ref);
+ [[nodiscard]] bool readRefCast(bool nullable, RefType* sourceType,
+ RefType* destType, Value* ref);
+ [[nodiscard]] bool readBrOnCast(bool* onSuccess, uint32_t* labelRelativeDepth,
+ RefType* sourceType, RefType* destType,
+ ResultType* labelType, ValueVector* values);
+ [[nodiscard]] bool checkBrOnCastCommonV5(uint32_t labelRelativeDepth,
+ RefType* sourceType,
+ ValType castToType,
+ ResultType* labelType,
+ ValueVector* values);
+ [[nodiscard]] bool checkBrOnCastFailCommonV5(uint32_t labelRelativeDepth,
+ RefType* sourceType,
+ ValType castToType,
+ ResultType* labelType,
+ ValueVector* values);
+ [[nodiscard]] bool readBrOnNonStructV5(uint32_t* labelRelativeDepth,
+ ResultType* labelType,
+ ValueVector* values);
+ [[nodiscard]] bool readRefConversion(RefType operandType, RefType resultType,
+ Value* operandValue);
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] bool readLaneIndex(uint32_t inputLanes, uint32_t* laneIndex);
+ [[nodiscard]] bool readExtractLane(ValType resultType, uint32_t inputLanes,
+ uint32_t* laneIndex, Value* input);
+ [[nodiscard]] bool readReplaceLane(ValType operandType, uint32_t inputLanes,
+ uint32_t* laneIndex, Value* baseValue,
+ Value* operand);
+ [[nodiscard]] bool readVectorShift(Value* baseValue, Value* shift);
+ [[nodiscard]] bool readVectorShuffle(Value* v1, Value* v2, V128* selectMask);
+ [[nodiscard]] bool readV128Const(V128* value);
+ [[nodiscard]] bool readLoadSplat(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readLoadExtend(LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readLoadLane(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ uint32_t* laneIndex, Value* input);
+ [[nodiscard]] bool readStoreLane(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ uint32_t* laneIndex, Value* input);
+#endif
+
+ [[nodiscard]] bool readIntrinsic(const Intrinsic** intrinsic,
+ ValueVector* params);
+
+ // At a location where readOp is allowed, peek at the next opcode
+ // without consuming it or updating any internal state.
+ // Never fails: returns uint16_t(Op::Limit) in op->b0 if it can't read.
+ void peekOp(OpBytes* op);
+
+ // ------------------------------------------------------------------------
+ // Stack management.
+
+ // Set the top N result values.
+ void setResults(size_t count, const ValueVector& values) {
+ MOZ_ASSERT(valueStack_.length() >= count);
+ size_t base = valueStack_.length() - count;
+ for (size_t i = 0; i < count; i++) {
+ valueStack_[base + i].setValue(values[i]);
+ }
+ }
+
+ bool getResults(size_t count, ValueVector* values) {
+ MOZ_ASSERT(valueStack_.length() >= count);
+ if (!values->resize(count)) {
+ return false;
+ }
+ size_t base = valueStack_.length() - count;
+ for (size_t i = 0; i < count; i++) {
+ (*values)[i] = valueStack_[base + i].value();
+ }
+ return true;
+ }
+
+ // Set the result value of the current top-of-value-stack expression.
+ void setResult(Value value) { valueStack_.back().setValue(value); }
+
+ // Return the result value of the current top-of-value-stack expression.
+ Value getResult() { return valueStack_.back().value(); }
+
+ // Return a reference to the top of the control stack.
+ ControlItem& controlItem() { return controlStack_.back().controlItem(); }
+
+ // Return a reference to an element in the control stack.
+ ControlItem& controlItem(uint32_t relativeDepth) {
+ return controlStack_[controlStack_.length() - 1 - relativeDepth]
+ .controlItem();
+ }
+
+ // Return the LabelKind of an element in the control stack.
+ LabelKind controlKind(uint32_t relativeDepth) {
+ return controlStack_[controlStack_.length() - 1 - relativeDepth].kind();
+ }
+
+ // Return a reference to the outermost element on the control stack.
+ ControlItem& controlOutermost() { return controlStack_[0].controlItem(); }
+
+ // Test whether the control-stack is empty, meaning we've consumed the final
+ // end of the function body.
+ bool controlStackEmpty() const { return controlStack_.empty(); }
+
+ // Return the depth of the control stack.
+ size_t controlStackDepth() const { return controlStack_.length(); }
+
+  // Find the innermost control item of a specific kind, searching outward
+  // from the given relative depth, and return true if one is found. The
+  // relative depth of the found item is returned via `foundRelativeDepth`.
+ bool controlFindInnermostFrom(LabelKind kind, uint32_t fromRelativeDepth,
+ uint32_t* foundRelativeDepth) {
+ int32_t fromAbsoluteDepth = controlStack_.length() - fromRelativeDepth - 1;
+ for (int32_t i = fromAbsoluteDepth; i >= 0; i--) {
+ if (controlStack_[i].kind() == kind) {
+ *foundRelativeDepth = controlStack_.length() - 1 - i;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool controlFindInnermost(LabelKind kind, uint32_t* foundRelativeDepth) {
+ return controlFindInnermostFrom(kind, 0, foundRelativeDepth);
+ }
+};
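+
+// A minimal sketch of the intended decoding loop, assuming a hypothetical
+// Policy ("MyPolicy") whose Value and ControlItem carry no data (pure
+// validation):
+//
+//   OpIter<MyPolicy> iter(env, decoder);
+//   if (!iter.startFunction(funcIndex, locals)) {
+//     return false;
+//   }
+//   while (!iter.controlStackEmpty()) {
+//     OpBytes op;
+//     if (!iter.readOp(&op)) {
+//       return false;
+//     }
+//     // Dispatch on op.b0 (and op.b1 for prefixed opcodes) to the matching
+//     // read* method; `end` additionally requires popEnd() once readEnd()
+//     // has succeeded.
+//   }
+//   return iter.endFunction(bodyEnd);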
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkIsSubtypeOf(FieldType subType,
+ FieldType superType) {
+ return CheckIsSubtypeOf(d_, env_, lastOpcodeOffset(), subType, superType);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkIsSubtypeOf(ResultType params,
+ ResultType results) {
+ if (params.length() != results.length()) {
+ UniqueChars error(
+ JS_smprintf("type mismatch: expected %zu values, got %zu values",
+ results.length(), params.length()));
+ if (!error) {
+ return false;
+ }
+ return fail(error.get());
+ }
+ for (uint32_t i = 0; i < params.length(); i++) {
+ ValType param = params[i];
+ ValType result = results[i];
+ if (!checkIsSubtypeOf(param, result)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+template <typename Policy>
+inline bool OpIter<Policy>::checkIsSubtypeOf(uint32_t actualTypeIndex,
+ uint32_t expectedTypeIndex) {
+ const TypeDef& actualTypeDef = env_.types->type(actualTypeIndex);
+ const TypeDef& expectedTypeDef = env_.types->type(expectedTypeIndex);
+ return CheckIsSubtypeOf(
+ d_, env_, lastOpcodeOffset(),
+ ValType(RefType::fromTypeDef(&actualTypeDef, true)),
+ ValType(RefType::fromTypeDef(&expectedTypeDef, true)));
+}
+#endif
+
+template <typename Policy>
+inline bool OpIter<Policy>::unrecognizedOpcode(const OpBytes* expr) {
+ UniqueChars error(JS_smprintf("unrecognized opcode: %x %x", expr->b0,
+ IsPrefixByte(expr->b0) ? expr->b1 : 0));
+ if (!error) {
+ return false;
+ }
+
+ return fail(error.get());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::fail(const char* msg) {
+ return d_.fail(lastOpcodeOffset(), msg);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::fail_ctx(const char* fmt, const char* context) {
+ UniqueChars error(JS_smprintf(fmt, context));
+ if (!error) {
+ return false;
+ }
+ return fail(error.get());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::failEmptyStack() {
+ return valueStack_.empty() ? fail("popping value from empty stack")
+ : fail("popping value from outside block");
+}
+
+// This function pops exactly one value from the stack, yielding the bottom
+// type in various cases, so it is the caller's responsibility to do the right
+// thing for StackType::bottom(). Prefer (pop|top)WithType. This is an
+// optimization for the very common case where the caller statically expects
+// the result type `[valtype]`.
+template <typename Policy>
+inline bool OpIter<Policy>::popStackType(StackType* type, Value* value) {
+ Control& block = controlStack_.back();
+
+ MOZ_ASSERT(valueStack_.length() >= block.valueStackBase());
+ if (MOZ_UNLIKELY(valueStack_.length() == block.valueStackBase())) {
+ // If the base of this block's stack is polymorphic, then we can pop a
+ // dummy value of the bottom type; it won't be used since we're in
+ // unreachable code.
+ if (block.polymorphicBase()) {
+ *type = StackType::bottom();
+ *value = Value();
+
+ // Maintain the invariant that, after a pop, there is always memory
+ // reserved to push a value infallibly.
+ return valueStack_.reserve(valueStack_.length() + 1);
+ }
+
+ return failEmptyStack();
+ }
+
+ TypeAndValue& tv = valueStack_.back();
+ *type = tv.type();
+ *value = tv.value();
+ valueStack_.popBack();
+ return true;
+}
+
+// This function pops exactly one value from the stack, checking that it has the
+// expected type which can either be a specific value type or the bottom type.
+template <typename Policy>
+inline bool OpIter<Policy>::popWithType(ValType expectedType, Value* value,
+ StackType* stackType) {
+ if (!popStackType(stackType, value)) {
+ return false;
+ }
+
+ return stackType->isStackBottom() ||
+ checkIsSubtypeOf(stackType->valType(), expectedType);
+}
+
+// This function pops exactly one value from the stack, checking that it has the
+// expected type which can either be a specific value type or the bottom type.
+template <typename Policy>
+inline bool OpIter<Policy>::popWithType(ValType expectedType, Value* value) {
+ StackType stackType;
+ return popWithType(expectedType, value, &stackType);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::popWithType(ResultType expected,
+ ValueVector* values) {
+ return popWithTypes(expected, values);
+}
+
+// Pops each of the given expected types (in reverse, because it's a stack).
+template <typename Policy>
+template <typename ValTypeSpanT>
+inline bool OpIter<Policy>::popWithTypes(ValTypeSpanT expected,
+ ValueVector* values) {
+ size_t expectedLength = expected.size();
+ if (!values->resize(expectedLength)) {
+ return false;
+ }
+ for (size_t i = 0; i < expectedLength; i++) {
+ size_t reverseIndex = expectedLength - i - 1;
+ ValType expectedType = expected[reverseIndex];
+ Value* value = &(*values)[reverseIndex];
+ if (!popWithType(expectedType, value)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// This function pops exactly one value from the stack, checking that it is a
+// reference type.
+template <typename Policy>
+inline bool OpIter<Policy>::popWithRefType(Value* value, StackType* type) {
+ if (!popStackType(type, value)) {
+ return false;
+ }
+
+ if (type->isStackBottom() || type->valType().isRefType()) {
+ return true;
+ }
+
+ UniqueChars actualText = ToString(type->valType(), env_.types);
+ if (!actualText) {
+ return false;
+ }
+
+ UniqueChars error(JS_smprintf(
+ "type mismatch: expression has type %s but expected a reference type",
+ actualText.get()));
+ if (!error) {
+ return false;
+ }
+
+ return fail(error.get());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkTopTypeMatches(ResultType expected,
+ ValueVector* values,
+ bool rewriteStackTypes) {
+ if (expected.empty()) {
+ return true;
+ }
+
+ Control& block = controlStack_.back();
+
+ size_t expectedLength = expected.length();
+ if (values && !values->resize(expectedLength)) {
+ return false;
+ }
+
+ for (size_t i = 0; i != expectedLength; i++) {
+    // We're iterating as if we were popping each expected/actual type one by
+ // one, which means iterating the array of expected results backwards.
+ // The "current" value stack length refers to what the value stack length
+ // would have been if we were popping it.
+ size_t reverseIndex = expectedLength - i - 1;
+ ValType expectedType = expected[reverseIndex];
+ auto collectValue = [&](const Value& v) {
+ if (values) {
+ (*values)[reverseIndex] = v;
+ }
+ };
+
+ size_t currentValueStackLength = valueStack_.length() - i;
+
+ MOZ_ASSERT(currentValueStackLength >= block.valueStackBase());
+ if (currentValueStackLength == block.valueStackBase()) {
+ if (!block.polymorphicBase()) {
+ return failEmptyStack();
+ }
+
+ // If the base of this block's stack is polymorphic, then we can just
+ // pull out as many fake values as we need to validate, and create dummy
+ // stack entries accordingly; they won't be used since we're in
+ // unreachable code. However, if `rewriteStackTypes` is true, we must
+ // set the types on these new entries to whatever `expected` requires
+ // them to be.
+ TypeAndValue newTandV =
+ rewriteStackTypes ? TypeAndValue(expectedType) : TypeAndValue();
+ if (!valueStack_.insert(valueStack_.begin() + currentValueStackLength,
+ newTandV)) {
+ return false;
+ }
+
+ collectValue(Value());
+ } else {
+ TypeAndValue& observed = valueStack_[currentValueStackLength - 1];
+
+ if (observed.type().isStackBottom()) {
+ collectValue(Value());
+ } else {
+ if (!checkIsSubtypeOf(observed.type().valType(), expectedType)) {
+ return false;
+ }
+
+ collectValue(observed.value());
+ }
+
+ if (rewriteStackTypes) {
+ observed.setType(StackType(expectedType));
+ }
+ }
+ }
+ return true;
+}
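+
+// For example, immediately after `unreachable` the current block's stack base
+// is polymorphic, so checking an expected type of [i32 i64] against an empty
+// stack synthesizes two dummy entries instead of failing; in reachable code
+// the same situation reports an empty-stack error.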
+
+template <typename Policy>
+inline bool OpIter<Policy>::pushControl(LabelKind kind, BlockType type) {
+ ResultType paramType = type.params();
+
+ ValueVector values;
+ if (!checkTopTypeMatches(paramType, &values, /*rewriteStackTypes=*/true)) {
+ return false;
+ }
+ MOZ_ASSERT(valueStack_.length() >= paramType.length());
+ uint32_t valueStackBase = valueStack_.length() - paramType.length();
+ return controlStack_.emplaceBack(kind, type, valueStackBase);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkStackAtEndOfBlock(ResultType* expectedType,
+ ValueVector* values) {
+ Control& block = controlStack_.back();
+ *expectedType = block.type().results();
+
+ MOZ_ASSERT(valueStack_.length() >= block.valueStackBase());
+ if (expectedType->length() < valueStack_.length() - block.valueStackBase()) {
+ return fail("unused values not explicitly dropped by end of block");
+ }
+
+ return checkTopTypeMatches(*expectedType, values,
+ /*rewriteStackTypes=*/true);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::getControl(uint32_t relativeDepth,
+ Control** controlEntry) {
+ if (relativeDepth >= controlStack_.length()) {
+ return fail("branch depth exceeds current nesting level");
+ }
+
+ *controlEntry = &controlStack_[controlStack_.length() - 1 - relativeDepth];
+ return true;
+}
+
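+// Block type immediates come in three encodings, handled below: the single
+// byte 0x40 (TypeCode::BlockVoid) for [] -> [], a value type encoded as a
+// negative SLEB128 number (for example 0x7f for [] -> [i32]), or a
+// non-negative signed LEB128 index into the type section naming a function
+// type that supplies both parameters and results.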
+template <typename Policy>
+inline bool OpIter<Policy>::readBlockType(BlockType* type) {
+ uint8_t nextByte;
+ if (!d_.peekByte(&nextByte)) {
+ return fail("unable to read block type");
+ }
+
+ if (nextByte == uint8_t(TypeCode::BlockVoid)) {
+ d_.uncheckedReadFixedU8();
+ *type = BlockType::VoidToVoid();
+ return true;
+ }
+
+ if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
+ ValType v;
+ if (!readValType(&v)) {
+ return false;
+ }
+ *type = BlockType::VoidToSingle(v);
+ return true;
+ }
+
+ int32_t x;
+ if (!d_.readVarS32(&x) || x < 0 || uint32_t(x) >= env_.types->length()) {
+ return fail("invalid block type type index");
+ }
+
+ const TypeDef* typeDef = &env_.types->type(x);
+ if (!typeDef->isFuncType()) {
+ return fail("block type type index must be func type");
+ }
+
+ *type = BlockType::Func(typeDef->funcType());
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readOp(OpBytes* op) {
+ MOZ_ASSERT(!controlStack_.empty());
+
+ offsetOfLastReadOp_ = d_.currentOffset();
+
+ if (MOZ_UNLIKELY(!d_.readOp(op))) {
+ return fail("unable to read opcode");
+ }
+
+#ifdef DEBUG
+ op_ = *op;
+#endif
+
+ return true;
+}
+
+template <typename Policy>
+inline void OpIter<Policy>::peekOp(OpBytes* op) {
+ const uint8_t* pos = d_.currentPosition();
+
+ if (MOZ_UNLIKELY(!d_.readOp(op))) {
+ op->b0 = uint16_t(Op::Limit);
+ }
+
+ d_.rollbackPosition(pos);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::startFunction(uint32_t funcIndex,
+ const ValTypeVector& locals) {
+ MOZ_ASSERT(kind_ == OpIter::Func);
+ MOZ_ASSERT(elseParamStack_.empty());
+ MOZ_ASSERT(valueStack_.empty());
+ MOZ_ASSERT(controlStack_.empty());
+ MOZ_ASSERT(op_.b0 == uint16_t(Op::Limit));
+ MOZ_ASSERT(maxInitializedGlobalsIndexPlus1_ == 0);
+ BlockType type = BlockType::FuncResults(*env_.funcs[funcIndex].type);
+
+ size_t numArgs = env_.funcs[funcIndex].type->args().length();
+ if (!unsetLocals_.init(locals, numArgs)) {
+ return false;
+ }
+
+ return pushControl(LabelKind::Body, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::endFunction(const uint8_t* bodyEnd) {
+ if (d_.currentPosition() != bodyEnd) {
+ return fail("function body length mismatch");
+ }
+
+ if (!controlStack_.empty()) {
+ return fail("unbalanced function body control flow");
+ }
+ MOZ_ASSERT(elseParamStack_.empty());
+ MOZ_ASSERT(unsetLocals_.empty());
+
+#ifdef DEBUG
+ op_ = OpBytes(Op::Limit);
+#endif
+ valueStack_.clear();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::startInitExpr(
+ ValType expected, uint32_t maxInitializedGlobalsIndexPlus1) {
+ MOZ_ASSERT(kind_ == OpIter::InitExpr);
+ MOZ_ASSERT(elseParamStack_.empty());
+ MOZ_ASSERT(valueStack_.empty());
+ MOZ_ASSERT(controlStack_.empty());
+ MOZ_ASSERT(maxInitializedGlobalsIndexPlus1_ == 0);
+ MOZ_ASSERT(op_.b0 == uint16_t(Op::Limit));
+
+ // GC allows accessing any previously defined global, not just those that are
+ // imported and immutable.
+ if (env_.features.gc) {
+ maxInitializedGlobalsIndexPlus1_ = maxInitializedGlobalsIndexPlus1;
+ }
+
+ BlockType type = BlockType::VoidToSingle(expected);
+ return pushControl(LabelKind::Body, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::endInitExpr() {
+ MOZ_ASSERT(controlStack_.empty());
+ MOZ_ASSERT(elseParamStack_.empty());
+
+#ifdef DEBUG
+ op_ = OpBytes(Op::Limit);
+#endif
+ valueStack_.clear();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readValType(ValType* type) {
+ return d_.readValType(*env_.types, env_.features, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readHeapType(bool nullable, RefType* type) {
+ return d_.readHeapType(*env_.types, env_.features, nullable, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readReturn(ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Return);
+
+ Control& body = controlStack_[0];
+ MOZ_ASSERT(body.kind() == LabelKind::Body);
+
+ if (!popWithType(body.resultType(), values)) {
+ return false;
+ }
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBlock(ResultType* paramType) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Block);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ return pushControl(LabelKind::Block, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoop(ResultType* paramType) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Loop);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ return pushControl(LabelKind::Loop, type);
+}
+
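+// `if` saves a copy of its parameter values on elseParamStack_ so that the
+// `else` arm (or the implicit empty else handled in readEnd) can start from
+// the same inputs; see readElse and readEnd.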
+template <typename Policy>
+inline bool OpIter<Policy>::readIf(ResultType* paramType, Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::If);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+
+ if (!pushControl(LabelKind::Then, type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ size_t paramsLength = type.params().length();
+ return elseParamStack_.append(valueStack_.end() - paramsLength, paramsLength);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readElse(ResultType* paramType,
+ ResultType* resultType,
+ ValueVector* thenResults) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Else);
+
+ Control& block = controlStack_.back();
+ if (block.kind() != LabelKind::Then) {
+ return fail("else can only be used within an if");
+ }
+
+ *paramType = block.type().params();
+ if (!checkStackAtEndOfBlock(resultType, thenResults)) {
+ return false;
+ }
+
+ valueStack_.shrinkTo(block.valueStackBase());
+
+ size_t nparams = block.type().params().length();
+ MOZ_ASSERT(elseParamStack_.length() >= nparams);
+ valueStack_.infallibleAppend(elseParamStack_.end() - nparams, nparams);
+ elseParamStack_.shrinkBy(nparams);
+
+ // Reset local state to the beginning of the 'if' block for the new block
+ // started by 'else'.
+ unsetLocals_.resetToBlock(controlStack_.length() - 1);
+
+ block.switchToElse();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readEnd(LabelKind* kind, ResultType* type,
+ ValueVector* results,
+ ValueVector* resultsForEmptyElse) {
+ MOZ_ASSERT(Classify(op_) == OpKind::End);
+
+ Control& block = controlStack_.back();
+
+ if (!checkStackAtEndOfBlock(type, results)) {
+ return false;
+ }
+
+ if (block.kind() == LabelKind::Then) {
+ ResultType params = block.type().params();
+    // If an `if` block ends with `end` instead of `else`, the missing `else`
+    // arm implicitly forwards the `if` parameters as its results. In that
+    // case, validation requires the `if`'s param type to match its result
+    // type.
+ if (params != block.type().results()) {
+ return fail("if without else with a result value");
+ }
+
+ size_t nparams = params.length();
+ MOZ_ASSERT(elseParamStack_.length() >= nparams);
+ if (!resultsForEmptyElse->resize(nparams)) {
+ return false;
+ }
+ const TypeAndValue* elseParams = elseParamStack_.end() - nparams;
+ for (size_t i = 0; i < nparams; i++) {
+ (*resultsForEmptyElse)[i] = elseParams[i].value();
+ }
+ elseParamStack_.shrinkBy(nparams);
+ }
+
+ *kind = block.kind();
+ return true;
+}
+
+template <typename Policy>
+inline void OpIter<Policy>::popEnd() {
+ MOZ_ASSERT(Classify(op_) == OpKind::End);
+
+ controlStack_.popBack();
+ unsetLocals_.resetToBlock(controlStack_.length());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkBranchValueAndPush(uint32_t relativeDepth,
+ ResultType* type,
+ ValueVector* values) {
+ Control* block = nullptr;
+ if (!getControl(relativeDepth, &block)) {
+ return false;
+ }
+
+ *type = block->branchTargetType();
+ return checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/false);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBr(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Br);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br depth");
+ }
+
+ if (!checkBranchValueAndPush(*relativeDepth, type, values)) {
+ return false;
+ }
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrIf(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values, Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrIf);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br_if depth");
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+
+ return checkBranchValueAndPush(*relativeDepth, type, values);
+}
+
+#define UNKNOWN_ARITY UINT32_MAX
+
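+// Validates one br_table target. All targets (including the default) must
+// have the same arity, but their precise types may differ, so the operand
+// stack is checked against each target's branch type individually.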
+template <typename Policy>
+inline bool OpIter<Policy>::checkBrTableEntryAndPush(
+ uint32_t* relativeDepth, ResultType prevBranchType, ResultType* type,
+ ValueVector* branchValues) {
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br_table depth");
+ }
+
+ Control* block = nullptr;
+ if (!getControl(*relativeDepth, &block)) {
+ return false;
+ }
+
+ *type = block->branchTargetType();
+
+ if (prevBranchType.valid()) {
+ if (prevBranchType.length() != type->length()) {
+ return fail("br_table targets must all have the same arity");
+ }
+
+ // Avoid re-collecting the same values for subsequent branch targets.
+ branchValues = nullptr;
+ }
+
+ return checkTopTypeMatches(*type, branchValues, /*rewriteStackTypes=*/false);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrTable(Uint32Vector* depths,
+ uint32_t* defaultDepth,
+ ResultType* defaultBranchType,
+ ValueVector* branchValues,
+ Value* index) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrTable);
+
+ uint32_t tableLength;
+ if (!readVarU32(&tableLength)) {
+ return fail("unable to read br_table table length");
+ }
+
+ if (tableLength > MaxBrTableElems) {
+ return fail("br_table too big");
+ }
+
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ if (!depths->resize(tableLength)) {
+ return false;
+ }
+
+ ResultType prevBranchType;
+ for (uint32_t i = 0; i < tableLength; i++) {
+ ResultType branchType;
+ if (!checkBrTableEntryAndPush(&(*depths)[i], prevBranchType, &branchType,
+ branchValues)) {
+ return false;
+ }
+ prevBranchType = branchType;
+ }
+
+ if (!checkBrTableEntryAndPush(defaultDepth, prevBranchType, defaultBranchType,
+ branchValues)) {
+ return false;
+ }
+
+ MOZ_ASSERT(defaultBranchType->valid());
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+#undef UNKNOWN_ARITY
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTry(ResultType* paramType) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Try);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ return pushControl(LabelKind::Try, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readCatch(LabelKind* kind, uint32_t* tagIndex,
+ ResultType* paramType,
+ ResultType* resultType,
+ ValueVector* tryResults) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Catch);
+
+ if (!readVarU32(tagIndex)) {
+ return fail("expected tag index");
+ }
+ if (*tagIndex >= env_.tags.length()) {
+ return fail("tag index out of range");
+ }
+
+ Control& block = controlStack_.back();
+ if (block.kind() == LabelKind::CatchAll) {
+ return fail("catch cannot follow a catch_all");
+ }
+ if (block.kind() != LabelKind::Try && block.kind() != LabelKind::Catch) {
+ return fail("catch can only be used within a try-catch");
+ }
+ *kind = block.kind();
+ *paramType = block.type().params();
+
+ if (!checkStackAtEndOfBlock(resultType, tryResults)) {
+ return false;
+ }
+
+ valueStack_.shrinkTo(block.valueStackBase());
+ block.switchToCatch();
+ // Reset local state to the beginning of the 'try' block.
+ unsetLocals_.resetToBlock(controlStack_.length() - 1);
+
+ return push(env_.tags[*tagIndex].type->resultType());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readCatchAll(LabelKind* kind, ResultType* paramType,
+ ResultType* resultType,
+ ValueVector* tryResults) {
+ MOZ_ASSERT(Classify(op_) == OpKind::CatchAll);
+
+ Control& block = controlStack_.back();
+ if (block.kind() != LabelKind::Try && block.kind() != LabelKind::Catch) {
+ return fail("catch_all can only be used within a try-catch");
+ }
+ *kind = block.kind();
+ *paramType = block.type().params();
+
+ if (!checkStackAtEndOfBlock(resultType, tryResults)) {
+ return false;
+ }
+
+ valueStack_.shrinkTo(block.valueStackBase());
+ block.switchToCatchAll();
+ // Reset local state to the beginning of the 'try' block.
+ unsetLocals_.resetToBlock(controlStack_.length() - 1);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readDelegate(uint32_t* relativeDepth,
+ ResultType* resultType,
+ ValueVector* tryResults) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Delegate);
+
+ Control& block = controlStack_.back();
+ if (block.kind() != LabelKind::Try) {
+ return fail("delegate can only be used within a try");
+ }
+
+ uint32_t delegateDepth;
+ if (!readVarU32(&delegateDepth)) {
+ return fail("unable to read delegate depth");
+ }
+
+ // Depths for delegate start counting in the surrounding block.
+ if (delegateDepth >= controlStack_.length() - 1) {
+ return fail("delegate depth exceeds current nesting level");
+ }
+ *relativeDepth = delegateDepth + 1;
+
+ // Because `delegate` acts like `end` and ends the block, we will check
+ // the stack here.
+ return checkStackAtEndOfBlock(resultType, tryResults);
+}
+
+// We need popDelegate because readDelegate cannot pop the control stack
+// itself, as its caller may need to use the control item for delegate.
+template <typename Policy>
+inline void OpIter<Policy>::popDelegate() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Delegate);
+
+ controlStack_.popBack();
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readThrow(uint32_t* tagIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Throw);
+
+ if (!readVarU32(tagIndex)) {
+ return fail("expected tag index");
+ }
+ if (*tagIndex >= env_.tags.length()) {
+ return fail("tag index out of range");
+ }
+
+ if (!popWithType(env_.tags[*tagIndex].type->resultType(), argValues)) {
+ return false;
+ }
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRethrow(uint32_t* relativeDepth) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Rethrow);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read rethrow depth");
+ }
+
+ if (*relativeDepth >= controlStack_.length()) {
+ return fail("rethrow depth exceeds current nesting level");
+ }
+ LabelKind kind = controlKind(*relativeDepth);
+ if (kind != LabelKind::Catch && kind != LabelKind::CatchAll) {
+ return fail("rethrow target was not a catch block");
+ }
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readUnreachable() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Unreachable);
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readDrop() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Drop);
+ StackType type;
+ Value value;
+ return popStackType(&type, &value);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readUnary(ValType operandType, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Unary);
+
+ if (!popWithType(operandType, input)) {
+ return false;
+ }
+
+ infalliblePush(operandType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readConversion(ValType operandType,
+ ValType resultType, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Conversion);
+
+ if (!popWithType(operandType, input)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBinary(ValType operandType, Value* lhs,
+ Value* rhs) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Binary);
+
+ if (!popWithType(operandType, rhs)) {
+ return false;
+ }
+
+ if (!popWithType(operandType, lhs)) {
+ return false;
+ }
+
+ infalliblePush(operandType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readComparison(ValType operandType, Value* lhs,
+ Value* rhs) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Comparison);
+
+ if (!popWithType(operandType, rhs)) {
+ return false;
+ }
+
+ if (!popWithType(operandType, lhs)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTernary(ValType operandType, Value* v0,
+ Value* v1, Value* v2) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Ternary);
+
+ if (!popWithType(operandType, v2)) {
+ return false;
+ }
+
+ if (!popWithType(operandType, v1)) {
+ return false;
+ }
+
+ if (!popWithType(operandType, v0)) {
+ return false;
+ }
+
+ infalliblePush(operandType);
+
+ return true;
+}
+
+// For memories, the index is currently always a placeholder zero byte.
+//
+// For tables, the index is a normal LEB128-encoded table index, since
+// multi-table from the reference-types proposal is supported.
+//
+// The zero-ness of the memory index byte must be checked by the caller.
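+//
+// For example, `memory.copy` currently encodes two placeholder 0x00 bytes
+// (dst, src), whereas `table.copy` encodes two LEB128 table indices.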
+template <typename Policy>
+inline bool OpIter<Policy>::readMemOrTableIndex(bool isMem, uint32_t* index) {
+ bool readByte = isMem;
+ if (readByte) {
+ uint8_t indexTmp;
+ if (!readFixedU8(&indexTmp)) {
+ return fail("unable to read memory or table index");
+ }
+ *index = indexTmp;
+ } else {
+ if (!readVarU32(index)) {
+ return fail("unable to read memory or table index");
+ }
+ }
+ return true;
+}
+
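+// A memory access immediate ("memarg") is a log2 alignment hint followed by a
+// byte offset. For example (illustrative), `i32.load offset=8 align=4`
+// encodes alignLog2 = 2 and offset = 8, while align=8 (alignLog2 = 3) would
+// be rejected below as exceeding the access's natural 4-byte size.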
+template <typename Policy>
+inline bool OpIter<Policy>::readLinearMemoryAddress(
+ uint32_t byteSize, LinearMemoryAddress<Value>* addr) {
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ IndexType it = env_.memory->indexType();
+
+ uint32_t alignLog2;
+ if (!readVarU32(&alignLog2)) {
+ return fail("unable to read load alignment");
+ }
+
+ if (!readVarU64(&addr->offset)) {
+ return fail("unable to read load offset");
+ }
+
+ if (it == IndexType::I32 && addr->offset > UINT32_MAX) {
+ return fail("offset too large for memory type");
+ }
+
+ if (alignLog2 >= 32 || (uint32_t(1) << alignLog2) > byteSize) {
+ return fail("greater than natural alignment");
+ }
+
+ if (!popWithType(ToValType(it), &addr->base)) {
+ return false;
+ }
+
+ addr->align = uint32_t(1) << alignLog2;
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLinearMemoryAddressAligned(
+ uint32_t byteSize, LinearMemoryAddress<Value>* addr) {
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ if (addr->align != byteSize) {
+ return fail("not natural alignment");
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoad(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Load);
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Store);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTeeStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeStore);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(TypeAndValue(resultType, *value));
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readNop() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Nop);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemorySize() {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemorySize);
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t flags;
+ if (!readFixedU8(&flags)) {
+ return fail("failed to read memory flags");
+ }
+
+ if (flags != uint8_t(0)) {
+ return fail("unexpected flags");
+ }
+
+ ValType ptrType = ToValType(env_.memory->indexType());
+ return push(ptrType);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemoryGrow(Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemoryGrow);
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t flags;
+ if (!readFixedU8(&flags)) {
+ return fail("failed to read memory flags");
+ }
+
+ if (flags != uint8_t(0)) {
+ return fail("unexpected flags");
+ }
+
+ ValType ptrType = ToValType(env_.memory->indexType());
+ if (!popWithType(ptrType, input)) {
+ return false;
+ }
+
+ infalliblePush(ptrType);
+
+ return true;
+}
+
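+// Handles both the untyped MVP `select`, which is limited to numeric and
+// vector operands, and the typed `select (result t)` form from the
+// reference-types proposal, which is required for reference operands.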
+template <typename Policy>
+inline bool OpIter<Policy>::readSelect(bool typed, StackType* type,
+ Value* trueValue, Value* falseValue,
+ Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Select);
+
+ if (typed) {
+ uint32_t length;
+ if (!readVarU32(&length)) {
+ return fail("unable to read select result length");
+ }
+ if (length != 1) {
+ return fail("bad number of results");
+ }
+ ValType result;
+ if (!readValType(&result)) {
+ return fail("invalid result type for select");
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+ if (!popWithType(result, falseValue)) {
+ return false;
+ }
+ if (!popWithType(result, trueValue)) {
+ return false;
+ }
+
+ *type = StackType(result);
+ infalliblePush(*type);
+ return true;
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+
+ StackType falseType;
+ if (!popStackType(&falseType, falseValue)) {
+ return false;
+ }
+
+ StackType trueType;
+ if (!popStackType(&trueType, trueValue)) {
+ return false;
+ }
+
+ if (!falseType.isValidForUntypedSelect() ||
+ !trueType.isValidForUntypedSelect()) {
+ return fail("invalid types for untyped select");
+ }
+
+ if (falseType.isStackBottom()) {
+ *type = trueType;
+ } else if (trueType.isStackBottom() || falseType == trueType) {
+ *type = falseType;
+ } else {
+ return fail("select operand types must match");
+ }
+
+ infalliblePush(*type);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readGetLocal(const ValTypeVector& locals,
+ uint32_t* id) {
+ MOZ_ASSERT(Classify(op_) == OpKind::GetLocal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read local index");
+ }
+
+ if (*id >= locals.length()) {
+ return fail("local.get index out of range");
+ }
+
+ if (unsetLocals_.isUnset(*id)) {
+ return fail("local.get read from unset local");
+ }
+
+ return push(locals[*id]);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readSetLocal(const ValTypeVector& locals,
+ uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::SetLocal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read local index");
+ }
+
+ if (*id >= locals.length()) {
+ return fail("local.set index out of range");
+ }
+
+ if (unsetLocals_.isUnset(*id)) {
+ unsetLocals_.set(*id, controlStackDepth());
+ }
+
+ return popWithType(locals[*id], value);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTeeLocal(const ValTypeVector& locals,
+ uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeLocal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read local index");
+ }
+
+ if (*id >= locals.length()) {
+    return fail("local.tee index out of range");
+ }
+
+ if (unsetLocals_.isUnset(*id)) {
+ unsetLocals_.set(*id, controlStackDepth());
+ }
+
+ ValueVector single;
+ if (!checkTopTypeMatches(ResultType::Single(locals[*id]), &single,
+ /*rewriteStackTypes=*/true)) {
+ return false;
+ }
+
+ *value = single[0];
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readGetGlobal(uint32_t* id) {
+ MOZ_ASSERT(Classify(op_) == OpKind::GetGlobal);
+
+ if (!d_.readGlobalIndex(id)) {
+ return false;
+ }
+
+ if (*id >= env_.globals.length()) {
+ return fail("global.get index out of range");
+ }
+
+ // Initializer expressions can access immutable imported globals, or any
+ // previously defined global with GC enabled.
+ if (kind_ == OpIter::InitExpr && *id >= maxInitializedGlobalsIndexPlus1_ &&
+ (!env_.globals[*id].isImport() || env_.globals[*id].isMutable())) {
+ return fail(
+ "global.get in initializer expression must reference a global "
+ "immutable import");
+ }
+
+ return push(env_.globals[*id].type());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readSetGlobal(uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::SetGlobal);
+
+ if (!d_.readGlobalIndex(id)) {
+ return false;
+ }
+
+ if (*id >= env_.globals.length()) {
+ return fail("global.set index out of range");
+ }
+
+ if (!env_.globals[*id].isMutable()) {
+ return fail("can't write an immutable global");
+ }
+
+ return popWithType(env_.globals[*id].type(), value);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTeeGlobal(uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeGlobal);
+
+ if (!d_.readGlobalIndex(id)) {
+ return false;
+ }
+
+ if (*id >= env_.globals.length()) {
+ return fail("global.set index out of range");
+ }
+
+ if (!env_.globals[*id].isMutable()) {
+ return fail("can't write an immutable global");
+ }
+
+ ValueVector single;
+ if (!checkTopTypeMatches(ResultType::Single(env_.globals[*id].type()),
+ &single,
+ /*rewriteStackTypes=*/true)) {
+ return false;
+ }
+
+ MOZ_ASSERT(single.length() == 1);
+ *value = single[0];
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readI32Const(int32_t* i32) {
+ MOZ_ASSERT(Classify(op_) == OpKind::I32);
+
+ if (!d_.readI32Const(i32)) {
+ return false;
+ }
+
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readI64Const(int64_t* i64) {
+ MOZ_ASSERT(Classify(op_) == OpKind::I64);
+
+ if (!d_.readI64Const(i64)) {
+ return false;
+ }
+
+ return push(ValType::I64);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readF32Const(float* f32) {
+ MOZ_ASSERT(Classify(op_) == OpKind::F32);
+
+ if (!d_.readF32Const(f32)) {
+ return false;
+ }
+
+ return push(ValType::F32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readF64Const(double* f64) {
+ MOZ_ASSERT(Classify(op_) == OpKind::F64);
+
+ if (!d_.readF64Const(f64)) {
+ return false;
+ }
+
+ return push(ValType::F64);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefFunc(uint32_t* funcIndex) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefFunc);
+
+ if (!d_.readFuncIndex(funcIndex)) {
+ return false;
+ }
+ if (*funcIndex >= env_.funcs.length()) {
+ return fail("function index out of range");
+ }
+ if (kind_ == OpIter::Func && !env_.funcs[*funcIndex].canRefFunc()) {
+ return fail(
+ "function index is not declared in a section before the code section");
+ }
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+  // When function references are enabled, push a reference of the function's
+  // precise type (rather than plain funcref), e.g. so that call_ref can be
+  // validated against it.
+ if (env_.functionReferencesEnabled()) {
+ const uint32_t typeIndex = env_.funcs[*funcIndex].typeIndex;
+ const TypeDef& typeDef = env_.types->type(typeIndex);
+ return push(RefType::fromTypeDef(&typeDef, false));
+ }
+#endif
+ return push(RefType::func());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefNull(RefType* type) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefNull);
+
+ if (!d_.readRefNull(*env_.types, env_.features, type)) {
+ return false;
+ }
+ return push(*type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefIsNull(Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Conversion);
+
+ StackType type;
+ if (!popWithRefType(input, &type)) {
+ return false;
+ }
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefAsNonNull(Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefAsNonNull);
+
+ StackType type;
+ if (!popWithRefType(input, &type)) {
+ return false;
+ }
+
+ if (type.isStackBottom()) {
+ infalliblePush(type);
+ } else {
+ infalliblePush(TypeAndValue(type.asNonNullable(), *input));
+ }
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnNull(uint32_t* relativeDepth,
+ ResultType* type, ValueVector* values,
+ Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnNull);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br_on_null depth");
+ }
+
+ StackType refType;
+ if (!popWithRefType(condition, &refType)) {
+ return false;
+ }
+
+ if (!checkBranchValueAndPush(*relativeDepth, type, values)) {
+ return false;
+ }
+
+ if (refType.isStackBottom()) {
+ return push(refType);
+ }
+ return push(TypeAndValue(refType.asNonNullable(), *condition));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnNonNull(uint32_t* relativeDepth,
+ ResultType* type,
+ ValueVector* values,
+ Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnNonNull);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br_on_non_null depth");
+ }
+
+ Control* block = nullptr;
+ if (!getControl(*relativeDepth, &block)) {
+ return false;
+ }
+
+ *type = block->branchTargetType();
+
+ // Check we at least have one type in the branch target type.
+  // Check that the branch target type has at least one entry.
+ return fail("type mismatch: target block type expected to be [_, ref]");
+ }
+
+ // Pop the condition reference.
+ StackType refType;
+ if (!popWithRefType(condition, &refType)) {
+ return false;
+ }
+
+  // Push the non-nullable version of the condition reference on the stack
+  // prior to checking the target type below.
+ if (!(refType.isStackBottom()
+ ? push(refType)
+ : push(TypeAndValue(refType.asNonNullable(), *condition)))) {
+ return false;
+ }
+
+ // Check if the type stack matches the branch target type.
+ if (!checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/false)) {
+ return false;
+ }
+
+ // Pop the condition reference -- the null-branch does not receive the value.
+ StackType unusedType;
+ Value unusedValue;
+ return popStackType(&unusedType, &unusedValue);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::popCallArgs(const ValTypeVector& expectedTypes,
+ ValueVector* values) {
+ // Iterate through the argument types backward so that pops occur in the
+ // right order.
+
+ if (!values->resize(expectedTypes.length())) {
+ return false;
+ }
+
+ for (int32_t i = int32_t(expectedTypes.length()) - 1; i >= 0; i--) {
+ if (!popWithType(expectedTypes[i], &(*values)[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readCall(uint32_t* funcTypeIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Call);
+
+ if (!readVarU32(funcTypeIndex)) {
+ return fail("unable to read call function index");
+ }
+
+ if (*funcTypeIndex >= env_.funcs.length()) {
+ return fail("callee index out of range");
+ }
+
+ const FuncType& funcType = *env_.funcs[*funcTypeIndex].type;
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readCallIndirect(uint32_t* funcTypeIndex,
+ uint32_t* tableIndex,
+ Value* callee,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::CallIndirect);
+ MOZ_ASSERT(funcTypeIndex != tableIndex);
+
+ if (!readVarU32(funcTypeIndex)) {
+ return fail("unable to read call_indirect signature index");
+ }
+
+ if (*funcTypeIndex >= env_.numTypes()) {
+ return fail("signature index out of range");
+ }
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read call_indirect table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ // Special case this for improved user experience.
+ if (!env_.tables.length()) {
+ return fail("can't call_indirect without a table");
+ }
+ return fail("table index out of range for call_indirect");
+ }
+ if (!env_.tables[*tableIndex].elemType.isFuncHierarchy()) {
+ return fail("indirect calls must go through a table of 'funcref'");
+ }
+
+ if (!popWithType(ValType::I32, callee)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*funcTypeIndex);
+ if (!typeDef.isFuncType()) {
+ return fail("expected signature type");
+ }
+ const FuncType& funcType = typeDef.funcType();
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+template <typename Policy>
+inline bool OpIter<Policy>::readCallRef(const FuncType** funcType,
+ Value* callee, ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::CallRef);
+
+ uint32_t funcTypeIndex;
+ if (!readFuncTypeIndex(&funcTypeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(funcTypeIndex);
+ *funcType = &typeDef.funcType();
+
+ if (!popWithType(ValType(RefType::fromTypeDef(&typeDef, true)), callee)) {
+ return false;
+ }
+
+ if (!popCallArgs((*funcType)->args(), argValues)) {
+ return false;
+ }
+
+ return push(ResultType::Vector((*funcType)->results()));
+}
+#endif
+
+template <typename Policy>
+inline bool OpIter<Policy>::readOldCallDirect(uint32_t numFuncImports,
+ uint32_t* funcTypeIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::OldCallDirect);
+
+ uint32_t funcDefIndex;
+ if (!readVarU32(&funcDefIndex)) {
+ return fail("unable to read call function index");
+ }
+
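+  // Guard against uint32 overflow in the index addition below.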
+ if (UINT32_MAX - funcDefIndex < numFuncImports) {
+ return fail("callee index out of range");
+ }
+
+ *funcTypeIndex = numFuncImports + funcDefIndex;
+
+ if (*funcTypeIndex >= env_.funcs.length()) {
+ return fail("callee index out of range");
+ }
+
+ const FuncType& funcType = *env_.funcs[*funcTypeIndex].type;
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readOldCallIndirect(uint32_t* funcTypeIndex,
+ Value* callee,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::OldCallIndirect);
+
+ if (!readVarU32(funcTypeIndex)) {
+ return fail("unable to read call_indirect signature index");
+ }
+
+ if (*funcTypeIndex >= env_.numTypes()) {
+ return fail("signature index out of range");
+ }
+
+ const TypeDef& typeDef = env_.types->type(*funcTypeIndex);
+ if (!typeDef.isFuncType()) {
+ return fail("expected signature type");
+ }
+ const FuncType& funcType = typeDef.funcType();
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, callee)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readWake(LinearMemoryAddress<Value>* addr,
+ Value* count) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Wake);
+
+ if (!popWithType(ValType::I32, count)) {
+ return false;
+ }
+
+ uint32_t byteSize = 4; // Per spec; smallest WAIT is i32.
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readWait(LinearMemoryAddress<Value>* addr,
+ ValType valueType, uint32_t byteSize,
+ Value* value, Value* timeout) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Wait);
+
+ if (!popWithType(ValType::I64, timeout)) {
+ return false;
+ }
+
+ if (!popWithType(valueType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readFence() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Fence);
+ uint8_t flags;
+ if (!readFixedU8(&flags)) {
+ return fail("expected memory order after fence");
+ }
+ if (flags != 0) {
+ return fail("non-zero memory order not supported yet");
+ }
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicLoad(LinearMemoryAddress<Value>* addr,
+ ValType resultType,
+ uint32_t byteSize) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicLoad);
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicStore(LinearMemoryAddress<Value>* addr,
+ ValType resultType,
+ uint32_t byteSize, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicStore);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicRMW(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicBinOp);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicCmpXchg(LinearMemoryAddress<Value>* addr,
+ ValType resultType,
+ uint32_t byteSize,
+ Value* oldValue,
+ Value* newValue) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicCompareExchange);
+
+ if (!popWithType(resultType, newValue)) {
+ return false;
+ }
+
+ if (!popWithType(resultType, oldValue)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemOrTableCopy(bool isMem,
+ uint32_t* dstMemOrTableIndex,
+ Value* dst,
+ uint32_t* srcMemOrTableIndex,
+ Value* src, Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemOrTableCopy);
+ MOZ_ASSERT(dstMemOrTableIndex != srcMemOrTableIndex);
+
+ // Spec requires (dest, src) as of 2019-10-04.
+ if (!readMemOrTableIndex(isMem, dstMemOrTableIndex)) {
+ return false;
+ }
+ if (!readMemOrTableIndex(isMem, srcMemOrTableIndex)) {
+ return false;
+ }
+
+ if (isMem) {
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+ if (*srcMemOrTableIndex != 0 || *dstMemOrTableIndex != 0) {
+ return fail("memory index out of range for memory.copy");
+ }
+ } else {
+ if (*dstMemOrTableIndex >= env_.tables.length() ||
+ *srcMemOrTableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.copy");
+ }
+ ValType dstElemType = env_.tables[*dstMemOrTableIndex].elemType;
+ ValType srcElemType = env_.tables[*srcMemOrTableIndex].elemType;
+ if (!checkIsSubtypeOf(srcElemType, dstElemType)) {
+ return false;
+ }
+ }
+
+ ValType ptrType = isMem ? ToValType(env_.memory->indexType()) : ValType::I32;
+
+ if (!popWithType(ptrType, len)) {
+ return false;
+ }
+
+ if (!popWithType(ptrType, src)) {
+ return false;
+ }
+
+ if (!popWithType(ptrType, dst)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readDataOrElemDrop(bool isData,
+ uint32_t* segIndex) {
+ MOZ_ASSERT(Classify(op_) == OpKind::DataOrElemDrop);
+
+ if (!readVarU32(segIndex)) {
+ return fail("unable to read segment index");
+ }
+
+ if (isData) {
+ if (env_.dataCount.isNothing()) {
+ return fail("data.drop requires a DataCount section");
+ }
+ if (*segIndex >= *env_.dataCount) {
+ return fail("data.drop segment index out of range");
+ }
+ } else {
+ if (*segIndex >= env_.elemSegments.length()) {
+ return fail("element segment index out of range for elem.drop");
+ }
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemFill(Value* start, Value* val, Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemFill);
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t memoryIndex;
+ if (!readFixedU8(&memoryIndex)) {
+ return fail("failed to read memory index");
+ }
+ if (memoryIndex != 0) {
+ return fail("memory index must be zero");
+ }
+
+ ValType ptrType = ToValType(env_.memory->indexType());
+
+ if (!popWithType(ptrType, len)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, val)) {
+ return false;
+ }
+
+ if (!popWithType(ptrType, start)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemOrTableInit(bool isMem, uint32_t* segIndex,
+ uint32_t* dstTableIndex,
+ Value* dst, Value* src,
+ Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemOrTableInit);
+ MOZ_ASSERT(segIndex != dstTableIndex);
+
+ if (!readVarU32(segIndex)) {
+ return fail("unable to read segment index");
+ }
+
+ uint32_t memOrTableIndex = 0;
+ if (!readMemOrTableIndex(isMem, &memOrTableIndex)) {
+ return false;
+ }
+
+ if (isMem) {
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+ if (memOrTableIndex != 0) {
+ return fail("memory index must be zero");
+ }
+ if (env_.dataCount.isNothing()) {
+ return fail("memory.init requires a DataCount section");
+ }
+ if (*segIndex >= *env_.dataCount) {
+ return fail("memory.init segment index out of range");
+ }
+ } else {
+ if (memOrTableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.init");
+ }
+ *dstTableIndex = memOrTableIndex;
+
+ if (*segIndex >= env_.elemSegments.length()) {
+ return fail("table.init segment index out of range");
+ }
+ if (!checkIsSubtypeOf(env_.elemSegments[*segIndex]->elemType,
+ env_.tables[*dstTableIndex].elemType)) {
+ return false;
+ }
+ }
+
+ if (!popWithType(ValType::I32, len)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, src)) {
+ return false;
+ }
+
+ ValType ptrType = isMem ? ToValType(env_.memory->indexType()) : ValType::I32;
+ return popWithType(ptrType, dst);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableFill(uint32_t* tableIndex, Value* start,
+ Value* val, Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableFill);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.fill");
+ }
+
+ if (!popWithType(ValType::I32, len)) {
+ return false;
+ }
+ if (!popWithType(env_.tables[*tableIndex].elemType, val)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, start)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemDiscard(Value* start, Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemDiscard);
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t memoryIndex;
+ if (!readFixedU8(&memoryIndex)) {
+ return fail("failed to read memory index");
+ }
+ if (memoryIndex != 0) {
+ return fail("memory index must be zero");
+ }
+
+ ValType ptrType = ToValType(env_.memory->indexType());
+
+ if (!popWithType(ptrType, len)) {
+ return false;
+ }
+
+ if (!popWithType(ptrType, start)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableGet(uint32_t* tableIndex, Value* index) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableGet);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.get");
+ }
+
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ infalliblePush(env_.tables[*tableIndex].elemType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableGrow(uint32_t* tableIndex,
+ Value* initValue, Value* delta) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableGrow);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.grow");
+ }
+
+ if (!popWithType(ValType::I32, delta)) {
+ return false;
+ }
+ if (!popWithType(env_.tables[*tableIndex].elemType, initValue)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableSet(uint32_t* tableIndex, Value* index,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableSet);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.set");
+ }
+
+ if (!popWithType(env_.tables[*tableIndex].elemType, value)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableSize(uint32_t* tableIndex) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableSize);
+
+ *tableIndex = 0;
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.size");
+ }
+
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readGcTypeIndex(uint32_t* typeIndex) {
+ if (!d_.readTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ if (*typeIndex >= env_.types->length()) {
+ return fail("type index out of range");
+ }
+
+ if (!env_.types->type(*typeIndex).isStructType() &&
+ !env_.types->type(*typeIndex).isArrayType()) {
+ return fail("not a gc type");
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructTypeIndex(uint32_t* typeIndex) {
+ if (!readVarU32(typeIndex)) {
+ return fail("unable to read type index");
+ }
+
+ if (*typeIndex >= env_.types->length()) {
+ return fail("type index out of range");
+ }
+
+ if (!env_.types->type(*typeIndex).isStructType()) {
+ return fail("not a struct type");
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayTypeIndex(uint32_t* typeIndex) {
+ if (!readVarU32(typeIndex)) {
+ return fail("unable to read type index");
+ }
+
+ if (*typeIndex >= env_.types->length()) {
+ return fail("type index out of range");
+ }
+
+ if (!env_.types->type(*typeIndex).isArrayType()) {
+ return fail("not an array type");
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readFuncTypeIndex(uint32_t* typeIndex) {
+ if (!readVarU32(typeIndex)) {
+ return fail("unable to read type index");
+ }
+
+ if (*typeIndex >= env_.types->length()) {
+ return fail("type index out of range");
+ }
+
+ if (!env_.types->type(*typeIndex).isFuncType()) {
+    return fail("not a func type");
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readFieldIndex(uint32_t* fieldIndex,
+ const StructType& structType) {
+ if (!readVarU32(fieldIndex)) {
+ return fail("unable to read field index");
+ }
+
+ if (structType.fields_.length() <= *fieldIndex) {
+ return fail("field index out of range");
+ }
+
+ return true;
+}
+
+#ifdef ENABLE_WASM_GC
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructNew(uint32_t* typeIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::StructNew);
+
+ if (!readStructTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const StructType& structType = typeDef.structType();
+
+ if (!argValues->resize(structType.fields_.length())) {
+ return false;
+ }
+
+ static_assert(MaxStructFields <= INT32_MAX, "Or we iloop below");
+
+ for (int32_t i = structType.fields_.length() - 1; i >= 0; i--) {
+ if (!popWithType(structType.fields_[i].type.widenToValType(),
+ &(*argValues)[i])) {
+ return false;
+ }
+ }
+
+ return push(RefType::fromTypeDef(&typeDef, false));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructNewDefault(uint32_t* typeIndex) {
+ MOZ_ASSERT(Classify(op_) == OpKind::StructNewDefault);
+
+ if (!readStructTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const StructType& structType = typeDef.structType();
+
+ if (!structType.isDefaultable()) {
+ return fail("struct must be defaultable");
+ }
+
+ return push(RefType::fromTypeDef(&typeDef, false));
+}
+
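+// For example, a packed i8 or i16 field must be read with struct.get_s or
+// struct.get_u (a non-None FieldWideningOp), while an unpacked field must use
+// plain struct.get.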
+template <typename Policy>
+inline bool OpIter<Policy>::readStructGet(uint32_t* typeIndex,
+ uint32_t* fieldIndex,
+ FieldWideningOp wideningOp,
+ Value* ptr) {
+ MOZ_ASSERT(typeIndex != fieldIndex);
+ MOZ_ASSERT(Classify(op_) == OpKind::StructGet);
+
+ if (!readStructTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const StructType& structType = typeDef.structType();
+
+ if (!readFieldIndex(fieldIndex, structType)) {
+ return false;
+ }
+
+ if (!popWithType(RefType::fromTypeDef(&typeDef, true), ptr)) {
+ return false;
+ }
+
+ FieldType fieldType = structType.fields_[*fieldIndex].type;
+
+ if (fieldType.isValType() && wideningOp != FieldWideningOp::None) {
+ return fail("must not specify signedness for unpacked field type");
+ }
+
+ if (!fieldType.isValType() && wideningOp == FieldWideningOp::None) {
+ return fail("must specify signedness for packed field type");
+ }
+
+ return push(fieldType.widenToValType());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructSet(uint32_t* typeIndex,
+ uint32_t* fieldIndex, Value* ptr,
+ Value* val) {
+ MOZ_ASSERT(typeIndex != fieldIndex);
+ MOZ_ASSERT(Classify(op_) == OpKind::StructSet);
+
+ if (!readStructTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const StructType& structType = typeDef.structType();
+
+ if (!readFieldIndex(fieldIndex, structType)) {
+ return false;
+ }
+
+ if (!popWithType(structType.fields_[*fieldIndex].type.widenToValType(),
+ val)) {
+ return false;
+ }
+
+ if (!structType.fields_[*fieldIndex].isMutable) {
+ return fail("field is not mutable");
+ }
+
+ if (!popWithType(RefType::fromTypeDef(&typeDef, true), ptr)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayNew(uint32_t* typeIndex,
+ Value* numElements, Value* argValue) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayNew);
+
+ if (!readArrayTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const ArrayType& arrayType = typeDef.arrayType();
+
+ if (!popWithType(ValType::I32, numElements)) {
+ return false;
+ }
+
+ if (!popWithType(arrayType.elementType_.widenToValType(), argValue)) {
+ return false;
+ }
+
+ return push(RefType::fromTypeDef(&typeDef, false));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayNewFixed(uint32_t* typeIndex,
+ uint32_t* numElements,
+ ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayNewFixed);
+ MOZ_ASSERT(values->length() == 0);
+
+ if (!readArrayTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const ArrayType& arrayType = typeDef.arrayType();
+
+ if (!readVarU32(numElements)) {
+ return false;
+ }
+  // Don't resize `values` to hold `numElements` up front. If `numElements` is
+  // absurdly large, doing so would take a large amount of time and memory,
+  // all of it wasted because `popWithType` in the loop below would soon start
+  // failing anyway.
+
+ ValType widenedElementType = arrayType.elementType_.widenToValType();
+ for (uint32_t i = 0; i < *numElements; i++) {
+ Value v;
+ if (!popWithType(widenedElementType, &v)) {
+ return false;
+ }
+ if (!values->append(v)) {
+ return false;
+ }
+ }
+
+ return push(RefType::fromTypeDef(&typeDef, false));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayNewDefault(uint32_t* typeIndex,
+ Value* numElements) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayNewDefault);
+
+ if (!readArrayTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const ArrayType& arrayType = typeDef.arrayType();
+
+ if (!popWithType(ValType::I32, numElements)) {
+ return false;
+ }
+
+ if (!arrayType.elementType_.isDefaultable()) {
+ return fail("array must be defaultable");
+ }
+
+ return push(RefType::fromTypeDef(&typeDef, false));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayNewData(uint32_t* typeIndex,
+ uint32_t* segIndex, Value* offset,
+ Value* numElements) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayNewData);
+
+ if (!readArrayTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ if (!readVarU32(segIndex)) {
+ return fail("unable to read segment index");
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const ArrayType& arrayType = typeDef.arrayType();
+ FieldType elemType = arrayType.elementType_;
+ if (!elemType.isNumber() && !elemType.isPacked() && !elemType.isVector()) {
+ return fail("element type must be i8/i16/i32/i64/f32/f64/v128");
+ }
+ if (env_.dataCount.isNothing()) {
+ return fail("datacount section missing");
+ }
+ if (*segIndex >= *env_.dataCount) {
+ return fail("segment index is out of range");
+ }
+
+ if (!popWithType(ValType::I32, numElements)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, offset)) {
+ return false;
+ }
+
+ return push(RefType::fromTypeDef(&typeDef, false));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayNewElem(uint32_t* typeIndex,
+ uint32_t* segIndex, Value* offset,
+ Value* numElements) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayNewData);
+
+ if (!readArrayTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ if (!readVarU32(segIndex)) {
+ return fail("unable to read segment index");
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const ArrayType& arrayType = typeDef.arrayType();
+ FieldType dstElemType = arrayType.elementType_;
+ if (!dstElemType.isRefType()) {
+ return fail("element type is not a reftype");
+ }
+ if (*segIndex >= env_.elemSegments.length()) {
+ return fail("segment index is out of range");
+ }
+
+ const ElemSegment* elemSeg = env_.elemSegments[*segIndex];
+ RefType srcElemType = elemSeg->elemType;
+ // srcElemType needs to be a subtype (child) of dstElemType
+ if (!checkIsSubtypeOf(srcElemType, dstElemType.refType())) {
+ return fail("incompatible element types");
+ }
+
+ if (!popWithType(ValType::I32, numElements)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, offset)) {
+ return false;
+ }
+
+ return push(RefType::fromTypeDef(&typeDef, false));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayGet(uint32_t* typeIndex,
+ FieldWideningOp wideningOp,
+ Value* index, Value* ptr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayGet);
+
+ if (!readArrayTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const ArrayType& arrayType = typeDef.arrayType();
+
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ if (!popWithType(RefType::fromTypeDef(&typeDef, true), ptr)) {
+ return false;
+ }
+
+ FieldType fieldType = arrayType.elementType_;
+
+ if (fieldType.isValType() && wideningOp != FieldWideningOp::None) {
+ return fail("must not specify signedness for unpacked element type");
+ }
+
+ if (!fieldType.isValType() && wideningOp == FieldWideningOp::None) {
+ return fail("must specify signedness for packed element type");
+ }
+
+ return push(fieldType.widenToValType());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArraySet(uint32_t* typeIndex, Value* val,
+ Value* index, Value* ptr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArraySet);
+
+ if (!readArrayTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ const ArrayType& arrayType = typeDef.arrayType();
+
+ if (!arrayType.isMutable_) {
+ return fail("array is not mutable");
+ }
+
+ if (!popWithType(arrayType.elementType_.widenToValType(), val)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ if (!popWithType(RefType::fromTypeDef(&typeDef, true), ptr)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayLen(bool decodeIgnoredTypeIndex,
+ Value* ptr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayLen);
+
+ // TODO: remove once V8 removes array.len with type index from their snapshot
+ uint32_t unused;
+ if (decodeIgnoredTypeIndex && !readVarU32(&unused)) {
+ return false;
+ }
+
+ if (!popWithType(RefType::array(), ptr)) {
+ return false;
+ }
+
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readArrayCopy(int32_t* elemSize,
+ bool* elemsAreRefTyped,
+ Value* dstArray, Value* dstIndex,
+ Value* srcArray, Value* srcIndex,
+ Value* numElements) {
+  // *elemSize is set to 1/2/4/8/16, and *elemsAreRefTyped is set to indicate
+  // whether the elements are reference-typed.
+ MOZ_ASSERT(Classify(op_) == OpKind::ArrayCopy);
+
+ uint32_t dstTypeIndex, srcTypeIndex;
+ if (!readArrayTypeIndex(&dstTypeIndex)) {
+ return false;
+ }
+ if (!readArrayTypeIndex(&srcTypeIndex)) {
+ return false;
+ }
+
+ // `dstTypeIndex`/`srcTypeIndex` are ensured by the above to both be array
+ // types. Reject if:
+ // * the dst array is not of mutable type
+ // * the element types are incompatible
+ const TypeDef& dstTypeDef = env_.types->type(dstTypeIndex);
+ const ArrayType& dstArrayType = dstTypeDef.arrayType();
+ const TypeDef& srcTypeDef = env_.types->type(srcTypeIndex);
+ const ArrayType& srcArrayType = srcTypeDef.arrayType();
+ FieldType dstElemType = dstArrayType.elementType_;
+ FieldType srcElemType = srcArrayType.elementType_;
+ if (!dstArrayType.isMutable_) {
+ return fail("destination array is not mutable");
+ }
+
+ if (!checkIsSubtypeOf(srcElemType, dstElemType)) {
+ return fail("incompatible element types");
+ }
+ bool dstIsRefType = dstElemType.isRefType();
+ MOZ_ASSERT(dstIsRefType == srcElemType.isRefType());
+
+ *elemSize = int32_t(dstElemType.size());
+ *elemsAreRefTyped = dstIsRefType;
+ MOZ_ASSERT(*elemSize >= 1 && *elemSize <= 16);
+ MOZ_ASSERT_IF(*elemsAreRefTyped, *elemSize == 4 || *elemSize == 8);
+
+ if (!popWithType(ValType::I32, numElements)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, srcIndex)) {
+ return false;
+ }
+ if (!popWithType(RefType::fromTypeDef(&srcTypeDef, true), srcArray)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, dstIndex)) {
+ return false;
+ }
+ if (!popWithType(RefType::fromTypeDef(&dstTypeDef, true), dstArray)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefTestV5(RefType* sourceType,
+ uint32_t* typeIndex, Value* ref) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefTestV5);
+
+ if (!readGcTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ StackType inputType;
+  if (!popWithType(RefType::any(), ref, &inputType)) {
+ return false;
+ }
+ *sourceType = inputType.valTypeOr(RefType::any()).refType();
+
+ return push(ValType(ValType::I32));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefCastV5(RefType* sourceType,
+ uint32_t* typeIndex, Value* ref) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefCastV5);
+
+ if (!readGcTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ StackType inputType;
+ if (!popWithType(RefType::any(), ref, &inputType)) {
+ return false;
+ }
+ *sourceType = inputType.valTypeOr(RefType::any()).refType();
+
+ const TypeDef& typeDef = env_.types->type(*typeIndex);
+ return push(RefType::fromTypeDef(&typeDef, inputType.isNullableAsOperand()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefTest(bool nullable, RefType* sourceType,
+ RefType* destType, Value* ref) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefTest);
+
+ if (!readHeapType(nullable, destType)) {
+ return false;
+ }
+
+ StackType inputType;
+ if (!popWithType(destType->topType(), ref, &inputType)) {
+ return false;
+ }
+ *sourceType = inputType.valTypeOr(RefType::any()).refType();
+
+ if (!destType->isAnyHierarchy()) {
+ return fail("ref.test only supports the any hierarchy for now");
+ }
+
+ return push(ValType(ValType::I32));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefCast(bool nullable, RefType* sourceType,
+ RefType* destType, Value* ref) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefCast);
+
+ if (!readHeapType(nullable, destType)) {
+ return false;
+ }
+
+ StackType inputType;
+ if (!popWithType(destType->topType(), ref, &inputType)) {
+ return false;
+ }
+ *sourceType = inputType.valTypeOr(RefType::any()).refType();
+
+ if (!destType->isAnyHierarchy()) {
+ return fail("ref.cast only supports the any hierarchy for now");
+ }
+
+ return push(*destType);
+}
+
+// `br_on_cast <flags> <labelRelativeDepth> <rt1> <rt2>`
+// branches if a reference has a given heap type.
+//
+// V6 spec text follows - note that br_on_cast and br_on_cast_fail are both
+// handled by this function (disambiguated by a flag).
+//
+// * `br_on_cast <labelidx> <reftype> <reftype>` branches if a reference has a
+// given type
+// - `br_on_cast $l rt1 rt2 : [t0* rt1] -> [t0* rt1\rt2]`
+// - iff `$l : [t0* rt2]`
+// - and `rt2 <: rt1`
+// - passes operand along with branch under target type, plus possible extra
+// args
+// - if `rt2` contains `null`, branches on null, otherwise does not
+// * `br_on_cast_fail <labelidx> <reftype> <reftype>` branches if a reference
+// does not have a given type
+// - `br_on_cast_fail $l rt1 rt2 : [t0* rt1] -> [t0* rt2]`
+// - iff `$l : [t0* rt1\rt2]`
+// - and `rt2 <: rt1`
+// - passes operand along with branch, plus possible extra args
+// - if `rt2` contains `null`, does not branch on null, otherwise does
+// where:
+// - `(ref null1? ht1)\(ref null ht2) = (ref ht1)`
+// - `(ref null1? ht1)\(ref ht2) = (ref null1? ht1)`
+//
+// The `rt1\rt2` syntax is a "diff" - it is basically rt1 minus rt2, because a
+// successful cast to rt2 will branch away. So if rt2 allows null, the result
+// after a non-branch will be non-null; on the other hand, if rt2 is
+// non-nullable, the cast will have nothing to say about nullability and the
+// nullability of rt1 will be preserved.
+//
+// `values` will be nonempty after the call, and its last entry will be the
+// type that causes a branch (rt1\rt2 or rt2, depending).
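+//
+// As a worked instance of the diff rules above (illustrative only):
+//   (ref null $t)\(ref null $s) = (ref $t)       - null would have branched
+//   (ref null $t)\(ref $s)      = (ref null $t)  - nullability of rt1 is kept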
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnCast(bool* onSuccess,
+ uint32_t* labelRelativeDepth,
+ RefType* sourceType, RefType* destType,
+ ResultType* labelType,
+ ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnCast);
+
+ uint8_t flags;
+ if (!readFixedU8(&flags)) {
+ return fail("unable to read br_on_cast flags");
+ }
+ bool sourceNullable = flags & (1 << 0);
+ bool destNullable = flags & (1 << 1);
+ *onSuccess = !(flags & (1 << 2));
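+  // For example (illustrative only): flags == 0b011 encodes br_on_cast (bit 2
+  // clear) with nullable source and destination types, while flags == 0b100
+  // encodes br_on_cast_fail with non-nullable source and destination types.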
+
+ if (!readVarU32(labelRelativeDepth)) {
+ return fail("unable to read br_on_cast depth");
+ }
+
+ // This is distinct from the actual source type we pop from the stack, which
+ // can be more specific and allow for better optimizations.
+ RefType immediateSourceType;
+ if (!readHeapType(sourceNullable, &immediateSourceType)) {
+ return fail("unable to read br_on_cast source type");
+ }
+
+ if (!readHeapType(destNullable, destType)) {
+ return fail("unable to read br_on_cast dest type");
+ }
+
+ // Check that source and destination types are compatible
+ if (!checkIsSubtypeOf(*destType, immediateSourceType)) {
+ return fail(
+ "type mismatch: source and destination types for cast are "
+ "incompatible");
+ }
+
+ RefType typeOnSuccess = *destType;
+ // This is rt1\rt2
+ RefType typeOnFail =
+ destNullable ? immediateSourceType.asNonNullable() : immediateSourceType;
+ RefType typeOnBranch = *onSuccess ? typeOnSuccess : typeOnFail;
+ RefType typeOnFallthrough = *onSuccess ? typeOnFail : typeOnSuccess;
+
+ if (!typeOnSuccess.isAnyHierarchy() || !typeOnFail.isAnyHierarchy()) {
+ return fail(
+ "br_on_cast and br_on_cast_fail only support the any hierarchy for "
+ "now");
+ }
+
+ // Get the branch target type, which will also determine the type of extra
+ // values that are passed along on branch.
+ Control* block = nullptr;
+ if (!getControl(*labelRelativeDepth, &block)) {
+ return false;
+ }
+ *labelType = block->branchTargetType();
+
+ // Check we have at least one value slot in the branch target type, so as to
+ // receive the casted or non-casted type when we branch.
+ const size_t labelTypeNumValues = labelType->length();
+ if (labelTypeNumValues < 1) {
+ return fail("type mismatch: branch target type has no value types");
+ }
+
+ // The last value slot in the branch target type is what is being cast.
+ // This slot is guaranteed to exist by the above check.
+
+ // Check that the branch target type can accept typeOnBranch.
+ if (!checkIsSubtypeOf(typeOnBranch, (*labelType)[labelTypeNumValues - 1])) {
+ return false;
+ }
+
+ // Replace the top operand with the result of falling through. Even branching
+ // on success can change the type on top of the stack on fallthrough.
+ Value inputValue;
+ StackType inputType;
+ if (!popWithType(immediateSourceType, &inputValue, &inputType)) {
+ return false;
+ }
+ *sourceType = inputType.valTypeOr(RefType::any()).refType();
+ infalliblePush(TypeAndValue(typeOnFallthrough, inputValue));
+
+ // Create a copy of the branch target type, with the relevant value slot
+ // replaced by typeOnFallthrough.
+ ValTypeVector fallthroughTypes;
+ if (!labelType->cloneToVector(&fallthroughTypes)) {
+ return false;
+ }
+ fallthroughTypes[labelTypeNumValues - 1] = typeOnFallthrough;
+
+ return checkTopTypeMatches(ResultType::Vector(fallthroughTypes), values,
+ /*rewriteStackTypes=*/false);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkBrOnCastCommonV5(uint32_t labelRelativeDepth,
+ RefType* sourceType,
+ ValType castToType,
+ ResultType* labelType,
+ ValueVector* values) {
+ if (!(castToType.isRefType() && castToType.refType().isAnyHierarchy())) {
+ return fail("br_on_cast v5 only supports the any hierarchy");
+ }
+
+ // The casted from type is any subtype of anyref.
+ ValType anyrefType(RefType::any());
+ *sourceType = RefType::any();
+
+ // Get the branch target type, which will also determine the type of extra
+ // values that are passed along with the casted type. This validates
+ // requirement (1).
+ Control* block = nullptr;
+ if (!getControl(labelRelativeDepth, &block)) {
+ return false;
+ }
+ *labelType = block->branchTargetType();
+
+ // Check we have at least one value slot in the branch target type, so as to
+ // receive the casted type in the case where the cast succeeds.
+ const size_t labelTypeNumValues = labelType->length();
+ if (labelTypeNumValues < 1) {
+ return fail("type mismatch: branch target type has no value slots");
+ }
+
+ // The last value slot in the branch target type is what is being cast.
+ // This slot is guaranteed to exist by the above check.
+
+ // Check that the branch target type can accept castType. The branch target
+ // may specify a supertype of castType, and this is okay. Validates (2).
+ if (!checkIsSubtypeOf(castToType, (*labelType)[labelTypeNumValues - 1])) {
+ return false;
+ }
+
+ // Create a copy of the branch target type, with the relevant value slot
+ // replaced by anyrefType. Use this to check that the stack has the proper
+ // types to branch to the target type.
+ //
+ // TODO: We could avoid a potential allocation here by handwriting a custom
+ // checkTopTypeMatches that handles this case.
+ ValTypeVector fallthroughType;
+ if (!labelType->cloneToVector(&fallthroughType)) {
+ return false;
+ }
+ fallthroughType[labelTypeNumValues - 1] = anyrefType;
+
+ // Validates the first half of (3), if we pretend that topType is eqref,
+ // which it isn't really.
+ return checkTopTypeMatches(ResultType::Vector(fallthroughType), values,
+ /*rewriteStackTypes=*/false);
+}
+
+// `br_on_cast <labelRelativeDepth> null? <castTypeIndex>`
+// branches if a reference has a given heap type
+//
+// br_on_cast $label null? castType : [t0* (ref null argType)] ->
+// [t0* (ref null2? argType)]
+// (1) iff $label : [t0* labelType]
+// (2) and (ref null3? castType) <: labelType
+// (3) and castType <: topType and argType <: topType
+// where topType is a common super type
+// (4) and null? = null3? =/= null2?
+//
+// - passes operand along with branch under target type,
+// plus possible extra args
+// - if null? is present, branches on null, otherwise does not
+//
+// Currently unhandled:
+// (3) partial check, and not really right
+// (4) neither checked nor implemented
+//
+// `values` will be nonempty after the call, and its last entry will be that
+// of the argument.
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnCastV5(uint32_t* labelRelativeDepth,
+ RefType* sourceType,
+ uint32_t* castTypeIndex,
+ ResultType* labelType,
+ ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnCastV5);
+
+ if (!readVarU32(labelRelativeDepth)) {
+ return fail("unable to read br_on_cast depth");
+ }
+
+ if (!readGcTypeIndex(castTypeIndex)) {
+ return false;
+ }
+
+ // The casted to type is a non-nullable reference to the type index
+ // specified as an immediate.
+ const TypeDef& castTypeDef = env_.types->type(*castTypeIndex);
+ ValType castType(RefType::fromTypeDef(&castTypeDef, false));
+
+ return checkBrOnCastCommonV5(*labelRelativeDepth, sourceType, castType,
+ labelType, values);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnCastHeapV5(
+ bool nullable, uint32_t* labelRelativeDepth, RefType* sourceType,
+ RefType* destType, ResultType* labelType, ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnCastV5);
+
+ if (!readVarU32(labelRelativeDepth)) {
+ return fail("unable to read br_on_cast depth");
+ }
+
+ if (!readHeapType(nullable, destType)) {
+ return false;
+ }
+
+ ValType castToType(*destType);
+ return checkBrOnCastCommonV5(*labelRelativeDepth, sourceType, castToType,
+ labelType, values);
+}
+
+// `br_on_cast_fail <labelRelativeDepth> null? <castTypeIndex>`
+// branches if a reference does not have a given heap type
+//
+// br_on_cast_fail $label null? castType : [t0* (ref null argType)] ->
+// [t0* (ref null2? castType)]
+// (1) iff $label : [t0* labelType]
+// (2) and (ref null3? argType) <: labelType
+// (3) and castType <: topType and argType <: topType
+// where topType is a common super type
+// (4) and null? = null2? =/= null3?
+//
+// - passes operand along with branch, plus possible extra args
+// - if null? is present, does not branch on null, otherwise does
+//
+// Currently unhandled:
+// (3) partial check, and not really right
+// (4) neither checked nor implemented
+//
+// `values` will be nonempty after the call, and its last entry will be that
+// of the argument.
+template <typename Policy>
+inline bool OpIter<Policy>::checkBrOnCastFailCommonV5(
+ uint32_t labelRelativeDepth, RefType* sourceType, ValType castToType,
+ ResultType* labelType, ValueVector* values) {
+ if (!(castToType.isRefType() && castToType.refType().isAnyHierarchy())) {
+ return fail("br_on_cast_fail v5 only supports the any hierarchy");
+ }
+
+ // Get the branch target type, which will also determine the type of extra
+ // values that are passed along with the casted type. This validates
+ // requirement (1).
+ Control* block = nullptr;
+ if (!getControl(labelRelativeDepth, &block)) {
+ return false;
+ }
+ *labelType = block->branchTargetType();
+
+  // Check we have at least one value slot in the branch target type, so as to
+ // receive the argument value in the case where the cast fails.
+ if (labelType->length() < 1) {
+ return fail("type mismatch: branch target type has no value slots");
+ }
+
+ // Check all operands match the failure label's target type. Validates (2).
+ if (!checkTopTypeMatches(*labelType, values,
+ /*rewriteStackTypes=*/false)) {
+ return false;
+ }
+
+ // The top operand needs to be compatible with the casted from type.
+ // Validates the first half of (3), if we pretend that topType is eqref,
+ // which it isn't really.
+ Value ignored;
+ StackType inputValue;
+ if (!popWithType(ValType(RefType::any()), &ignored, &inputValue)) {
+ return false;
+ }
+ *sourceType = inputValue.valTypeOr(RefType::any()).refType();
+
+ // The top result in the fallthrough case is the casted to type.
+ infalliblePush(TypeAndValue(castToType, ignored));
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnCastFailV5(uint32_t* labelRelativeDepth,
+ RefType* sourceType,
+ uint32_t* castTypeIndex,
+ ResultType* labelType,
+ ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnCastFailV5);
+
+ if (!readVarU32(labelRelativeDepth)) {
+ return fail("unable to read br_on_cast_fail depth");
+ }
+
+ if (!readGcTypeIndex(castTypeIndex)) {
+ return false;
+ }
+
+ // The casted to type is a non-nullable reference to the type index
+ // specified as an immediate.
+ const TypeDef& castToTypeDef = env_.types->type(*castTypeIndex);
+ ValType castToType(RefType::fromTypeDef(&castToTypeDef, false));
+
+ return checkBrOnCastFailCommonV5(*labelRelativeDepth, sourceType, castToType,
+ labelType, values);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnCastFailHeapV5(
+ bool nullable, uint32_t* labelRelativeDepth, RefType* sourceType,
+ RefType* destType, ResultType* labelType, ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnCastFailV5);
+
+ if (!readVarU32(labelRelativeDepth)) {
+ return fail("unable to read br_on_cast_fail depth");
+ }
+
+ if (!readHeapType(nullable, destType)) {
+ return false;
+ }
+
+ ValType castToType(*destType);
+ return checkBrOnCastFailCommonV5(*labelRelativeDepth, sourceType, castToType,
+ labelType, values);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnNonStructV5(uint32_t* labelRelativeDepth,
+ ResultType* labelType,
+ ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnNonStructV5);
+
+ if (!readVarU32(labelRelativeDepth)) {
+ return fail("unable to read br_on_non_struct depth");
+ }
+
+ RefType sourceTypeIgnored;
+
+ // The casted to type is a non-nullable reference to a struct.
+ ValType castToType(RefType::struct_().asNonNullable());
+
+ return checkBrOnCastFailCommonV5(*labelRelativeDepth, &sourceTypeIgnored,
+ castToType, labelType, values);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefConversion(RefType operandType,
+ RefType resultType,
+ Value* operandValue) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefConversion);
+
+ StackType actualOperandType;
+ if (!popWithType(ValType(operandType), operandValue, &actualOperandType)) {
+ return false;
+ }
+
+ // The result nullability is the same as the operand nullability
+ bool outputNullable = actualOperandType.isNullableAsOperand();
+ infalliblePush(ValType(resultType.withIsNullable(outputNullable)));
+ return true;
+}
+
+#endif // ENABLE_WASM_GC
+
+#ifdef ENABLE_WASM_SIMD
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLaneIndex(uint32_t inputLanes,
+ uint32_t* laneIndex) {
+ uint8_t tmp;
+ if (!readFixedU8(&tmp)) {
+ return false; // Caller signals error
+ }
+ if (tmp >= inputLanes) {
+ return false;
+ }
+ *laneIndex = tmp;
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readExtractLane(ValType resultType,
+ uint32_t inputLanes,
+ uint32_t* laneIndex, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ExtractLane);
+
+ if (!readLaneIndex(inputLanes, laneIndex)) {
+ return fail("missing or invalid extract_lane lane index");
+ }
+
+ if (!popWithType(ValType::V128, input)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readReplaceLane(ValType operandType,
+ uint32_t inputLanes,
+ uint32_t* laneIndex,
+ Value* baseValue, Value* operand) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ReplaceLane);
+
+ if (!readLaneIndex(inputLanes, laneIndex)) {
+ return fail("missing or invalid replace_lane lane index");
+ }
+
+ if (!popWithType(operandType, operand)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, baseValue)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readVectorShift(Value* baseValue, Value* shift) {
+ MOZ_ASSERT(Classify(op_) == OpKind::VectorShift);
+
+ if (!popWithType(ValType::I32, shift)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, baseValue)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readVectorShuffle(Value* v1, Value* v2,
+ V128* selectMask) {
+ MOZ_ASSERT(Classify(op_) == OpKind::VectorShuffle);
+
+ for (unsigned char& byte : selectMask->bytes) {
+ uint8_t tmp;
+ if (!readFixedU8(&tmp)) {
+ return fail("unable to read shuffle index");
+ }
+ if (tmp > 31) {
+ return fail("shuffle index out of range");
+ }
+ byte = tmp;
+ }
+
+ if (!popWithType(ValType::V128, v2)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, v1)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readV128Const(V128* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::V128);
+
+ if (!d_.readV128Const(value)) {
+ return false;
+ }
+
+ return push(ValType::V128);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoadSplat(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Load);
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoadExtend(LinearMemoryAddress<Value>* addr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Load);
+
+ if (!readLinearMemoryAddress(/*byteSize=*/8, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoadLane(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ uint32_t* laneIndex, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::LoadLane);
+
+ if (!popWithType(ValType::V128, input)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ uint32_t inputLanes = 16 / byteSize;
+ if (!readLaneIndex(inputLanes, laneIndex)) {
+ return fail("missing or invalid load_lane lane index");
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStoreLane(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ uint32_t* laneIndex, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::StoreLane);
+
+ if (!popWithType(ValType::V128, input)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ uint32_t inputLanes = 16 / byteSize;
+ if (!readLaneIndex(inputLanes, laneIndex)) {
+ return fail("missing or invalid store_lane lane index");
+ }
+
+ return true;
+}
+
+#endif // ENABLE_WASM_SIMD
+
+template <typename Policy>
+inline bool OpIter<Policy>::readIntrinsic(const Intrinsic** intrinsic,
+ ValueVector* params) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Intrinsic);
+
+ uint32_t id;
+ if (!d_.readVarU32(&id)) {
+ return false;
+ }
+
+ if (id >= uint32_t(IntrinsicId::Limit)) {
+ return fail("intrinsic index out of range");
+ }
+
+ *intrinsic = &Intrinsic::getFromId(IntrinsicId(id));
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+ return popWithTypes((*intrinsic)->params, params);
+}
+
+} // namespace wasm
+} // namespace js
+
+static_assert(std::is_trivially_copyable<
+ js::wasm::TypeAndValueT<mozilla::Nothing>>::value,
+ "Must be trivially copyable");
+static_assert(std::is_trivially_destructible<
+ js::wasm::TypeAndValueT<mozilla::Nothing>>::value,
+ "Must be trivially destructible");
+
+static_assert(std::is_trivially_copyable<
+ js::wasm::ControlStackEntry<mozilla::Nothing>>::value,
+ "Must be trivially copyable");
+static_assert(std::is_trivially_destructible<
+ js::wasm::ControlStackEntry<mozilla::Nothing>>::value,
+ "Must be trivially destructible");
+
+#endif // wasm_op_iter_h
diff --git a/js/src/wasm/WasmProcess.cpp b/js/src/wasm/WasmProcess.cpp
new file mode 100644
index 0000000000..9b8121ceec
--- /dev/null
+++ b/js/src/wasm/WasmProcess.cpp
@@ -0,0 +1,438 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmProcess.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/BinarySearch.h"
+#include "mozilla/ScopeExit.h"
+
+#include "gc/Memory.h"
+#include "threading/ExclusiveData.h"
+#include "vm/MutexIDs.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmInstance.h"
+
+using namespace js;
+using namespace wasm;
+
+using mozilla::BinarySearchIf;
+
+// Per-process map from values of program-counter (pc) to CodeSegments.
+//
+// Whenever a new CodeSegment is ready to use, it has to be registered so that
+// we can have fast lookups from pc to CodeSegments in numerous places. Since
+// wasm compilation may be tiered, and the second tier doesn't have access to
+// any JSContext/JS::Compartment/etc lying around, we have to use a process-wide
+// map instead.
+
+using CodeSegmentVector = Vector<const CodeSegment*, 0, SystemAllocPolicy>;
+
+Atomic<bool> wasm::CodeExists(false);
+
+// Because of profiling, the thread running wasm might need to know to which
+// CodeSegment the current PC belongs, during a call to lookup(). A lookup
+// is a read-only operation, and we don't want to take a lock then
+// (otherwise, we could have a deadlock situation if an async lookup
+// happened on a given thread that was holding mutatorsMutex_ while getting
+// sampled). Since the writer could be modifying the data that is getting
+// looked up, the writer functions use spin-locks to know if there are any
+// observers (i.e. calls to lookup()) of the atomic data.
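+//
+// A minimal sketch of the resulting protocol (a summary of the code below, not
+// additional machinery):
+//
+//   reader (lookup path):              writer (insert/remove):
+//     sNumActiveLookups++                lock mutatorsMutex_
+//     v = readonlyCodeSegments_          mutate mutableCodeSegments_
+//     binary-search v                    swap the two vector pointers
+//     sNumActiveLookups--                spin until sNumActiveLookups == 0
+//                                        repeat the mutation on the other
+//                                          vector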
+
+static Atomic<size_t> sNumActiveLookups(0);
+
+class ProcessCodeSegmentMap {
+ // Since writes (insertions or removals) can happen on any background
+ // thread at the same time, we need a lock here.
+
+ Mutex mutatorsMutex_ MOZ_UNANNOTATED;
+
+ CodeSegmentVector segments1_;
+ CodeSegmentVector segments2_;
+
+ // Except during swapAndWait(), there are no lookup() observers of the
+ // vector pointed to by mutableCodeSegments_
+
+ CodeSegmentVector* mutableCodeSegments_;
+ Atomic<const CodeSegmentVector*> readonlyCodeSegments_;
+
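+  // Comparator for BinarySearchIf: the vectors are kept sorted by segment base
+  // address, and a segment compares equal to any pc that it contains.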
+ struct CodeSegmentPC {
+ const void* pc;
+ explicit CodeSegmentPC(const void* pc) : pc(pc) {}
+ int operator()(const CodeSegment* cs) const {
+ if (cs->containsCodePC(pc)) {
+ return 0;
+ }
+ if (pc < cs->base()) {
+ return -1;
+ }
+ return 1;
+ }
+ };
+
+ void swapAndWait() {
+ // Both vectors are consistent for lookup at this point although their
+ // contents are different: there is no way for the looked up PC to be
+ // in the code segment that is getting registered, because the code
+ // segment is not even fully created yet.
+
+ // If a lookup happens before this instruction, then the
+ // soon-to-become-former read-only pointer is used during the lookup,
+ // which is valid.
+
+ mutableCodeSegments_ = const_cast<CodeSegmentVector*>(
+ readonlyCodeSegments_.exchange(mutableCodeSegments_));
+
+ // If a lookup happens after this instruction, then the updated vector
+ // is used, which is valid:
+ // - in case of insertion, it means the new vector contains more data,
+ // but it's fine since the code segment is getting registered and thus
+ // isn't even fully created yet, so the code can't be running.
+ // - in case of removal, it means the new vector contains one less
+ // entry, but it's fine since unregistering means the code segment
+ // isn't used by any live instance anymore, thus PC can't be in the
+ // to-be-removed code segment's range.
+
+ // A lookup could have happened on any of the two vectors. Wait for
+ // observers to be done using any vector before mutating.
+
+ while (sNumActiveLookups > 0) {
+ }
+ }
+
+ public:
+ ProcessCodeSegmentMap()
+ : mutatorsMutex_(mutexid::WasmCodeSegmentMap),
+ mutableCodeSegments_(&segments1_),
+ readonlyCodeSegments_(&segments2_) {}
+
+ ~ProcessCodeSegmentMap() {
+ MOZ_RELEASE_ASSERT(sNumActiveLookups == 0);
+ MOZ_ASSERT(segments1_.empty());
+ MOZ_ASSERT(segments2_.empty());
+ segments1_.clearAndFree();
+ segments2_.clearAndFree();
+ }
+
+ bool insert(const CodeSegment* cs) {
+ LockGuard<Mutex> lock(mutatorsMutex_);
+
+ size_t index;
+ MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &index));
+
+ if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
+ cs)) {
+ return false;
+ }
+
+ CodeExists = true;
+
+ swapAndWait();
+
+#ifdef DEBUG
+ size_t otherIndex;
+ MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &otherIndex));
+ MOZ_ASSERT(index == otherIndex);
+#endif
+
+    // Although we could simply revert the insertion in the read-only
+    // vector, it is simpler to just crash; given that each CodeSegment
+    // consumes multiple pages, it is unlikely this insert() would OOM in
+    // practice.
+ AutoEnterOOMUnsafeRegion oom;
+ if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
+ cs)) {
+ oom.crash("when inserting a CodeSegment in the process-wide map");
+ }
+
+ return true;
+ }
+
+ void remove(const CodeSegment* cs) {
+ LockGuard<Mutex> lock(mutatorsMutex_);
+
+ size_t index;
+ MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &index));
+
+ mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);
+
+ if (!mutableCodeSegments_->length()) {
+ CodeExists = false;
+ }
+
+ swapAndWait();
+
+#ifdef DEBUG
+ size_t otherIndex;
+ MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &otherIndex));
+ MOZ_ASSERT(index == otherIndex);
+#endif
+
+ mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);
+ }
+
+ const CodeSegment* lookup(const void* pc) {
+ const CodeSegmentVector* readonly = readonlyCodeSegments_;
+
+ size_t index;
+ if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeSegmentPC(pc),
+ &index)) {
+ return nullptr;
+ }
+
+ // It is fine returning a raw CodeSegment*, because we assume we are
+ // looking up a live PC in code which is on the stack, keeping the
+ // CodeSegment alive.
+
+ return (*readonly)[index];
+ }
+};
+
+// This field is only atomic to handle buggy scenarios where we crash during
+// startup or shutdown and thus racily perform wasm::LookupCodeSegment() from
+// the crashing thread.
+
+static Atomic<ProcessCodeSegmentMap*> sProcessCodeSegmentMap(nullptr);
+
+bool wasm::RegisterCodeSegment(const CodeSegment* cs) {
+ MOZ_ASSERT(cs->codeTier().code().initialized());
+
+ // This function cannot race with startup/shutdown.
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ MOZ_RELEASE_ASSERT(map);
+ return map->insert(cs);
+}
+
+void wasm::UnregisterCodeSegment(const CodeSegment* cs) {
+ // This function cannot race with startup/shutdown.
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ MOZ_RELEASE_ASSERT(map);
+ map->remove(cs);
+}
+
+const CodeSegment* wasm::LookupCodeSegment(
+ const void* pc, const CodeRange** codeRange /*= nullptr */) {
+ // Since wasm::LookupCodeSegment() can race with wasm::ShutDown(), we must
+ // additionally keep sNumActiveLookups above zero for the duration we're
+ // using the ProcessCodeSegmentMap. wasm::ShutDown() spin-waits on
+ // sNumActiveLookups getting to zero.
+
+ auto decObserver = mozilla::MakeScopeExit([&] {
+ MOZ_ASSERT(sNumActiveLookups > 0);
+ sNumActiveLookups--;
+ });
+ sNumActiveLookups++;
+
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ if (!map) {
+ return nullptr;
+ }
+
+ if (const CodeSegment* found = map->lookup(pc)) {
+ if (codeRange) {
+ *codeRange = found->isModule() ? found->asModule()->lookupRange(pc)
+ : found->asLazyStub()->lookupRange(pc);
+ }
+ return found;
+ }
+
+ if (codeRange) {
+ *codeRange = nullptr;
+ }
+
+ return nullptr;
+}
+
+const Code* wasm::LookupCode(const void* pc,
+ const CodeRange** codeRange /* = nullptr */) {
+ const CodeSegment* found = LookupCodeSegment(pc, codeRange);
+ MOZ_ASSERT_IF(!found && codeRange, !*codeRange);
+ return found ? &found->code() : nullptr;
+}
+
+bool wasm::InCompiledCode(void* pc) {
+ if (LookupCodeSegment(pc)) {
+ return true;
+ }
+
+ const CodeRange* codeRange;
+ uint8_t* codeBase;
+ return LookupBuiltinThunk(pc, &codeRange, &codeBase);
+}
+
+/**
+ * ReadLockFlag maintains a flag that can be mutated multiple times before it
+ * is first read; once it has been read, further mutations fail and the value
+ * stays the same.
+ */
+class ReadLockFlag {
+ private:
+ bool enabled_;
+ bool read_;
+
+ public:
+ ReadLockFlag() : enabled_(false), read_(false) {}
+
+ bool get() {
+ read_ = true;
+ return enabled_;
+ }
+
+ bool set(bool enabled) {
+ if (read_) {
+ return false;
+ }
+ enabled_ = enabled;
+ return true;
+ }
+};
+
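+// Illustrative note on how ReadLockFlag is used below: ConfigureHugeMemory()
+// and DisableHugeMemory() call set() during startup; the first call to
+// IsHugeMemoryEnabled() latches the value via get(), after which further set()
+// calls fail and the cached 'enabled32'/'enabled64' statics never change.
+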
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+/*
+ * Some 64 bit systems greatly limit the range of available virtual memory. We
+ * require about 6GiB for each wasm huge memory, which can exhaust the address
+ * spaces of these systems quickly. In order to avoid this, we only enable huge
+ * memory if we observe a large enough address space.
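+ *
+ * (The ~6GiB figure above is, roughly, the 4GiB of addressable index space
+ * plus a multi-gigabyte guard region used to elide bounds checks; see
+ * WasmMemory.h for the exact layout, which is an implementation detail.)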
+ *
+ * This number is conservatively chosen to continue using huge memory on our
+ * smallest address space system, Android on ARM64 (39 bits), along with a bit
+ * for error in detecting the address space limit.
+ */
+static const size_t MinAddressBitsForHugeMemory = 38;
+
+/*
+ * In addition to the above, some systems impose an independent limit on the
+ * amount of virtual memory that may be used.
+ */
+static const size_t MinVirtualMemoryLimitForHugeMemory =
+ size_t(1) << MinAddressBitsForHugeMemory;
+#endif
+
+ExclusiveData<ReadLockFlag> sHugeMemoryEnabled32(
+ mutexid::WasmHugeMemoryEnabled);
+ExclusiveData<ReadLockFlag> sHugeMemoryEnabled64(
+ mutexid::WasmHugeMemoryEnabled);
+
+static MOZ_NEVER_INLINE bool IsHugeMemoryEnabledHelper32() {
+ auto state = sHugeMemoryEnabled32.lock();
+ return state->get();
+}
+
+static MOZ_NEVER_INLINE bool IsHugeMemoryEnabledHelper64() {
+ auto state = sHugeMemoryEnabled64.lock();
+ return state->get();
+}
+
+bool wasm::IsHugeMemoryEnabled(wasm::IndexType t) {
+ if (t == IndexType::I32) {
+ static bool enabled32 = IsHugeMemoryEnabledHelper32();
+ return enabled32;
+ }
+ static bool enabled64 = IsHugeMemoryEnabledHelper64();
+ return enabled64;
+}
+
+bool wasm::DisableHugeMemory() {
+ bool ok = true;
+ {
+ auto state = sHugeMemoryEnabled64.lock();
+ ok = ok && state->set(false);
+ }
+ {
+ auto state = sHugeMemoryEnabled32.lock();
+ ok = ok && state->set(false);
+ }
+ return ok;
+}
+
+void ConfigureHugeMemory() {
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+ bool ok = true;
+
+ {
+ // Currently no huge memory for IndexType::I64, so always set to false.
+ auto state = sHugeMemoryEnabled64.lock();
+ ok = ok && state->set(false);
+ }
+
+ if (gc::SystemAddressBits() < MinAddressBitsForHugeMemory) {
+ return;
+ }
+
+ if (gc::VirtualMemoryLimit() != size_t(-1) &&
+ gc::VirtualMemoryLimit() < MinVirtualMemoryLimitForHugeMemory) {
+ return;
+ }
+
+ {
+ auto state = sHugeMemoryEnabled32.lock();
+ ok = ok && state->set(true);
+ }
+
+ MOZ_RELEASE_ASSERT(ok);
+#endif
+}
+
+bool wasm::Init() {
+ MOZ_RELEASE_ASSERT(!sProcessCodeSegmentMap);
+
+ uintptr_t pageSize = gc::SystemPageSize();
+ MOZ_RELEASE_ASSERT(wasm::NullPtrGuardSize <= pageSize);
+
+ ConfigureHugeMemory();
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ ProcessCodeSegmentMap* map = js_new<ProcessCodeSegmentMap>();
+ if (!map) {
+ oomUnsafe.crash("js::wasm::Init");
+ }
+
+ sProcessCodeSegmentMap = map;
+ return true;
+}
+
+void wasm::ShutDown() {
+ // If there are live runtimes then we are already pretty much leaking the
+ // world, so to avoid spurious assertions (which are valid and valuable when
+  // there are no live JSRuntimes), don't bother releasing anything here.
+ if (JSRuntime::hasLiveRuntimes()) {
+ return;
+ }
+
+ PurgeCanonicalTypes();
+
+ // After signalling shutdown by clearing sProcessCodeSegmentMap, wait for
+ // concurrent wasm::LookupCodeSegment()s to finish.
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ MOZ_RELEASE_ASSERT(map);
+ sProcessCodeSegmentMap = nullptr;
+ while (sNumActiveLookups > 0) {
+ }
+
+ ReleaseBuiltinThunks();
+ js_delete(map);
+}
diff --git a/js/src/wasm/WasmProcess.h b/js/src/wasm/WasmProcess.h
new file mode 100644
index 0000000000..89a0b733c9
--- /dev/null
+++ b/js/src/wasm/WasmProcess.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_process_h
+#define wasm_process_h
+
+#include "mozilla/Atomics.h"
+
+#include "wasm/WasmMemory.h"
+
+namespace js {
+namespace wasm {
+
+class Code;
+class CodeRange;
+class CodeSegment;
+
+// These methods return the wasm::CodeSegment (resp. wasm::Code) containing
+// the given pc, if any exist in the process. These methods do not take a lock,
+// and thus are safe to use in a profiling context.
+
+const CodeSegment* LookupCodeSegment(const void* pc,
+ const CodeRange** codeRange = nullptr);
+
+const Code* LookupCode(const void* pc, const CodeRange** codeRange = nullptr);
+
+// Return whether the given PC is in any type of wasm code (module or builtin).
+
+bool InCompiledCode(void* pc);
+
+// A bool member that can be used as a very fast lookup to know if there is any
+// code segment at all.
+
+extern mozilla::Atomic<bool> CodeExists;
+
+// These methods allow registering and unregistering CodeSegments so they can
+// be looked up via pc in the methods described above.
+
+bool RegisterCodeSegment(const CodeSegment* cs);
+
+void UnregisterCodeSegment(const CodeSegment* cs);
+
+// Whether this process is configured to use huge memory or not. Note that this
+// is not precise enough to tell whether a particular memory uses huge memory;
+// there are additional conditions for that.
+
+bool IsHugeMemoryEnabled(IndexType t);
+
+[[nodiscard]] bool DisableHugeMemory();
+
+// Called once before the first / after the last VM execution which could
+// execute or compile wasm.
+
+bool Init();
+
+void ShutDown();
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_process_h
diff --git a/js/src/wasm/WasmRealm.cpp b/js/src/wasm/WasmRealm.cpp
new file mode 100644
index 0000000000..31849e15a2
--- /dev/null
+++ b/js/src/wasm/WasmRealm.cpp
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmRealm.h"
+
+#include "vm/Realm.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmInstance.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace wasm;
+
+wasm::Realm::Realm(JSRuntime* rt) : runtime_(rt) {}
+
+wasm::Realm::~Realm() { MOZ_ASSERT(instances_.empty()); }
+
+struct InstanceComparator {
+ const Instance& target;
+ explicit InstanceComparator(const Instance& target) : target(target) {}
+
+ int operator()(const Instance* instance) const {
+ if (instance == &target) {
+ return 0;
+ }
+
+ // Instances can share code, so the segments can be equal (though they
+ // can't partially overlap). If the codeBases are equal, we sort by
+ // Instance address. Thus a Code may map to many instances.
+
+ // Compare by the first tier, always.
+
+ Tier instanceTier = instance->code().stableTier();
+ Tier targetTier = target.code().stableTier();
+
+ if (instance->codeBase(instanceTier) == target.codeBase(targetTier)) {
+ return instance < &target ? -1 : 1;
+ }
+
+ return target.codeBase(targetTier) < instance->codeBase(instanceTier) ? -1
+ : 1;
+ }
+};
+
+bool wasm::Realm::registerInstance(JSContext* cx,
+ Handle<WasmInstanceObject*> instanceObj) {
+ MOZ_ASSERT(runtime_ == cx->runtime());
+
+ Instance& instance = instanceObj->instance();
+ MOZ_ASSERT(this == &instance.realm()->wasm);
+
+ instance.ensureProfilingLabels(cx->runtime()->geckoProfiler().enabled());
+
+ if (instance.debugEnabled() &&
+ instance.realm()->debuggerObservesAllExecution()) {
+ instance.debug().ensureEnterFrameTrapsState(cx, &instance, true);
+ }
+
+ {
+ if (!instances_.reserve(instances_.length() + 1)) {
+ return false;
+ }
+
+ auto runtimeInstances = cx->runtime()->wasmInstances.lock();
+ if (!runtimeInstances->reserve(runtimeInstances->length() + 1)) {
+ return false;
+ }
+
+ // To avoid implementing rollback, do not fail after mutations start.
+
+ InstanceComparator cmp(instance);
+ size_t index;
+
+    // The following section is not unsafe, but simulated OOMs do not consider
+ // the fact that these insert calls are guarded by the previous reserve
+ // calls.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ (void)oomUnsafe;
+
+ MOZ_ALWAYS_FALSE(
+ BinarySearchIf(instances_, 0, instances_.length(), cmp, &index));
+ MOZ_ALWAYS_TRUE(instances_.insert(instances_.begin() + index, &instance));
+
+ MOZ_ALWAYS_FALSE(BinarySearchIf(runtimeInstances.get(), 0,
+ runtimeInstances->length(), cmp, &index));
+ MOZ_ALWAYS_TRUE(
+ runtimeInstances->insert(runtimeInstances->begin() + index, &instance));
+ }
+
+ // Notify the debugger after wasmInstances is unlocked.
+ DebugAPI::onNewWasmInstance(cx, instanceObj);
+ return true;
+}
+
+void wasm::Realm::unregisterInstance(Instance& instance) {
+ InstanceComparator cmp(instance);
+ size_t index;
+
+ if (BinarySearchIf(instances_, 0, instances_.length(), cmp, &index)) {
+ instances_.erase(instances_.begin() + index);
+ }
+
+ auto runtimeInstances = runtime_->wasmInstances.lock();
+ if (BinarySearchIf(runtimeInstances.get(), 0, runtimeInstances->length(), cmp,
+ &index)) {
+ runtimeInstances->erase(runtimeInstances->begin() + index);
+ }
+}
+
+void wasm::Realm::ensureProfilingLabels(bool profilingEnabled) {
+ for (Instance* instance : instances_) {
+ instance->ensureProfilingLabels(profilingEnabled);
+ }
+}
+
+void wasm::Realm::addSizeOfExcludingThis(MallocSizeOf mallocSizeOf,
+ size_t* realmTables) {
+ *realmTables += instances_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+void wasm::InterruptRunningCode(JSContext* cx) {
+ auto runtimeInstances = cx->runtime()->wasmInstances.lock();
+ for (Instance* instance : runtimeInstances.get()) {
+ instance->setInterrupt();
+ }
+}
+
+void wasm::ResetInterruptState(JSContext* cx) {
+ auto runtimeInstances = cx->runtime()->wasmInstances.lock();
+ for (Instance* instance : runtimeInstances.get()) {
+ instance->resetInterrupt(cx);
+ }
+}
diff --git a/js/src/wasm/WasmRealm.h b/js/src/wasm/WasmRealm.h
new file mode 100644
index 0000000000..4216d4d7b5
--- /dev/null
+++ b/js/src/wasm/WasmRealm.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_realm_h
+#define wasm_realm_h
+
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace wasm {
+
+// wasm::Realm lives in JS::Realm and contains the wasm-related per-realm state.
+// wasm::Realm tracks every live instance in the realm and must be notified, via
+// registerInstance(), of any new WasmInstanceObject.
+
+class Realm {
+ JSRuntime* runtime_;
+ InstanceVector instances_;
+
+ public:
+ explicit Realm(JSRuntime* rt);
+ ~Realm();
+
+ // Before a WasmInstanceObject can be considered fully constructed and
+ // valid, it must be registered with the Realm. If this method fails,
+ // an error has been reported and the instance object must be abandoned.
+ // After a successful registration, an Instance must call
+ // unregisterInstance() before being destroyed.
+
+ bool registerInstance(JSContext* cx, Handle<WasmInstanceObject*> instanceObj);
+ void unregisterInstance(Instance& instance);
+
+ // Return a vector of all live instances in the realm. The lifetime of
+ // these Instances is determined by their owning WasmInstanceObject.
+ // Note that accessing instances()[i]->object() triggers a read barrier
+ // since instances() is effectively a weak list.
+
+ const InstanceVector& instances() const { return instances_; }
+
+ // Ensure all Instances in this Realm have profiling labels created.
+
+ void ensureProfilingLabels(bool profilingEnabled);
+
+ // about:memory reporting
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* realmTables);
+};
+
+// Interrupt all running wasm Instances that have been registered with
+// wasm::Realms in the given JSContext.
+
+extern void InterruptRunningCode(JSContext* cx);
+
+// After a wasm Instance sees an interrupt request and calls
+// CheckForInterrupt(), it should call ResetInterruptState() to clear the
+// interrupt request for all wasm Instances to avoid spurious trapping.
+
+void ResetInterruptState(JSContext* cx);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_realm_h
diff --git a/js/src/wasm/WasmSerialize.cpp b/js/src/wasm/WasmSerialize.cpp
new file mode 100644
index 0000000000..7a20f884e2
--- /dev/null
+++ b/js/src/wasm/WasmSerialize.cpp
@@ -0,0 +1,1230 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "wasm/WasmSerialize.h"
+
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/Vector.h"
+
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+#include <utility>
+
+#include "js/StreamConsumer.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmInitExpr.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmModuleTypes.h"
+#include "wasm/WasmTypeDef.h"
+#include "wasm/WasmValType.h"
+#include "wasm/WasmValue.h"
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::Err;
+using mozilla::Ok;
+
+namespace js {
+namespace wasm {
+
+// The following assert is used as a tripwire for new fields being added to
+// types. If this assertion is broken by your code, verify the serialization
+// code is correct, and then update the assertion.
+//
+// We only check serialized type sizes on one 'golden' platform. The platform
+// is arbitrary, but must remain consistent. The platform should be as specific
+// as possible so these assertions don't erroneously fire. Check out the
+// definition of ENABLE_WASM_VERIFY_SERIALIZATION_FOR_SIZE in js/moz.configure
+// for the current platform.
+//
+// If this mechanism becomes a hassle, we can investigate other methods of
+// achieving the same goal.
+#if defined(ENABLE_WASM_VERIFY_SERIALIZATION_FOR_SIZE)
+
+template <typename T, size_t Size>
+struct Tripwire {
+ // The following will print a compile error that contains the size of type,
+ // and can be used for updating the assertion to the correct size.
+ static char (*_Error)[sizeof(T)] = 1;
+
+ // We must reference the bad declaration to work around SFINAE.
+ static const bool Value = !_Error;
+};
+
+template <typename T>
+struct Tripwire<T, sizeof(T)> {
+ static const bool Value = true;
+};
+
+# define WASM_VERIFY_SERIALIZATION_FOR_SIZE(Type, Size) \
+ static_assert(Tripwire<Type, Size>::Value);
+
+#else
+# define WASM_VERIFY_SERIALIZATION_FOR_SIZE(Type, Size) static_assert(true);
+#endif
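+
+// For example, if a field is added to wasm::FuncType, the Tripwire
+// instantiation behind WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::FuncType, ...)
+// below fails to compile, and the reported array size is the new
+// sizeof(FuncType); after checking that the coder still serializes every
+// field, update the Size argument to match.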
+
+// A pointer is not cacheable POD
+static_assert(!is_cacheable_pod<const uint8_t*>);
+
+// A non-fixed sized array is not cacheable POD
+static_assert(!is_cacheable_pod<uint8_t[]>);
+
+// Cacheable POD is not inherited
+struct TestPodBase {};
+WASM_DECLARE_CACHEABLE_POD(TestPodBase);
+struct InheritTestPodBase : public TestPodBase {};
+static_assert(is_cacheable_pod<TestPodBase> &&
+ !is_cacheable_pod<InheritTestPodBase>);
+
+// Coding functions for containers and smart pointers need to know which code
+// function to apply to the inner value. 'CodeFunc' is the common signature to
+// be used for this.
+template <CoderMode mode, typename T>
+using CodeFunc = CoderResult (*)(Coder<mode>&, CoderArg<mode, T>);
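+
+// For example, a vector of ValType is coded by instantiating a vector coder
+// with the matching CodeFunc, as done further below in this file:
+//
+//   CodeVector<mode, ValType, &CodeValType<mode>>(coder, &item->args_)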
+
+// Some functions are generic for MODE_SIZE and MODE_ENCODE, but not
+// MODE_DECODE. This assert is used to ensure that the right function overload
+// is chosen in these cases.
+#define STATIC_ASSERT_ENCODING_OR_SIZING \
+ static_assert(mode == MODE_ENCODE || mode == MODE_SIZE, "wrong overload");
+
+CoderResult Coder<MODE_SIZE>::writeBytes(const void* unusedSrc, size_t length) {
+ size_ += length;
+ if (!size_.isValid()) {
+ return Err(OutOfMemory());
+ }
+ return Ok();
+}
+
+CoderResult Coder<MODE_ENCODE>::writeBytes(const void* src, size_t length) {
+ MOZ_RELEASE_ASSERT(buffer_ + length <= end_);
+ memcpy(buffer_, src, length);
+ buffer_ += length;
+ return Ok();
+}
+
+CoderResult Coder<MODE_DECODE>::readBytes(void* dest, size_t length) {
+ MOZ_RELEASE_ASSERT(buffer_ + length <= end_);
+ memcpy(dest, buffer_, length);
+ buffer_ += length;
+ return Ok();
+}
+
+// Cacheable POD coding functions
+
+template <typename T,
+ typename std::enable_if_t<is_cacheable_pod<T>, bool> = true>
+CoderResult CodePod(Coder<MODE_DECODE>& coder, T* item) {
+ return coder.readBytes((void*)item, sizeof(T));
+}
+
+template <CoderMode mode, typename T,
+ typename std::enable_if_t<is_cacheable_pod<T>, bool> = true>
+CoderResult CodePod(Coder<mode>& coder, const T* item) {
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+ return coder.writeBytes((const void*)item, sizeof(T));
+}
+
+// "Magic Marker". Use to sanity check the serialization process.
+
+enum class Marker : uint32_t {
+ LinkData = 0x49102278,
+ Imports,
+ Exports,
+ DataSegments,
+ ElemSegments,
+ CustomSections,
+ Code,
+ Metadata,
+ MetadataTier,
+ CodeTier,
+ ModuleSegment,
+};
+
+template <CoderMode mode>
+CoderResult Magic(Coder<mode>& coder, Marker item) {
+ if constexpr (mode == MODE_DECODE) {
+ // Assert the specified marker is in the binary
+ Marker decoded;
+ MOZ_TRY(CodePod(coder, &decoded));
+ MOZ_RELEASE_ASSERT(decoded == item);
+ return Ok();
+ } else {
+ // Encode the specified marker in the binary
+ return CodePod(coder, &item);
+ }
+}
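+
+// Both the encoder and the decoder must reach the same Magic() call at the
+// same point in the byte stream; e.g. (illustrative of the calls made later in
+// this file):
+//
+//   MOZ_TRY(Magic(coder, Marker::Code));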
+
+// mozilla::Maybe coding functions
+//
+// These functions will only code the inner value if Maybe.isSome(). The
+// coding function to use for the inner value is specified by a template
+// parameter.
+
+template <CoderMode _, typename T, CodeFunc<MODE_DECODE, T> CodeT>
+CoderResult CodeMaybe(Coder<MODE_DECODE>& coder, Maybe<T>* item) {
+ // Decode 'isSome()'
+ uint8_t isSome;
+ MOZ_TRY(CodePod(coder, &isSome));
+
+ if (isSome == 1) {
+ // Initialize to Some with default constructor
+ item->emplace();
+ // Code the inner type
+ MOZ_TRY(CodeT(coder, item->ptr()));
+ } else {
+ // Initialize to nothing
+ *item = mozilla::Nothing();
+ }
+ return Ok();
+}
+
+template <CoderMode mode, typename T, CodeFunc<mode, T> CodeT>
+CoderResult CodeMaybe(Coder<mode>& coder, const Maybe<T>* item) {
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+
+ // Encode or size 'isSome()'
+ const uint8_t isSome = item->isSome() ? 1 : 0;
+ MOZ_TRY(CodePod(coder, &isSome));
+
+ if (item->isSome()) {
+ // Encode or size the inner value
+ MOZ_TRY(CodeT(coder, item->ptr()));
+ }
+ return Ok();
+}
+
+// Cacheable POD mozilla::Vector coding functions
+//
+// These functions are only available if the element type is cacheable POD. In
+// this case, the whole contents of the vector are copied directly to/from the
+// buffer.
+
+template <typename T, size_t N,
+ typename std::enable_if_t<is_cacheable_pod<T>, bool> = true>
+CoderResult CodePodVector(Coder<MODE_DECODE>& coder,
+ Vector<T, N, SystemAllocPolicy>* item) {
+ // Decode the length
+ size_t length;
+ MOZ_TRY(CodePod(coder, &length));
+
+ // Prepare to copy into the vector
+ if (!item->initLengthUninitialized(length)) {
+ return Err(OutOfMemory());
+ }
+
+ // Copy directly from the buffer to the vector
+ const size_t byteLength = length * sizeof(T);
+ return coder.readBytes((void*)item->begin(), byteLength);
+}
+
+template <CoderMode mode, typename T, size_t N,
+ typename std::enable_if_t<is_cacheable_pod<T>, bool> = true>
+CoderResult CodePodVector(Coder<mode>& coder,
+ const Vector<T, N, SystemAllocPolicy>* item) {
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+
+ // Encode the length
+ const size_t length = item->length();
+ MOZ_TRY(CodePod(coder, &length));
+
+ // Copy directly from the vector to the buffer
+ const size_t byteLength = length * sizeof(T);
+ return coder.writeBytes((const void*)item->begin(), byteLength);
+}
+
+// Non-cacheable-POD mozilla::Vector coding functions
+//
+// These functions implement the general case of coding a vector of some type.
+// The coding function to use on the vector elements is provided through a
+// template parameter.
+
+template <CoderMode _, typename T, CodeFunc<MODE_DECODE, T> CodeT, size_t N,
+ typename std::enable_if_t<!is_cacheable_pod<T>, bool> = true>
+CoderResult CodeVector(Coder<MODE_DECODE>& coder,
+ Vector<T, N, SystemAllocPolicy>* item) {
+ // Decode the length
+ size_t length;
+ MOZ_TRY(CodePod(coder, &length));
+
+  // Attempt to grow the vector to the decoded length; this will
+  // default-initialize each element.
+ if (!item->resize(length)) {
+ return Err(OutOfMemory());
+ }
+
+ // Decode each child element from the buffer
+ for (auto iter = item->begin(); iter != item->end(); iter++) {
+ MOZ_TRY(CodeT(coder, iter));
+ }
+ return Ok();
+}
+
+template <CoderMode mode, typename T, CodeFunc<mode, T> CodeT, size_t N,
+ typename std::enable_if_t<!is_cacheable_pod<T>, bool> = true>
+CoderResult CodeVector(Coder<mode>& coder,
+ const Vector<T, N, SystemAllocPolicy>* item) {
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+
+ // Encode the length
+ const size_t length = item->length();
+ MOZ_TRY(CodePod(coder, &length));
+
+ // Encode each child element
+ for (auto iter = item->begin(); iter != item->end(); iter++) {
+ MOZ_TRY(CodeT(coder, iter));
+ }
+ return Ok();
+}
+
+// This function implements encoding and decoding of RefPtr<T>. A coding
+// function is provided for the inner value through a template parameter.
+//
+// The special handling of const qualification allows a RefPtr<const T> to be
+// decoded correctly.
+template <CoderMode mode, typename T,
+ CodeFunc<mode, std::remove_const_t<T>> CodeT>
+CoderResult CodeRefPtr(Coder<mode>& coder, CoderArg<mode, RefPtr<T>> item) {
+ if constexpr (mode == MODE_DECODE) {
+ // The RefPtr should not be initialized yet
+ MOZ_ASSERT(!item->get());
+
+ // Allocate and default construct the inner type
+ auto* allocated = js_new<std::remove_const_t<T>>();
+ if (!allocated) {
+ return Err(OutOfMemory());
+ }
+
+ // Initialize the RefPtr
+ *item = allocated;
+
+ // Decode the inner type
+ MOZ_TRY(CodeT(coder, allocated));
+ return Ok();
+ } else {
+ // Encode the inner type
+ return CodeT(coder, item->get());
+ }
+}
+
+// This function implements encoding and decoding of UniquePtr<T>.
+// A coding function is provided for the inner value as a function parameter.
+// The coding function may accept additional parameters to forward to the inner
+// coding function.
+//
+// The inner coding function is provided as an argument instead of a template
+// parameter in order to make the variable arguments template parameter
+// simpler.
+template <CoderMode mode, typename T, typename CodeTFunctor, typename... Args>
+CoderResult CodeUniquePtr(Coder<mode>& coder, CoderArg<mode, UniquePtr<T>> item,
+ CodeTFunctor innerCode, Args&&... args) {
+ if constexpr (mode == MODE_DECODE) {
+ // The UniquePtr should not be initialized yet
+ MOZ_ASSERT(!item->get());
+
+ // Allocate and default construct the inner type
+ auto allocated = js::MakeUnique<std::remove_const_t<T>>();
+ if (!allocated.get()) {
+ return Err(OutOfMemory());
+ }
+
+ // Decode the inner type
+ MOZ_TRY(innerCode(coder, allocated.get(), std::forward<Args>(args)...));
+
+ // Initialize the UniquePtr
+ *item = std::move(allocated);
+ return Ok();
+ } else {
+ // Encode the inner type
+ return innerCode(coder, item->get(), std::forward<Args>(args)...);
+ }
+}
+
+// CacheableChars coding functions
+
+static size_t StringLengthWithNullChar(const char* chars) {
+ return chars ? strlen(chars) + 1 : 0;
+}
+
+CoderResult CodeCacheableChars(Coder<MODE_DECODE>& coder,
+ CacheableChars* item) {
+ uint32_t lengthWithNullChar;
+ MOZ_TRY(CodePod(coder, &lengthWithNullChar));
+
+ // Decode the bytes, if any
+ if (lengthWithNullChar) {
+ item->reset(js_pod_malloc<char>(lengthWithNullChar));
+ if (!item->get()) {
+ return Err(OutOfMemory());
+ }
+ return coder.readBytes((char*)item->get(), lengthWithNullChar);
+ }
+
+ // If there were no bytes to write, the string should be null
+ MOZ_ASSERT(!item->get());
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeCacheableChars(Coder<mode>& coder, const CacheableChars* item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CacheableChars, 8);
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+
+ // Encode the length
+ const uint32_t lengthWithNullChar = StringLengthWithNullChar(item->get());
+ MOZ_TRY(CodePod(coder, &lengthWithNullChar));
+
+ // Write the bytes, if any
+ if (lengthWithNullChar) {
+ return coder.writeBytes((const void*)item->get(), lengthWithNullChar);
+ }
+
+ // If there were no bytes to write, the string should be null
+ MOZ_ASSERT(!item->get());
+ return Ok();
+}
+
+// Code a CacheableName
+
+template <CoderMode mode>
+CoderResult CodeCacheableName(Coder<mode>& coder,
+ CoderArg<mode, CacheableName> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CacheableName, 40);
+ MOZ_TRY(CodePodVector(coder, &item->bytes_));
+ return Ok();
+}
+
+// Code a ShareableBytes. This function only needs to forward to the inner
+// bytes vector.
+template <CoderMode mode>
+CoderResult CodeShareableBytes(Coder<mode>& coder,
+ CoderArg<mode, ShareableBytes> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::ShareableBytes, 48);
+ return CodePodVector(coder, &item->bytes);
+}
+
+// WasmValType.h
+
+/* static */
+SerializableTypeCode SerializableTypeCode::serialize(PackedTypeCode ptc,
+ const TypeContext& types) {
+ SerializableTypeCode stc = {};
+ stc.typeCode = PackedRepr(ptc.typeCode());
+ stc.typeIndex = ptc.typeDef() ? types.indexOf(*ptc.typeDef())
+ : SerializableTypeCode::NoTypeIndex;
+ stc.nullable = ptc.isNullable();
+ return stc;
+}
+
+PackedTypeCode SerializableTypeCode::deserialize(const TypeContext& types) {
+ if (typeIndex == SerializableTypeCode::NoTypeIndex) {
+ return PackedTypeCode::pack(TypeCode(typeCode), nullable);
+ }
+ const TypeDef* typeDef = &types.type(typeIndex);
+ return PackedTypeCode::pack(TypeCode(typeCode), typeDef, nullable);
+}
+
+template <CoderMode mode>
+CoderResult CodePackedTypeCode(Coder<mode>& coder,
+ CoderArg<mode, PackedTypeCode> item) {
+ if constexpr (mode == MODE_DECODE) {
+ SerializableTypeCode stc;
+ MOZ_TRY(CodePod(coder, &stc));
+ *item = stc.deserialize(*coder.types_);
+ return Ok();
+ } else if constexpr (mode == MODE_SIZE) {
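+    // In MODE_SIZE, writeBytes() only accumulates a length (see
+    // Coder<MODE_SIZE>::writeBytes above), so passing nullptr here simply
+    // reserves space without building a SerializableTypeCode.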
+ return coder.writeBytes(nullptr, sizeof(SerializableTypeCode));
+ } else {
+ SerializableTypeCode stc =
+ SerializableTypeCode::serialize(*item, *coder.types_);
+ return CodePod(coder, &stc);
+ }
+}
+
+template <CoderMode mode>
+CoderResult CodeValType(Coder<mode>& coder, CoderArg<mode, ValType> item) {
+ return CodePackedTypeCode(coder, item->addressOfPacked());
+}
+
+template <CoderMode mode>
+CoderResult CodeFieldType(Coder<mode>& coder, CoderArg<mode, FieldType> item) {
+ return CodePackedTypeCode(coder, item->addressOfPacked());
+}
+
+template <CoderMode mode>
+CoderResult CodeRefType(Coder<mode>& coder, CoderArg<mode, RefType> item) {
+ return CodePackedTypeCode(coder, item->addressOfPacked());
+}
+
+// WasmValue.h
+
+template <CoderMode mode>
+CoderResult CodeLitVal(Coder<mode>& coder, CoderArg<mode, LitVal> item) {
+ MOZ_TRY(CodeValType(coder, &item->type_));
+ MOZ_TRY(CodePod(coder, &item->cell_));
+ return Ok();
+}
+
+// WasmInitExpr.h
+
+template <CoderMode mode>
+CoderResult CodeInitExpr(Coder<mode>& coder, CoderArg<mode, InitExpr> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::InitExpr, 80);
+ MOZ_TRY(CodePod(coder, &item->kind_));
+ MOZ_TRY(CodeValType(coder, &item->type_));
+ switch (item->kind_) {
+ case InitExprKind::Literal:
+ MOZ_TRY(CodeLitVal(coder, &item->literal_));
+ break;
+ case InitExprKind::Variable:
+ MOZ_TRY(CodePodVector(coder, &item->bytecode_));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ return Ok();
+}
+
+// WasmTypeDef.h
+
+template <CoderMode mode>
+CoderResult CodeFuncType(Coder<mode>& coder, CoderArg<mode, FuncType> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::FuncType, 344);
+ MOZ_TRY((CodeVector<mode, ValType, &CodeValType<mode>>(coder, &item->args_)));
+ MOZ_TRY(
+ (CodeVector<mode, ValType, &CodeValType<mode>>(coder, &item->results_)));
+ MOZ_TRY(CodePod(coder, &item->immediateTypeId_));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeStructField(Coder<mode>& coder,
+ CoderArg<mode, StructField> item) {
+ MOZ_TRY(CodeFieldType(coder, &item->type));
+ MOZ_TRY(CodePod(coder, &item->offset));
+ MOZ_TRY(CodePod(coder, &item->isMutable));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeStructType(Coder<mode>& coder,
+ CoderArg<mode, StructType> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StructType, 136);
+ MOZ_TRY((CodeVector<mode, StructField, &CodeStructField<mode>>(
+ coder, &item->fields_)));
+ MOZ_TRY(CodePod(coder, &item->size_));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeArrayType(Coder<mode>& coder, CoderArg<mode, ArrayType> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::ArrayType, 16);
+ MOZ_TRY(CodeFieldType(coder, &item->elementType_));
+ MOZ_TRY(CodePod(coder, &item->isMutable_));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeTypeDef(Coder<mode>& coder, CoderArg<mode, TypeDef> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::TypeDef, 376);
+ // TypeDef is a tagged union that begins with kind = None. This implies that
+ // we must manually initialize the variant that we decode.
+ if constexpr (mode == MODE_DECODE) {
+ MOZ_RELEASE_ASSERT(item->kind_ == TypeDefKind::None);
+ }
+ MOZ_TRY(CodePod(coder, &item->kind_));
+ switch (item->kind_) {
+ case TypeDefKind::Struct: {
+ if constexpr (mode == MODE_DECODE) {
+ new (&item->structType_) StructType();
+ }
+ MOZ_TRY(CodeStructType(coder, &item->structType_));
+ break;
+ }
+ case TypeDefKind::Func: {
+ if constexpr (mode == MODE_DECODE) {
+ new (&item->funcType_) FuncType();
+ }
+ MOZ_TRY(CodeFuncType(coder, &item->funcType_));
+ break;
+ }
+ case TypeDefKind::Array: {
+ if constexpr (mode == MODE_DECODE) {
+ new (&item->arrayType_) ArrayType();
+ }
+ MOZ_TRY(CodeArrayType(coder, &item->arrayType_));
+ break;
+ }
+ case TypeDefKind::None: {
+ break;
+ }
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeTypeContext(Coder<mode>& coder,
+ CoderArg<mode, TypeContext> item) {
+ if constexpr (mode == MODE_DECODE) {
+ // Decoding type definitions needs to reference the type context of the
+ // module
+ MOZ_ASSERT(!coder.types_);
+ coder.types_ = item;
+
+ // Decode the number of recursion groups in the module
+ uint32_t numRecGroups;
+ MOZ_TRY(CodePod(coder, &numRecGroups));
+
+ // Decode each recursion group
+ for (uint32_t recGroupIndex = 0; recGroupIndex < numRecGroups;
+ recGroupIndex++) {
+ // Decode the number of types in the recursion group
+ uint32_t numTypes;
+ MOZ_TRY(CodePod(coder, &numTypes));
+
+ MutableRecGroup recGroup = item->startRecGroup(numTypes);
+ if (!recGroup) {
+ return Err(OutOfMemory());
+ }
+
+ // Decode the type definitions
+ for (uint32_t groupTypeIndex = 0; groupTypeIndex < numTypes;
+ groupTypeIndex++) {
+ MOZ_TRY(CodeTypeDef(coder, &recGroup->type(groupTypeIndex)));
+ }
+
+ // Finish the recursion group
+ if (!item->endRecGroup()) {
+ return Err(OutOfMemory());
+ }
+ }
+ } else {
+ // Encode the number of recursion groups in the module
+ uint32_t numRecGroups = item->groups().length();
+ MOZ_TRY(CodePod(coder, &numRecGroups));
+
+ // Encode each recursion group
+ for (uint32_t groupIndex = 0; groupIndex < numRecGroups; groupIndex++) {
+ SharedRecGroup group = item->groups()[groupIndex];
+
+ // Encode the number of types in the recursion group
+ uint32_t numTypes = group->numTypes();
+ MOZ_TRY(CodePod(coder, &numTypes));
+
+ // Encode the type definitions
+ for (uint32_t i = 0; i < numTypes; i++) {
+ MOZ_TRY(CodeTypeDef(coder, &group->type(i)));
+ }
+ }
+ }
+ return Ok();
+}
+
+// WasmModuleTypes.h
+
+template <CoderMode mode>
+CoderResult CodeImport(Coder<mode>& coder, CoderArg<mode, Import> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Import, 88);
+ MOZ_TRY(CodeCacheableName(coder, &item->module));
+ MOZ_TRY(CodeCacheableName(coder, &item->field));
+ MOZ_TRY(CodePod(coder, &item->kind));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeExport(Coder<mode>& coder, CoderArg<mode, Export> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Export, 48);
+ MOZ_TRY(CodeCacheableName(coder, &item->fieldName_));
+ MOZ_TRY(CodePod(coder, &item->pod));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeGlobalDesc(Coder<mode>& coder,
+ CoderArg<mode, GlobalDesc> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::GlobalDesc, 104);
+ MOZ_TRY(CodePod(coder, &item->kind_));
+ MOZ_TRY(CodeInitExpr(coder, &item->initial_));
+ MOZ_TRY(CodePod(coder, &item->offset_));
+ MOZ_TRY(CodePod(coder, &item->isMutable_));
+ MOZ_TRY(CodePod(coder, &item->isWasm_));
+ MOZ_TRY(CodePod(coder, &item->isExport_));
+ MOZ_TRY(CodePod(coder, &item->importIndex_));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeTagType(Coder<mode>& coder, CoderArg<mode, TagType> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::TagType, 232);
+ MOZ_TRY(
+ (CodeVector<mode, ValType, &CodeValType<mode>>(coder, &item->argTypes_)));
+ MOZ_TRY(CodePodVector(coder, &item->argOffsets_));
+ MOZ_TRY(CodePod(coder, &item->size_));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeTagDesc(Coder<mode>& coder, CoderArg<mode, TagDesc> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::TagDesc, 24);
+ MOZ_TRY(CodePod(coder, &item->kind));
+ MOZ_TRY((
+ CodeRefPtr<mode, const TagType, &CodeTagType<mode>>(coder, &item->type)));
+ MOZ_TRY(CodePod(coder, &item->isExport));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeElemSegment(Coder<mode>& coder,
+ CoderArg<mode, ElemSegment> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::ElemSegment, 184);
+ MOZ_TRY(CodePod(coder, &item->kind));
+ MOZ_TRY(CodePod(coder, &item->tableIndex));
+ MOZ_TRY(CodeRefType(coder, &item->elemType));
+ MOZ_TRY((CodeMaybe<mode, InitExpr, &CodeInitExpr<mode>>(
+ coder, &item->offsetIfActive)));
+ MOZ_TRY(CodePodVector(coder, &item->elemFuncIndices));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeDataSegment(Coder<mode>& coder,
+ CoderArg<mode, DataSegment> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::DataSegment, 136);
+ MOZ_TRY((CodeMaybe<mode, InitExpr, &CodeInitExpr<mode>>(
+ coder, &item->offsetIfActive)));
+ MOZ_TRY(CodePodVector(coder, &item->bytes));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeCustomSection(Coder<mode>& coder,
+ CoderArg<mode, CustomSection> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CustomSection, 48);
+ MOZ_TRY(CodePodVector(coder, &item->name));
+ MOZ_TRY((CodeRefPtr<mode, const ShareableBytes, &CodeShareableBytes<mode>>(
+ coder, &item->payload)));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeTableDesc(Coder<mode>& coder, CoderArg<mode, TableDesc> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::TableDesc, 112);
+ MOZ_TRY(CodeRefType(coder, &item->elemType));
+ MOZ_TRY(CodePod(coder, &item->isImported));
+ MOZ_TRY(CodePod(coder, &item->isExported));
+ MOZ_TRY(CodePod(coder, &item->isAsmJS));
+ MOZ_TRY(CodePod(coder, &item->initialLength));
+ MOZ_TRY(CodePod(coder, &item->maximumLength));
+ MOZ_TRY(
+ (CodeMaybe<mode, InitExpr, &CodeInitExpr<mode>>(coder, &item->initExpr)));
+ return Ok();
+}
+
+// WasmCodegenTypes.h
+
+template <CoderMode mode>
+CoderResult CodeTrapSiteVectorArray(Coder<mode>& coder,
+ CoderArg<mode, TrapSiteVectorArray> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::TrapSiteVectorArray, 520);
+ for (Trap trap : mozilla::MakeEnumeratedRange(Trap::Limit)) {
+ MOZ_TRY(CodePodVector(coder, &(*item)[trap]));
+ }
+ return Ok();
+}
+
+// WasmGC.h
+
+CoderResult CodeStackMap(Coder<MODE_DECODE>& coder,
+ CoderArg<MODE_DECODE, wasm::StackMap*> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMap, 12);
+ // Decode the stack map header
+ StackMapHeader header;
+ MOZ_TRY(CodePod(coder, &header));
+
+ // Allocate a stack map for the header
+ StackMap* map = StackMap::create(header);
+ if (!map) {
+ return Err(OutOfMemory());
+ }
+
+ // Decode the bitmap into the stackmap
+ MOZ_TRY(coder.readBytes(map->rawBitmap(), map->rawBitmapLengthInBytes()));
+
+ *item = map;
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeStackMap(Coder<mode>& coder,
+ CoderArg<mode, wasm::StackMap> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMap, 12);
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+
+ // Encode the stackmap header
+ MOZ_TRY(CodePod(coder, &item->header));
+
+ // Encode the stackmap bitmap
+ MOZ_TRY(coder.writeBytes(item->rawBitmap(), item->rawBitmapLengthInBytes()));
+
+ return Ok();
+}
+
+static uint32_t ComputeCodeOffset(const uint8_t* codeStart,
+ const uint8_t* codePtr) {
+ MOZ_RELEASE_ASSERT(codePtr >= codeStart);
+#ifdef JS_64BIT
+ MOZ_RELEASE_ASSERT(codePtr < codeStart + UINT32_MAX);
+#endif
+ return (uint32_t)(codePtr - codeStart);
+}
+
+CoderResult CodeStackMaps(Coder<MODE_DECODE>& coder,
+ CoderArg<MODE_DECODE, wasm::StackMaps> item,
+ const uint8_t* codeStart) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMaps, 48);
+  // Decode the number of stack maps
+ size_t length;
+ MOZ_TRY(CodePod(coder, &length));
+
+ for (size_t i = 0; i < length; i++) {
+ // Decode the offset relative to codeStart
+ uint32_t codeOffset;
+ MOZ_TRY(CodePod(coder, &codeOffset));
+
+ // Decode the stack map
+ StackMap* map;
+ MOZ_TRY(CodeStackMap(coder, &map));
+
+ // Add it to the map
+ const uint8_t* nextInsnAddr = codeStart + codeOffset;
+ if (!item->add(nextInsnAddr, map)) {
+ return Err(OutOfMemory());
+ }
+ }
+
+ // Finish the maps, asserting they are sorted
+ item->finishAlreadySorted();
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeStackMaps(Coder<mode>& coder,
+ CoderArg<mode, wasm::StackMaps> item,
+ const uint8_t* codeStart) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::StackMaps, 48);
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+
+  // Encode the number of stack maps
+ size_t length = item->length();
+ MOZ_TRY(CodePod(coder, &length));
+
+ for (size_t i = 0; i < length; i++) {
+ StackMaps::Maplet maplet = item->get(i);
+ uint32_t codeOffset = ComputeCodeOffset(codeStart, maplet.nextInsnAddr);
+
+ // Encode the offset relative to codeStart
+ MOZ_TRY(CodePod(coder, &codeOffset));
+
+ // Encode the stack map
+ MOZ_TRY(CodeStackMap(coder, maplet.map));
+ }
+ return Ok();
+}
+
+// WasmCode.h
+
+template <CoderMode mode>
+CoderResult CodeSymbolicLinkArray(
+ Coder<mode>& coder,
+ CoderArg<mode, wasm::LinkData::SymbolicLinkArray> item) {
+ for (SymbolicAddress address :
+ mozilla::MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ MOZ_TRY(CodePodVector(coder, &(*item)[address]));
+ }
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeLinkData(Coder<mode>& coder,
+ CoderArg<mode, wasm::LinkData> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::LinkData, 7608);
+ if constexpr (mode == MODE_ENCODE) {
+ MOZ_ASSERT(item->tier == Tier::Serialized);
+ }
+ MOZ_TRY(CodePod(coder, &item->pod()));
+ MOZ_TRY(CodePodVector(coder, &item->internalLinks));
+ MOZ_TRY(CodeSymbolicLinkArray(coder, &item->symbolicLinks));
+ return Ok();
+}
+
+CoderResult CodeModuleSegment(Coder<MODE_DECODE>& coder,
+ wasm::UniqueModuleSegment* item,
+ const wasm::LinkData& linkData) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::ModuleSegment, 48);
+ // Assert we're decoding a ModuleSegment
+ MOZ_TRY(Magic(coder, Marker::ModuleSegment));
+
+ // Decode the code bytes length
+ size_t length;
+ MOZ_TRY(CodePod(coder, &length));
+
+ // Allocate the code bytes
+ UniqueCodeBytes bytes = AllocateCodeBytes(length);
+ if (!bytes) {
+ return Err(OutOfMemory());
+ }
+
+ // Decode the code bytes
+ MOZ_TRY(coder.readBytes(bytes.get(), length));
+
+ // Initialize the ModuleSegment
+ *item = js::MakeUnique<ModuleSegment>(Tier::Serialized, std::move(bytes),
+ length, linkData);
+ if (!*item) {
+ return Err(OutOfMemory());
+ }
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeModuleSegment(Coder<mode>& coder,
+ CoderArg<mode, wasm::UniqueModuleSegment> item,
+ const wasm::LinkData& linkData) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::ModuleSegment, 48);
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+ MOZ_ASSERT((*item)->tier() == Tier::Serialized);
+
+ // Mark that we're encoding a ModuleSegment
+ MOZ_TRY(Magic(coder, Marker::ModuleSegment));
+
+ // Encode the length
+ size_t length = (*item)->length();
+ MOZ_TRY(CodePod(coder, &length));
+
+ if constexpr (mode == MODE_SIZE) {
+ // Just calculate the length of bytes written
+ MOZ_TRY(coder.writeBytes((*item)->base(), length));
+ } else {
+ // Get the start of where the code bytes will be written
+ uint8_t* serializedBase = coder.buffer_;
+
+ // Write the code bytes
+ MOZ_TRY(coder.writeBytes((*item)->base(), length));
+
+ // Unlink the code bytes written to the buffer
+ StaticallyUnlink(serializedBase, linkData);
+ }
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeMetadataTier(Coder<mode>& coder,
+ CoderArg<mode, wasm::MetadataTier> item,
+ const uint8_t* codeStart) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::MetadataTier, 856);
+ MOZ_TRY(Magic(coder, Marker::MetadataTier));
+ MOZ_TRY(CodePodVector(coder, &item->funcToCodeRange));
+ MOZ_TRY(CodePodVector(coder, &item->codeRanges));
+ MOZ_TRY(CodePodVector(coder, &item->callSites));
+ MOZ_TRY(CodeTrapSiteVectorArray(coder, &item->trapSites));
+ MOZ_TRY(CodePodVector(coder, &item->funcImports));
+ MOZ_TRY(CodePodVector(coder, &item->funcExports));
+ MOZ_TRY(CodeStackMaps(coder, &item->stackMaps, codeStart));
+ MOZ_TRY(CodePodVector(coder, &item->tryNotes));
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeMetadata(Coder<mode>& coder,
+ CoderArg<mode, wasm::Metadata> item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Metadata, 408);
+ if constexpr (mode == MODE_ENCODE) {
+ // Serialization doesn't handle asm.js or debug enabled modules
+ MOZ_ASSERT(!item->debugEnabled && item->debugFuncTypeIndices.empty());
+ MOZ_ASSERT(!item->isAsmJS());
+ }
+
+ MOZ_TRY(Magic(coder, Marker::Metadata));
+ MOZ_TRY(CodePod(coder, &item->pod()));
+ MOZ_TRY((CodeRefPtr<mode, const TypeContext, &CodeTypeContext>(
+ coder, &item->types)));
+ MOZ_TRY((CodeVector<mode, GlobalDesc, &CodeGlobalDesc<mode>>(
+ coder, &item->globals)));
+ MOZ_TRY((
+ CodeVector<mode, TableDesc, &CodeTableDesc<mode>>(coder, &item->tables)));
+ MOZ_TRY((CodeVector<mode, TagDesc, &CodeTagDesc<mode>>(coder, &item->tags)));
+ MOZ_TRY(CodePod(coder, &item->moduleName));
+ MOZ_TRY(CodePodVector(coder, &item->funcNames));
+ MOZ_TRY(CodeCacheableChars(coder, &item->filename));
+ MOZ_TRY(CodeCacheableChars(coder, &item->sourceMapURL));
+
+ if constexpr (mode == MODE_DECODE) {
+ // Initialize debugging state to disabled
+ item->debugEnabled = false;
+ item->debugFuncTypeIndices.clear();
+ }
+
+ return Ok();
+}
+
+CoderResult CodeCodeTier(Coder<MODE_DECODE>& coder, wasm::UniqueCodeTier* item,
+ const wasm::LinkData& linkData) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeTier, 248);
+ UniqueMetadataTier metadata;
+ UniqueModuleSegment segment;
+ MOZ_TRY(Magic(coder, Marker::CodeTier));
+ MOZ_TRY(CodeModuleSegment(coder, &segment, linkData));
+ MOZ_TRY((CodeUniquePtr<MODE_DECODE, MetadataTier>(
+ coder, &metadata, &CodeMetadataTier<MODE_DECODE>, segment->base())));
+ *item = js::MakeUnique<CodeTier>(std::move(metadata), std::move(segment));
+ if (!*item) {
+ return Err(OutOfMemory());
+ }
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeCodeTier(Coder<mode>& coder,
+ CoderArg<mode, wasm::CodeTier> item,
+ const wasm::LinkData& linkData) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::CodeTier, 248);
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+ MOZ_TRY(Magic(coder, Marker::CodeTier));
+ MOZ_TRY(CodeModuleSegment(coder, &item->segment_, linkData));
+ MOZ_TRY((CodeUniquePtr<mode, MetadataTier>(coder, &item->metadata_,
+ &CodeMetadataTier<mode>,
+ item->segment_->base())));
+ return Ok();
+}
+
+CoderResult CodeSharedCode(Coder<MODE_DECODE>& coder, wasm::SharedCode* item,
+ const wasm::LinkData& linkData,
+ const wasm::CustomSectionVector& customSections) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Code, 192);
+ MutableMetadata metadata;
+ UniqueCodeTier codeTier;
+ MOZ_TRY((CodeRefPtr<MODE_DECODE, Metadata, &CodeMetadata>(coder, &metadata)));
+ MOZ_TRY(CodeCodeTier(coder, &codeTier, linkData));
+
+ // Initialize metadata's name payload from the custom section
+ if (metadata->nameCustomSectionIndex) {
+ metadata->namePayload =
+ customSections[*metadata->nameCustomSectionIndex].payload;
+ } else {
+ MOZ_RELEASE_ASSERT(!metadata->moduleName);
+ MOZ_RELEASE_ASSERT(metadata->funcNames.empty());
+ }
+
+ // Initialize the jump tables
+ JumpTables jumpTables;
+ if (!jumpTables.init(CompileMode::Once, codeTier->segment(),
+ codeTier->metadata().codeRanges)) {
+ return Err(OutOfMemory());
+ }
+
+ // Create and initialize the code
+ MutableCode code =
+ js_new<Code>(std::move(codeTier), *metadata, std::move(jumpTables));
+ if (!code || !code->initialize(linkData)) {
+ return Err(OutOfMemory());
+ }
+ *item = code;
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeSharedCode(Coder<mode>& coder,
+ CoderArg<mode, wasm::SharedCode> item,
+ const wasm::LinkData& linkData) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Code, 192);
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+ MOZ_TRY((CodeRefPtr<mode, const Metadata, &CodeMetadata>(
+ coder, &(*item)->metadata_)));
+ MOZ_TRY(CodeCodeTier(coder, &(*item)->codeTier(Tier::Serialized), linkData));
+ return Ok();
+}
+
+// WasmModule.h
+
+CoderResult CodeModule(Coder<MODE_DECODE>& coder, MutableModule* item) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Module, 256);
+ JS::BuildIdCharVector currentBuildId;
+ if (!GetOptimizedEncodingBuildId(&currentBuildId)) {
+ return Err(OutOfMemory());
+ }
+ JS::BuildIdCharVector deserializedBuildId;
+ MOZ_TRY(CodePodVector(coder, &deserializedBuildId));
+
+ MOZ_RELEASE_ASSERT(EqualContainers(currentBuildId, deserializedBuildId));
+
+ LinkData linkData(Tier::Serialized);
+ MOZ_TRY(Magic(coder, Marker::LinkData));
+ MOZ_TRY(CodeLinkData(coder, &linkData));
+
+ ImportVector imports;
+ MOZ_TRY(Magic(coder, Marker::Imports));
+ MOZ_TRY((CodeVector<MODE_DECODE, Import, &CodeImport<MODE_DECODE>>(
+ coder, &imports)));
+
+ ExportVector exports;
+ MOZ_TRY(Magic(coder, Marker::Exports));
+ MOZ_TRY((CodeVector<MODE_DECODE, Export, &CodeExport<MODE_DECODE>>(
+ coder, &exports)));
+
+ DataSegmentVector dataSegments;
+ MOZ_TRY(Magic(coder, Marker::DataSegments));
+ MOZ_TRY((CodeVector<MODE_DECODE, SharedDataSegment,
+ &CodeRefPtr<MODE_DECODE, const DataSegment,
+ &CodeDataSegment<MODE_DECODE>>>(
+ coder, &dataSegments)));
+
+ ElemSegmentVector elemSegments;
+ MOZ_TRY(Magic(coder, Marker::ElemSegments));
+ MOZ_TRY((CodeVector<MODE_DECODE, SharedElemSegment,
+ &CodeRefPtr<MODE_DECODE, const ElemSegment,
+ &CodeElemSegment<MODE_DECODE>>>(
+ coder, &elemSegments)));
+
+ CustomSectionVector customSections;
+ MOZ_TRY(Magic(coder, Marker::CustomSections));
+ MOZ_TRY(
+ (CodeVector<MODE_DECODE, CustomSection, &CodeCustomSection<MODE_DECODE>>(
+ coder, &customSections)));
+
+ SharedCode code;
+ MOZ_TRY(Magic(coder, Marker::Code));
+ MOZ_TRY(CodeSharedCode(coder, &code, linkData, customSections));
+
+ *item = js_new<Module>(*code, std::move(imports), std::move(exports),
+ std::move(dataSegments), std::move(elemSegments),
+ std::move(customSections), nullptr,
+ /* loggingDeserialized = */ true);
+ return Ok();
+}
+
+template <CoderMode mode>
+CoderResult CodeModule(Coder<mode>& coder, CoderArg<mode, Module> item,
+ const wasm::LinkData& linkData) {
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::Module, 256);
+ STATIC_ASSERT_ENCODING_OR_SIZING;
+ MOZ_RELEASE_ASSERT(!item->metadata().debugEnabled);
+ MOZ_RELEASE_ASSERT(item->code_->hasTier(Tier::Serialized));
+
+ JS::BuildIdCharVector currentBuildId;
+ if (!GetOptimizedEncodingBuildId(&currentBuildId)) {
+ return Err(OutOfMemory());
+ }
+ MOZ_TRY(CodePodVector(coder, &currentBuildId));
+ MOZ_TRY(Magic(coder, Marker::LinkData));
+ MOZ_TRY(CodeLinkData(coder, &linkData));
+ MOZ_TRY(Magic(coder, Marker::Imports));
+ MOZ_TRY(
+ (CodeVector<mode, Import, &CodeImport<mode>>(coder, &item->imports_)));
+ MOZ_TRY(Magic(coder, Marker::Exports));
+ MOZ_TRY(
+ (CodeVector<mode, Export, &CodeExport<mode>>(coder, &item->exports_)));
+ MOZ_TRY(Magic(coder, Marker::DataSegments));
+ MOZ_TRY(
+ (CodeVector<mode, SharedDataSegment,
+ &CodeRefPtr<mode, const DataSegment, CodeDataSegment<mode>>>(
+ coder, &item->dataSegments_)));
+ MOZ_TRY(Magic(coder, Marker::ElemSegments));
+ MOZ_TRY(
+ (CodeVector<mode, SharedElemSegment,
+ &CodeRefPtr<mode, const ElemSegment, CodeElemSegment<mode>>>(
+ coder, &item->elemSegments_)));
+ MOZ_TRY(Magic(coder, Marker::CustomSections));
+ MOZ_TRY((CodeVector<mode, CustomSection, &CodeCustomSection<mode>>(
+ coder, &item->customSections_)));
+ MOZ_TRY(Magic(coder, Marker::Code));
+ MOZ_TRY(CodeSharedCode(coder, &item->code_, linkData));
+ return Ok();
+}
+
+} // namespace wasm
+} // namespace js
+
+static bool GetSerializedSize(const Module& module, const LinkData& linkData,
+ size_t* size) {
+ Coder<MODE_SIZE> coder(module.metadata().types.get());
+ auto result = CodeModule(coder, &module, linkData);
+ if (result.isErr()) {
+ return false;
+ }
+ *size = coder.size_.value();
+ return true;
+}
+
+bool Module::serialize(const LinkData& linkData, Bytes* bytes) const {
+ MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
+ MOZ_RELEASE_ASSERT(code_->hasTier(Tier::Serialized));
+
+ size_t serializedSize;
+ if (!GetSerializedSize(*this, linkData, &serializedSize)) {
+ // An error is an overflow, return false
+ return false;
+ }
+
+ // Try to allocate the destination buffer
+ if (!bytes->resizeUninitialized(serializedSize)) {
+ return false;
+ }
+
+ Coder<MODE_ENCODE> coder(metadata().types.get(), bytes->begin(),
+ serializedSize);
+ CoderResult result = CodeModule(coder, this, linkData);
+ if (result.isErr()) {
+ // An error is an OOM, return false
+ return false;
+ }
+ // Every byte is accounted for
+ MOZ_RELEASE_ASSERT(coder.buffer_ == coder.end_);
+ return true;
+}
+
+/* static */
+MutableModule Module::deserialize(const uint8_t* begin, size_t size) {
+ Coder<MODE_DECODE> coder(begin, size);
+ MutableModule module;
+ CoderResult result = CodeModule(coder, &module);
+ if (result.isErr()) {
+ // An error is an OOM, return nullptr
+ return nullptr;
+ }
+ // Every byte is accounted for
+ MOZ_RELEASE_ASSERT(coder.buffer_ == coder.end_);
+ return module;
+}
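+
+// A round-trip sketch (illustrative only; obtaining the LinkData and error
+// handling are elided):
+//
+//   Bytes bytes;
+//   if (module->serialize(linkData, &bytes)) {
+//     MutableModule copy =
+//         Module::deserialize(bytes.begin(), bytes.length());
+//   }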
+
+void Module::initGCMallocBytesExcludingCode() {
+ // The size doesn't have to be exact so use the serialization framework to
+ // calculate a value. We consume all errors, as they can only be overflow and
+ // can be ignored until the end.
+ constexpr CoderMode MODE = MODE_SIZE;
+ Coder<MODE> coder(metadata().types.get());
+ (void)CodeVector<MODE, Import, &CodeImport<MODE>>(coder, &imports_);
+ (void)CodeVector<MODE, Export, &CodeExport<MODE>>(coder, &exports_);
+ (void)CodeVector<MODE, SharedDataSegment,
+ &CodeRefPtr<MODE, const DataSegment, CodeDataSegment<MODE>>>(
+ coder, &dataSegments_);
+ (void)CodeVector<MODE, SharedElemSegment,
+ &CodeRefPtr<MODE, const ElemSegment, CodeElemSegment<MODE>>>(
+ coder, &elemSegments_);
+ (void)CodeVector<MODE, CustomSection, &CodeCustomSection<MODE>>(
+ coder, &customSections_);
+
+  // Overflow really shouldn't be possible here, but handle it anyway.
+ size_t serializedSize = coder.size_.isValid() ? coder.size_.value() : 0;
+ gcMallocBytesExcludingCode_ = sizeof(*this) + serializedSize;
+}
diff --git a/js/src/wasm/WasmSerialize.h b/js/src/wasm/WasmSerialize.h
new file mode 100644
index 0000000000..918e50b42d
--- /dev/null
+++ b/js/src/wasm/WasmSerialize.h
@@ -0,0 +1,296 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2022 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_serialize_h
+#define wasm_serialize_h
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/MacroForEach.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Result.h"
+
+#include <cstdint>
+#include <cstring>
+#include <type_traits>
+
+namespace js {
+namespace wasm {
+
+class TypeContext;
+
+// [SMDOC] "Module serialization"
+//
+// A wasm::Module may be serialized to a binary format that allows for quick
+// reloads of a previously compiled wasm binary.
+//
+// The binary format is optimized for encoding/decoding speed, not size. There
+// is no formal specification, and no backwards/forwards compatibility
+// guarantees. The prelude of the encoding contains a 'build ID' which must be
+// used when reading from a cache entry to determine if it is valid.
+//
+// Module serialization and deserialization are performed using templated
+// functions that allow for (imperfect) abstraction over whether we are decoding
+// or encoding the module. It can be viewed as a specialization of the visitor
+// pattern.
+//
+// Each module data structure is visited by a function parameterized by the
+// "mode", which may be one of:
+// 1. MODE_SIZE - We are computing the final encoding size, before encoding it
+// 2. MODE_ENCODE - We are actually encoding the module to bytes
+// 3. MODE_DECODE - We are decoding the module from bytes
+//
+// These functions are called "coding" functions, as they are generic to whether
+// we are "encoding" or "decoding". The verb "code" is used as the common
+// prefix.
+//
+// Each coding function takes the item being visited, along with a "Coder"
+// which contains the state needed for each mode. This is either a buffer span
+// or an accumulated length. The coding function either manipulates the Coder
+// directly or delegates to its field's coding functions.
+//
+// Leaf data types are usually just copied directly to and from memory using a
+// generic "CodePod" function. See the "cacheable POD" documentation in this
+// file for more information.
+//
+// Non-leaf data types need an explicit coding function. This function can
+// usually be completely generic to decoding/encoding, and delegate to the
+// coding functions for each field. Separate decoding/encoding functions may
+// be needed when decoding requires initialization logic, such as constructors.
+// In this case, it is critical that both functions agree on the fields to be
+// coded, and the order they are coded in.
+//
+// Coding functions are defined as free functions in "WasmSerialize.cpp". When
+// they require access to protected state in a type, they may use the
+// WASM_DECLARE_FRIEND_SERIALIZE macro.
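+//
+// For illustration only (Foo, podField, and podVector are hypothetical
+// names, not part of any real module type), a coding function for a struct
+// with one cacheable-POD field and one POD vector field would look like:
+//
+//   template <CoderMode mode>
+//   CoderResult CodeFoo(Coder<mode>& coder, CoderArg<mode, Foo> item) {
+//     MOZ_TRY(CodePod(coder, &item->podField));
+//     MOZ_TRY(CodePodVector(coder, &item->podVector));
+//     return Ok();
+//   }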
+
+// Signal an out of memory condition
+struct OutOfMemory {};
+
+// The result of serialization, either OK or OOM
+using CoderResult = mozilla::Result<mozilla::Ok, OutOfMemory>;
+
+// CoderMode parameterizes the coding functions
+enum CoderMode {
+ // We are computing the final size of the encoded buffer. This is a discrete
+ // pass that runs before encoding.
+ MODE_SIZE,
+ // We are encoding the module to bytes.
+ MODE_ENCODE,
+ // We are decoding the module from bytes.
+ MODE_DECODE,
+};
+
+// Coding functions take a different argument depending on which CoderMode
+// they are invoked with:
+// * MODE_SIZE - const T*
+// * MODE_ENCODE - const T*
+// * MODE_DECODE - T*
+//
+// The CoderArg<mode, T> type alias is used to acquire the proper type for
+// coding function arguments.
+template <CoderMode mode, typename V>
+struct CoderArgT;
+
+template <typename V>
+struct CoderArgT<MODE_SIZE, V> {
+ using T = const V*;
+};
+
+template <typename V>
+struct CoderArgT<MODE_DECODE, V> {
+ using T = V*;
+};
+
+template <typename V>
+struct CoderArgT<MODE_ENCODE, V> {
+ using T = const V*;
+};
+
+template <CoderMode mode, typename T>
+using CoderArg = typename CoderArgT<mode, T>::T;
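+
+// For example (illustrative only), the alias resolves as follows:
+//
+//   static_assert(
+//       std::is_same_v<CoderArg<MODE_DECODE, uint32_t>, uint32_t*>);
+//   static_assert(
+//       std::is_same_v<CoderArg<MODE_ENCODE, uint32_t>, const uint32_t*>);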
+
+// Coder is the state provided to all coding functions during module traversal.
+template <CoderMode mode>
+struct Coder;
+
+// A Coder<MODE_SIZE> computes the total encoded size of a module
+template <>
+struct Coder<MODE_SIZE> {
+ explicit Coder(const TypeContext* types) : types_(types), size_(0) {}
+
+ // The types of the module that we're going to encode. This is required in
+ // order to encode the original index of types that we encounter.
+ const TypeContext* types_;
+
+  // The current size of the buffer required to serialize this module.
+ mozilla::CheckedInt<size_t> size_;
+
+ // This function shares a signature with MODE_ENCODE to allow functions to be
+ // generic across MODE_SIZE/MODE_ENCODE, even though the src pointer is not
+ // needed for MODE_SIZE.
+ CoderResult writeBytes(const void* unusedSrc, size_t length);
+};
+
+// A Coder<MODE_ENCODE> holds the buffer being written to
+template <>
+struct Coder<MODE_ENCODE> {
+ Coder(const TypeContext* types, uint8_t* start, size_t length)
+ : types_(types), buffer_(start), end_(start + length) {}
+
+ // The types of the module that we're encoding. This is required in
+ // order to encode the original index of types that we encounter.
+ const TypeContext* types_;
+
+ // The current position in the buffer we're writing to.
+ uint8_t* buffer_;
+ // The end position in the buffer we're writing to.
+ const uint8_t* end_;
+
+ CoderResult writeBytes(const void* src, size_t length);
+};
+
+// A Coder<MODE_DECODE> holds the buffer being read from
+template <>
+struct Coder<MODE_DECODE> {
+ Coder(const uint8_t* start, size_t length)
+ : types_(nullptr), buffer_(start), end_(start + length) {}
+
+ // The types of the module that we're decoding. This is null until the types
+ // of this module are decoded.
+ const TypeContext* types_;
+
+ // The current position in the buffer we're reading from.
+ const uint8_t* buffer_;
+ // The end position in the buffer we're reading from.
+ const uint8_t* end_;
+
+ CoderResult readBytes(void* dest, size_t length);
+};
+
+// Macros to help types declare friendship with a coding function
+
+#define WASM_DECLARE_FRIEND_SERIALIZE(TYPE) \
+ template <CoderMode mode> \
+ friend CoderResult Code##TYPE(Coder<mode>&, CoderArg<mode, TYPE>);
+
+#define WASM_DECLARE_FRIEND_SERIALIZE_ARGS(TYPE, ARGS...) \
+ template <CoderMode mode> \
+ friend CoderResult Code##TYPE(Coder<mode>&, CoderArg<mode, TYPE>, ARGS);
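+
+// For example (Foo is hypothetical), a class whose coding function needs
+// access to private members would declare:
+//
+//   class Foo {
+//     uint32_t privateField_;
+//     WASM_DECLARE_FRIEND_SERIALIZE(Foo);
+//   };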
+
+// [SMDOC] "Cacheable POD"
+//
+// Module serialization relies on copying simple structs to and from the
+// cache format. We need a way to ensure that we only do this on types that are
+// "safe". We call this "cacheable POD". Note: this is not the same thing as
+// "POD" as that may contain pointers, which are not cacheable.
+//
+// We define cacheable POD (C-POD) recursively upon types:
+// 1. any integer type is C-POD
+// 2. any floating point type is C-POD
+// 3. any enum type is C-POD
+// 4. any mozilla::Maybe<T> with T: C-POD is C-POD
+// 5. any T[N] with T: C-POD is C-POD
+// 6. any union where all fields are C-POD is C-POD
+//  7. any struct that meets the following conditions is C-POD
+// * every field's type must be C-POD
+// * the parent type, if it exists, must also be C-POD
+// * there must be no virtual methods
+//
+// There is no combination of C++ type traits at this time that can
+// automatically verify these criteria, so we are rolling our own system.
+//
+// We define an "IsCacheablePod" type trait, with built-in rules for cases
+// (1-5). The complex cases (6-7) are handled using manual declaration and
+// checking macros that must be used on structs and unions considered to be
+// cacheable POD.
+//
+// See the following macros for details:
+// - WASM_DECLARE_CACHEABLE_POD
+// - WASM_CHECK_CACHEABLE_POD[_WITH_PARENT]
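+//
+// A typical declaration (FooPod is hypothetical) looks like:
+//
+//   struct FooPod {
+//     uint32_t length;
+//     bool flag;
+//     WASM_CHECK_CACHEABLE_POD(length, flag);
+//   };
+//   WASM_DECLARE_CACHEABLE_POD(FooPod);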
+
+// The IsCacheablePod type trait primary template. Contains the rules for
+// (cases 1-3).
+template <typename T>
+struct IsCacheablePod
+ : public std::conditional_t<std::is_arithmetic_v<T> || std::is_enum_v<T>,
+ std::true_type, std::false_type> {};
+
+// Partial specialization for (case 4).
+template <typename T>
+struct IsCacheablePod<mozilla::Maybe<T>>
+ : public std::conditional_t<IsCacheablePod<T>::value, std::true_type,
+ std::false_type> {};
+
+// Partial specialization for (case 5).
+template <typename T, size_t N>
+struct IsCacheablePod<T[N]>
+ : public std::conditional_t<IsCacheablePod<T>::value, std::true_type,
+ std::false_type> {};
+
+template <class T>
+inline constexpr bool is_cacheable_pod = IsCacheablePod<T>::value;
+
+// Checks that a derived class will not reuse the base type's tail padding
+// for its next field. It is used when a POD type serves as a base class.
+#define WASM_CHECK_CACHEABLE_POD_PADDING(Type) \
+ class __CHECK_PADING_##Type : public Type { \
+ public: \
+ char c; \
+ }; \
+ static_assert(sizeof(__CHECK_PADING_##Type) > sizeof(Type), \
+ #Type " will overlap with next field if inherited");
+
+// Declare the type 'Type' to be cacheable POD. The definition of the type must
+// contain a WASM_CHECK_CACHEABLE_POD[_WITH_PARENT] to ensure all fields of the
+// type are cacheable POD.
+#define WASM_DECLARE_CACHEABLE_POD(Type) \
+ static_assert(!std::is_polymorphic_v<Type>, \
+                #Type " must not have virtual methods");                   \
+ } /* namespace wasm */ \
+ } /* namespace js */ \
+ template <> \
+ struct js::wasm::IsCacheablePod<js::wasm::Type> : public std::true_type {}; \
+ namespace js { \
+ namespace wasm {
+
+// Helper: check each field's type to be cacheable POD
+#define WASM_CHECK_CACHEABLE_POD_FIELD_(Field) \
+ static_assert(js::wasm::IsCacheablePod<decltype(Field)>::value, \
+ #Field " must be cacheable pod");
+
+// Check every field in a type definition to ensure they are cacheable POD.
+#define WASM_CHECK_CACHEABLE_POD(Fields...) \
+ MOZ_FOR_EACH(WASM_CHECK_CACHEABLE_POD_FIELD_, (), (Fields))
+
+// Check every field in a type definition to ensure they are cacheable POD, and
+// check that the parent class is also cacheable POD.
+#define WASM_CHECK_CACHEABLE_POD_WITH_PARENT(Parent, Fields...) \
+ static_assert(js::wasm::IsCacheablePod<Parent>::value, \
+ #Parent " must be cacheable pod"); \
+ MOZ_FOR_EACH(WASM_CHECK_CACHEABLE_POD_FIELD_, (), (Fields))
+
+// Allow fields that are not cacheable POD but are believed to be safe for
+// serialization due to some justification.
+#define WASM_ALLOW_NON_CACHEABLE_POD_FIELD(Field, Reason) \
+ static_assert(!js::wasm::IsCacheablePod<decltype(Field)>::value, \
+ #Field " is not cacheable due to " Reason);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_serialize_h
diff --git a/js/src/wasm/WasmShareable.h b/js/src/wasm/WasmShareable.h
new file mode 100644
index 0000000000..a2df641c3f
--- /dev/null
+++ b/js/src/wasm/WasmShareable.h
@@ -0,0 +1,80 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_shareable_h
+#define wasm_shareable_h
+
+#include "mozilla/RefPtr.h"
+#include "js/RefCounted.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace wasm {
+
+using mozilla::MallocSizeOf;
+
+// This reusable base class factors out the logic for a resource that is shared
+// by multiple instances/modules but should only be counted once when computing
+// about:memory stats.
+
+template <class T>
+using SeenSet = HashSet<const T*, DefaultHasher<const T*>, SystemAllocPolicy>;
+
+template <class T>
+struct ShareableBase : AtomicRefCounted<T> {
+ using SeenSet = wasm::SeenSet<T>;
+
+ size_t sizeOfIncludingThisIfNotSeen(MallocSizeOf mallocSizeOf,
+ SeenSet* seen) const {
+ const T* self = static_cast<const T*>(this);
+ typename SeenSet::AddPtr p = seen->lookupForAdd(self);
+ if (p) {
+ return 0;
+ }
+ bool ok = seen->add(p, self);
+ (void)ok; // oh well
+ return mallocSizeOf(self) + self->sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+// ShareableBytes is a reference-counted Vector of bytes.
+
+struct ShareableBytes : ShareableBase<ShareableBytes> {
+ // Vector is 'final', so instead make Vector a member and add boilerplate.
+ Bytes bytes;
+
+ ShareableBytes() = default;
+ explicit ShareableBytes(Bytes&& bytes) : bytes(std::move(bytes)) {}
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return bytes.sizeOfExcludingThis(mallocSizeOf);
+ }
+ const uint8_t* begin() const { return bytes.begin(); }
+ const uint8_t* end() const { return bytes.end(); }
+ size_t length() const { return bytes.length(); }
+ bool append(const uint8_t* start, size_t len) {
+ return bytes.append(start, len);
+ }
+};
+
+using MutableBytes = RefPtr<ShareableBytes>;
+using SharedBytes = RefPtr<const ShareableBytes>;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_shareable_h
diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp
new file mode 100644
index 0000000000..86e11df7f0
--- /dev/null
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -0,0 +1,1084 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmSignalHandlers.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/ThreadLocal.h"
+
+#include "threading/Thread.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmInstance.h"
+
+#if defined(XP_WIN)
+# include <winternl.h> // must include before util/WindowsWrapper.h's `#undef`s
+# include "util/WindowsWrapper.h"
+#elif defined(XP_DARWIN)
+# include <mach/exc.h>
+# include <mach/mach.h>
+#else
+# include <signal.h>
+#endif
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+
+#if !defined(JS_CODEGEN_NONE)
+
+// =============================================================================
+// The following pile of macros and includes defines the ToRegisterState() and
+// the ContextTo{PC,FP,SP,LR}() functions from the (highly) platform-specific
+// CONTEXT struct which is provided to the signal handler.
+// =============================================================================
+
+# if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# include <sys/ucontext.h> // for ucontext_t, mcontext_t
+# endif
+
+# if defined(__x86_64__)
+# if defined(__DragonFly__)
+# include <machine/npx.h> // for union savefpu
+# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
+# include <machine/fpu.h> // for struct savefpu/fxsave64
+# endif
+# endif
+
+# if defined(XP_WIN)
+# define EIP_sig(p) ((p)->Eip)
+# define EBP_sig(p) ((p)->Ebp)
+# define ESP_sig(p) ((p)->Esp)
+# define RIP_sig(p) ((p)->Rip)
+# define RSP_sig(p) ((p)->Rsp)
+# define RBP_sig(p) ((p)->Rbp)
+# define R11_sig(p) ((p)->R11)
+# define R13_sig(p) ((p)->R13)
+# define R14_sig(p) ((p)->R14)
+# define R15_sig(p) ((p)->R15)
+# define EPC_sig(p) ((p)->Pc)
+# define RFP_sig(p) ((p)->Fp)
+# define R31_sig(p) ((p)->Sp)
+# define RLR_sig(p) ((p)->Lr)
+# elif defined(__OpenBSD__)
+# define EIP_sig(p) ((p)->sc_eip)
+# define EBP_sig(p) ((p)->sc_ebp)
+# define ESP_sig(p) ((p)->sc_esp)
+# define RIP_sig(p) ((p)->sc_rip)
+# define RSP_sig(p) ((p)->sc_rsp)
+# define RBP_sig(p) ((p)->sc_rbp)
+# define R11_sig(p) ((p)->sc_r11)
+# if defined(__arm__)
+# define R13_sig(p) ((p)->sc_usr_sp)
+# define R14_sig(p) ((p)->sc_usr_lr)
+# define R15_sig(p) ((p)->sc_pc)
+# else
+# define R13_sig(p) ((p)->sc_r13)
+# define R14_sig(p) ((p)->sc_r14)
+# define R15_sig(p) ((p)->sc_r15)
+# endif
+# if defined(__aarch64__)
+# define EPC_sig(p) ((p)->sc_elr)
+# define RFP_sig(p) ((p)->sc_x[29])
+# define RLR_sig(p) ((p)->sc_lr)
+# define R31_sig(p) ((p)->sc_sp)
+# endif
+# if defined(__mips__)
+# define EPC_sig(p) ((p)->sc_pc)
+# define RFP_sig(p) ((p)->sc_regs[30])
+# endif
+# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define R01_sig(p) ((p)->sc_frame.fixreg[1])
+# define R32_sig(p) ((p)->sc_frame.srr0)
+# endif
+# elif defined(__linux__) || defined(__sun)
+# if defined(__linux__)
+# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
+# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
+# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
+# else
+# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
+# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
+# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
+# endif
+# define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
+# define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
+# define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
+# if defined(__linux__) && defined(__arm__)
+# define R11_sig(p) ((p)->uc_mcontext.arm_fp)
+# define R13_sig(p) ((p)->uc_mcontext.arm_sp)
+# define R14_sig(p) ((p)->uc_mcontext.arm_lr)
+# define R15_sig(p) ((p)->uc_mcontext.arm_pc)
+# else
+# define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
+# define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
+# define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
+# endif
+# if defined(__linux__) && defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.pc)
+# define RFP_sig(p) ((p)->uc_mcontext.regs[29])
+# define RLR_sig(p) ((p)->uc_mcontext.regs[30])
+# define R31_sig(p) ((p)->uc_mcontext.sp)
+# endif
+# if defined(__linux__) && defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.pc)
+# define RFP_sig(p) ((p)->uc_mcontext.gregs[30])
+# define RSP_sig(p) ((p)->uc_mcontext.gregs[29])
+# define R31_sig(p) ((p)->uc_mcontext.gregs[31])
+# endif
+# if defined(__linux__) && (defined(__sparc__) && defined(__arch64__))
+# define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC])
+# define FP_sig(p) ((p)->uc_mcontext.mc_fp)
+# define SP_sig(p) ((p)->uc_mcontext.mc_i7)
+# endif
+# if defined(__linux__) && (defined(__ppc64__) || defined(__PPC64__) || \
+ defined(__ppc64le__) || defined(__PPC64LE__))
+# define R01_sig(p) ((p)->uc_mcontext.gp_regs[1])
+# define R32_sig(p) ((p)->uc_mcontext.gp_regs[32])
+# endif
+# if defined(__linux__) && defined(__loongarch__)
+# define EPC_sig(p) ((p)->uc_mcontext.__pc)
+# define RRA_sig(p) ((p)->uc_mcontext.__gregs[1])
+# define R03_sig(p) ((p)->uc_mcontext.__gregs[3])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[22])
+# endif
+# if defined(__linux__) && defined(__riscv)
+# define RPC_sig(p) ((p)->uc_mcontext.__gregs[REG_PC])
+# define RRA_sig(p) ((p)->uc_mcontext.__gregs[REG_RA])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[REG_S0])
+# define R02_sig(p) ((p)->uc_mcontext.__gregs[REG_SP])
+# endif
+# if defined(__sun__) && defined(__sparc__)
+# define PC_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
+# define FP_sig(p) ((p)->uc_mcontext.gregs[REG_FPRS])
+# define SP_sig(p) ((p)->uc_mcontext.gregs[REG_SP])
+# endif
+# elif defined(__NetBSD__)
+# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
+# define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
+# define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
+# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
+# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
+# define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
+# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
+# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
+# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
+# if defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
+# define RLR_sig(p) ((p)->uc_mcontext.__gregs[_REG_X30])
+# define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_SP])
+# endif
+# if defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
+# endif
+# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define R01_sig(p) ((p)->uc_mcontext.__gregs[_REG_R1])
+# define R32_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
+# endif
+# elif defined(__DragonFly__) || defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__)
+# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
+# define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
+# define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
+# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
+# define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
+# define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
+# if defined(__FreeBSD__) && defined(__arm__)
+# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
+# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
+# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
+# else
+# define R11_sig(p) ((p)->uc_mcontext.mc_r11)
+# define R13_sig(p) ((p)->uc_mcontext.mc_r13)
+# define R14_sig(p) ((p)->uc_mcontext.mc_r14)
+# define R15_sig(p) ((p)->uc_mcontext.mc_r15)
+# endif
+# if defined(__FreeBSD__) && defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_elr)
+# define RFP_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_x[29])
+# define RLR_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_lr)
+# define R31_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_sp)
+# endif
+# if defined(__FreeBSD__) && defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
+# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
+# endif
+# if defined(__FreeBSD__) && (defined(__ppc64__) || defined(__PPC64__) || \
+ defined(__ppc64le__) || defined(__PPC64LE__))
+# define R01_sig(p) ((p)->uc_mcontext.mc_gpr[1])
+# define R32_sig(p) ((p)->uc_mcontext.mc_srr0)
+# endif
+# elif defined(XP_DARWIN)
+# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
+# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
+# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
+# define RIP_sig(p) ((p)->thread.__rip)
+# define RBP_sig(p) ((p)->thread.__rbp)
+# define RSP_sig(p) ((p)->thread.__rsp)
+# define R11_sig(p) ((p)->thread.__r[11])
+# define R13_sig(p) ((p)->thread.__sp)
+# define R14_sig(p) ((p)->thread.__lr)
+# define R15_sig(p) ((p)->thread.__pc)
+# define EPC_sig(p) ((p)->thread.__pc)
+# define RFP_sig(p) ((p)->thread.__fp)
+# define R31_sig(p) ((p)->thread.__sp)
+# define RLR_sig(p) ((p)->thread.__lr)
+# else
+# error \
+ "Don't know how to read/write to the thread state via the mcontext_t."
+# endif
+
+# if defined(ANDROID)
+// Not all versions of the Android NDK define ucontext_t or mcontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See: https://chromiumcodereview.appspot.com/10829122/
+// See: http://code.google.com/p/android/issues/detail?id=34784
+# if !defined(__BIONIC_HAVE_UCONTEXT_T)
+# if defined(__arm__)
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+# if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+# include <asm/sigcontext.h>
+# endif
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used so don't define them here.
+} ucontext_t;
+
+# elif defined(__mips__)
+
+typedef struct {
+ uint32_t regmask;
+ uint32_t status;
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t acx;
+ uint32_t fpc_csr;
+ uint32_t fpc_eir;
+ uint32_t used_math;
+ uint32_t dsp;
+ uint64_t mdhi;
+ uint64_t mdlo;
+ uint32_t hi1;
+ uint32_t lo1;
+ uint32_t hi2;
+ uint32_t lo2;
+ uint32_t hi3;
+ uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used so don't define them here.
+} ucontext_t;
+
+# elif defined(__loongarch64)
+
+typedef struct {
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t fpc_csr;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used so don't define them here.
+} ucontext_t;
+
+# elif defined(__i386__)
+// x86 version for Android.
+typedef struct {
+ uint32_t gregs[19];
+ void* fpregs;
+ uint32_t oldmask;
+ uint32_t cr2;
+} mcontext_t;
+
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+  // Other fields are not used so don't define them here.
+} ucontext_t;
+enum { REG_EIP = 14 };
+# endif // defined(__i386__)
+# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
+# endif // defined(ANDROID)
+
+# if defined(XP_DARWIN)
+# if defined(__x86_64__)
+struct macos_x64_context {
+ x86_thread_state64_t thread;
+ x86_float_state64_t float_;
+};
+# define CONTEXT macos_x64_context
+# elif defined(__i386__)
+struct macos_x86_context {
+ x86_thread_state_t thread;
+ x86_float_state_t float_;
+};
+# define CONTEXT macos_x86_context
+# elif defined(__arm__)
+struct macos_arm_context {
+ arm_thread_state_t thread;
+ arm_neon_state_t float_;
+};
+# define CONTEXT macos_arm_context
+# elif defined(__aarch64__)
+struct macos_aarch64_context {
+ arm_thread_state64_t thread;
+ arm_neon_state64_t float_;
+};
+# define CONTEXT macos_aarch64_context
+# else
+# error Unsupported architecture
+# endif
+# elif !defined(XP_WIN)
+# define CONTEXT ucontext_t
+# endif
+
+# if defined(_M_X64) || defined(__x86_64__)
+# define PC_sig(p) RIP_sig(p)
+# define FP_sig(p) RBP_sig(p)
+# define SP_sig(p) RSP_sig(p)
+# elif defined(_M_IX86) || defined(__i386__)
+# define PC_sig(p) EIP_sig(p)
+# define FP_sig(p) EBP_sig(p)
+# define SP_sig(p) ESP_sig(p)
+# elif defined(__arm__)
+# define FP_sig(p) R11_sig(p)
+# define SP_sig(p) R13_sig(p)
+# define LR_sig(p) R14_sig(p)
+# define PC_sig(p) R15_sig(p)
+# elif defined(_M_ARM64) || defined(__aarch64__)
+# define PC_sig(p) EPC_sig(p)
+# define FP_sig(p) RFP_sig(p)
+# define SP_sig(p) R31_sig(p)
+# define LR_sig(p) RLR_sig(p)
+# elif defined(__mips__)
+# define PC_sig(p) EPC_sig(p)
+# define FP_sig(p) RFP_sig(p)
+# define SP_sig(p) RSP_sig(p)
+# define LR_sig(p) R31_sig(p)
+# elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define PC_sig(p) R32_sig(p)
+# define SP_sig(p) R01_sig(p)
+# define FP_sig(p) R01_sig(p)
+# elif defined(__loongarch__)
+# define PC_sig(p) EPC_sig(p)
+# define FP_sig(p) RFP_sig(p)
+# define SP_sig(p) R03_sig(p)
+# define LR_sig(p) RRA_sig(p)
+# elif defined(__riscv)
+# define PC_sig(p) RPC_sig(p)
+# define FP_sig(p) RFP_sig(p)
+# define SP_sig(p) R02_sig(p)
+# define LR_sig(p) RRA_sig(p)
+# endif
+
+static void SetContextPC(CONTEXT* context, uint8_t* pc) {
+# ifdef PC_sig
+ *reinterpret_cast<uint8_t**>(&PC_sig(context)) = pc;
+# else
+ MOZ_CRASH();
+# endif
+}
+
+static uint8_t* ContextToPC(CONTEXT* context) {
+# ifdef PC_sig
+ return reinterpret_cast<uint8_t*>(PC_sig(context));
+# else
+ MOZ_CRASH();
+# endif
+}
+
+static uint8_t* ContextToFP(CONTEXT* context) {
+# ifdef FP_sig
+ return reinterpret_cast<uint8_t*>(FP_sig(context));
+# else
+ MOZ_CRASH();
+# endif
+}
+
+static uint8_t* ContextToSP(CONTEXT* context) {
+# ifdef SP_sig
+ return reinterpret_cast<uint8_t*>(SP_sig(context));
+# else
+ MOZ_CRASH();
+# endif
+}
+
+# if defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ defined(__loongarch__) || defined(__riscv)
+static uint8_t* ContextToLR(CONTEXT* context) {
+# ifdef LR_sig
+ return reinterpret_cast<uint8_t*>(LR_sig(context));
+# else
+ MOZ_CRASH();
+# endif
+}
+# endif
+
+static JS::ProfilingFrameIterator::RegisterState ToRegisterState(
+ CONTEXT* context) {
+ JS::ProfilingFrameIterator::RegisterState state;
+ state.fp = ContextToFP(context);
+ state.pc = ContextToPC(context);
+ state.sp = ContextToSP(context);
+# if defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
+ defined(__loongarch__) || defined(__riscv)
+ state.lr = ContextToLR(context);
+# else
+ state.lr = (void*)UINTPTR_MAX;
+# endif
+ return state;
+}
+
+// =============================================================================
+// All signals/exceptions funnel down to this one trap-handling function which
+// tests whether the pc is in a wasm module and, if so, whether there is
+// actually a trap expected at this pc. These tests both avoid real bugs being
+// silently converted to wasm traps and provides the trapping wasm bytecode
+// silently converted to wasm traps and provide the trapping wasm bytecode
+//
+// Crashing inside wasm trap handling (due to a bug in trap handling or exposed
+// during trap handling) must be reported like a normal crash, not cause the
+// crash report to be lost. On Windows and non-Mach Unix, a crash during the
+// handler reenters the handler, possibly repeatedly until exhausting the stack,
+// and so we prevent recursion with the thread-local sAlreadyHandlingTrap. On
+// Mach, the wasm exception handler has its own thread and is installed only on
+// the thread-level debugging ports of JSRuntime threads, so a crash on the
+// exception handler thread will not recurse; it will bubble up to the
+// process-level debugging ports (where Breakpad is installed).
+// =============================================================================
+
+static MOZ_THREAD_LOCAL(bool) sAlreadyHandlingTrap;
+
+struct AutoHandlingTrap {
+ AutoHandlingTrap() {
+ MOZ_ASSERT(!sAlreadyHandlingTrap.get());
+ sAlreadyHandlingTrap.set(true);
+ }
+
+ ~AutoHandlingTrap() {
+ MOZ_ASSERT(sAlreadyHandlingTrap.get());
+ sAlreadyHandlingTrap.set(false);
+ }
+};
+
+[[nodiscard]] static bool HandleTrap(CONTEXT* context,
+ JSContext* assertCx = nullptr) {
+ MOZ_ASSERT(sAlreadyHandlingTrap.get());
+
+ uint8_t* pc = ContextToPC(context);
+ const CodeSegment* codeSegment = LookupCodeSegment(pc);
+ if (!codeSegment || !codeSegment->isModule()) {
+ return false;
+ }
+
+ const ModuleSegment& segment = *codeSegment->asModule();
+
+ Trap trap;
+ BytecodeOffset bytecode;
+ if (!segment.code().lookupTrap(pc, &trap, &bytecode)) {
+ return false;
+ }
+
+ // We have a safe, expected wasm trap, so fp is well-defined to be a Frame*.
+ // For the first sanity check, the Trap::IndirectCallBadSig special case is
+ // due to this trap occurring in the indirect call prologue, while fp points
+ // to the caller's Frame which can be in a different Module. In any case,
+ // though, the containing JSContext is the same.
+
+ auto* frame = reinterpret_cast<Frame*>(ContextToFP(context));
+ Instance* instance = GetNearestEffectiveInstance(frame);
+ MOZ_RELEASE_ASSERT(&instance->code() == &segment.code() ||
+ trap == Trap::IndirectCallBadSig);
+
+ JSContext* cx =
+ instance->realm()->runtimeFromAnyThread()->mainContextFromAnyThread();
+ MOZ_RELEASE_ASSERT(!assertCx || cx == assertCx);
+
+ // JitActivation::startWasmTrap() stores enough register state from the
+ // point of the trap to allow stack unwinding or resumption, both of which
+ // will call finishWasmTrap().
+ jit::JitActivation* activation = cx->activation()->asJit();
+ activation->startWasmTrap(trap, bytecode.offset(), ToRegisterState(context));
+ SetContextPC(context, segment.trapCode());
+ return true;
+}
+
+// =============================================================================
+// The following platform-specific handlers funnel all signals/exceptions into
+// the shared HandleTrap() above.
+// =============================================================================
+
+# if defined(XP_WIN)
+// Obtained empirically from thread_local codegen on x86/x64/arm64.
+// Compiled in all user binaries, so should be stable over time.
+static const unsigned sThreadLocalArrayPointerIndex = 11;
+
+static LONG WINAPI WasmTrapHandler(LPEXCEPTION_POINTERS exception) {
+ // Make sure TLS is initialized before reading sAlreadyHandlingTrap.
+ if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ if (sAlreadyHandlingTrap.get()) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+ AutoHandlingTrap aht;
+
+ EXCEPTION_RECORD* record = exception->ExceptionRecord;
+ if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
+ record->ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ JSContext* cx = TlsContext.get(); // Cold signal handling code
+ if (!HandleTrap(exception->ContextRecord, cx)) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ return EXCEPTION_CONTINUE_EXECUTION;
+}
+
+# elif defined(XP_DARWIN)
+// On OSX we are forced to use the lower-level Mach exception mechanism instead
+// of Unix signals because breakpad uses Mach exceptions and would otherwise
+// report a crash before wasm gets a chance to handle the exception.
+
+// This definition was generated by mig (the Mach Interface Generator) for the
+// routine 'exception_raise' (exc.defs).
+# pragma pack(4)
+typedef struct {
+ mach_msg_header_t Head;
+ /* start of the kernel processed data */
+ mach_msg_body_t msgh_body;
+ mach_msg_port_descriptor_t thread;
+ mach_msg_port_descriptor_t task;
+ /* end of the kernel processed data */
+ NDR_record_t NDR;
+ exception_type_t exception;
+ mach_msg_type_number_t codeCnt;
+ int64_t code[2];
+} Request__mach_exception_raise_t;
+# pragma pack()
+
+// The full Mach message also includes a trailer.
+struct ExceptionRequest {
+ Request__mach_exception_raise_t body;
+ mach_msg_trailer_t trailer;
+};
+
+static bool HandleMachException(const ExceptionRequest& request) {
+ // Get the port of the JSContext's thread from the message.
+ mach_port_t cxThread = request.body.thread.name;
+
+ // Read out the JSRuntime thread's register state.
+ CONTEXT context;
+# if defined(__x86_64__)
+ unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
+ unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
+ int thread_state = x86_THREAD_STATE64;
+ int float_state = x86_FLOAT_STATE64;
+# elif defined(__i386__)
+ unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
+ unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
+ int thread_state = x86_THREAD_STATE;
+ int float_state = x86_FLOAT_STATE;
+# elif defined(__arm__)
+ unsigned int thread_state_count = ARM_THREAD_STATE_COUNT;
+ unsigned int float_state_count = ARM_NEON_STATE_COUNT;
+ int thread_state = ARM_THREAD_STATE;
+ int float_state = ARM_NEON_STATE;
+# elif defined(__aarch64__)
+ unsigned int thread_state_count = ARM_THREAD_STATE64_COUNT;
+ unsigned int float_state_count = ARM_NEON_STATE64_COUNT;
+ int thread_state = ARM_THREAD_STATE64;
+ int float_state = ARM_NEON_STATE64;
+# else
+# error Unsupported architecture
+# endif
+ kern_return_t kret;
+ kret = thread_get_state(cxThread, thread_state,
+ (thread_state_t)&context.thread, &thread_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+ kret = thread_get_state(cxThread, float_state,
+ (thread_state_t)&context.float_, &float_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+
+ if (request.body.exception != EXC_BAD_ACCESS &&
+ request.body.exception != EXC_BAD_INSTRUCTION) {
+ return false;
+ }
+
+ {
+ AutoNoteSingleThreadedRegion anstr;
+ AutoHandlingTrap aht;
+ if (!HandleTrap(&context)) {
+ return false;
+ }
+ }
+
+ // Update the thread state with the new pc and register values.
+ kret = thread_set_state(cxThread, float_state,
+ (thread_state_t)&context.float_, float_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+ kret = thread_set_state(cxThread, thread_state,
+ (thread_state_t)&context.thread, thread_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+
+ return true;
+}
+
+static mach_port_t sMachDebugPort = MACH_PORT_NULL;
+
+static void MachExceptionHandlerThread() {
+ ThisThread::SetName("JS Wasm MachExceptionHandler");
+
+ // Taken from mach_exc in /usr/include/mach/mach_exc.defs.
+ static const unsigned EXCEPTION_MSG_ID = 2405;
+
+ while (true) {
+ ExceptionRequest request;
+ kern_return_t kret =
+ mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
+ sMachDebugPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+
+    // If we fail even to receive the message, we can't send a reply!
+ // Rather than hanging the faulting thread (hanging the browser), crash.
+ if (kret != KERN_SUCCESS) {
+ fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n",
+ (int)kret);
+ MOZ_CRASH();
+ }
+
+ if (request.body.Head.msgh_id != EXCEPTION_MSG_ID) {
+ fprintf(stderr, "Unexpected msg header id %d\n",
+              (int)request.body.Head.msgh_id);
+ MOZ_CRASH();
+ }
+
+    // Some thread just committed an EXC_BAD_ACCESS and has been suspended by
+ // the kernel. The kernel is waiting for us to reply with instructions.
+ // Our default is the "not handled" reply (by setting the RetCode field
+ // of the reply to KERN_FAILURE) which tells the kernel to continue
+    // searching at the process and system level. If this is an expected
+    // wasm exception, we handle it and return KERN_SUCCESS.
+ bool handled = HandleMachException(request);
+ kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;
+
+ // This magic incantation to send a reply back to the kernel was
+ // derived from the exc_server generated by
+ // 'mig -v /usr/include/mach/mach_exc.defs'.
+ __Reply__exception_raise_t reply;
+ reply.Head.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
+ reply.Head.msgh_size = sizeof(reply);
+ reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
+ reply.Head.msgh_local_port = MACH_PORT_NULL;
+ reply.Head.msgh_id = request.body.Head.msgh_id + 100;
+ reply.NDR = NDR_record;
+ reply.RetCode = replyCode;
+ mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ }
+}
+
+# else // If not Windows or Mac, assume Unix
+
+# if defined(__mips__) || defined(__loongarch__)
+static const uint32_t kWasmTrapSignal = SIGFPE;
+# else
+static const uint32_t kWasmTrapSignal = SIGILL;
+# endif
+
+static struct sigaction sPrevSEGVHandler;
+static struct sigaction sPrevSIGBUSHandler;
+static struct sigaction sPrevWasmTrapHandler;
+
+static void WasmTrapHandler(int signum, siginfo_t* info, void* context) {
+ if (!sAlreadyHandlingTrap.get()) {
+ AutoHandlingTrap aht;
+ MOZ_RELEASE_ASSERT(signum == SIGSEGV || signum == SIGBUS ||
+ signum == kWasmTrapSignal);
+ JSContext* cx = TlsContext.get(); // Cold signal handling code
+ if (HandleTrap((CONTEXT*)context, cx)) {
+ return;
+ }
+ }
+
+ struct sigaction* previousSignal = nullptr;
+ switch (signum) {
+ case SIGSEGV:
+ previousSignal = &sPrevSEGVHandler;
+ break;
+ case SIGBUS:
+ previousSignal = &sPrevSIGBUSHandler;
+ break;
+ case kWasmTrapSignal:
+ previousSignal = &sPrevWasmTrapHandler;
+ break;
+ }
+ MOZ_ASSERT(previousSignal);
+
+  // This signal is not for any wasm code we expect, so we need to forward
+ // the signal to the next handler. If there is no next handler (SIG_IGN or
+ // SIG_DFL), then it's time to crash. To do this, we set the signal back to
+ // its original disposition and return. This will cause the faulting op to
+  // be re-executed, which will crash in the normal way. The advantage of
+  // doing this over calling _exit() is that we remove ourselves from the
+  // crash stack, which improves crash reports. If there is a next handler,
+  // call it. It will either crash synchronously, fix up the instruction so
+  // that execution can continue and return, or trigger a crash by restoring
+  // the signal to its original disposition and returning.
+ //
+  // Note: the order of these tests matters.
+ if (previousSignal->sa_flags & SA_SIGINFO) {
+ previousSignal->sa_sigaction(signum, info, context);
+ } else if (previousSignal->sa_handler == SIG_DFL ||
+ previousSignal->sa_handler == SIG_IGN) {
+ sigaction(signum, previousSignal, nullptr);
+ } else {
+ previousSignal->sa_handler(signum);
+ }
+}
+# endif // XP_WIN || XP_DARWIN || assume unix
+
+# if defined(ANDROID) && defined(MOZ_LINKER)
+extern "C" MFBT_API bool IsSignalHandlingBroken();
+# endif
+
+struct InstallState {
+ bool tried;
+ bool success;
+ InstallState() : tried(false), success(false) {}
+};
+
+static ExclusiveData<InstallState> sEagerInstallState(
+ mutexid::WasmSignalInstallState);
+
+#endif // !(JS_CODEGEN_NONE)
+
+void wasm::EnsureEagerProcessSignalHandlers() {
+#ifdef JS_CODEGEN_NONE
+ // If there is no JIT, then there should be no Wasm signal handlers.
+ return;
+#else
+ auto eagerInstallState = sEagerInstallState.lock();
+ if (eagerInstallState->tried) {
+ return;
+ }
+
+ eagerInstallState->tried = true;
+ MOZ_RELEASE_ASSERT(eagerInstallState->success == false);
+
+# if defined(ANDROID) && defined(MOZ_LINKER)
+ // Signal handling is broken on some android systems.
+ if (IsSignalHandlingBroken()) {
+ return;
+ }
+# endif
+
+ sAlreadyHandlingTrap.infallibleInit();
+
+ // Install whatever exception/signal handler is appropriate for the OS.
+# if defined(XP_WIN)
+
+# if defined(MOZ_ASAN)
+ // Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
+ // in the first handler position.
+ const bool firstHandler = false;
+# else
+ // Otherwise, WasmTrapHandler needs to go first, so that we can recover
+ // from wasm faults and continue execution without triggering handlers
+ // such as Breakpad that assume we are crashing.
+ const bool firstHandler = true;
+# endif
+ if (!AddVectoredExceptionHandler(firstHandler, WasmTrapHandler)) {
+ // Windows has all sorts of random security knobs for disabling things
+ // so make this a dynamic failure that disables wasm, not a MOZ_CRASH().
+ return;
+ }
+
+# elif defined(XP_DARWIN)
+  // All the Mach setup is done in EnsureLazyProcessSignalHandlers.
+# else
+ // SA_NODEFER allows us to reenter the signal handler if we crash while
+ // handling the signal, and fall through to the Breakpad handler by testing
+  // sAlreadyHandlingTrap.
+
+ // Allow handling OOB with signals on all architectures
+ struct sigaction faultHandler;
+ faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ faultHandler.sa_sigaction = WasmTrapHandler;
+ sigemptyset(&faultHandler.sa_mask);
+ if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler)) {
+ MOZ_CRASH("unable to install segv handler");
+ }
+
+# if defined(JS_CODEGEN_ARM)
+  // On ARM, handle unaligned accesses.
+ struct sigaction busHandler;
+ busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ busHandler.sa_sigaction = WasmTrapHandler;
+ sigemptyset(&busHandler.sa_mask);
+ if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler)) {
+ MOZ_CRASH("unable to install sigbus handler");
+ }
+# endif
+
+ // Install a handler to handle the instructions that are emitted to implement
+ // wasm traps.
+ struct sigaction wasmTrapHandler;
+ wasmTrapHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ wasmTrapHandler.sa_sigaction = WasmTrapHandler;
+ sigemptyset(&wasmTrapHandler.sa_mask);
+ if (sigaction(kWasmTrapSignal, &wasmTrapHandler, &sPrevWasmTrapHandler)) {
+ MOZ_CRASH("unable to install wasm trap handler");
+ }
+# endif
+
+ eagerInstallState->success = true;
+#endif
+}
+
+#ifndef JS_CODEGEN_NONE
+static ExclusiveData<InstallState> sLazyInstallState(
+ mutexid::WasmSignalInstallState);
+
+static bool EnsureLazyProcessSignalHandlers() {
+ auto lazyInstallState = sLazyInstallState.lock();
+ if (lazyInstallState->tried) {
+ return lazyInstallState->success;
+ }
+
+ lazyInstallState->tried = true;
+ MOZ_RELEASE_ASSERT(lazyInstallState->success == false);
+
+# ifdef XP_DARWIN
+ // Create the port that all JSContext threads will redirect their traps to.
+ kern_return_t kret;
+ kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
+ &sMachDebugPort);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+ kret = mach_port_insert_right(mach_task_self(), sMachDebugPort,
+ sMachDebugPort, MACH_MSG_TYPE_MAKE_SEND);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+
+ // Create the thread that will wait on and service sMachDebugPort.
+ // It's not useful to destroy this thread on process shutdown so
+ // immediately detach on successful start.
+ Thread handlerThread;
+ if (!handlerThread.init(MachExceptionHandlerThread)) {
+ return false;
+ }
+ handlerThread.detach();
+# endif
+
+ lazyInstallState->success = true;
+ return true;
+}
+#endif // JS_CODEGEN_NONE
+
+bool wasm::EnsureFullSignalHandlers(JSContext* cx) {
+#ifdef JS_CODEGEN_NONE
+ return false;
+#else
+ if (cx->wasm().triedToInstallSignalHandlers) {
+ return cx->wasm().haveSignalHandlers;
+ }
+
+ cx->wasm().triedToInstallSignalHandlers = true;
+ MOZ_RELEASE_ASSERT(!cx->wasm().haveSignalHandlers);
+
+ {
+ auto eagerInstallState = sEagerInstallState.lock();
+ MOZ_RELEASE_ASSERT(eagerInstallState->tried);
+ if (!eagerInstallState->success) {
+ return false;
+ }
+ }
+
+ if (!EnsureLazyProcessSignalHandlers()) {
+ return false;
+ }
+
+# ifdef XP_DARWIN
+ // In addition to the process-wide signal handler setup, OSX needs each
+ // thread configured to send its exceptions to sMachDebugPort. While there
+ // are also task-level (i.e. process-level) exception ports, those are
+ // "claimed" by breakpad and chaining Mach exceptions is dark magic that we
+ // avoid by instead intercepting exceptions at the thread level before they
+ // propagate to the process-level. This works because there are no other
+ // uses of thread-level exception ports.
+ MOZ_RELEASE_ASSERT(sMachDebugPort != MACH_PORT_NULL);
+ thread_port_t thisThread = mach_thread_self();
+ kern_return_t kret = thread_set_exception_ports(
+ thisThread, EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
+ sMachDebugPort, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
+ THREAD_STATE_NONE);
+ mach_port_deallocate(mach_task_self(), thisThread);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+# endif
+
+ cx->wasm().haveSignalHandlers = true;
+ return true;
+#endif
+}
+
+bool wasm::MemoryAccessTraps(const RegisterState& regs, uint8_t* addr,
+ uint32_t numBytes, uint8_t** newPC) {
+#ifdef JS_CODEGEN_NONE
+ return false;
+#else
+ const wasm::CodeSegment* codeSegment = wasm::LookupCodeSegment(regs.pc);
+ if (!codeSegment || !codeSegment->isModule()) {
+ return false;
+ }
+
+ const wasm::ModuleSegment& segment = *codeSegment->asModule();
+
+ Trap trap;
+ BytecodeOffset bytecode;
+ if (!segment.code().lookupTrap(regs.pc, &trap, &bytecode)) {
+ return false;
+ }
+ switch (trap) {
+ case Trap::OutOfBounds:
+ break;
+ case Trap::NullPointerDereference:
+ break;
+# ifdef WASM_HAS_HEAPREG
+ case Trap::IndirectCallToNull:
+ // We use the null pointer exception from loading the heapreg to
+ // handle indirect calls to null.
+ break;
+# endif
+ default:
+ return false;
+ }
+
+ const Instance& instance =
+ *GetNearestEffectiveInstance(Frame::fromUntaggedWasmExitFP(regs.fp));
+ MOZ_ASSERT(&instance.code() == &segment.code());
+
+ switch (trap) {
+ case Trap::OutOfBounds:
+ if (!instance.memoryAccessInGuardRegion((uint8_t*)addr, numBytes)) {
+ return false;
+ }
+ break;
+ case Trap::NullPointerDereference:
+ if ((uintptr_t)addr >= NullPtrGuardSize) {
+ return false;
+ }
+ break;
+# ifdef WASM_HAS_HEAPREG
+ case Trap::IndirectCallToNull:
+ // Null pointer plus the appropriate offset.
+ if (addr !=
+ reinterpret_cast<uint8_t*>(wasm::Instance::offsetOfMemoryBase())) {
+ return false;
+ }
+ break;
+# endif
+ default:
+ MOZ_CRASH("Should not happen");
+ }
+
+ JSContext* cx = TlsContext.get(); // Cold simulator helper function
+ jit::JitActivation* activation = cx->activation()->asJit();
+ activation->startWasmTrap(trap, bytecode.offset(), regs);
+ *newPC = segment.trapCode();
+ return true;
+#endif
+}
+
+bool wasm::HandleIllegalInstruction(const RegisterState& regs,
+ uint8_t** newPC) {
+#ifdef JS_CODEGEN_NONE
+ return false;
+#else
+ const wasm::CodeSegment* codeSegment = wasm::LookupCodeSegment(regs.pc);
+ if (!codeSegment || !codeSegment->isModule()) {
+ return false;
+ }
+
+ const wasm::ModuleSegment& segment = *codeSegment->asModule();
+
+ Trap trap;
+ BytecodeOffset bytecode;
+ if (!segment.code().lookupTrap(regs.pc, &trap, &bytecode)) {
+ return false;
+ }
+
+ JSContext* cx = TlsContext.get(); // Cold simulator helper function
+ jit::JitActivation* activation = cx->activation()->asJit();
+ activation->startWasmTrap(trap, bytecode.offset(), regs);
+ *newPC = segment.trapCode();
+ return true;
+#endif
+}
diff --git a/js/src/wasm/WasmSignalHandlers.h b/js/src/wasm/WasmSignalHandlers.h
new file mode 100644
index 0000000000..4b452d6b37
--- /dev/null
+++ b/js/src/wasm/WasmSignalHandlers.h
@@ -0,0 +1,63 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_signal_handlers_h
+#define wasm_signal_handlers_h
+
+#include "js/ProfilingFrameIterator.h"
+#include "wasm/WasmProcess.h"
+
+namespace js {
+namespace wasm {
+
+using RegisterState = JS::ProfilingFrameIterator::RegisterState;
+
+// This function performs the low-overhead signal handler initialization that we
+// want to do eagerly to ensure a more-deterministic global process state. This
+// is especially relevant for signal handlers since handler ordering depends on
+// installation order: the wasm signal handler must run *before* the other crash
+// handlers (breakpad) and since POSIX signal handlers work LIFO, this function
+// needs to be called at the end of the startup process, after the other two
+// handlers have been installed. Currently, this is achieved by having
+// JSRuntime() call this function. There can be multiple JSRuntimes per process
+// so this function can thus be called multiple times, having no effect after
+// the first call.
+void EnsureEagerProcessSignalHandlers();
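// [Editorial sketch, not part of the patch] Rough embedder-side ordering that
// the comment above implies. InstallCrashReporter() is a hypothetical
// stand-in for breakpad/crash-reporter setup; the JSAPI calls are real:
//
//   #include "jsapi.h"              // JS_NewContext
//   #include "js/Initialization.h"  // JS_Init
//
//   bool StartUp() {
//     InstallCrashReporter();  // POSIX handlers run LIFO, so crash handlers
//                              // go in first.
//     if (!JS_Init()) {
//       return false;
//     }
//     // Creating the first context (and thus the JSRuntime) ends up calling
//     // wasm::EnsureEagerProcessSignalHandlers(), placing the wasm trap
//     // handler ahead of the crash handlers.
//     JSContext* cx = JS_NewContext(JS::DefaultHeapMaxBytes);
//     return cx != nullptr;
//   }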
+
+// Assuming EnsureEagerProcessSignalHandlers() has already been called,
+// this function performs the full installation of signal handlers which must
+// be performed per-thread/JSContext. This operation may incur some overhead and
+// so should be done only when needed to use wasm. Currently, this is done in
+// wasm::HasPlatformSupport() which is called when deciding whether to expose
+// the 'WebAssembly' object on the global object.
+bool EnsureFullSignalHandlers(JSContext* cx);
+
+// Return whether, with the given simulator register state, a memory access to
+// 'addr' of size 'numBytes' needs to trap and, if so, where the simulator
+// should redirect pc to.
+bool MemoryAccessTraps(const RegisterState& regs, uint8_t* addr,
+ uint32_t numBytes, uint8_t** newPC);
+
+// Return whether, with the given simulator register state, an illegal
+// instruction fault is expected and, if so, the value of the next PC.
+bool HandleIllegalInstruction(const RegisterState& regs, uint8_t** newPC);
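// [Editorial sketch, not part of the patch] How a CPU simulator might use
// MemoryAccessTraps() on a faulting access. Simulator and its pc_/sp_/fp_/lr_
// fields are hypothetical stand-ins; only the wasm calls come from this
// header:
//
//   void Simulator::handleFaultingAccess(uint8_t* addr, uint32_t numBytes) {
//     wasm::RegisterState regs;
//     regs.pc = reinterpret_cast<void*>(pc_);
//     regs.sp = reinterpret_cast<void*>(sp_);
//     regs.fp = reinterpret_cast<void*>(fp_);
//     regs.lr = reinterpret_cast<void*>(lr_);
//     uint8_t* newPC = nullptr;
//     if (wasm::MemoryAccessTraps(regs, addr, numBytes, &newPC)) {
//       pc_ = reinterpret_cast<uintptr_t>(newPC);  // resume at the trap stub
//       return;
//     }
//     MOZ_CRASH("unexpected memory fault in simulated wasm code");
//   }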
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_signal_handlers_h
diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
new file mode 100644
index 0000000000..7fc61381b9
--- /dev/null
+++ b/js/src/wasm/WasmStubs.cpp
@@ -0,0 +1,3117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmStubs.h"
+
+#include <algorithm>
+#include <iterator>
+#include <type_traits>
+
+#include "jit/ABIArgGenerator.h"
+#include "jit/JitFrames.h"
+#include "jit/RegisterAllocator.h"
+#include "js/Printf.h"
+#include "util/Memory.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmInstance.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using MIRTypeVector = Vector<jit::MIRType, 8, SystemAllocPolicy>;
+using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;
+
+/*****************************************************************************/
+// ABIResultIter implementation
+
+static uint32_t ResultStackSize(ValType type) {
+ switch (type.kind()) {
+ case ValType::I32:
+ return ABIResult::StackSizeOfInt32;
+ case ValType::I64:
+ return ABIResult::StackSizeOfInt64;
+ case ValType::F32:
+ return ABIResult::StackSizeOfFloat;
+ case ValType::F64:
+ return ABIResult::StackSizeOfDouble;
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+ return ABIResult::StackSizeOfV128;
+#endif
+ case ValType::Ref:
+ return ABIResult::StackSizeOfPtr;
+ default:
+ MOZ_CRASH("Unexpected result type");
+ }
+}
+
+// Compute the size of the stack slot that the wasm ABI requires be allocated
+// for a particular MIRType. Note that this sometimes differs from the
+// MIRType's natural size. See also ResultStackSize above and ABIResult::size()
+// and ABIResultIter below.
+
+uint32_t js::wasm::MIRTypeToABIResultSize(jit::MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ return ABIResult::StackSizeOfInt32;
+ case MIRType::Int64:
+ return ABIResult::StackSizeOfInt64;
+ case MIRType::Float32:
+ return ABIResult::StackSizeOfFloat;
+ case MIRType::Double:
+ return ABIResult::StackSizeOfDouble;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ return ABIResult::StackSizeOfV128;
+#endif
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ return ABIResult::StackSizeOfPtr;
+ default:
+ MOZ_CRASH("MIRTypeToABIResultSize - unhandled case");
+ }
+}
+
+uint32_t ABIResult::size() const { return ResultStackSize(type()); }
+
+void ABIResultIter::settleRegister(ValType type) {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
+ MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
+ static_assert(MaxRegisterResults == 1, "expected a single register result");
+
+ switch (type.kind()) {
+ case ValType::I32:
+ cur_ = ABIResult(type, ReturnReg);
+ break;
+ case ValType::I64:
+ cur_ = ABIResult(type, ReturnReg64);
+ break;
+ case ValType::F32:
+ cur_ = ABIResult(type, ReturnFloat32Reg);
+ break;
+ case ValType::F64:
+ cur_ = ABIResult(type, ReturnDoubleReg);
+ break;
+ case ValType::Ref:
+ cur_ = ABIResult(type, ReturnReg);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+ cur_ = ABIResult(type, ReturnSimd128Reg);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Unexpected result type");
+ }
+}
+
+void ABIResultIter::settleNext() {
+ MOZ_ASSERT(direction_ == Next);
+ MOZ_ASSERT(!done());
+
+ uint32_t typeIndex = count_ - index_ - 1;
+ ValType type = type_[typeIndex];
+
+ if (index_ < MaxRegisterResults) {
+ settleRegister(type);
+ return;
+ }
+
+ cur_ = ABIResult(type, nextStackOffset_);
+ nextStackOffset_ += ResultStackSize(type);
+}
+
+void ABIResultIter::settlePrev() {
+ MOZ_ASSERT(direction_ == Prev);
+ MOZ_ASSERT(!done());
+ uint32_t typeIndex = index_;
+ ValType type = type_[typeIndex];
+
+ if (count_ - index_ - 1 < MaxRegisterResults) {
+ settleRegister(type);
+ return;
+ }
+
+ uint32_t size = ResultStackSize(type);
+ MOZ_ASSERT(nextStackOffset_ >= size);
+ nextStackOffset_ -= size;
+ cur_ = ABIResult(type, nextStackOffset_);
+}
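// [Editorial sketch, not part of the patch] Typical forward use of
// ABIResultIter, mirroring the loop in StoreRegisterResult() further below:
// the entry at index 0 is assigned the return register (MaxRegisterResults is
// 1), and any remaining results are given stack offsets by settleNext().
//
//   ResultType results = ResultType::Vector(funcType.results());
//   for (ABIResultIter iter(results); !iter.done(); iter.next()) {
//     const ABIResult& result = iter.cur();
//     if (result.inRegister()) {
//       // Use result.gpr(), result.gpr64() or result.fpr(), per result.type().
//     } else {
//       // Spilled result: occupies result.size() bytes in the stack-results
//       // area.
//     }
//   }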
+
+#ifdef WASM_CODEGEN_DEBUG
+template <class Closure>
+static void GenPrint(DebugChannel channel, MacroAssembler& masm,
+ const Maybe<Register>& taken, Closure passArgAndCall) {
+ if (!IsCodegenDebugEnabled(channel)) {
+ return;
+ }
+
+ AllocatableRegisterSet regs(RegisterSet::All());
+ LiveRegisterSet save(regs.asLiveSet());
+ masm.PushRegsInMask(save);
+
+ if (taken) {
+ regs.take(taken.value());
+ }
+ Register temp = regs.takeAnyGeneral();
+
+ {
+ MOZ_ASSERT(MaybeGetJitContext(),
+ "codegen debug checks require a jit context");
+ masm.setupUnalignedABICall(temp);
+ passArgAndCall(IsCompilingWasm(), temp);
+ }
+
+ masm.PopRegsInMask(save);
+}
+
+static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
+ const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UniqueChars str = JS_vsmprintf(fmt, ap);
+ va_end(ap);
+
+ GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
+ // If we've gone this far, it means we're actually using the debugging
+ // strings. In this case, we leak them! This is only for debugging, and
+    // doing the right thing is cumbersome (in Ion, it'd mean adding a vector
+    // of strings to the IonScript; in wasm, adding them to the current Module
+    // and serializing them properly).
+ const char* text = str.release();
+
+ masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
+ masm.passABIArg(temp);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintText);
+ } else {
+ using Fn = void (*)(const char* output);
+ masm.callWithABI<Fn, PrintText>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {
+ GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
+ masm.passABIArg(src);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintI32);
+ } else {
+ using Fn = void (*)(int32_t val);
+ masm.callWithABI<Fn, PrintI32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {
+ GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
+ masm.passABIArg(src);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintPtr);
+ } else {
+ using Fn = void (*)(uint8_t* val);
+ masm.callWithABI<Fn, PrintPtr>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
+ const Register64& src) {
+# if JS_BITS_PER_WORD == 64
+ GenPrintf(channel, masm, "i64 ");
+ GenPrintIsize(channel, masm, src.reg);
+# else
+ GenPrintf(channel, masm, "i64(");
+ GenPrintIsize(channel, masm, src.low);
+ GenPrintIsize(channel, masm, src.high);
+ GenPrintf(channel, masm, ") ");
+# endif
+}
+
+static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {
+ GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
+ masm.passABIArg(src, MoveOp::FLOAT32);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintF32);
+ } else {
+ using Fn = void (*)(float val);
+ masm.callWithABI<Fn, PrintF32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {
+ GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
+ masm.passABIArg(src, MoveOp::DOUBLE);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintF64);
+ } else {
+ using Fn = void (*)(double val);
+ masm.callWithABI<Fn, PrintF64>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+# ifdef ENABLE_WASM_SIMD
+static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {
+ // TODO: We might try to do something meaningful here once SIMD data are
+ // aligned and hence C++-ABI compliant. For now, just make ourselves visible.
+ GenPrintf(channel, masm, "v128");
+}
+# endif
+#else
+static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
+ const char* fmt, ...) {}
+static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {}
+static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {}
+static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
+ const Register64& src) {}
+static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {}
+static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {}
+# ifdef ENABLE_WASM_SIMD
+static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {}
+# endif
+#endif
+
+static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
+ // On old ARM hardware, constant pools could be inserted and they need to
+ // be flushed before considering the size of the masm.
+ masm.flushBuffer();
+ offsets->end = masm.size();
+ return !masm.oom();
+}
+
+static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
+ uint32_t addBeforeAssert = 0) {
+ MOZ_ASSERT(
+ (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
+ masm.assertStackAlignment(alignment, addBeforeAssert);
+}
+
+template <class VectorT, template <class VecT> class ABIArgIterT>
+static unsigned StackArgBytesHelper(const VectorT& args) {
+ ABIArgIterT<VectorT> iter(args);
+ while (!iter.done()) {
+ iter++;
+ }
+ return iter.stackBytesConsumedSoFar();
+}
+
+template <class VectorT>
+static unsigned StackArgBytesForNativeABI(const VectorT& args) {
+ return StackArgBytesHelper<VectorT, ABIArgIter>(args);
+}
+
+template <class VectorT>
+static unsigned StackArgBytesForWasmABI(const VectorT& args) {
+ return StackArgBytesHelper<VectorT, WasmABIArgIter>(args);
+}
+
+static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
+ ArgTypeVector args(funcType);
+ return StackArgBytesForWasmABI(args);
+}
+
+static void Move64(MacroAssembler& masm, const Address& src,
+ const Address& dest, Register scratch) {
+#if JS_BITS_PER_WORD == 32
+ MOZ_RELEASE_ASSERT(src.base != scratch && dest.base != scratch);
+ masm.load32(LowWord(src), scratch);
+ masm.store32(scratch, LowWord(dest));
+ masm.load32(HighWord(src), scratch);
+ masm.store32(scratch, HighWord(dest));
+#else
+ Register64 scratch64(scratch);
+ masm.load64(src, scratch64);
+ masm.store64(scratch64, dest);
+#endif
+}
+
+static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
+ const FuncType& funcType, Register argv,
+ Register scratch) {
+ // Copy parameters out of argv and into the registers/stack-slots specified by
+ // the wasm ABI.
+ //
+  // SetupABIArguments is only used for C++ -> wasm calls through callExport(),
+ // and V128 and Ref types (other than externref) are not currently allowed.
+ ArgTypeVector args(funcType);
+ for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+ unsigned argOffset = iter.index() * sizeof(ExportArg);
+ Address src(argv, argOffset);
+ MIRType type = iter.mirType();
+ switch (iter->kind()) {
+ case ABIArg::GPR:
+ if (type == MIRType::Int32) {
+ masm.load32(src, iter->gpr());
+ } else if (type == MIRType::Int64) {
+ masm.load64(src, iter->gpr64());
+ } else if (type == MIRType::RefOrNull) {
+ masm.loadPtr(src, iter->gpr());
+ } else if (type == MIRType::StackResults) {
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
+ masm.loadPtr(src, iter->gpr());
+ } else {
+ MOZ_CRASH("unknown GPR type");
+ }
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ if (type == MIRType::Int64) {
+ masm.load64(src, iter->gpr64());
+ } else {
+ MOZ_CRASH("wasm uses hardfp for function calls.");
+ }
+ break;
+#endif
+ case ABIArg::FPU: {
+ static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
+ "ExportArg must be big enough to store SIMD values");
+ switch (type) {
+ case MIRType::Double:
+ masm.loadDouble(src, iter->fpu());
+ break;
+ case MIRType::Float32:
+ masm.loadFloat32(src, iter->fpu());
+ break;
+ case MIRType::Simd128:
+#ifdef ENABLE_WASM_SIMD
+ // This is only used by the testing invoke path,
+ // wasmLosslessInvoke, and is guarded against in normal JS-API
+ // call paths.
+ masm.loadUnalignedSimd128(src, iter->fpu());
+ break;
+#else
+ MOZ_CRASH("V128 not supported in SetupABIArguments");
+#endif
+ default:
+ MOZ_CRASH("unexpected FPU type");
+ break;
+ }
+ break;
+ }
+ case ABIArg::Stack:
+ switch (type) {
+ case MIRType::Int32:
+ masm.load32(src, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ case MIRType::Int64: {
+ RegisterOrSP sp = masm.getStackPointer();
+ Move64(masm, src, Address(sp, iter->offsetFromArgBase()), scratch);
+ break;
+ }
+ case MIRType::RefOrNull:
+ masm.loadPtr(src, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ case MIRType::Double: {
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadDouble(src, fpscratch);
+ masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ }
+ case MIRType::Float32: {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.loadFloat32(src, fpscratch);
+ masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ }
+ case MIRType::Simd128: {
+#ifdef ENABLE_WASM_SIMD
+ // This is only used by the testing invoke path,
+ // wasmLosslessInvoke, and is guarded against in normal JS-API
+ // call paths.
+ ScratchSimd128Scope fpscratch(masm);
+ masm.loadUnalignedSimd128(src, fpscratch);
+ masm.storeUnalignedSimd128(
+ fpscratch,
+ Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+ break;
+#else
+ MOZ_CRASH("V128 not supported in SetupABIArguments");
+#endif
+ }
+ case MIRType::StackResults: {
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
+ masm.loadPtr(src, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ }
+ default:
+ MOZ_CRASH("unexpected stack arg type");
+ }
+ break;
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ }
+}
+
+static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
+ const FuncType& funcType, Register loc) {
+ ResultType results = ResultType::Vector(funcType.results());
+ DebugOnly<bool> sawRegisterResult = false;
+ for (ABIResultIter iter(results); !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (result.inRegister()) {
+ MOZ_ASSERT(!sawRegisterResult);
+ sawRegisterResult = true;
+ switch (result.type().kind()) {
+ case ValType::I32:
+ masm.store32(result.gpr(), Address(loc, 0));
+ break;
+ case ValType::I64:
+ masm.store64(result.gpr64(), Address(loc, 0));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
+ break;
+#else
+ MOZ_CRASH("V128 not supported in StoreABIReturn");
+#endif
+ case ValType::F32:
+ masm.storeFloat32(result.fpr(), Address(loc, 0));
+ break;
+ case ValType::F64:
+ masm.storeDouble(result.fpr(), Address(loc, 0));
+ break;
+ case ValType::Ref:
+ masm.storePtr(result.gpr(), Address(loc, 0));
+ break;
+ }
+ }
+ }
+ MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
+}
+
+#if defined(JS_CODEGEN_ARM)
+// The ARM system ABI also includes d15 & s31 in the non volatile float
+// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
+static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
+ GeneralRegisterSet(Registers::NonVolatileMask &
+ ~(Registers::SetType(1) << Registers::lr)),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask |
+ (FloatRegisters::SetType(1) << FloatRegisters::d15) |
+ (FloatRegisters::SetType(1) << FloatRegisters::s31)));
+#elif defined(JS_CODEGEN_ARM64)
+// Exclude the Link Register (x30) because it is preserved manually.
+//
+// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
+// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
+static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
+ GeneralRegisterSet((Registers::NonVolatileMask &
+ ~(Registers::SetType(1) << Registers::lr)) |
+ (Registers::SetType(1) << Registers::x16)),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask |
+ FloatRegisters::NonAllocatableMask));
+#else
+static const LiveRegisterSet NonVolatileRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask));
+#endif
+
+static const unsigned NumExtraPushed = 2; // instance and argv
+
+#ifdef JS_CODEGEN_ARM64
+static const unsigned WasmPushSize = 16;
+#else
+static const unsigned WasmPushSize = sizeof(void*);
+#endif
+
+static void AssertExpectedSP(MacroAssembler& masm) {
+#ifdef JS_CODEGEN_ARM64
+ MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
+# ifdef DEBUG
+ // Since we're asserting that SP is the currently active stack pointer,
+ // let's also in effect assert that PSP is dead -- by setting it to 1, so as
+  // to cause any attempts to use it to segfault in an easily identifiable
+  // way.
+ masm.asVIXL().Mov(PseudoStackPointer64, 1);
+# endif
+#endif
+}
+
+template <class Operand>
+static void WasmPush(MacroAssembler& masm, const Operand& op) {
+#ifdef JS_CODEGEN_ARM64
+ // Allocate a pad word so that SP can remain properly aligned. |op| will be
+ // written at the lower-addressed of the two words pushed here.
+ masm.reserveStack(WasmPushSize);
+ masm.storePtr(op, Address(masm.getStackPointer(), 0));
+#else
+ masm.Push(op);
+#endif
+}
+
+static void WasmPop(MacroAssembler& masm, Register r) {
+#ifdef JS_CODEGEN_ARM64
+ // Also pop the pad word allocated by WasmPush.
+ masm.loadPtr(Address(masm.getStackPointer(), 0), r);
+ masm.freeStack(WasmPushSize);
+#else
+ masm.Pop(r);
+#endif
+}
+
+static void MoveSPForJitABI(MacroAssembler& masm) {
+#ifdef JS_CODEGEN_ARM64
+ masm.moveStackPtrTo(PseudoStackPointer);
+#endif
+}
+
+static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
+ const Maybe<ImmPtr>& funcPtr) {
+ MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
+ MoveSPForJitABI(masm);
+ if (funcPtr) {
+ masm.call(*funcPtr);
+ } else {
+ masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
+ }
+}
+
+STATIC_ASSERT_ANYREF_IS_JSOBJECT; // Strings are currently boxed
+
+// Unboxing is branchy and contorted because of Spectre mitigations - we don't
+// have enough scratch registers. Were it not for the spectre mitigations in
+// branchTestObjClass, the branch nest below would be restructured significantly
+// by inverting branches and using fewer registers.
+
+// Unbox an anyref in src (clobbering src in the process) and then re-box it as
+// a Value in *dst. See the definition of AnyRef for a discussion of pointer
+// representation.
+static void UnboxAnyrefIntoValue(MacroAssembler& masm, Register instance,
+ Register src, const Address& dst,
+ Register scratch) {
+ MOZ_ASSERT(src != scratch);
+
+ // Not actually the value we're passing, but we've no way of
+ // decoding anything better.
+ GenPrintPtr(DebugChannel::Import, masm, src);
+
+ Label notNull, mustUnbox, done;
+ masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
+ masm.storeValue(NullValue(), dst);
+ masm.jump(&done);
+
+ masm.bind(&notNull);
+ // The type test will clear src if the test fails, so store early.
+ masm.storeValue(JSVAL_TYPE_OBJECT, src, dst);
+ // Spectre mitigations: see comment above about efficiency.
+ masm.branchTestObjClass(Assembler::Equal, src,
+ Address(instance, Instance::offsetOfValueBoxClass()),
+ scratch, src, &mustUnbox);
+ masm.jump(&done);
+
+ masm.bind(&mustUnbox);
+ Move64(masm, Address(src, WasmValueBox::offsetOfValue()), dst, scratch);
+
+ masm.bind(&done);
+}
+
+// Unbox an anyref in src and then re-box it as a Value in dst.
+// See the definition of AnyRef for a discussion of pointer representation.
+static void UnboxAnyrefIntoValueReg(MacroAssembler& masm, Register instance,
+ Register src, ValueOperand dst,
+ Register scratch) {
+ MOZ_ASSERT(src != scratch);
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(dst.typeReg() != scratch);
+ MOZ_ASSERT(dst.payloadReg() != scratch);
+#else
+ MOZ_ASSERT(dst.valueReg() != scratch);
+#endif
+
+ // Not actually the value we're passing, but we've no way of
+ // decoding anything better.
+ GenPrintPtr(DebugChannel::Import, masm, src);
+
+ Label notNull, mustUnbox, done;
+ masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
+ masm.moveValue(NullValue(), dst);
+ masm.jump(&done);
+
+ masm.bind(&notNull);
+ // The type test will clear src if the test fails, so store early.
+ masm.moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
+ // Spectre mitigations: see comment above about efficiency.
+ masm.branchTestObjClass(Assembler::Equal, src,
+ Address(instance, Instance::offsetOfValueBoxClass()),
+ scratch, src, &mustUnbox);
+ masm.jump(&done);
+
+ masm.bind(&mustUnbox);
+ masm.loadValue(Address(src, WasmValueBox::offsetOfValue()), dst);
+
+ masm.bind(&done);
+}
+
+// Box the Value in src as an anyref in dest. src and dest must not overlap.
+// See the definition of AnyRef for a discussion of pointer representation.
+static void BoxValueIntoAnyref(MacroAssembler& masm, ValueOperand src,
+ Register dest, Label* oolConvert) {
+ Label nullValue, objectValue, done;
+ {
+ ScratchTagScope tag(masm, src);
+ masm.splitTagForTest(src, tag);
+ masm.branchTestObject(Assembler::Equal, tag, &objectValue);
+ masm.branchTestNull(Assembler::Equal, tag, &nullValue);
+ masm.jump(oolConvert);
+ }
+
+ masm.bind(&nullValue);
+ masm.xorPtr(dest, dest);
+ masm.jump(&done);
+
+ masm.bind(&objectValue);
+ masm.unboxObject(src, dest);
+
+ masm.bind(&done);
+}
+
+// Generate a stub that enters wasm from a C++ caller via the native ABI. The
+// signature of the entry point is Module::ExportFuncPtr. The exported wasm
+// function has an ABI derived from its specific signature, so this function
+// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
+static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
+ const FuncType& funcType,
+ const Maybe<ImmPtr>& funcPtr,
+ Offsets* offsets) {
+ AutoCreatedBy acb(masm, "GenerateInterpEntry");
+
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+
+ offsets->begin = masm.currentOffset();
+
+ // Save the return address if it wasn't already saved by the call insn.
+#ifdef JS_USE_LINK_REGISTER
+# if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ masm.pushReturnAddress();
+# elif defined(JS_CODEGEN_ARM64)
+ // WasmPush updates framePushed() unlike pushReturnAddress(), but that's
+ // cancelled by the setFramePushed() below.
+ WasmPush(masm, lr);
+# else
+ MOZ_CRASH("Implement this");
+# endif
+#endif
+
+ // Save all caller non-volatile registers before we clobber them here and in
+ // the wasm callee (which does not preserve non-volatile registers).
+ masm.setFramePushed(0);
+ masm.PushRegsInMask(NonVolatileRegs);
+
+ const unsigned nonVolatileRegsPushSize =
+ masm.PushRegsInMaskSizeInBytes(NonVolatileRegs);
+
+ MOZ_ASSERT(masm.framePushed() == nonVolatileRegsPushSize);
+
+ // Put the 'argv' argument into a non-argument/return/instance register so
+ // that we can use 'argv' while we fill in the arguments for the wasm callee.
+ // Use a second non-argument/return register as temporary scratch.
+ Register argv = ABINonArgReturnReg0;
+ Register scratch = ABINonArgReturnReg1;
+
+ // scratch := SP
+ masm.moveStackPtrTo(scratch);
+
+ // Dynamically align the stack since ABIStackAlignment is not necessarily
+ // WasmStackAlignment. Preserve SP so it can be restored after the call.
+#ifdef JS_CODEGEN_ARM64
+ static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
+#else
+ masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
+#endif
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ // Create a fake frame: just previous RA and an FP.
+ const size_t FakeFrameSize = 2 * sizeof(void*);
+#ifdef JS_CODEGEN_ARM64
+ masm.Ldr(ARMRegister(ABINonArgReturnReg0, 64),
+ MemOperand(ARMRegister(scratch, 64), nonVolatileRegsPushSize));
+#else
+ masm.Push(Address(scratch, nonVolatileRegsPushSize));
+#endif
+ // Store fake wasm register state. Ensure the frame pointer passed by the C++
+  // caller doesn't have the ExitFPTag bit set, to avoid confusing frame
+  // iterators. This bit shouldn't be set if C++ code is using frame pointers,
+  // so this has no effect on native stack unwinders.
+ masm.andPtr(Imm32(int32_t(~ExitFPTag)), FramePointer);
+#ifdef JS_CODEGEN_ARM64
+ masm.asVIXL().Push(ARMRegister(ABINonArgReturnReg0, 64),
+ ARMRegister(FramePointer, 64));
+ masm.moveStackPtrTo(FramePointer);
+#else
+ masm.Push(FramePointer);
+#endif
+
+ masm.moveStackPtrTo(FramePointer);
+ masm.setFramePushed(FakeFrameSize);
+#ifdef JS_CODEGEN_ARM64
+ const size_t FakeFramePushed = 0;
+#else
+ const size_t FakeFramePushed = sizeof(void*);
+ masm.Push(scratch);
+#endif
+
+ // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
+ // The entry stub's frame is 1 word.
+ const unsigned argBase = sizeof(void*) + nonVolatileRegsPushSize;
+ ABIArgGenerator abi;
+ ABIArg arg;
+
+ // arg 1: ExportArg*
+ arg = abi.next(MIRType::Pointer);
+ if (arg.kind() == ABIArg::GPR) {
+ masm.movePtr(arg.gpr(), argv);
+ } else {
+ masm.loadPtr(Address(scratch, argBase + arg.offsetFromArgBase()), argv);
+ }
+
+ // Arg 2: Instance*
+ arg = abi.next(MIRType::Pointer);
+ if (arg.kind() == ABIArg::GPR) {
+ masm.movePtr(arg.gpr(), InstanceReg);
+ } else {
+ masm.loadPtr(Address(scratch, argBase + arg.offsetFromArgBase()),
+ InstanceReg);
+ }
+
+ WasmPush(masm, InstanceReg);
+
+ // Save 'argv' on the stack so that we can recover it after the call.
+ WasmPush(masm, argv);
+
+ MOZ_ASSERT(masm.framePushed() ==
+ NumExtraPushed * WasmPushSize + FakeFrameSize + FakeFramePushed);
+
+ // Reserve stack space for the wasm call.
+ unsigned argDecrement =
+ StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
+ StackArgBytesForWasmABI(funcType));
+ masm.reserveStack(argDecrement);
+
+ // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
+ SetupABIArguments(masm, fe, funcType, argv, scratch);
+
+ masm.loadWasmPinnedRegsFromInstance();
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCalleeInstanceOffsetBeforeCall));
+
+ // Call into the real function. Note that, due to the throw stub, fp, instance
+ // and pinned registers may be clobbered.
+ masm.assertStackAlignment(WasmStackAlignment);
+ CallFuncExport(masm, fe, funcPtr);
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ // Set the return value based on whether InstanceReg is the FailInstanceReg
+ // magic value (set by the throw stub).
+ Label success, join;
+ masm.branchPtr(Assembler::NotEqual, InstanceReg, Imm32(FailInstanceReg),
+ &success);
+ masm.move32(Imm32(false), scratch);
+ masm.jump(&join);
+ masm.bind(&success);
+ masm.move32(Imm32(true), scratch);
+ masm.bind(&join);
+
+ // Pop the arguments pushed after the dynamic alignment.
+ masm.freeStack(argDecrement);
+
+ masm.setFramePushed(NumExtraPushed * WasmPushSize + FakeFrameSize +
+ FakeFramePushed);
+
+ // Recover the 'argv' pointer which was saved before aligning the stack.
+ WasmPop(masm, argv);
+
+ WasmPop(masm, InstanceReg);
+
+ // Pop the stack pointer to its value right before dynamic alignment.
+#ifdef JS_CODEGEN_ARM64
+ static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
+ masm.freeStack(FakeFrameSize);
+#else
+ masm.PopStackPtr();
+#endif
+
+ // Store the register result, if any, in argv[0].
+ // No widening is required, as the value leaves ReturnReg.
+ StoreRegisterResult(masm, fe, funcType, argv);
+
+ masm.move32(scratch, ReturnReg);
+
+ // Restore clobbered non-volatile registers of the caller.
+ masm.setFramePushed(nonVolatileRegsPushSize);
+ masm.PopRegsInMask(NonVolatileRegs);
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+#if defined(JS_CODEGEN_ARM64)
+ masm.setFramePushed(WasmPushSize);
+ WasmPop(masm, lr);
+ masm.abiret();
+#else
+ masm.ret();
+#endif
+
+ return FinishOffsets(masm, offsets);
+}
+
+#ifdef JS_PUNBOX64
+static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
+#else
+static const ValueOperand ScratchValIonEntry =
+ ValueOperand(ABINonArgReg0, ABINonArgReg1);
+#endif
+static const Register ScratchIonEntry = ABINonArgReg2;
+
+static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
+ SymbolicAddress sym) {
+ if (isAbsolute) {
+ masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
+ } else {
+ masm.call(sym);
+ }
+}
+
+// Load instance's instance from the callee.
+static void GenerateJitEntryLoadInstance(MacroAssembler& masm) {
+ // ScratchIonEntry := callee => JSFunction*
+ unsigned offset = JitFrameLayout::offsetOfCalleeToken();
+ masm.loadFunctionFromCalleeToken(Address(FramePointer, offset),
+ ScratchIonEntry);
+
+ // ScratchIonEntry := callee->getExtendedSlot(WASM_INSTANCE_SLOT)->toPrivate()
+ // => Instance*
+ offset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_INSTANCE_SLOT);
+ masm.loadPrivate(Address(ScratchIonEntry, offset), InstanceReg);
+}
+
+// Creates a JS fake exit frame for wasm, so the frame iterators just use
+// JSJit frame iteration.
+static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
+ AssertExpectedSP(masm);
+
+ MOZ_ASSERT(masm.framePushed() == frameSize);
+
+ masm.freeStack(frameSize);
+ MoveSPForJitABI(masm);
+
+ GenerateJitEntryLoadInstance(masm);
+
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfCx()), ScratchIonEntry);
+ masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
+ ExitFrameType::WasmGenericJitEntry);
+
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfJSJitExceptionHandler()),
+ ScratchIonEntry);
+ masm.jump(ScratchIonEntry);
+}
+
+// Helper function for allocating a BigInt and initializing it from an I64 in
+// GenerateJitEntry. The return result is written to scratch.
+//
+// Note that this will create a new frame and must not - in its current form -
+// be called from a context where there is already another stub frame on the
+// stack, as that confuses unwinding during profiling. This was a problem for
+// its use from GenerateImportJitExit, see bug 1754258. Therefore,
+// FuncType::canHaveJitExit prevents the present function from being called for
+// exits.
+static void GenerateBigIntInitialization(MacroAssembler& masm,
+ unsigned bytesPushedByPrologue,
+ Register64 input, Register scratch,
+ const FuncExport& fe, Label* fail) {
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(input.low != scratch);
+ MOZ_ASSERT(input.high != scratch);
+#else
+ MOZ_ASSERT(input.reg != scratch);
+#endif
+
+ // We need to avoid clobbering other argument registers and the input.
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ masm.PushRegsInMask(save);
+
+ unsigned frameSize = StackDecrementForCall(
+ ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
+ masm.reserveStack(frameSize);
+ masm.assertStackAlignment(ABIStackAlignment);
+
+ CallSymbolicAddress(masm, !fe.hasEagerStubs(),
+ SymbolicAddress::AllocateBigInt);
+ masm.storeCallPointerResult(scratch);
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.freeStack(frameSize);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ masm.PopRegsInMaskIgnore(save, ignore);
+
+ masm.branchTest32(Assembler::Zero, scratch, scratch, fail);
+ masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
+}
+
+// Generate a stub that enters wasm from a jit code caller via the jit ABI.
+//
+// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
+// recompute it on every return path, be it normal return or exception return.
+// The JIT code we return to assumes it is correct.
+
+static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
+ const FuncExport& fe, const FuncType& funcType,
+ const Maybe<ImmPtr>& funcPtr,
+ CallableOffsets* offsets) {
+ AutoCreatedBy acb(masm, "GenerateJitEntry");
+
+ AssertExpectedSP(masm);
+
+ RegisterOrSP sp = masm.getStackPointer();
+
+ GenerateJitEntryPrologue(masm, offsets);
+
+ // The jit caller has set up the following stack layout (sp grows to the
+ // left):
+ // <-- retAddr | descriptor | callee | argc | this | arg1..N
+ //
+ // GenerateJitEntryPrologue has additionally pushed the caller's frame
+ // pointer. The stack pointer is now JitStackAlignment-aligned.
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ unsigned normalBytesNeeded = StackArgBytesForWasmABI(funcType);
+
+ MIRTypeVector coerceArgTypes;
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
+ unsigned oolBytesNeeded = StackArgBytesForWasmABI(coerceArgTypes);
+
+ unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);
+
+ // Note the jit caller ensures the stack is aligned *after* the call
+ // instruction.
+ unsigned frameSizeExclFP = StackDecrementForCall(
+ WasmStackAlignment, masm.framePushed(), bytesNeeded);
+
+ // Reserve stack space for wasm ABI arguments, set up like this:
+ // <-- ABI args | padding
+ masm.reserveStack(frameSizeExclFP);
+
+ uint32_t frameSize = masm.framePushed();
+
+ GenerateJitEntryLoadInstance(masm);
+
+ if (funcType.hasUnexposableArgOrRet()) {
+ CallSymbolicAddress(masm, !fe.hasEagerStubs(),
+ SymbolicAddress::ReportV128JSCall);
+ GenerateJitEntryThrow(masm, frameSize);
+ return FinishOffsets(masm, offsets);
+ }
+
+ FloatRegister scratchF = ABINonArgDoubleReg;
+ Register scratchG = ScratchIonEntry;
+ ValueOperand scratchV = ScratchValIonEntry;
+
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
+ fe.funcIndex());
+
+ // We do two loops:
+ // - one loop up-front will make sure that all the Value tags fit the
+ // expected signature argument types. If at least one inline conversion
+ // fails, we just jump to the OOL path which will call into C++. Inline
+ // conversions are ordered in the way we expect them to happen the most.
+ // - the second loop will unbox the arguments into the right registers.
+ Label oolCall;
+ for (size_t i = 0; i < funcType.args().length(); i++) {
+ Address jitArgAddr(FramePointer, JitFrameLayout::offsetOfActualArg(i));
+ masm.loadValue(jitArgAddr, scratchV);
+
+ Label next;
+ switch (funcType.args()[i].kind()) {
+ case ValType::I32: {
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // For int32 inputs, just skip.
+ masm.branchTestInt32(Assembler::Equal, tag, &next);
+
+ // For double inputs, unbox, truncate and store back.
+ Label storeBack, notDouble;
+ masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.unboxDouble(scratchV, scratchF);
+ masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
+ masm.jump(&storeBack);
+ }
+ masm.bind(&notDouble);
+
+ // For null or undefined, store 0.
+ Label nullOrUndefined, notNullOrUndefined;
+ masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
+ masm.bind(&nullOrUndefined);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(Int32Value(0), jitArgAddr);
+ }
+ masm.jump(&next);
+ masm.bind(&notNullOrUndefined);
+
+ // For booleans, store the number value back. Other types (symbol,
+ // object, strings) go to the C++ call.
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
+ masm.unboxBoolean(scratchV, scratchG);
+ // fallthrough:
+
+ masm.bind(&storeBack);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
+ }
+ break;
+ }
+ case ValType::I64: {
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // For BigInt inputs, just skip. Otherwise go to C++ for other
+ // types that require creating a new BigInt or erroring.
+ masm.branchTestBigInt(Assembler::NotEqual, tag, &oolCall);
+ masm.jump(&next);
+ break;
+ }
+ case ValType::F32:
+ case ValType::F64: {
+ // Note we can reuse the same code for f32/f64 here, since for the
+ // case of f32, the conversion of f64 to f32 will happen in the
+ // second loop.
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // For double inputs, just skip.
+ masm.branchTestDouble(Assembler::Equal, tag, &next);
+
+ // For int32 inputs, convert and rebox.
+ Label storeBack, notInt32;
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
+ masm.int32ValueToDouble(scratchV, scratchF);
+ masm.jump(&storeBack);
+ }
+ masm.bind(&notInt32);
+
+ // For undefined (missing argument), store NaN.
+ Label notUndefined;
+ masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
+ masm.jump(&next);
+ }
+ masm.bind(&notUndefined);
+
+ // +null is 0.
+ Label notNull;
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(DoubleValue(0.), jitArgAddr);
+ }
+ masm.jump(&next);
+ masm.bind(&notNull);
+
+ // For booleans, store the number value back. Other types (symbol,
+ // object, strings) go to the C++ call.
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
+ masm.boolValueToDouble(scratchV, scratchF);
+ // fallthrough:
+
+ masm.bind(&storeBack);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.boxDouble(scratchF, jitArgAddr);
+ }
+ break;
+ }
+ case ValType::Ref: {
+ // Guarded against by temporarilyUnsupportedReftypeForEntry()
+ MOZ_RELEASE_ASSERT(funcType.args()[i].refType().isExtern());
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // We handle object and null inline; everything else requires an actual
+ // box, so we go out of line to allocate it.
+ masm.branchTestObject(Assembler::Equal, tag, &next);
+ masm.branchTestNull(Assembler::Equal, tag, &next);
+ masm.jump(&oolCall);
+ break;
+ }
+ case ValType::V128: {
+ // Guarded against by hasUnexposableArgOrRet()
+ MOZ_CRASH("unexpected argument type when calling from the jit");
+ }
+ default: {
+ MOZ_CRASH("unexpected argument type when calling from the jit");
+ }
+ }
+ masm.nopAlign(CodeAlignment);
+ masm.bind(&next);
+ }
+
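+ // The OOL coercion path below jumps back here after coercing the problematic
+ // JS arguments in place, so the unboxing loop picks up the coerced values.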
+ Label rejoinBeforeCall;
+ masm.bind(&rejoinBeforeCall);
+
+ // Convert all the expected values to unboxed values on the stack.
+ ArgTypeVector args(funcType);
+ for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+ Address argv(FramePointer, JitFrameLayout::offsetOfActualArg(iter.index()));
+ bool isStackArg = iter->kind() == ABIArg::Stack;
+ switch (iter.mirType()) {
+ case MIRType::Int32: {
+ Register target = isStackArg ? ScratchIonEntry : iter->gpr();
+ masm.unboxInt32(argv, target);
+ GenPrintIsize(DebugChannel::Function, masm, target);
+ if (isStackArg) {
+ masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ case MIRType::Int64: {
+ // The coercion has provided a BigInt value by this point, which
+ // we need to convert to an I64 here.
+ if (isStackArg) {
+ Address dst(sp, iter->offsetFromArgBase());
+ Register src = scratchV.payloadOrValueReg();
+#if JS_BITS_PER_WORD == 64
+ Register64 scratch64(scratchG);
+#else
+ Register64 scratch64(scratchG, ABINonArgReg3);
+#endif
+ masm.unboxBigInt(argv, src);
+ masm.loadBigInt64(src, scratch64);
+ GenPrintI64(DebugChannel::Function, masm, scratch64);
+ masm.store64(scratch64, dst);
+ } else {
+ Register src = scratchG;
+ Register64 target = iter->gpr64();
+ masm.unboxBigInt(argv, src);
+ masm.loadBigInt64(src, target);
+ GenPrintI64(DebugChannel::Function, masm, target);
+ }
+ break;
+ }
+ case MIRType::Float32: {
+ FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
+ masm.unboxDouble(argv, ABINonArgDoubleReg);
+ masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
+ GenPrintF32(DebugChannel::Function, masm, target.asSingle());
+ if (isStackArg) {
+ masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ case MIRType::Double: {
+ FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
+ masm.unboxDouble(argv, target);
+ GenPrintF64(DebugChannel::Function, masm, target);
+ if (isStackArg) {
+ masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ case MIRType::RefOrNull: {
+ Register target = isStackArg ? ScratchIonEntry : iter->gpr();
+ masm.unboxObjectOrNull(argv, target);
+ GenPrintPtr(DebugChannel::Function, masm, target);
+ if (isStackArg) {
+ masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ default: {
+ MOZ_CRASH("unexpected input argument when calling from jit");
+ }
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ // Setup wasm register state.
+ masm.loadWasmPinnedRegsFromInstance();
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCalleeInstanceOffsetBeforeCall));
+
+ // Call into the real function. Note that, due to the throw stub, instance
+ // and pinned registers may be clobbered.
+ masm.assertStackAlignment(WasmStackAlignment);
+ CallFuncExport(masm, fe, funcPtr);
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ // If InstanceReg is equal to the FailInstanceReg magic value (set by the
+ // throw stub), then report the exception to the JIT caller by jumping into
+ // the exception stub.
+ Label exception;
+ masm.branchPtr(Assembler::Equal, InstanceReg, Imm32(FailInstanceReg),
+ &exception);
+
+ // Pop arguments.
+ masm.freeStack(frameSizeExclFP);
+
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
+ fe.funcIndex());
+
+ // Store the return value in the JSReturnOperand.
+ const ValTypeVector& results = funcType.results();
+ if (results.length() == 0) {
+ GenPrintf(DebugChannel::Function, masm, "void");
+ masm.moveValue(UndefinedValue(), JSReturnOperand);
+ } else {
+ MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
+ switch (results[0].kind()) {
+ case ValType::I32:
+ GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
+ // No widening is required, as the value is boxed.
+ masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
+ break;
+ case ValType::F32: {
+ masm.canonicalizeFloat(ReturnFloat32Reg);
+ masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
+ GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
+ ScratchDoubleScope fpscratch(masm);
+ masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
+ break;
+ }
+ case ValType::F64: {
+ masm.canonicalizeDouble(ReturnDoubleReg);
+ GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
+ ScratchDoubleScope fpscratch(masm);
+ masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
+ break;
+ }
+ case ValType::I64: {
+ Label fail, done;
+ GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
+ GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, fe, &fail);
+ masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
+ masm.jump(&done);
+ masm.bind(&fail);
+ // Fix up the stack for the exception tail so that we can share it.
+ masm.reserveStack(frameSizeExclFP);
+ masm.jump(&exception);
+ masm.bind(&done);
+ // Undo the stack fixup for the benefit of the assertion below.
+ masm.setFramePushed(0);
+ break;
+ }
+ case ValType::V128: {
+ MOZ_CRASH("unexpected return type when calling from ion to wasm");
+ }
+ case ValType::Ref: {
+ STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+ // Per comment above, the call may have clobbered the instance
+ // register, so reload since unboxing will need it.
+ GenerateJitEntryLoadInstance(masm);
+ UnboxAnyrefIntoValueReg(masm, InstanceReg, ReturnReg, JSReturnOperand,
+ WasmJitEntryReturnScratch);
+ break;
+ }
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ AssertExpectedSP(masm);
+ GenerateJitEntryEpilogue(masm, offsets);
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ // Generate an OOL call to the C++ conversion path.
+ if (funcType.args().length()) {
+ masm.bind(&oolCall);
+ masm.setFramePushed(frameSize);
+
+ // Baseline and Ion call the C++ runtime via BuiltinThunk using the wasm ABI,
+ // so to keep the BuiltinThunk interface uniform we call it here with the
+ // wasm ABI as well.
+ jit::WasmABIArgIter<MIRTypeVector> argsIter(coerceArgTypes);
+
+ // argument 0: function export index.
+ if (argsIter->kind() == ABIArg::GPR) {
+ masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
+ } else {
+ masm.storePtr(ImmWord(funcExportIndex),
+ Address(sp, argsIter->offsetFromArgBase()));
+ }
+ argsIter++;
+
+ // argument 1: instance
+ if (argsIter->kind() == ABIArg::GPR) {
+ masm.movePtr(InstanceReg, argsIter->gpr());
+ } else {
+ masm.storePtr(InstanceReg, Address(sp, argsIter->offsetFromArgBase()));
+ }
+ argsIter++;
+
+ // argument 2: effective address of start of argv
+ Address argv(FramePointer, JitFrameLayout::offsetOfActualArgs());
+ if (argsIter->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, argsIter->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, ScratchIonEntry);
+ masm.storePtr(ScratchIonEntry,
+ Address(sp, argsIter->offsetFromArgBase()));
+ }
+ argsIter++;
+ MOZ_ASSERT(argsIter.done());
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ CallSymbolicAddress(masm, !fe.hasEagerStubs(),
+ SymbolicAddress::CoerceInPlace_JitEntry);
+ masm.assertStackAlignment(ABIStackAlignment);
+
+ // No widening is required, as the return value is used as a bool.
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
+ &rejoinBeforeCall);
+ }
+
+ // Prepare to throw: reload InstanceReg from the frame.
+ masm.bind(&exception);
+ masm.setFramePushed(frameSize);
+ GenerateJitEntryThrow(masm, frameSize);
+
+ return FinishOffsets(masm, offsets);
+}
+
+void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
+ const Instance& inst,
+ const JitCallStackArgVector& stackArgs,
+ Register scratch, uint32_t* callOffset) {
+ MOZ_ASSERT(!IsCompilingWasm());
+
+ const FuncType& funcType = inst.metadata().getFuncExportType(fe);
+
+ size_t framePushedAtStart = masm.framePushed();
+
+ // Note: if code here pushes a reference value into the frame for its own
+ // purposes (and not just as an argument to the callee), then the frame must
+ // be traced in TraceJitExitFrame; see the DirectWasmJitCall case there.
+ // Values that are pushed as arguments, however, will be traced by the callee.
+
+ // Push a special frame descriptor that indicates the frame size so we can
+ // directly iterate from the current JIT frame without an extra call.
+ // Note: buildFakeExitFrame pushes an ExitFrameLayout containing the current
+ // frame pointer. We also use this to restore the frame pointer after the
+ // call.
+ *callOffset = masm.buildFakeExitFrame(scratch);
+ // FP := ExitFrameLayout*
+ masm.moveStackPtrTo(FramePointer);
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
+
+ // Move stack arguments to their final locations.
+ unsigned bytesNeeded = StackArgBytesForWasmABI(funcType);
+ bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
+ bytesNeeded);
+ if (bytesNeeded) {
+ masm.reserveStack(bytesNeeded);
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
+ fe.funcIndex());
+
+ ArgTypeVector args(funcType);
+ for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+ MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
+ MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
+ if (iter->kind() != ABIArg::Stack) {
+ switch (iter.mirType()) {
+ case MIRType::Int32:
+ GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
+ break;
+ case MIRType::Int64:
+ GenPrintI64(DebugChannel::Function, masm, iter->gpr64());
+ break;
+ case MIRType::Float32:
+ GenPrintF32(DebugChannel::Function, masm, iter->fpu());
+ break;
+ case MIRType::Double:
+ GenPrintF64(DebugChannel::Function, masm, iter->fpu());
+ break;
+ case MIRType::RefOrNull:
+ GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
+ break;
+ case MIRType::StackResults:
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
+ GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
+ break;
+ default:
+ MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
+ }
+ continue;
+ }
+
+ Address dst(masm.getStackPointer(), iter->offsetFromArgBase());
+
+ const JitCallStackArg& stackArg = stackArgs[iter.index()];
+ switch (stackArg.tag()) {
+ case JitCallStackArg::Tag::Imm32:
+ GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
+ masm.storePtr(ImmWord(stackArg.imm32()), dst);
+ break;
+ case JitCallStackArg::Tag::GPR:
+ MOZ_ASSERT(stackArg.gpr() != scratch);
+ MOZ_ASSERT(stackArg.gpr() != FramePointer);
+ GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
+ masm.storePtr(stackArg.gpr(), dst);
+ break;
+ case JitCallStackArg::Tag::FPU:
+ switch (iter.mirType()) {
+ case MIRType::Double:
+ GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
+ masm.storeDouble(stackArg.fpu(), dst);
+ break;
+ case MIRType::Float32:
+ GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
+ masm.storeFloat32(stackArg.fpu(), dst);
+ break;
+ default:
+ MOZ_CRASH(
+ "unexpected MIR type for a float register in wasm fast call");
+ }
+ break;
+ case JitCallStackArg::Tag::Address: {
+ // The address offsets were valid *before* we pushed our frame.
+ Address src = stackArg.addr();
+ MOZ_ASSERT(src.base == masm.getStackPointer());
+ src.offset += masm.framePushed() - framePushedAtStart;
+ switch (iter.mirType()) {
+ case MIRType::Double: {
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadDouble(src, fpscratch);
+ GenPrintF64(DebugChannel::Function, masm, fpscratch);
+ masm.storeDouble(fpscratch, dst);
+ break;
+ }
+ case MIRType::Float32: {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.loadFloat32(src, fpscratch);
+ GenPrintF32(DebugChannel::Function, masm, fpscratch);
+ masm.storeFloat32(fpscratch, dst);
+ break;
+ }
+ case MIRType::Int32: {
+ masm.loadPtr(src, scratch);
+ GenPrintIsize(DebugChannel::Function, masm, scratch);
+ masm.storePtr(scratch, dst);
+ break;
+ }
+ case MIRType::RefOrNull: {
+ masm.loadPtr(src, scratch);
+ GenPrintPtr(DebugChannel::Function, masm, scratch);
+ masm.storePtr(scratch, dst);
+ break;
+ }
+ case MIRType::StackResults: {
+ MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
+ }
+ default: {
+ MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
+ }
+ }
+ break;
+ }
+ case JitCallStackArg::Tag::Undefined: {
+ MOZ_CRASH("can't happen because of arg.kind() check");
+ }
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ // Load instance; from now on, InstanceReg is live.
+ masm.movePtr(ImmPtr(&inst), InstanceReg);
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ WasmCalleeInstanceOffsetBeforeCall));
+ masm.loadWasmPinnedRegsFromInstance();
+
+ // Actual call.
+ const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
+ const MetadataTier& metadata = codeTier.metadata();
+ const CodeRange& codeRange = metadata.codeRange(fe);
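+ // The unchecked call entry skips the signature check that only indirect
+ // calls need; for a direct call like this one the types are known statically.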
+ void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry();
+
+ masm.assertStackAlignment(WasmStackAlignment);
+ MoveSPForJitABI(masm);
+ masm.callJit(ImmPtr(callee));
+#ifdef JS_CODEGEN_ARM64
+ // Wasm does not always keep the PSP in sync with the SP, so reinitialize it
+ // here: it may have been clobbered either by the wasm code or by any C++
+ // calls within.
+ masm.initPseudoStackPtr();
+#endif
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ masm.branchPtr(Assembler::Equal, InstanceReg, Imm32(wasm::FailInstanceReg),
+ masm.exceptionLabel());
+
+ // Store the return value in the appropriate place.
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
+ fe.funcIndex());
+ const ValTypeVector& results = funcType.results();
+ if (results.length() == 0) {
+ masm.moveValue(UndefinedValue(), JSReturnOperand);
+ GenPrintf(DebugChannel::Function, masm, "void");
+ } else {
+ MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
+ switch (results[0].kind()) {
+ case wasm::ValType::I32:
+ // The return value is in ReturnReg, which is what Ion expects.
+ GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
+#ifdef JS_64BIT
+ masm.widenInt32(ReturnReg);
+#endif
+ break;
+ case wasm::ValType::I64:
+ // The return value is in ReturnReg64, which is what Ion expects.
+ GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
+ break;
+ case wasm::ValType::F32:
+ masm.canonicalizeFloat(ReturnFloat32Reg);
+ GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
+ break;
+ case wasm::ValType::F64:
+ masm.canonicalizeDouble(ReturnDoubleReg);
+ GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
+ break;
+ case wasm::ValType::Ref:
+ STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+ // The call to wasm above preserves the InstanceReg, we don't
+ // need to reload it here.
+ UnboxAnyrefIntoValueReg(masm, InstanceReg, ReturnReg, JSReturnOperand,
+ WasmJitEntryReturnScratch);
+ break;
+ case wasm::ValType::V128:
+ MOZ_CRASH("unexpected return type when calling from ion to wasm");
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ // Restore the frame pointer.
+ masm.loadPtr(Address(FramePointer, 0), FramePointer);
+
+ // Free args + frame descriptor.
+ masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());
+
+ MOZ_ASSERT(framePushedAtStart == masm.framePushed());
+}
+
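+ // Copy one stack slot of the given MIRType from |src| to |dst|, going through
+ // |scratch| (or a scratch FP register) and emitting a debug print of the
+ // value along the way.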
+static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
+ Address src, Address dst) {
+ if (type == MIRType::Int32) {
+ masm.load32(src, scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.store32(scratch, dst);
+ } else if (type == MIRType::Int64) {
+#if JS_BITS_PER_WORD == 32
+ MOZ_RELEASE_ASSERT(src.base != scratch && dst.base != scratch);
+ GenPrintf(DebugChannel::Import, masm, "i64(");
+ masm.load32(LowWord(src), scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.store32(scratch, LowWord(dst));
+ masm.load32(HighWord(src), scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.store32(scratch, HighWord(dst));
+ GenPrintf(DebugChannel::Import, masm, ") ");
+#else
+ Register64 scratch64(scratch);
+ masm.load64(src, scratch64);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.store64(scratch64, dst);
+#endif
+ } else if (type == MIRType::RefOrNull || type == MIRType::Pointer ||
+ type == MIRType::StackResults) {
+ masm.loadPtr(src, scratch);
+ GenPrintPtr(DebugChannel::Import, masm, scratch);
+ masm.storePtr(scratch, dst);
+ } else if (type == MIRType::Float32) {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.loadFloat32(src, fpscratch);
+ GenPrintF32(DebugChannel::Import, masm, fpscratch);
+ masm.storeFloat32(fpscratch, dst);
+ } else if (type == MIRType::Double) {
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadDouble(src, fpscratch);
+ GenPrintF64(DebugChannel::Import, masm, fpscratch);
+ masm.storeDouble(fpscratch, dst);
+#ifdef ENABLE_WASM_SIMD
+ } else if (type == MIRType::Simd128) {
+ ScratchSimd128Scope fpscratch(masm);
+ masm.loadUnalignedSimd128(src, fpscratch);
+ GenPrintV128(DebugChannel::Import, masm, fpscratch);
+ masm.storeUnalignedSimd128(fpscratch, dst);
+#endif
+ } else {
+ MOZ_CRASH("StackCopy: unexpected type");
+ }
+}
+
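+ // Store each wasm ABI argument (whether passed in a register or on the
+ // caller's stack) into the outgoing argv array at |argOffset|, one
+ // Value-sized slot per argument, for consumption by Instance::callImport_*.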
+static void FillArgumentArrayForInterpExit(MacroAssembler& masm,
+ unsigned funcImportIndex,
+ const FuncType& funcType,
+ unsigned argOffset,
+ Register scratch) {
+ // This is FrameWithInstances::sizeOf() - ShadowStackSpace because the latter
+ // is accounted for by the ABIArgIter.
+ const unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithInstances);
+
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
+ funcImportIndex);
+
+ ArgTypeVector args(funcType);
+ for (ABIArgIter i(args); !i.done(); i++) {
+ Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
+
+ MIRType type = i.mirType();
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
+ (type == MIRType::StackResults));
+ switch (i->kind()) {
+ case ABIArg::GPR:
+ if (type == MIRType::Int32) {
+ GenPrintIsize(DebugChannel::Import, masm, i->gpr());
+ masm.store32(i->gpr(), dst);
+ } else if (type == MIRType::Int64) {
+ GenPrintI64(DebugChannel::Import, masm, i->gpr64());
+ masm.store64(i->gpr64(), dst);
+ } else if (type == MIRType::RefOrNull) {
+ GenPrintPtr(DebugChannel::Import, masm, i->gpr());
+ masm.storePtr(i->gpr(), dst);
+ } else if (type == MIRType::StackResults) {
+ GenPrintPtr(DebugChannel::Import, masm, i->gpr());
+ masm.storePtr(i->gpr(), dst);
+ } else {
+ MOZ_CRASH(
+ "FillArgumentArrayForInterpExit, ABIArg::GPR: unexpected type");
+ }
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ if (type == MIRType::Int64) {
+ GenPrintI64(DebugChannel::Import, masm, i->gpr64());
+ masm.store64(i->gpr64(), dst);
+ } else {
+ MOZ_CRASH("wasm uses hardfp for function calls.");
+ }
+ break;
+#endif
+ case ABIArg::FPU: {
+ FloatRegister srcReg = i->fpu();
+ if (type == MIRType::Double) {
+ GenPrintF64(DebugChannel::Import, masm, srcReg);
+ masm.storeDouble(srcReg, dst);
+ } else if (type == MIRType::Float32) {
+ // Preserve the NaN pattern in the input.
+ GenPrintF32(DebugChannel::Import, masm, srcReg);
+ masm.storeFloat32(srcReg, dst);
+ } else if (type == MIRType::Simd128) {
+ // The value should never escape; the call will be stopped later as
+ // the import is being called. But we should generate something sane
+ // here for the boxed case since a debugger or the stack walker may
+ // observe something.
+ ScratchDoubleScope dscratch(masm);
+ masm.loadConstantDouble(0, dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.storeDouble(dscratch, dst);
+ } else {
+ MOZ_CRASH("Unknown MIRType in wasm exit stub");
+ }
+ break;
+ }
+ case ABIArg::Stack: {
+ Address src(FramePointer,
+ offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
+ if (type == MIRType::Simd128) {
+ // As above. StackCopy does not know this trick.
+ ScratchDoubleScope dscratch(masm);
+ masm.loadConstantDouble(0, dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.storeDouble(dscratch, dst);
+ } else {
+ StackCopy(masm, type, scratch, src, dst);
+ }
+ break;
+ }
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ }
+ GenPrintf(DebugChannel::Import, masm, "\n");
+}
+
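+ // Box each wasm ABI argument into a JS Value in the outgoing JIT argument
+ // array at |argOffset|, ready for the call into JIT code.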
+// Note, this may destroy the values in incoming argument registers as a result
+// of Spectre mitigation.
+static void FillArgumentArrayForJitExit(MacroAssembler& masm, Register instance,
+ unsigned funcImportIndex,
+ const FuncType& funcType,
+ unsigned argOffset, Register scratch,
+ Register scratch2, Label* throwLabel) {
+ MOZ_ASSERT(scratch != scratch2);
+
+ // This is FrameWithInstances::sizeOf() - ShadowStackSpace because the latter
+ // is accounted for by the ABIArgIter.
+ const unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithInstances);
+
+ // This loop does not root the values that are being constructed in
+ // for the arguments. Allocations that are generated by code either
+ // in the loop or called from it should be NoGC allocations.
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
+ funcImportIndex);
+
+ ArgTypeVector args(funcType);
+ for (ABIArgIter i(args); !i.done(); i++) {
+ Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
+
+ MIRType type = i.mirType();
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
+ (type == MIRType::StackResults));
+ switch (i->kind()) {
+ case ABIArg::GPR:
+ if (type == MIRType::Int32) {
+ GenPrintIsize(DebugChannel::Import, masm, i->gpr());
+ masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
+ } else if (type == MIRType::Int64) {
+ // FuncType::canHaveJitExit should prevent this. Also see comments
+ // at GenerateBigIntInitialization.
+ MOZ_CRASH("Should not happen");
+ } else if (type == MIRType::RefOrNull) {
+ // This works also for FuncRef because it is distinguishable from
+ // a boxed AnyRef.
+ masm.movePtr(i->gpr(), scratch2);
+ UnboxAnyrefIntoValue(masm, instance, scratch2, dst, scratch);
+ } else if (type == MIRType::StackResults) {
+ MOZ_CRASH("Multi-result exit to JIT unimplemented");
+ } else {
+ MOZ_CRASH(
+ "FillArgumentArrayForJitExit, ABIArg::GPR: unexpected type");
+ }
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ if (type == MIRType::Int64) {
+ // FuncType::canHaveJitExit should prevent this. Also see comments
+ // at GenerateBigIntInitialization.
+ MOZ_CRASH("Should not happen");
+ } else {
+ MOZ_CRASH("wasm uses hardfp for function calls.");
+ }
+ break;
+#endif
+ case ABIArg::FPU: {
+ FloatRegister srcReg = i->fpu();
+ if (type == MIRType::Double) {
+ // Preserve the NaN pattern in the input.
+ ScratchDoubleScope fpscratch(masm);
+ masm.moveDouble(srcReg, fpscratch);
+ masm.canonicalizeDouble(fpscratch);
+ GenPrintF64(DebugChannel::Import, masm, fpscratch);
+ masm.boxDouble(fpscratch, dst);
+ } else if (type == MIRType::Float32) {
+ // JS::Values can't store Float32, so convert to a Double.
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertFloat32ToDouble(srcReg, fpscratch);
+ masm.canonicalizeDouble(fpscratch);
+ GenPrintF64(DebugChannel::Import, masm, fpscratch);
+ masm.boxDouble(fpscratch, dst);
+ } else if (type == MIRType::Simd128) {
+ // The value should never escape; the call will be stopped later as
+ // the import is being called. But we should generate something sane
+ // here for the boxed case since a debugger or the stack walker may
+ // observe something.
+ ScratchDoubleScope dscratch(masm);
+ masm.loadConstantDouble(0, dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.boxDouble(dscratch, dst);
+ } else {
+ MOZ_CRASH("Unknown MIRType in wasm exit stub");
+ }
+ break;
+ }
+ case ABIArg::Stack: {
+ Address src(FramePointer,
+ offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
+ if (type == MIRType::Int32) {
+ masm.load32(src, scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
+ } else if (type == MIRType::Int64) {
+ // FuncType::canHaveJitExit should prevent this. Also see comments
+ // at GenerateBigIntInitialization.
+ MOZ_CRASH("Should not happen");
+ } else if (type == MIRType::RefOrNull) {
+ // This works also for FuncRef because it is distinguishable from a
+ // boxed AnyRef.
+ masm.loadPtr(src, scratch);
+ UnboxAnyrefIntoValue(masm, instance, scratch, dst, scratch2);
+ } else if (IsFloatingPointType(type)) {
+ ScratchDoubleScope dscratch(masm);
+ FloatRegister fscratch = dscratch.asSingle();
+ if (type == MIRType::Float32) {
+ masm.loadFloat32(src, fscratch);
+ masm.convertFloat32ToDouble(fscratch, dscratch);
+ } else {
+ masm.loadDouble(src, dscratch);
+ }
+ masm.canonicalizeDouble(dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.boxDouble(dscratch, dst);
+ } else if (type == MIRType::Simd128) {
+ // The value should never escape; the call will be stopped later as
+ // the import is being called. But we should generate something
+ // sane here for the boxed case since a debugger or the stack walker
+ // may observe something.
+ ScratchDoubleScope dscratch(masm);
+ masm.loadConstantDouble(0, dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.boxDouble(dscratch, dst);
+ } else {
+ MOZ_CRASH(
+ "FillArgumentArrayForJitExit, ABIArg::Stack: unexpected type");
+ }
+ break;
+ }
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ }
+ GenPrintf(DebugChannel::Import, masm, "\n");
+}
+
+// Generate a wrapper function with the standard intra-wasm call ABI which
+// simply calls an import. This wrapper function allows any import to be treated
+// like a normal wasm function for the purposes of exports and table calls. In
+// particular, the wrapper function provides:
+// - a table entry, so JS imports can be put into tables
+// - normal entries, so that, if the import is re-exported, an entry stub can
+// be generated and called without any special cases
+static bool GenerateImportFunction(jit::MacroAssembler& masm,
+ const FuncImport& fi,
+ const FuncType& funcType,
+ CallIndirectId callIndirectId,
+ FuncOffsets* offsets) {
+ AutoCreatedBy acb(masm, "wasm::GenerateImportFunction");
+
+ AssertExpectedSP(masm);
+
+ GenerateFunctionPrologue(masm, callIndirectId, Nothing(), offsets);
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ const unsigned sizeOfInstanceSlot = sizeof(void*);
+ unsigned framePushed = StackDecrementForCall(
+ WasmStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ StackArgBytesForWasmABI(funcType) + sizeOfInstanceSlot);
+ masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0));
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+
+ masm.storePtr(InstanceReg, Address(masm.getStackPointer(),
+ framePushed - sizeOfInstanceSlot));
+
+ // The argument register state is already set up by our caller. We just need
+ // to be sure not to clobber it before the call.
+ Register scratch = ABINonArgReg0;
+
+ // Copy our frame's stack arguments to the callee frame's stack arguments.
+ //
+ // Note offsetFromFPToCallerStackArgs is sizeof(Frame) because the
+ // WasmABIArgIter accounts for both the ShadowStackSpace and the instance
+ // fields of FrameWithInstances.
+ unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
+ ArgTypeVector args(funcType);
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ if (i->kind() != ABIArg::Stack) {
+ continue;
+ }
+
+ Address src(FramePointer,
+ offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
+ Address dst(masm.getStackPointer(), i->offsetFromArgBase());
+ GenPrintf(DebugChannel::Import, masm,
+ "calling exotic import function with arguments: ");
+ StackCopy(masm, i.mirType(), scratch, src, dst);
+ GenPrintf(DebugChannel::Import, masm, "\n");
+ }
+
+ // Call the import exit stub.
+ CallSiteDesc desc(CallSiteDesc::Import);
+ MoveSPForJitABI(masm);
+ masm.wasmCallImport(desc, CalleeDesc::import(fi.instanceOffset()));
+
+ // Restore the instance register and pinned regs, per wasm function ABI.
+ masm.loadPtr(
+ Address(masm.getStackPointer(), framePushed - sizeOfInstanceSlot),
+ InstanceReg);
+ masm.loadWasmPinnedRegsFromInstance();
+
+ // Restore cx->realm.
+ masm.switchToWasmInstanceRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
+
+ GenerateFunctionEpilogue(masm, framePushed, offsets);
+ return FinishOffsets(masm, offsets);
+}
+
+static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+
+bool wasm::GenerateImportFunctions(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ CompiledCode* code) {
+ LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
+ TempAllocator alloc(&lifo);
+ WasmMacroAssembler masm(alloc, env);
+
+ for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
+ const FuncImport& fi = imports[funcIndex];
+ const FuncType& funcType = *env.funcs[funcIndex].type;
+ CallIndirectId callIndirectId = CallIndirectId::forFunc(env, funcIndex);
+
+ FuncOffsets offsets;
+ if (!GenerateImportFunction(masm, fi, funcType, callIndirectId, &offsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0,
+ offsets)) {
+ return false;
+ }
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
+
+// Generate a stub that is called via the internal ABI derived from the
+// signature of the import and calls into an appropriate callImport C++
+// function, having boxed all the ABI arguments into a homogeneous Value array.
+static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
+ const FuncType& funcType,
+ uint32_t funcImportIndex,
+ Label* throwLabel,
+ CallableOffsets* offsets) {
+ AutoCreatedBy acb(masm, "GenerateImportInterpExit");
+
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+
+ // Argument types for Instance::callImport_*:
+ static const MIRType typeArray[] = {MIRType::Pointer, // Instance*
+ MIRType::Pointer, // funcImportIndex
+ MIRType::Int32, // argc
+ MIRType::Pointer}; // argv
+ MIRTypeVector invokeArgTypes;
+ MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, std::size(typeArray)));
+
+ // At the point of the call, the stack layout is:
+ //
+ // | stack args | padding | argv[] | padding | retaddr | caller stack | ...
+ // ^
+ // +-- sp
+ //
+ // The padding between stack args and argv ensures that argv is aligned on a
+ // Value boundary. The padding between argv and retaddr ensures that sp is
+ // aligned. The caller stack includes a ShadowStackArea and the instance
+ // fields before the args, see WasmFrame.h.
+ //
+ // The 'double' alignment is correct since the argv[] is a Value array.
+ unsigned argOffset =
+ AlignBytes(StackArgBytesForNativeABI(invokeArgTypes), sizeof(double));
+ // The abiArgCount includes a stack result pointer argument if needed.
+ unsigned abiArgCount = ArgTypeVector(funcType).lengthWithStackResults();
+ unsigned argBytes = std::max<size_t>(1, abiArgCount) * sizeof(Value);
+ unsigned framePushed =
+ StackDecrementForCall(ABIStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ argOffset + argBytes);
+
+ GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
+ offsets);
+
+ // Fill the argument array.
+ Register scratch = ABINonArgReturnReg0;
+ FillArgumentArrayForInterpExit(masm, funcImportIndex, funcType, argOffset,
+ scratch);
+
+ // Prepare the arguments for the call to Instance::callImport_*.
+ ABIArgMIRTypeIter i(invokeArgTypes);
+
+ // argument 0: Instance*
+ if (i->kind() == ABIArg::GPR) {
+ masm.movePtr(InstanceReg, i->gpr());
+ } else {
+ masm.storePtr(InstanceReg,
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+
+ // argument 1: funcImportIndex
+ if (i->kind() == ABIArg::GPR) {
+ masm.mov(ImmWord(funcImportIndex), i->gpr());
+ } else {
+ masm.store32(Imm32(funcImportIndex),
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+
+ // argument 2: argc
+ unsigned argc = abiArgCount;
+ if (i->kind() == ABIArg::GPR) {
+ masm.mov(ImmWord(argc), i->gpr());
+ } else {
+ masm.store32(Imm32(argc),
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+
+ // argument 3: argv
+ Address argv(masm.getStackPointer(), argOffset);
+ if (i->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, i->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, scratch);
+ masm.storePtr(scratch,
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+ MOZ_ASSERT(i.done());
+
+ // Make the call, test whether it succeeded, and extract the return value.
+ AssertStackAlignment(masm, ABIStackAlignment);
+ masm.call(SymbolicAddress::CallImport_General);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+ ResultType resultType = ResultType::Vector(funcType.results());
+ ValType registerResultType;
+ for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
+ if (iter.cur().inRegister()) {
+ MOZ_ASSERT(!registerResultType.isValid());
+ registerResultType = iter.cur().type();
+ }
+ }
+ if (!registerResultType.isValid()) {
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintf(DebugChannel::Import, masm, "void");
+ } else {
+ switch (registerResultType.kind()) {
+ case ValType::I32:
+ masm.load32(argv, ReturnReg);
+ // No widening is required, as we know the value comes from an i32 load.
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
+ break;
+ case ValType::I64:
+ masm.load64(argv, ReturnReg64);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintI64(DebugChannel::Import, masm, ReturnReg64);
+ break;
+ case ValType::V128:
+ // Note, CallImport_Rtt/V128 currently always throws, so we should never
+ // reach this point.
+ masm.breakpoint();
+ break;
+ case ValType::F32:
+ masm.loadFloat32(argv, ReturnFloat32Reg);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
+ break;
+ case ValType::F64:
+ masm.loadDouble(argv, ReturnDoubleReg);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
+ break;
+ case ValType::Ref:
+ STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+ masm.loadPtr(argv, ReturnReg);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
+ break;
+ }
+ }
+
+ GenPrintf(DebugChannel::Import, masm, "\n");
+
+ // The native ABI preserves the instance, heap and global registers since they
+ // are non-volatile.
+ MOZ_ASSERT(NonVolatileRegs.has(InstanceReg));
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_LOONG64) || defined(JS_CODEGEN_RISCV64)
+ MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
+#endif
+
+ GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp,
+ offsets);
+
+ return FinishOffsets(masm, offsets);
+}
+
+// Generate a stub that is called via the internal ABI derived from the
+// signature of the import and calls into a compatible JIT function,
+// having boxed all the ABI arguments into the JIT stack frame layout.
+static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
+ const FuncType& funcType,
+ unsigned funcImportIndex, Label* throwLabel,
+ CallableOffsets* offsets) {
+ AutoCreatedBy acb(masm, "GenerateImportJitExit");
+
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+
+ // JIT calls use the following stack layout:
+ //
+ // | WasmToJSJitFrameLayout | this | arg1..N | saved instance | ...
+ // ^
+ // +-- sp
+ //
+ // The JIT ABI requires that sp be JitStackAlignment-aligned after pushing
+ // the return address and frame pointer.
+ static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
+ const unsigned sizeOfInstanceSlot = sizeof(void*);
+ const unsigned sizeOfRetAddrAndFP = 2 * sizeof(void*);
+ const unsigned sizeOfPreFrame =
+ WasmToJSJitFrameLayout::Size() - sizeOfRetAddrAndFP;
+ const unsigned sizeOfThisAndArgs =
+ (1 + funcType.args().length()) * sizeof(Value);
+ const unsigned totalJitFrameBytes = sizeOfRetAddrAndFP + sizeOfPreFrame +
+ sizeOfThisAndArgs + sizeOfInstanceSlot;
+ const unsigned jitFramePushed =
+ StackDecrementForCall(JitStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ totalJitFrameBytes) -
+ sizeOfRetAddrAndFP;
+
+ GenerateJitExitPrologue(masm, jitFramePushed, offsets);
+
+ // 1. Descriptor.
+ unsigned argc = funcType.args().length();
+ size_t argOffset = 0;
+ uint32_t descriptor =
+ MakeFrameDescriptorForJitCall(FrameType::WasmToJSJit, argc);
+ masm.storePtr(ImmWord(uintptr_t(descriptor)),
+ Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(size_t);
+
+ // 2. Callee, part 1 -- need the callee register for argument filling, so
+ // record offset here and set up callee later.
+ size_t calleeArgOffset = argOffset;
+ argOffset += sizeof(size_t);
+ MOZ_ASSERT(argOffset == sizeOfPreFrame);
+
+ // 3. |this| value.
+ masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(Value);
+
+ // 4. Fill the arguments.
+ Register scratch = ABINonArgReturnReg1; // Repeatedly clobbered
+ Register scratch2 = ABINonArgReturnReg0; // Reused as callee below
+ FillArgumentArrayForJitExit(masm, InstanceReg, funcImportIndex, funcType,
+ argOffset, scratch, scratch2, throwLabel);
+ argOffset += funcType.args().length() * sizeof(Value);
+ MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame);
+
+ // Preserve instance because the JIT callee clobbers it.
+ const size_t savedInstanceOffset = argOffset;
+ masm.storePtr(InstanceReg,
+ Address(masm.getStackPointer(), savedInstanceOffset));
+
+ // 2. Callee, part 2 -- now that the register is free, set up the callee.
+ Register callee = ABINonArgReturnReg0; // Live until call
+
+ // 2.1. Get the callee. This must be a JSFunction if we're using this JIT
+ // exit.
+ masm.loadPtr(
+ Address(InstanceReg, Instance::offsetInData(
+ fi.instanceOffset() +
+ offsetof(FuncImportInstanceData, callable))),
+ callee);
+
+ // 2.2. Save callee.
+ masm.storePtr(callee, Address(masm.getStackPointer(), calleeArgOffset));
+
+ // 5. Check if we need to rectify arguments.
+ masm.loadFunctionArgCount(callee, scratch);
+
+ Label rectify;
+ masm.branch32(Assembler::Above, scratch, Imm32(funcType.args().length()),
+ &rectify);
+
+ // 6. If we haven't rectified arguments, load callee executable entry point.
+
+ masm.loadJitCodeRaw(callee, callee);
+
+ Label rejoinBeforeCall;
+ masm.bind(&rejoinBeforeCall);
+
+ AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddrAndFP);
+#ifdef JS_CODEGEN_ARM64
+ AssertExpectedSP(masm);
+ // Manually resync the PSP. Omitting this causes, e.g.,
+ // tests/wasm/import-export.js to segfault.
+ masm.moveStackPtrTo(PseudoStackPointer);
+#endif
+ masm.callJitNoProfiler(callee);
+
+ // Note that there might be a GC thing in the JSReturnOperand now.
+ // In all the code paths from here:
+ // - either the value is unboxed because it was a primitive and we don't
+ // need to worry about rooting anymore.
+ // - or the value needs to be rooted, but nothing can cause a GC between
+ // here and CoerceInPlace, which roots before coercing to a primitive.
+
+ // The JIT callee clobbers all registers other than the frame pointer, so
+ // restore InstanceReg here.
+ AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddrAndFP);
+ masm.loadPtr(Address(masm.getStackPointer(), savedInstanceOffset),
+ InstanceReg);
+
+ // The frame was aligned for the JIT ABI such that
+ // (sp - 2 * sizeof(void*)) % JitStackAlignment == 0
+ // But now we possibly want to call one of several different C++ functions,
+ // so subtract 2 * sizeof(void*) so that sp is aligned for an ABI call.
+ static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
+ masm.reserveStack(sizeOfRetAddrAndFP);
+ unsigned nativeFramePushed = masm.framePushed();
+ AssertStackAlignment(masm, ABIStackAlignment);
+
+#ifdef DEBUG
+ {
+ Label ok;
+ masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+ }
+#endif
+
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+
+ Label oolConvert;
+ const ValTypeVector& results = funcType.results();
+ if (results.length() == 0) {
+ GenPrintf(DebugChannel::Import, masm, "void");
+ } else {
+ MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
+ switch (results[0].kind()) {
+ case ValType::I32:
+ // No widening is required, as the return value does not come to us in
+ // ReturnReg.
+ masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
+ &oolConvert);
+ GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
+ break;
+ case ValType::I64:
+ // No fastpath for now, go immediately to ool case
+ masm.jump(&oolConvert);
+ break;
+ case ValType::V128:
+ // Unreachable as callImport should not call the stub.
+ masm.breakpoint();
+ break;
+ case ValType::F32:
+ masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg,
+ &oolConvert);
+ GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
+ break;
+ case ValType::F64:
+ masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg,
+ &oolConvert);
+ GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
+ break;
+ case ValType::Ref:
+ // Guarded by temporarilyUnsupportedReftypeForExit()
+ MOZ_RELEASE_ASSERT(results[0].refType().isExtern());
+ BoxValueIntoAnyref(masm, JSReturnOperand, ReturnReg, &oolConvert);
+ GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
+ break;
+ }
+ }
+
+ GenPrintf(DebugChannel::Import, masm, "\n");
+
+ Label done;
+ masm.bind(&done);
+
+ GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);
+
+ {
+ // Call the arguments rectifier.
+ masm.bind(&rectify);
+ masm.loadPtr(Address(InstanceReg, Instance::offsetOfJSJitArgsRectifier()),
+ callee);
+ masm.jump(&rejoinBeforeCall);
+ }
+
+ if (oolConvert.used()) {
+ masm.bind(&oolConvert);
+ masm.setFramePushed(nativeFramePushed);
+
+ // Coercion calls use the following stack layout (sp grows to the left):
+ // | args | padding | Value argv[1] | padding | exit Frame |
+ MIRTypeVector coerceArgTypes;
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
+ unsigned offsetToCoerceArgv =
+ AlignBytes(StackArgBytesForNativeABI(coerceArgTypes), sizeof(Value));
+ MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
+ AssertStackAlignment(masm, ABIStackAlignment);
+
+ // Store return value into argv[0].
+ masm.storeValue(JSReturnOperand,
+ Address(masm.getStackPointer(), offsetToCoerceArgv));
+
+ // From this point, it's safe to reuse the scratch register (which
+ // might be part of the JSReturnOperand).
+
+ // The JIT might have clobbered exitFP at this point. Since there's
+ // going to be a CoerceInPlace call, pretend we're still doing the JIT
+ // call by restoring our tagged exitFP.
+ SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch);
+
+ // argument 0: argv
+ ABIArgMIRTypeIter i(coerceArgTypes);
+ Address argv(masm.getStackPointer(), offsetToCoerceArgv);
+ if (i->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, i->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, scratch);
+ masm.storePtr(scratch,
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+ MOZ_ASSERT(i.done());
+
+ // Call coercion function. Note that right after the call, the value of
+ // FP is correct because FP is non-volatile in the native ABI.
+ AssertStackAlignment(masm, ABIStackAlignment);
+ const ValTypeVector& results = funcType.results();
+ if (results.length() > 0) {
+ // NOTE: once there can be more than one result, and some of the results must
+ // be boxed (as AnyRef results must be), any pointer or already-boxed results
+ // must be rooted while the remaining results are boxed.
+ MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
+ switch (results[0].kind()) {
+ case ValType::I32:
+ masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
+ ReturnReg);
+ // No widening is required, as we generate a known-good value in a
+ // safe way here.
+ break;
+ case ValType::I64: {
+ masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ Address argv(masm.getStackPointer(), offsetToCoerceArgv);
+ masm.unboxBigInt(argv, scratch);
+ masm.loadBigInt64(scratch, ReturnReg64);
+ break;
+ }
+ case ValType::F64:
+ case ValType::F32:
+ masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.unboxDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
+ ReturnDoubleReg);
+ if (results[0].kind() == ValType::F32) {
+ masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
+ }
+ break;
+ case ValType::Ref:
+ // Guarded by temporarilyUnsupportedReftypeForExit()
+ MOZ_RELEASE_ASSERT(results[0].refType().isExtern());
+ masm.call(SymbolicAddress::BoxValue_Anyref);
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ break;
+ default:
+ MOZ_CRASH("Unsupported convert type");
+ }
+ }
+
+ // Maintain the invariant that exitFP is either unset or not set to a
+ // wasm tagged exitFP, per the jit exit contract.
+ ClearExitFP(masm, scratch);
+
+ masm.jump(&done);
+ masm.setFramePushed(0);
+ }
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ return FinishOffsets(masm, offsets);
+}
+
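+ // Adapter exposing the argument types packed into an ABIFunctionType as an
+ // indexable sequence of MIRTypes: the lowest field (the return type) is
+ // stripped off in the constructor, and each remaining field is one argument
+ // type.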
+struct ABIFunctionArgs {
+ ABIFunctionType abiType;
+ size_t len;
+
+ explicit ABIFunctionArgs(ABIFunctionType sig)
+ : abiType(ABIFunctionType(sig >> ArgType_Shift)) {
+ len = 0;
+ uint64_t i = uint64_t(abiType);
+ while (i) {
+ i = i >> ArgType_Shift;
+ len++;
+ }
+ }
+
+ size_t length() const { return len; }
+
+ MIRType operator[](size_t i) const {
+ MOZ_ASSERT(i < len);
+ uint64_t abi = uint64_t(abiType);
+ while (i--) {
+ abi = abi >> ArgType_Shift;
+ }
+ return ToMIRType(ABIArgType(abi & ArgType_Mask));
+ }
+};
+
+bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
+ ExitReason exitReason, void* funcPtr,
+ CallableOffsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+
+ ABIFunctionArgs args(abiType);
+ uint32_t framePushed =
+ StackDecrementForCall(ABIStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ StackArgBytesForNativeABI(args));
+
+ GenerateExitPrologue(masm, framePushed, exitReason, offsets);
+
+ // Copy out and convert caller arguments, if needed.
+
+ // This is FrameWithInstances::sizeOf() - ShadowStackSpace because the latter
+ // is accounted for by the ABIArgIter.
+ unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithInstances);
+ Register scratch = ABINonArgReturnReg0;
+ for (ABIArgIter i(args); !i.done(); i++) {
+ if (i->argInRegister()) {
+#ifdef JS_CODEGEN_ARM
+ // Non hard-fp passes the args values in GPRs.
+ if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
+ FloatRegister input = i->fpu();
+ if (i.mirType() == MIRType::Float32) {
+ masm.ma_vxfer(input, Register::FromCode(input.id()));
+ } else if (i.mirType() == MIRType::Double) {
+ uint32_t regId = input.singleOverlay().id();
+ masm.ma_vxfer(input, Register::FromCode(regId),
+ Register::FromCode(regId + 1));
+ }
+ }
+#endif
+ continue;
+ }
+
+ Address src(FramePointer,
+ offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
+ Address dst(masm.getStackPointer(), i->offsetFromArgBase());
+ StackCopy(masm, i.mirType(), scratch, src, dst);
+ }
+
+ AssertStackAlignment(masm, ABIStackAlignment);
+ MoveSPForJitABI(masm);
+ masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));
+
+#if defined(JS_CODEGEN_X64)
+ // No widening is required, as the caller will widen.
+#elif defined(JS_CODEGEN_X86)
+ // x86 passes the return value on the x87 FP stack.
+ Operand op(esp, 0);
+ MIRType retType = ToMIRType(ABIArgType(
+ std::underlying_type_t<ABIFunctionType>(abiType) & ArgType_Mask));
+ if (retType == MIRType::Float32) {
+ masm.fstp32(op);
+ masm.loadFloat32(op, ReturnFloat32Reg);
+ } else if (retType == MIRType::Double) {
+ masm.fstp(op);
+ masm.loadDouble(op, ReturnDoubleReg);
+ }
+#elif defined(JS_CODEGEN_ARM)
+ // Non hard-fp passes the return values in GPRs.
+ MIRType retType = ToMIRType(ABIArgType(
+ std::underlying_type_t<ABIFunctionType>(abiType) & ArgType_Mask));
+ if (!UseHardFpABI() && IsFloatingPointType(retType)) {
+ masm.ma_vxfer(r0, r1, d0);
+ }
+#endif
+
+ GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
+ return FinishOffsets(masm, offsets);
+}
+
+#if defined(JS_CODEGEN_ARM)
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((Registers::SetType(1) << Registers::sp) |
+ (Registers::SetType(1) << Registers::pc))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "high lanes of SIMD registers need to be saved too."
+# endif
+#elif defined(JS_CODEGEN_MIPS64)
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((Registers::SetType(1) << Registers::k0) |
+ (Registers::SetType(1) << Registers::k1) |
+ (Registers::SetType(1) << Registers::sp) |
+ (Registers::SetType(1) << Registers::zero))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "high lanes of SIMD registers need to be saved too."
+# endif
+#elif defined(JS_CODEGEN_LOONG64)
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((uint32_t(1) << Registers::tp) |
+ (uint32_t(1) << Registers::fp) |
+ (uint32_t(1) << Registers::sp) |
+ (uint32_t(1) << Registers::zero))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "high lanes of SIMD registers need to be saved too."
+# endif
+#elif defined(JS_CODEGEN_RISCV64)
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((uint32_t(1) << Registers::tp) |
+ (uint32_t(1) << Registers::fp) |
+ (uint32_t(1) << Registers::sp) |
+ (uint32_t(1) << Registers::zero))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "high lanes of SIMD registers need to be saved too."
+# endif
+#elif defined(JS_CODEGEN_ARM64)
+// We assume that traps do not happen while lr is live. This both ensures that
+// the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
+// and gives us a register to clobber in the return path.
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((Registers::SetType(1) << RealStackPointer.code()) |
+ (Registers::SetType(1) << Registers::lr))),
+# ifdef ENABLE_WASM_SIMD
+ FloatRegisterSet(FloatRegisters::AllSimd128Mask));
+# else
+ // If SIMD is not enabled, it's pointless to save/restore the upper 64
+ // bits of each vector register.
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# endif
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+// It's correct to use FloatRegisters::AllMask even when SIMD is not enabled;
+// PushRegsInMask strips out the high lanes of the XMM registers in this case,
+// while the singles will be stripped as they are aliased by the larger doubles.
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~(Registers::SetType(1) << Registers::StackPointer)),
+ FloatRegisterSet(FloatRegisters::AllMask));
+#else
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "no SIMD support"
+# endif
+#endif
+
+// Generate a RegisterOffsets which describes the locations of the GPRs as saved
+// by GenerateTrapExit. FP registers are ignored. Note that the values
+// stored in the RegisterOffsets are offsets in words downwards from the top of
+// the save area. That is, a higher value implies a lower address.
+void wasm::GenerateTrapExitRegisterOffsets(RegisterOffsets* offsets,
+ size_t* numWords) {
+ // This is the number of words pushed by the initial WasmPush().
+ *numWords = WasmPushSize / sizeof(void*);
+ MOZ_ASSERT(*numWords == TrapExitDummyValueOffsetFromTop + 1);
+
+ // And these correspond to the PushRegsInMask() that immediately follows.
+ for (GeneralRegisterBackwardIterator iter(RegsToPreserve.gprs()); iter.more();
+ ++iter) {
+ offsets->setOffset(*iter, *numWords);
+ (*numWords)++;
+ }
+}
+
+// Generate a stub which calls WasmReportTrap() and can be executed by having
+// the signal handler redirect PC from any trapping instruction.
+static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
+ Offsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+
+ masm.setFramePushed(0);
+
+ offsets->begin = masm.currentOffset();
+
+ // Traps can only happen at well-defined program points. However, since
+ // traps may resume and the optimal assumption for the surrounding code is
+ // that registers are not clobbered, we need to preserve all registers in
+ // the trap exit. One simplifying assumption is that flags may be clobbered.
+ // Push a dummy word to use as return address below.
+ WasmPush(masm, ImmWord(TrapExitDummyValue));
+ unsigned framePushedBeforePreserve = masm.framePushed();
+ masm.PushRegsInMask(RegsToPreserve);
+ unsigned offsetOfReturnWord = masm.framePushed() - framePushedBeforePreserve;
+
+ // We know that StackPointer is word-aligned, but not necessarily
+ // stack-aligned, so we need to align it dynamically.
+ Register preAlignStackPointer = ABINonVolatileReg;
+ masm.moveStackPtrTo(preAlignStackPointer);
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ if (ShadowStackSpace) {
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+ }
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::HandleTrap);
+
+ // WasmHandleTrap returns null if control should transfer to the throw stub.
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+ // Otherwise, the return value is the TrapData::resumePC we must jump to.
+ // We must restore register state before jumping, which will clobber
+ // ReturnReg, so store ReturnReg in the above-reserved stack slot which we
+ // use to jump to via ret.
+ masm.moveToStackPtr(preAlignStackPointer);
+ masm.storePtr(ReturnReg, Address(masm.getStackPointer(), offsetOfReturnWord));
+ masm.PopRegsInMask(RegsToPreserve);
+#ifdef JS_CODEGEN_ARM64
+ WasmPop(masm, lr);
+ masm.abiret();
+#else
+ masm.ret();
+#endif
+
+ return FinishOffsets(masm, offsets);
+}
+
+static void ClobberWasmRegsForLongJmp(MacroAssembler& masm, Register jumpReg) {
+ // Get the set of all registers that are allocatable in wasm functions
+ AllocatableGeneralRegisterSet gprs(GeneralRegisterSet::All());
+ RegisterAllocator::takeWasmRegisters(gprs);
+ // Remove the instance register from this set as landing pads require it to be
+ // valid
+ gprs.take(InstanceReg);
+ // Remove a specified register that will be used for the longjmp
+ gprs.take(jumpReg);
+ // Set all of these registers to zero
+ for (GeneralRegisterIterator iter(gprs.asLiveSet()); iter.more(); ++iter) {
+ Register reg = *iter;
+ masm.xorPtr(reg, reg);
+ }
+
+ // Get the set of all floating point registers that are allocatable in wasm
+ // functions
+ AllocatableFloatRegisterSet fprs(FloatRegisterSet::All());
+  // Set all of these registers to NaN. We attempt to use a signalling NaN,
+  // but the bit format for signalling NaNs is implementation-defined, so this
+  // is just best effort.
+ Maybe<FloatRegister> regNaN;
+ for (FloatRegisterIterator iter(fprs.asLiveSet()); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ if (!reg.isDouble()) {
+ continue;
+ }
+ if (regNaN) {
+ masm.moveDouble(*regNaN, reg);
+ continue;
+ }
+ masm.loadConstantDouble(std::numeric_limits<double>::signaling_NaN(), reg);
+ regNaN = Some(reg);
+ }
+}
+
+// Generate a stub that restores the stack pointer to what it was on entry to
+// the wasm activation, sets the return register to 'false' and then executes a
+// return which will return from this wasm activation to the caller. This stub
+// should only be called after the caller has reported an error.
+static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
+ Offsets* offsets) {
+ Register scratch1 = ABINonArgReturnReg0;
+ Register scratch2 = ABINonArgReturnReg1;
+
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+ masm.setFramePushed(0);
+
+ masm.bind(throwLabel);
+
+ offsets->begin = masm.currentOffset();
+
+ // Conservatively, the stack pointer can be unaligned and we must align it
+ // dynamically.
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ if (ShadowStackSpace) {
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+ }
+
+ // Allocate space for exception or regular resume information.
+ masm.reserveStack(sizeof(jit::ResumeFromException));
+ masm.moveStackPtrTo(scratch1);
+
+ MIRTypeVector handleThrowTypes;
+ MOZ_ALWAYS_TRUE(handleThrowTypes.append(MIRType::Pointer));
+
+ unsigned frameSize =
+ StackDecrementForCall(ABIStackAlignment, masm.framePushed(),
+ StackArgBytesForNativeABI(handleThrowTypes));
+ masm.reserveStack(frameSize);
+ masm.assertStackAlignment(ABIStackAlignment);
+
+ ABIArgMIRTypeIter i(handleThrowTypes);
+ if (i->kind() == ABIArg::GPR) {
+ masm.movePtr(scratch1, i->gpr());
+ } else {
+ masm.storePtr(scratch1,
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+ MOZ_ASSERT(i.done());
+
+  // WasmHandleThrow unwinds JitActivation::wasmExitFP() and returns the
+  // address of the return address on the stack that this stub should return
+  // to. Set the FramePointer to a magic value to indicate a return by throw.
+ //
+ // If there is a Wasm catch handler present, it will instead return the
+ // address of the handler to jump to and the FP/SP values to restore.
+ masm.call(SymbolicAddress::HandleThrow);
+
+ Label resumeCatch, leaveWasm;
+
+ masm.load32(Address(ReturnReg, offsetof(jit::ResumeFromException, kind)),
+ scratch1);
+
+ masm.branch32(Assembler::Equal, scratch1,
+ Imm32(jit::ExceptionResumeKind::WasmCatch), &resumeCatch);
+ masm.branch32(Assembler::Equal, scratch1,
+ Imm32(jit::ExceptionResumeKind::Wasm), &leaveWasm);
+
+ masm.breakpoint();
+
+ // The case where a Wasm catch handler was found while unwinding the stack.
+ masm.bind(&resumeCatch);
+ masm.loadPtr(Address(ReturnReg, ResumeFromException::offsetOfInstance()),
+ InstanceReg);
+ masm.loadWasmPinnedRegsFromInstance();
+ masm.switchToWasmInstanceRealm(scratch1, scratch2);
+ masm.loadPtr(Address(ReturnReg, ResumeFromException::offsetOfTarget()),
+ scratch1);
+ masm.loadPtr(Address(ReturnReg, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ masm.loadStackPtr(
+ Address(ReturnReg, ResumeFromException::offsetOfStackPointer()));
+ MoveSPForJitABI(masm);
+ ClobberWasmRegsForLongJmp(masm, scratch1);
+ masm.jump(scratch1);
+
+ // No catch handler was found, so we will just return out.
+ masm.bind(&leaveWasm);
+ masm.loadPtr(Address(ReturnReg, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ masm.loadPtr(Address(ReturnReg, ResumeFromException::offsetOfInstance()),
+ InstanceReg);
+ masm.loadPtr(Address(ReturnReg, ResumeFromException::offsetOfStackPointer()),
+ scratch1);
+ masm.moveToStackPtr(scratch1);
+#ifdef JS_CODEGEN_ARM64
+ masm.loadPtr(Address(scratch1, 0), lr);
+ masm.addToStackPtr(Imm32(8));
+ masm.abiret();
+#else
+ masm.ret();
+#endif
+
+ return FinishOffsets(masm, offsets);
+}
+
+static const LiveRegisterSet AllAllocatableRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllocatableMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+// Generate a stub that handles toggleable enter/leave frame traps or
+// breakpoints. The trap records the frame pointer (via GenerateExitPrologue)
+// and saves most registers so as not to affect the code generated by
+// WasmBaselineCompile.
+static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel,
+ CallableOffsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+ masm.setFramePushed(0);
+
+ GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);
+
+ // Save all registers used between baseline compiler operations.
+ masm.PushRegsInMask(AllAllocatableRegs);
+
+ uint32_t framePushed = masm.framePushed();
+
+  // This stub might be called with an unaligned stack, so align it and save
+  // the old stack pointer at the top.
+#ifdef JS_CODEGEN_ARM64
+ // On ARM64 however the stack is always aligned.
+ static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
+#else
+ Register scratch = ABINonArgReturnReg0;
+ masm.moveStackPtrTo(scratch);
+ masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
+#endif
+
+ if (ShadowStackSpace) {
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+ }
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::HandleDebugTrap);
+
+ masm.branchIfFalseBool(ReturnReg, throwLabel);
+
+ if (ShadowStackSpace) {
+ masm.addToStackPtr(Imm32(ShadowStackSpace));
+ }
+#ifndef JS_CODEGEN_ARM64
+ masm.Pop(scratch);
+ masm.moveToStackPtr(scratch);
+#endif
+
+ masm.setFramePushed(framePushed);
+ masm.PopRegsInMask(AllAllocatableRegs);
+
+ GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);
+
+ return FinishOffsets(masm, offsets);
+}
+
+bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
+ const FuncExport& fe, const FuncType& funcType,
+ const Maybe<ImmPtr>& callee, bool isAsmJS,
+ CodeRangeVector* codeRanges) {
+ MOZ_ASSERT(!callee == fe.hasEagerStubs());
+ MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());
+
+ Offsets offsets;
+ if (!GenerateInterpEntry(masm, fe, funcType, callee, &offsets)) {
+ return false;
+ }
+ if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(),
+ offsets)) {
+ return false;
+ }
+
+ if (isAsmJS || !funcType.canHaveJitEntry()) {
+ return true;
+ }
+
+ CallableOffsets jitOffsets;
+ if (!GenerateJitEntry(masm, funcExportIndex, fe, funcType, callee,
+ &jitOffsets)) {
+ return false;
+ }
+ if (!codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(),
+ jitOffsets)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool wasm::GenerateProvisionalLazyJitEntryStub(MacroAssembler& masm,
+ Offsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+ offsets->begin = masm.currentOffset();
+
+#ifdef JS_CODEGEN_ARM64
+ // Unaligned ABI calls require SP+PSP, but our mode here is SP-only
+ masm.SetStackPointer64(PseudoStackPointer64);
+ masm.Mov(PseudoStackPointer64, sp);
+#endif
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ Register temp = regs.takeAny();
+
+ using Fn = void* (*)();
+ masm.setupUnalignedABICall(temp);
+ masm.callWithABI<Fn, GetContextSensitiveInterpreterStub>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.popReturnAddress();
+#endif
+
+ masm.jump(ReturnReg);
+
+#ifdef JS_CODEGEN_ARM64
+ // Undo the SP+PSP mode
+ masm.SetStackPointer64(sp);
+#endif
+
+ return FinishOffsets(masm, offsets);
+}
+
+bool wasm::GenerateStubs(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ const FuncExportVector& exports, CompiledCode* code) {
+ LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
+ TempAllocator alloc(&lifo);
+ JitContext jcx;
+ WasmMacroAssembler masm(alloc, env);
+ AutoCreatedBy acb(masm, "wasm::GenerateStubs");
+
+ // Swap in already-allocated empty vectors to avoid malloc/free.
+ if (!code->swap(masm)) {
+ return false;
+ }
+
+ Label throwLabel;
+
+ JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");
+
+ for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
+ const FuncImport& fi = imports[funcIndex];
+ const FuncType& funcType = *env.funcs[funcIndex].type;
+
+ CallableOffsets interpOffsets;
+ if (!GenerateImportInterpExit(masm, fi, funcType, funcIndex, &throwLabel,
+ &interpOffsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
+ interpOffsets)) {
+ return false;
+ }
+
+ // Skip if the function does not have a signature that allows for a JIT
+ // exit.
+ if (!funcType.canHaveJitExit()) {
+ continue;
+ }
+
+ CallableOffsets jitOffsets;
+ if (!GenerateImportJitExit(masm, fi, funcType, funcIndex, &throwLabel,
+ &jitOffsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::ImportJitExit, funcIndex,
+ jitOffsets)) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");
+
+ Maybe<ImmPtr> noAbsolute;
+ for (size_t i = 0; i < exports.length(); i++) {
+ const FuncExport& fe = exports[i];
+ const FuncType& funcType = (*env.types)[fe.typeIndex()].funcType();
+ if (!fe.hasEagerStubs()) {
+ continue;
+ }
+ if (!GenerateEntryStubs(masm, i, fe, funcType, noAbsolute, env.isAsmJS(),
+ &code->codeRanges)) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs");
+
+ Offsets offsets;
+
+ if (!GenerateTrapExit(masm, &throwLabel, &offsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) {
+ return false;
+ }
+
+ CallableOffsets callableOffsets;
+ if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets)) {
+ return false;
+ }
+
+ if (!GenerateThrowStub(masm, &throwLabel, &offsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) {
+ return false;
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
diff --git a/js/src/wasm/WasmStubs.h b/js/src/wasm/WasmStubs.h
new file mode 100644
index 0000000000..6a6622fdf2
--- /dev/null
+++ b/js/src/wasm/WasmStubs.h
@@ -0,0 +1,370 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_stubs_h
+#define wasm_stubs_h
+
+#include "wasm/WasmFrameIter.h" // js::wasm::ExitReason
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmOpIter.h"
+
+namespace js {
+namespace wasm {
+
+using jit::FloatRegister;
+using jit::Register;
+using jit::Register64;
+
+// ValType and location for a single result: either in a register or on the
+// stack.
+
+class ABIResult {
+ ValType type_;
+ enum class Location { Gpr, Gpr64, Fpr, Stack } loc_;
+ union {
+ Register gpr_;
+ Register64 gpr64_;
+ FloatRegister fpr_;
+ uint32_t stackOffset_;
+ };
+
+ void validate() {
+#ifdef DEBUG
+ if (onStack()) {
+ return;
+ }
+ MOZ_ASSERT(inRegister());
+ switch (type_.kind()) {
+ case ValType::I32:
+ MOZ_ASSERT(loc_ == Location::Gpr);
+ break;
+ case ValType::I64:
+ MOZ_ASSERT(loc_ == Location::Gpr64);
+ break;
+ case ValType::F32:
+ case ValType::F64:
+ MOZ_ASSERT(loc_ == Location::Fpr);
+ break;
+ case ValType::Ref:
+ MOZ_ASSERT(loc_ == Location::Gpr);
+ break;
+ case ValType::V128:
+ MOZ_ASSERT(loc_ == Location::Fpr);
+ break;
+ }
+#endif
+ }
+
+ friend class ABIResultIter;
+ ABIResult() {}
+
+ public:
+ // Sizes of items in the stack area.
+ //
+ // The size values come from the implementations of Push() in
+ // MacroAssembler-x86-shared.cpp and MacroAssembler-arm-shared.cpp, and from
+ // VFPRegister::size() in Architecture-arm.h.
+ //
+  // On ARM, unlike on x86, we push a single (4-byte) slot for a float.
+
+ static constexpr size_t StackSizeOfPtr = sizeof(intptr_t);
+ static constexpr size_t StackSizeOfInt32 = StackSizeOfPtr;
+ static constexpr size_t StackSizeOfInt64 = sizeof(int64_t);
+#if defined(JS_CODEGEN_ARM)
+ static constexpr size_t StackSizeOfFloat = sizeof(float);
+#else
+ static constexpr size_t StackSizeOfFloat = sizeof(double);
+#endif
+ static constexpr size_t StackSizeOfDouble = sizeof(double);
+#ifdef ENABLE_WASM_SIMD
+ static constexpr size_t StackSizeOfV128 = sizeof(V128);
+#endif
+
+ ABIResult(ValType type, Register gpr)
+ : type_(type), loc_(Location::Gpr), gpr_(gpr) {
+ validate();
+ }
+ ABIResult(ValType type, Register64 gpr64)
+ : type_(type), loc_(Location::Gpr64), gpr64_(gpr64) {
+ validate();
+ }
+ ABIResult(ValType type, FloatRegister fpr)
+ : type_(type), loc_(Location::Fpr), fpr_(fpr) {
+ validate();
+ }
+ ABIResult(ValType type, uint32_t stackOffset)
+ : type_(type), loc_(Location::Stack), stackOffset_(stackOffset) {
+ validate();
+ }
+
+ ValType type() const { return type_; }
+ bool onStack() const { return loc_ == Location::Stack; }
+ bool inRegister() const { return !onStack(); }
+ Register gpr() const {
+ MOZ_ASSERT(loc_ == Location::Gpr);
+ return gpr_;
+ }
+ Register64 gpr64() const {
+ MOZ_ASSERT(loc_ == Location::Gpr64);
+ return gpr64_;
+ }
+ FloatRegister fpr() const {
+ MOZ_ASSERT(loc_ == Location::Fpr);
+ return fpr_;
+ }
+ // Offset from SP.
+ uint32_t stackOffset() const {
+ MOZ_ASSERT(loc_ == Location::Stack);
+ return stackOffset_;
+ }
+ uint32_t size() const;
+};
+
+// Just as WebAssembly functions can take multiple arguments, they can also
+// return multiple results. As with a call, a limited number of results will be
+// located in registers, and the rest will be stored in a stack area. The
+// |ABIResultIter| computes result locations, given a |ResultType|.
+//
+// Recall that a |ResultType| represents a sequence of value types t1..tN,
+// indexed from 1 to N. In principle it doesn't matter how we decide which
+// results get to be in registers and which go to the stack. To better
+// harmonize with WebAssembly's abstract stack machine, whose properties are
+// taken advantage of by the baseline compiler, our strategy is to start
+// allocating result locations in "reverse" order: from result N down to 1.
+//
+// If a result with index I is in a register, then all results with index J > I
+// are also in registers. If a result I is on the stack, then all results with
+// index K < I are also on the stack, farther away from the stack pointer than
+// result I.
+//
+// Currently only a single result is ever stored in a register, though this may
+// change in the future on register-rich platforms.
+//
+// NB: The baseline compiler also uses this ABI for locations of block
+// parameters and return values, within individual WebAssembly functions.
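+//
+// As an illustrative example (assuming, as noted above, that only a single
+// result is ever stored in a register): for a result type (f64, i32, i64),
+// result 3 (the i64) is assigned a register, result 2 (the i32) occupies the
+// stack slot closest to SP, and result 1 (the f64) sits farther from SP.
+//
+// A minimal usage sketch, using only the interface declared below:
+//
+//   ABIResultIter iter(resultType);
+//   for (; !iter.done(); iter.next()) {
+//     const ABIResult& r = iter.cur();
+//     if (r.inRegister()) {
+//       // value lives in r.gpr(), r.gpr64() or r.fpr(), depending on type
+//     } else {
+//       // value lives in the stack area at SP + r.stackOffset()
+//     }
+//   }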
+
+class ABIResultIter {
+ ResultType type_;
+ uint32_t count_;
+ uint32_t index_;
+ uint32_t nextStackOffset_;
+ enum { Next, Prev } direction_;
+ ABIResult cur_;
+
+ void settleRegister(ValType type);
+ void settleNext();
+ void settlePrev();
+
+ public:
+ explicit ABIResultIter(const ResultType& type)
+ : type_(type), count_(type.length()) {
+ reset();
+ }
+
+ void reset() {
+ index_ = nextStackOffset_ = 0;
+ direction_ = Next;
+ if (!done()) {
+ settleNext();
+ }
+ }
+ bool done() const { return index_ == count_; }
+ uint32_t index() const { return index_; }
+ uint32_t count() const { return count_; }
+ uint32_t remaining() const { return count_ - index_; }
+ void switchToNext() {
+ MOZ_ASSERT(direction_ == Prev);
+ if (!done() && cur().onStack()) {
+ nextStackOffset_ += cur().size();
+ }
+ index_ = count_ - index_;
+ direction_ = Next;
+ if (!done()) {
+ settleNext();
+ }
+ }
+ void switchToPrev() {
+ MOZ_ASSERT(direction_ == Next);
+ if (!done() && cur().onStack()) {
+ nextStackOffset_ -= cur().size();
+ }
+ index_ = count_ - index_;
+ direction_ = Prev;
+ if (!done()) settlePrev();
+ }
+ void next() {
+ MOZ_ASSERT(direction_ == Next);
+ MOZ_ASSERT(!done());
+ index_++;
+ if (!done()) {
+ settleNext();
+ }
+ }
+ void prev() {
+ MOZ_ASSERT(direction_ == Prev);
+ MOZ_ASSERT(!done());
+ index_++;
+ if (!done()) {
+ settlePrev();
+ }
+ }
+ const ABIResult& cur() const {
+ MOZ_ASSERT(!done());
+ return cur_;
+ }
+
+ uint32_t stackBytesConsumedSoFar() const { return nextStackOffset_; }
+
+ static inline bool HasStackResults(const ResultType& type) {
+ return type.length() > MaxRegisterResults;
+ }
+
+ static uint32_t MeasureStackBytes(const ResultType& type) {
+ if (!HasStackResults(type)) {
+ return 0;
+ }
+ ABIResultIter iter(type);
+ while (!iter.done()) {
+ iter.next();
+ }
+ return iter.stackBytesConsumedSoFar();
+ }
+};
+
+extern bool GenerateBuiltinThunk(jit::MacroAssembler& masm,
+ jit::ABIFunctionType abiType,
+ ExitReason exitReason, void* funcPtr,
+ CallableOffsets* offsets);
+
+extern bool GenerateImportFunctions(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ CompiledCode* code);
+
+extern bool GenerateStubs(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ const FuncExportVector& exports, CompiledCode* code);
+
+extern bool GenerateEntryStubs(jit::MacroAssembler& masm,
+ size_t funcExportIndex, const FuncExport& fe,
+ const FuncType& funcType,
+ const Maybe<jit::ImmPtr>& callee, bool isAsmJS,
+ CodeRangeVector* codeRanges);
+
+extern void GenerateTrapExitRegisterOffsets(jit::RegisterOffsets* offsets,
+ size_t* numWords);
+
+extern bool GenerateProvisionalLazyJitEntryStub(jit::MacroAssembler& masm,
+ Offsets* offsets);
+
+// A value that is written into the trap exit frame, which is useful for
+// cross-checking during garbage collection.
+static constexpr uintptr_t TrapExitDummyValue = 1337;
+
+// And its offset, in words, down from the highest-addressed word of the trap
+// exit frame. The value is written into the frame using WasmPush. In the
+// case where WasmPush allocates more than one word, the value will therefore
+// be written at the lowest-addressed word.
+#ifdef JS_CODEGEN_ARM64
+static constexpr size_t TrapExitDummyValueOffsetFromTop = 1;
+#else
+static constexpr size_t TrapExitDummyValueOffsetFromTop = 0;
+#endif
+
+// An argument that will end up on the stack according to the system ABI, to be
+// passed to GenerateDirectCallFromJit. Since the direct JIT call creates its
+// own frame, it is responsible for putting stack arguments at their expected
+// locations; the caller of GenerateDirectCallFromJit can therefore put them
+// anywhere.
+
+class JitCallStackArg {
+ public:
+ enum class Tag {
+ Imm32,
+ GPR,
+ FPU,
+ Address,
+ Undefined,
+ };
+
+ private:
+ Tag tag_;
+ union U {
+ int32_t imm32_;
+ jit::Register gpr_;
+ jit::FloatRegister fpu_;
+ jit::Address addr_;
+ U() {}
+ } arg;
+
+ public:
+ JitCallStackArg() : tag_(Tag::Undefined) {}
+ explicit JitCallStackArg(int32_t imm32) : tag_(Tag::Imm32) {
+ arg.imm32_ = imm32;
+ }
+ explicit JitCallStackArg(jit::Register gpr) : tag_(Tag::GPR) {
+ arg.gpr_ = gpr;
+ }
+ explicit JitCallStackArg(jit::FloatRegister fpu) : tag_(Tag::FPU) {
+ new (&arg) jit::FloatRegister(fpu);
+ }
+ explicit JitCallStackArg(const jit::Address& addr) : tag_(Tag::Address) {
+ new (&arg) jit::Address(addr);
+ }
+
+ Tag tag() const { return tag_; }
+ int32_t imm32() const {
+ MOZ_ASSERT(tag_ == Tag::Imm32);
+ return arg.imm32_;
+ }
+ jit::Register gpr() const {
+ MOZ_ASSERT(tag_ == Tag::GPR);
+ return arg.gpr_;
+ }
+ jit::FloatRegister fpu() const {
+ MOZ_ASSERT(tag_ == Tag::FPU);
+ return arg.fpu_;
+ }
+ const jit::Address& addr() const {
+ MOZ_ASSERT(tag_ == Tag::Address);
+ return arg.addr_;
+ }
+};
+
+using JitCallStackArgVector = Vector<JitCallStackArg, 4, SystemAllocPolicy>;
+
+// Generates an inline wasm call (during jit compilation) to a specific wasm
+// function (as specified by the given FuncExport).
+// This call doesn't go through a wasm entry, but rather creates its own
+// inlined exit frame.
+// Assumes:
+// - all the registers have been preserved by the caller,
+// - all arguments passed in registers have been set up at the expected
+// locations,
+// - all arguments passed on stack slot are alive as defined by a corresponding
+// JitCallStackArg.
+
+extern void GenerateDirectCallFromJit(jit::MacroAssembler& masm,
+ const FuncExport& fe,
+ const Instance& inst,
+ const JitCallStackArgVector& stackArgs,
+ jit::Register scratch,
+ uint32_t* callOffset);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_stubs_h
diff --git a/js/src/wasm/WasmTable.cpp b/js/src/wasm/WasmTable.cpp
new file mode 100644
index 0000000000..076a344e29
--- /dev/null
+++ b/js/src/wasm/WasmTable.cpp
@@ -0,0 +1,473 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTable.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/PodOperations.h"
+
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmJS.h"
+
+#include "gc/StableCellHasher-inl.h"
+#include "wasm/WasmInstance-inl.h"
+
+using namespace js;
+using namespace js::wasm;
+using mozilla::CheckedInt;
+using mozilla::PodZero;
+
+Table::Table(JSContext* cx, const TableDesc& desc,
+ Handle<WasmTableObject*> maybeObject, FuncRefVector&& functions)
+ : maybeObject_(maybeObject),
+ observers_(cx->zone()),
+ functions_(std::move(functions)),
+ elemType_(desc.elemType),
+ isAsmJS_(desc.isAsmJS),
+ length_(desc.initialLength),
+ maximum_(desc.maximumLength) {
+ MOZ_ASSERT(repr() == TableRepr::Func);
+}
+
+Table::Table(JSContext* cx, const TableDesc& desc,
+ Handle<WasmTableObject*> maybeObject, TableAnyRefVector&& objects)
+ : maybeObject_(maybeObject),
+ observers_(cx->zone()),
+ objects_(std::move(objects)),
+ elemType_(desc.elemType),
+ isAsmJS_(desc.isAsmJS),
+ length_(desc.initialLength),
+ maximum_(desc.maximumLength) {
+ MOZ_ASSERT(repr() == TableRepr::Ref);
+}
+
+/* static */
+SharedTable Table::create(JSContext* cx, const TableDesc& desc,
+ Handle<WasmTableObject*> maybeObject) {
+ // Tables are initialized with init_expr values at Instance::init or
+ // WasmTableObject::create.
+
+ switch (desc.elemType.tableRepr()) {
+ case TableRepr::Func: {
+ FuncRefVector functions;
+ if (!functions.resize(desc.initialLength)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ return SharedTable(
+ cx->new_<Table>(cx, desc, maybeObject, std::move(functions)));
+ }
+ case TableRepr::Ref: {
+ TableAnyRefVector objects;
+ if (!objects.resize(desc.initialLength)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ return SharedTable(
+ cx->new_<Table>(cx, desc, maybeObject, std::move(objects)));
+ }
+ }
+ MOZ_CRASH("switch is exhaustive");
+}
+
+void Table::tracePrivate(JSTracer* trc) {
+ // If this table has a WasmTableObject, then this method is only called by
+ // WasmTableObject's trace hook so maybeObject_ must already be marked.
+ // TraceEdge is called so that the pointer can be updated during a moving
+ // GC.
+ TraceNullableEdge(trc, &maybeObject_, "wasm table object");
+
+ switch (repr()) {
+ case TableRepr::Func: {
+ if (isAsmJS_) {
+#ifdef DEBUG
+ for (uint32_t i = 0; i < length_; i++) {
+ MOZ_ASSERT(!functions_[i].instance);
+ }
+#endif
+ break;
+ }
+
+ for (uint32_t i = 0; i < length_; i++) {
+ if (functions_[i].instance) {
+ wasm::TraceInstanceEdge(trc, functions_[i].instance,
+ "wasm table instance");
+ } else {
+ MOZ_ASSERT(!functions_[i].code);
+ }
+ }
+ break;
+ }
+ case TableRepr::Ref: {
+ objects_.trace(trc);
+ break;
+ }
+ }
+}
+
+void Table::trace(JSTracer* trc) {
+ // The trace hook of WasmTableObject will call Table::tracePrivate at
+ // which point we can mark the rest of the children. If there is no
+ // WasmTableObject, call Table::tracePrivate directly. Redirecting through
+ // the WasmTableObject avoids marking the entire Table on each incoming
+ // edge (once per dependent Instance).
+ if (maybeObject_) {
+ TraceEdge(trc, &maybeObject_, "wasm table object");
+ } else {
+ tracePrivate(trc);
+ }
+}
+
+uint8_t* Table::instanceElements() const {
+ if (repr() == TableRepr::Ref) {
+ return (uint8_t*)objects_.begin();
+ }
+ return (uint8_t*)functions_.begin();
+}
+
+const FunctionTableElem& Table::getFuncRef(uint32_t index) const {
+ MOZ_ASSERT(isFunction());
+ return functions_[index];
+}
+
+bool Table::getFuncRef(JSContext* cx, uint32_t index,
+ MutableHandleFunction fun) const {
+ MOZ_ASSERT(isFunction());
+
+ const FunctionTableElem& elem = getFuncRef(index);
+ if (!elem.code) {
+ fun.set(nullptr);
+ return true;
+ }
+
+ Instance& instance = *elem.instance;
+ const CodeRange& codeRange = *instance.code().lookupFuncRange(elem.code);
+
+ Rooted<WasmInstanceObject*> instanceObj(cx, instance.object());
+ return instanceObj->getExportedFunction(cx, instanceObj,
+ codeRange.funcIndex(), fun);
+}
+
+void Table::setFuncRef(uint32_t index, void* code, Instance* instance) {
+ MOZ_ASSERT(isFunction());
+
+ FunctionTableElem& elem = functions_[index];
+ if (elem.instance) {
+ gc::PreWriteBarrier(elem.instance->objectUnbarriered());
+ }
+
+ if (!isAsmJS_) {
+ elem.code = code;
+ elem.instance = instance;
+ MOZ_ASSERT(elem.instance->objectUnbarriered()->isTenured(),
+ "no postWriteBarrier (Table::set)");
+ } else {
+ elem.code = code;
+ elem.instance = nullptr;
+ }
+}
+
+void Table::fillFuncRef(uint32_t index, uint32_t fillCount, FuncRef ref,
+ JSContext* cx) {
+ MOZ_ASSERT(isFunction());
+
+ if (ref.isNull()) {
+ for (uint32_t i = index, end = index + fillCount; i != end; i++) {
+ setNull(i);
+ }
+ return;
+ }
+
+ RootedFunction fun(cx, ref.asJSFunction());
+ MOZ_RELEASE_ASSERT(IsWasmExportedFunction(fun));
+
+ Rooted<WasmInstanceObject*> instanceObj(
+ cx, ExportedFunctionToInstanceObject(fun));
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(fun);
+
+#ifdef DEBUG
+ RootedFunction f(cx);
+ MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcIndex, &f));
+ MOZ_ASSERT(fun == f);
+#endif
+
+ Instance& instance = instanceObj->instance();
+ Tier tier = instance.code().bestTier();
+ const MetadataTier& metadata = instance.metadata(tier);
+ const CodeRange& codeRange =
+ metadata.codeRange(metadata.lookupFuncExport(funcIndex));
+ void* code = instance.codeBase(tier) + codeRange.funcCheckedCallEntry();
+ for (uint32_t i = index, end = index + fillCount; i != end; i++) {
+ setFuncRef(i, code, &instance);
+ }
+}
+
+AnyRef Table::getAnyRef(uint32_t index) const {
+ MOZ_ASSERT(!isFunction());
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write barrier
+ // is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ return AnyRef::fromJSObject(objects_[index]);
+}
+
+void Table::fillAnyRef(uint32_t index, uint32_t fillCount, AnyRef ref) {
+ MOZ_ASSERT(!isFunction());
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write barrier
+ // is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ for (uint32_t i = index, end = index + fillCount; i != end; i++) {
+ objects_[i] = ref.asJSObject();
+ }
+}
+
+bool Table::getValue(JSContext* cx, uint32_t index,
+ MutableHandleValue result) const {
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!isAsmJS());
+ RootedFunction fun(cx);
+ if (!getFuncRef(cx, index, &fun)) {
+ return false;
+ }
+ result.setObjectOrNull(fun);
+ return true;
+ }
+ case TableRepr::Ref: {
+ if (!ValType(elemType_).isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+ return ToJSValue(cx, &objects_[index], ValType(elemType_), result);
+ }
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void Table::setNull(uint32_t index) {
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!isAsmJS_);
+ FunctionTableElem& elem = functions_[index];
+ if (elem.instance) {
+ gc::PreWriteBarrier(elem.instance->objectUnbarriered());
+ }
+
+ elem.code = nullptr;
+ elem.instance = nullptr;
+ break;
+ }
+ case TableRepr::Ref: {
+ fillAnyRef(index, 1, AnyRef::null());
+ break;
+ }
+ }
+}
+
+bool Table::copy(JSContext* cx, const Table& srcTable, uint32_t dstIndex,
+ uint32_t srcIndex) {
+ MOZ_RELEASE_ASSERT(!srcTable.isAsmJS_);
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(elemType().isFuncHierarchy() &&
+ srcTable.elemType().isFuncHierarchy());
+ FunctionTableElem& dst = functions_[dstIndex];
+ if (dst.instance) {
+ gc::PreWriteBarrier(dst.instance->objectUnbarriered());
+ }
+
+ const FunctionTableElem& src = srcTable.functions_[srcIndex];
+ dst.code = src.code;
+ dst.instance = src.instance;
+
+ if (dst.instance) {
+ MOZ_ASSERT(dst.code);
+ MOZ_ASSERT(dst.instance->objectUnbarriered()->isTenured(),
+ "no postWriteBarrier (Table::copy)");
+ } else {
+ MOZ_ASSERT(!dst.code);
+ }
+ break;
+ }
+ case TableRepr::Ref: {
+ switch (srcTable.repr()) {
+ case TableRepr::Ref: {
+ fillAnyRef(dstIndex, 1, srcTable.getAnyRef(srcIndex));
+ break;
+ }
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(srcTable.elemType().isFuncHierarchy());
+ // Upcast.
+ RootedFunction fun(cx);
+ if (!srcTable.getFuncRef(cx, srcIndex, &fun)) {
+ // OOM, so just pass it on.
+ return false;
+ }
+ fillAnyRef(dstIndex, 1, AnyRef::fromJSObject(fun));
+ break;
+ }
+ }
+ break;
+ }
+ }
+ return true;
+}
+
+uint32_t Table::grow(uint32_t delta) {
+ // This isn't just an optimization: movingGrowable() assumes that
+ // onMovingGrowTable does not fire when length == maximum.
+ if (!delta) {
+ return length_;
+ }
+
+ uint32_t oldLength = length_;
+
+ CheckedInt<uint32_t> newLength = oldLength;
+ newLength += delta;
+ if (!newLength.isValid() || newLength.value() > MaxTableLength) {
+ return -1;
+ }
+
+ if (maximum_ && newLength.value() > maximum_.value()) {
+ return -1;
+ }
+
+ MOZ_ASSERT(movingGrowable());
+
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!isAsmJS_);
+ if (!functions_.resize(newLength.value())) {
+ return -1;
+ }
+ break;
+ }
+ case TableRepr::Ref: {
+ if (!objects_.resize(newLength.value())) {
+ return -1;
+ }
+ break;
+ }
+ }
+
+ if (auto* object = maybeObject_.unbarrieredGet()) {
+ RemoveCellMemory(object, gcMallocBytes(), MemoryUse::WasmTableTable);
+ }
+
+ length_ = newLength.value();
+
+ if (auto* object = maybeObject_.unbarrieredGet()) {
+ AddCellMemory(object, gcMallocBytes(), MemoryUse::WasmTableTable);
+ }
+
+ for (InstanceSet::Range r = observers_.all(); !r.empty(); r.popFront()) {
+ r.front()->instance().onMovingGrowTable(this);
+ }
+
+ return oldLength;
+}
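+
+// Illustrative caller sketch for Table::grow() above: failure to grow is
+// signalled by a return value of uint32_t(-1), which callers should check:
+//
+//   uint32_t oldLength = table->grow(delta);
+//   if (oldLength == uint32_t(-1)) {
+//     // OOM or a limit was exceeded; report the error to the caller
+//   }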
+
+bool Table::movingGrowable() const {
+ return !maximum_ || length_ < maximum_.value();
+}
+
+bool Table::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance) {
+ MOZ_ASSERT(movingGrowable());
+
+ // A table can be imported multiple times into an instance, but we only
+ // register the instance as an observer once.
+
+ if (!observers_.put(instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+void Table::fillUninitialized(uint32_t index, uint32_t fillCount,
+ HandleAnyRef ref, JSContext* cx) {
+#ifdef DEBUG
+ assertRangeNull(index, fillCount);
+#endif // DEBUG
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!isAsmJS_);
+ fillFuncRef(index, fillCount, FuncRef::fromAnyRefUnchecked(ref), cx);
+ break;
+ }
+ case TableRepr::Ref: {
+ fillAnyRef(index, fillCount, ref);
+ break;
+ }
+ }
+}
+
+#ifdef DEBUG
+void Table::assertRangeNull(uint32_t index, uint32_t length) const {
+ switch (repr()) {
+ case TableRepr::Func:
+ for (uint32_t i = index; i < index + length; i++) {
+ MOZ_ASSERT(getFuncRef(i).instance == nullptr);
+ MOZ_ASSERT(getFuncRef(i).code == nullptr);
+ }
+ break;
+ case TableRepr::Ref:
+ for (uint32_t i = index; i < index + length; i++) {
+ MOZ_ASSERT(getAnyRef(i).isNull());
+ }
+ break;
+ }
+}
+
+void Table::assertRangeNotNull(uint32_t index, uint32_t length) const {
+ switch (repr()) {
+ case TableRepr::Func:
+ for (uint32_t i = index; i < index + length; i++) {
+ MOZ_ASSERT_IF(!isAsmJS_, getFuncRef(i).instance != nullptr);
+ MOZ_ASSERT(getFuncRef(i).code != nullptr);
+ }
+ break;
+ case TableRepr::Ref:
+ for (uint32_t i = index; i < index + length; i++) {
+ MOZ_ASSERT(!getAnyRef(i).isNull());
+ }
+ break;
+ }
+}
+#endif // DEBUG
+
+size_t Table::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ if (isFunction()) {
+ return functions_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ return objects_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t Table::gcMallocBytes() const {
+ size_t size = sizeof(*this);
+ if (isFunction()) {
+ size += length() * sizeof(FunctionTableElem);
+ } else {
+ size += length() * sizeof(TableAnyRefVector::ElementType);
+ }
+ return size;
+}
diff --git a/js/src/wasm/WasmTable.h b/js/src/wasm/WasmTable.h
new file mode 100644
index 0000000000..80cc4d3bdf
--- /dev/null
+++ b/js/src/wasm/WasmTable.h
@@ -0,0 +1,139 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_table_h
+#define wasm_table_h
+
+#include "gc/Policy.h"
+#include "wasm/WasmCode.h"
+
+namespace js {
+namespace wasm {
+
+// A Table is an indexable array of opaque values. Tables are first-class
+// stateful objects exposed to WebAssembly. asm.js also uses Tables to represent
+// its homogeneous function-pointer tables.
+//
+// A table of FuncRef holds FunctionTableElems, which are (code*,instance*)
+// pairs, where the instance must be traced.
+//
+// A table of AnyRef holds JSObject pointers, which must be traced.
+
+// TODO/AnyRef-boxing: With boxed immediates and strings, JSObject* is no longer
+// the most appropriate representation for Cell::anyref.
+STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+
+using TableAnyRefVector = GCVector<HeapPtr<JSObject*>, 0, SystemAllocPolicy>;
+
+class Table : public ShareableBase<Table> {
+ using InstanceSet = JS::WeakCache<GCHashSet<
+ WeakHeapPtr<WasmInstanceObject*>,
+ StableCellHasher<WeakHeapPtr<WasmInstanceObject*>>, SystemAllocPolicy>>;
+ using FuncRefVector = Vector<FunctionTableElem, 0, SystemAllocPolicy>;
+
+ WeakHeapPtr<WasmTableObject*> maybeObject_;
+ InstanceSet observers_;
+ FuncRefVector functions_; // either functions_ has data
+ TableAnyRefVector objects_; // or objects_, but not both
+ const RefType elemType_;
+ const bool isAsmJS_;
+ uint32_t length_;
+ const Maybe<uint32_t> maximum_;
+
+ template <class>
+ friend struct js::MallocProvider;
+ Table(JSContext* cx, const TableDesc& desc,
+ Handle<WasmTableObject*> maybeObject, FuncRefVector&& functions);
+ Table(JSContext* cx, const TableDesc& desc,
+ Handle<WasmTableObject*> maybeObject, TableAnyRefVector&& objects);
+
+ void tracePrivate(JSTracer* trc);
+ friend class js::WasmTableObject;
+
+ public:
+ static RefPtr<Table> create(JSContext* cx, const TableDesc& desc,
+ Handle<WasmTableObject*> maybeObject);
+ void trace(JSTracer* trc);
+
+ RefType elemType() const { return elemType_; }
+ TableRepr repr() const { return elemType_.tableRepr(); }
+
+ bool isAsmJS() const {
+ MOZ_ASSERT(elemType_.isFuncHierarchy());
+ return isAsmJS_;
+ }
+
+ bool isFunction() const { return elemType().isFuncHierarchy(); }
+ uint32_t length() const { return length_; }
+ Maybe<uint32_t> maximum() const { return maximum_; }
+
+ // Raw pointer to the table for use in TableInstanceData.
+ uint8_t* instanceElements() const;
+
+ // set/get/fillFuncRef is allowed only on table-of-funcref.
+ // get/fillAnyRef is allowed only on table-of-anyref.
+ // setNull is allowed on either.
+
+ const FunctionTableElem& getFuncRef(uint32_t index) const;
+ [[nodiscard]] bool getFuncRef(JSContext* cx, uint32_t index,
+ MutableHandleFunction fun) const;
+ void setFuncRef(uint32_t index, void* code, Instance* instance);
+ void fillFuncRef(uint32_t index, uint32_t fillCount, FuncRef ref,
+ JSContext* cx);
+
+ AnyRef getAnyRef(uint32_t index) const;
+ void fillAnyRef(uint32_t index, uint32_t fillCount, AnyRef ref);
+
+ // Get the element at index and convert it to a JS value.
+ [[nodiscard]] bool getValue(JSContext* cx, uint32_t index,
+ MutableHandleValue result) const;
+
+ void setNull(uint32_t index);
+
+ // Copy entry from |srcTable| at |srcIndex| to this table at |dstIndex|. Used
+ // by table.copy. May OOM if it needs to box up a function during an upcast.
+ [[nodiscard]] bool copy(JSContext* cx, const Table& srcTable,
+ uint32_t dstIndex, uint32_t srcIndex);
+
+ // grow() returns (uint32_t)-1 if it could not grow.
+ [[nodiscard]] uint32_t grow(uint32_t delta);
+ [[nodiscard]] bool movingGrowable() const;
+ [[nodiscard]] bool addMovingGrowObserver(JSContext* cx,
+ WasmInstanceObject* instance);
+
+ void fillUninitialized(uint32_t index, uint32_t fillCount, HandleAnyRef ref,
+ JSContext* cx);
+#ifdef DEBUG
+ void assertRangeNull(uint32_t index, uint32_t length) const;
+ void assertRangeNotNull(uint32_t index, uint32_t length) const;
+#endif // DEBUG
+
+ // about:memory reporting:
+
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const;
+
+ size_t gcMallocBytes() const;
+};
+
+using SharedTable = RefPtr<Table>;
+using SharedTableVector = Vector<SharedTable, 0, SystemAllocPolicy>;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_table_h
diff --git a/js/src/wasm/WasmTypeDecls.h b/js/src/wasm/WasmTypeDecls.h
new file mode 100644
index 0000000000..71ea66845c
--- /dev/null
+++ b/js/src/wasm/WasmTypeDecls.h
@@ -0,0 +1,99 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_type_decls_h
+#define wasm_type_decls_h
+
+#include "NamespaceImports.h"
+#include "gc/Barrier.h"
+#include "js/GCVector.h"
+#include "js/HashTable.h"
+#include "js/RootingAPI.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+namespace js {
+
+using JSObjectVector = GCVector<JSObject*, 0, SystemAllocPolicy>;
+
+class WasmMemoryObject;
+class WasmModuleObject;
+class WasmInstanceObject;
+class WasmTableObject;
+class WasmGlobalObject;
+class WasmTagObject;
+class WasmExceptionObject;
+
+using WasmInstanceObjectVector = GCVector<WasmInstanceObject*>;
+using WasmTableObjectVector = GCVector<WasmTableObject*, 0, SystemAllocPolicy>;
+using WasmGlobalObjectVector =
+ GCVector<WasmGlobalObject*, 0, SystemAllocPolicy>;
+using WasmTagObjectVector = GCVector<WasmTagObject*, 0, SystemAllocPolicy>;
+
+namespace wasm {
+
+struct ModuleEnvironment;
+class CodeRange;
+class CodeTier;
+class ModuleSegment;
+struct Metadata;
+struct MetadataTier;
+class Decoder;
+class GeneratedSourceMap;
+class Instance;
+class Module;
+
+class Code;
+using SharedCode = RefPtr<const Code>;
+using MutableCode = RefPtr<Code>;
+
+class Table;
+using SharedTable = RefPtr<Table>;
+using SharedTableVector = Vector<SharedTable, 0, SystemAllocPolicy>;
+
+class DebugState;
+using UniqueDebugState = UniquePtr<DebugState>;
+
+struct DataSegment;
+using MutableDataSegment = RefPtr<DataSegment>;
+using SharedDataSegment = RefPtr<const DataSegment>;
+using DataSegmentVector = Vector<SharedDataSegment, 0, SystemAllocPolicy>;
+
+struct ElemSegment;
+using MutableElemSegment = RefPtr<ElemSegment>;
+using SharedElemSegment = RefPtr<const ElemSegment>;
+using ElemSegmentVector = Vector<SharedElemSegment, 0, SystemAllocPolicy>;
+
+class Val;
+using ValVector = GCVector<Val, 0, SystemAllocPolicy>;
+
+// Uint32Vector has initial size 8 on the basis that the dominant use cases
+// (line numbers and control stacks) tend to have a small but nonzero number
+// of elements.
+using Uint32Vector = Vector<uint32_t, 8, SystemAllocPolicy>;
+
+using Bytes = Vector<uint8_t, 0, SystemAllocPolicy>;
+using UTF8Bytes = Vector<char, 0, SystemAllocPolicy>;
+using InstanceVector = Vector<Instance*, 0, SystemAllocPolicy>;
+using UniqueCharsVector = Vector<UniqueChars, 0, SystemAllocPolicy>;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_type_decls_h
diff --git a/js/src/wasm/WasmTypeDef.cpp b/js/src/wasm/WasmTypeDef.cpp
new file mode 100644
index 0000000000..8aad9fea31
--- /dev/null
+++ b/js/src/wasm/WasmTypeDef.cpp
@@ -0,0 +1,550 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTypeDef.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/JitOptions.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/HashTable.h"
+#include "js/Printf.h"
+#include "js/Value.h"
+#include "threading/ExclusiveData.h"
+#include "vm/Runtime.h"
+#include "vm/StringType.h"
+#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmJS.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedUint32;
+using mozilla::IsPowerOfTwo;
+
+// [SMDOC] Immediate type signature encoding
+//
+// call_indirect requires a signature check to ensure the dynamic callee type
+// matches the statically specified callee type. This involves comparing
+// whether the two function types are equal. We canonicalize function types
+// so that comparing the pointers of the types will indicate if they're equal.
+// The canonicalized function types are loaded from the instance at runtime.
+//
+// For the common case of simple/small function types, we can avoid the cost
+// of loading the function type pointers from the instance by having an
+// alternate 'immediate' form that encodes a function type in a constant.
+// We encode the function types such that bitwise equality implies the original
+// function types were equal. We use a tag bit such that if one of the types
+// is a pointer and the other an immediate, they will not compare as equal.
+//
+// The encoding is optimized for common function types that have at most one
+// result and an arbitrary amount of arguments.
+//
+// [
+// 1 bit : tag (always 1),
+// 1 bit : numResults,
+// 3 bits : numArgs,
+// numResults * 3 bits : results,
+// numArgs * 3 bits : args
+// ]
+// (lsb -> msb order)
+//
+// Any function type that cannot be encoded in the above format falls back
+// to the pointer representation.
+//
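+// As a worked example (illustrative; it assumes the tag occupies the least
+// significant bit, which is what the encoding code below implies), the
+// function type (i32, i32) -> i32 encodes as:
+//
+//   bit 0      : 1 (tag)
+//   bit 1      : 1 (numResults == 1)
+//   bits 2-4   : 0 (result: i32)
+//   bits 5-7   : 2 (numArgs == 2)
+//   bits 8-10  : 0 (arg 1: i32)
+//   bits 11-13 : 0 (arg 2: i32)
+//
+// i.e. the 32-bit constant 0x43.
+//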
+
+//=========================================================================
+// ImmediateType
+
+// ImmediateType is 32-bits to ensure it's easy to materialize the constant
+// on all platforms.
+using ImmediateType = uint32_t;
+static const unsigned sTotalBits = sizeof(ImmediateType) * 8;
+static const unsigned sTagBits = 1;
+static const unsigned sNumResultsBits = 1;
+static const unsigned sNumArgsBits = 3;
+static const unsigned sValTypeBits = 3;
+static const unsigned sMaxValTypes = 8;
+
+static_assert(((1 << sNumResultsBits) - 1) + ((1 << sNumArgsBits) - 1) ==
+ sMaxValTypes,
+ "sNumResultsBits, sNumArgsBits, sMaxValTypes are consistent");
+
+static_assert(sTagBits + sNumResultsBits + sNumArgsBits +
+ sValTypeBits * sMaxValTypes <=
+ sTotalBits,
+ "have room");
+
+static bool IsImmediateValType(ValType vt) {
+ switch (vt.kind()) {
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+ case ValType::V128:
+ return true;
+ case ValType::Ref:
+ // We don't have space to encode nullability, so we optimize for
+ // non-nullable types.
+ if (!vt.isNullable()) {
+ return false;
+ }
+ switch (vt.refType().kind()) {
+ case RefType::Func:
+ case RefType::Extern:
+ case RefType::Any:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+static unsigned EncodeImmediateValType(ValType vt) {
+  // Every encoding that fits in sValTypeBits is already in use; adding a new
+  // type requires widening sValTypeBits.
+ static_assert(7 < (1 << sValTypeBits), "enough space for ValType kind");
+
+ switch (vt.kind()) {
+ case ValType::I32:
+ return 0;
+ case ValType::I64:
+ return 1;
+ case ValType::F32:
+ return 2;
+ case ValType::F64:
+ return 3;
+ case ValType::V128:
+ return 4;
+ case ValType::Ref:
+ MOZ_ASSERT(vt.isNullable());
+ switch (vt.refType().kind()) {
+ case RefType::Func:
+ return 5;
+ case RefType::Extern:
+ return 6;
+ case RefType::Any:
+ return 7;
+ default:
+ MOZ_CRASH("bad RefType");
+ }
+ default:
+ MOZ_CRASH("bad ValType");
+ }
+}
+
+static bool IsImmediateFuncType(const FuncType& funcType) {
+ const ValTypeVector& results = funcType.results();
+ const ValTypeVector& args = funcType.args();
+
+ // Check the number of results and args fits
+ if (results.length() > ((1 << sNumResultsBits) - 1) ||
+ args.length() > ((1 << sNumArgsBits) - 1)) {
+ return false;
+ }
+
+ // Ensure every result is compatible
+ for (ValType v : results) {
+ if (!IsImmediateValType(v)) {
+ return false;
+ }
+ }
+
+ // Ensure every arg is compatible
+ for (ValType v : args) {
+ if (!IsImmediateValType(v)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static ImmediateType EncodeNumResults(uint32_t numResults) {
+ MOZ_ASSERT(numResults <= (1 << sNumResultsBits) - 1);
+ return numResults;
+}
+
+static ImmediateType EncodeNumArgs(uint32_t numArgs) {
+ MOZ_ASSERT(numArgs <= (1 << sNumArgsBits) - 1);
+ return numArgs;
+}
+
+static ImmediateType EncodeImmediateFuncType(const FuncType& funcType) {
+ ImmediateType immediate = FuncType::ImmediateBit;
+ uint32_t shift = sTagBits;
+
+ // Encode the results
+ immediate |= EncodeNumResults(funcType.results().length()) << shift;
+ shift += sNumResultsBits;
+
+ for (ValType resultType : funcType.results()) {
+ immediate |= EncodeImmediateValType(resultType) << shift;
+ shift += sValTypeBits;
+ }
+
+ // Encode the args
+ immediate |= EncodeNumArgs(funcType.args().length()) << shift;
+ shift += sNumArgsBits;
+
+ for (ValType argType : funcType.args()) {
+ immediate |= EncodeImmediateValType(argType) << shift;
+ shift += sValTypeBits;
+ }
+
+ MOZ_ASSERT(shift <= sTotalBits);
+ return immediate;
+}
+
+//=========================================================================
+// FuncType
+
+void FuncType::initImmediateTypeId() {
+ if (!IsImmediateFuncType(*this)) {
+ immediateTypeId_ = NO_IMMEDIATE_TYPE_ID;
+ return;
+ }
+ immediateTypeId_ = EncodeImmediateFuncType(*this);
+}
+
+bool FuncType::canHaveJitEntry() const {
+ return !hasUnexposableArgOrRet() &&
+ !temporarilyUnsupportedReftypeForEntry() &&
+ !temporarilyUnsupportedResultCountForJitEntry() &&
+ JitOptions.enableWasmJitEntry;
+}
+
+bool FuncType::canHaveJitExit() const {
+ return !hasUnexposableArgOrRet() && !temporarilyUnsupportedReftypeForExit() &&
+ !hasInt64Arg() && !temporarilyUnsupportedResultCountForJitExit() &&
+ JitOptions.enableWasmJitExit;
+}
+
+size_t FuncType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return args_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+//=========================================================================
+// StructType and StructLayout
+
+static inline CheckedInt32 RoundUpToAlignment(CheckedInt32 address,
+ uint32_t align) {
+ MOZ_ASSERT(IsPowerOfTwo(align));
+
+ // Note: Be careful to order operators such that we first make the
+ // value smaller and then larger, so that we don't get false
+ // overflow errors due to (e.g.) adding `align` and then
+ // subtracting `1` afterwards when merely adding `align-1` would
+ // not have overflowed. Note that due to the nature of two's
+ // complement representation, if `address` is already aligned,
+ // then adding `align-1` cannot itself cause an overflow.
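+  //
+  // A couple of concrete values, for illustration:
+  // RoundUpToAlignment(10, 8) == 16 and RoundUpToAlignment(16, 8) == 16.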
+
+ return ((address + (align - 1)) / align) * align;
+}
+
+CheckedInt32 StructLayout::addField(FieldType type) {
+ uint32_t fieldSize = type.size();
+ uint32_t fieldAlignment = type.alignmentInStruct();
+
+ // We have to ensure that `offset` is chosen so that no field crosses the
+ // inline/outline boundary. The assertions here ensure that. See comment
+ // on `class StructLayout` for background.
+ MOZ_ASSERT(fieldSize >= 1 && fieldSize <= 16);
+ MOZ_ASSERT((fieldSize & (fieldSize - 1)) == 0); // is a power of 2
+ MOZ_ASSERT(fieldAlignment == fieldSize); // is naturally aligned
+
+ // Alignment of the struct is the max of the alignment of its fields.
+ structAlignment = std::max(structAlignment, fieldAlignment);
+
+ // Align the pointer.
+ CheckedInt32 offset = RoundUpToAlignment(sizeSoFar, fieldAlignment);
+ if (!offset.isValid()) {
+ return offset;
+ }
+
+ // Allocate space.
+ sizeSoFar = offset + fieldSize;
+ if (!sizeSoFar.isValid()) {
+ return sizeSoFar;
+ }
+
+ // The following should hold if the three assertions above hold.
+ MOZ_ASSERT(offset / 16 == (offset + fieldSize - 1) / 16);
+ return offset;
+}
+
+CheckedInt32 StructLayout::close() {
+ return RoundUpToAlignment(sizeSoFar, structAlignment);
+}
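+
+// As an illustrative example of the layout computed above (using the natural
+// field sizes asserted in addField): a struct with fields (i32, i64, f32) is
+// laid out at offsets 0, 8 and 16 respectively, and close() rounds the total
+// size up to 24 bytes, since the struct alignment is 8.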
+
+bool StructType::init() {
+ StructLayout layout;
+ for (StructField& field : fields_) {
+ CheckedInt32 offset = layout.addField(field.type);
+ if (!offset.isValid()) {
+ return false;
+ }
+ field.offset = offset.value();
+ if (!field.type.isRefRepr()) {
+ continue;
+ }
+
+ bool isOutline;
+ uint32_t adjustedOffset;
+ WasmStructObject::fieldOffsetToAreaAndOffset(field.type, field.offset,
+ &isOutline, &adjustedOffset);
+ if (isOutline) {
+ if (!outlineTraceOffsets_.append(adjustedOffset)) {
+ return false;
+ }
+ } else {
+ if (!inlineTraceOffsets_.append(adjustedOffset)) {
+ return false;
+ }
+ }
+ }
+
+ CheckedInt32 size = layout.close();
+ if (!size.isValid()) {
+ return false;
+ }
+ size_ = size.value();
+
+ return true;
+}
+
+size_t StructType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return fields_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t ArrayType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return 0;
+}
+
+size_t TypeDef::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ switch (kind_) {
+ case TypeDefKind::Struct: {
+ return structType_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ case TypeDefKind::Func: {
+ return funcType_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ case TypeDefKind::Array: {
+ return arrayType_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ case TypeDefKind::None: {
+ return 0;
+ }
+ default:
+ break;
+ }
+ MOZ_ASSERT_UNREACHABLE();
+ return 0;
+}
+
+//=========================================================================
+// SuperTypeVector
+
+/* static */
+size_t SuperTypeVector::offsetOfTypeDefInVector(uint32_t typeDefDepth) {
+ return offsetof(SuperTypeVector, types_) + sizeof(void*) * typeDefDepth;
+}
+
+/* static */
+size_t SuperTypeVector::lengthForTypeDef(const TypeDef& typeDef) {
+ return std::max(uint32_t(typeDef.subTypingDepth()) + 1,
+ MinSuperTypeVectorLength);
+}
+
+/* static */
+size_t SuperTypeVector::byteSizeForTypeDef(const TypeDef& typeDef) {
+ static_assert(
+ sizeof(SuperTypeVector) + sizeof(void*) * (MaxSubTypingDepth + 1) <=
+ UINT16_MAX,
+ "cannot overflow");
+ return sizeof(SuperTypeVector) + (sizeof(void*) * lengthForTypeDef(typeDef));
+}
+
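+// Each vector created below stores the type's ancestors followed by the type
+// itself, ordered root-first, and is padded at the end with nulls up to
+// MinSuperTypeVectorLength. As an illustrative sketch (assuming, purely for
+// the example, a minimum length of 3): for Leaf <: Mid <: Base with subtyping
+// depths 2, 1 and 0, Leaf's vector is
+//
+//   [ Base's vector, Mid's vector, Leaf's vector ]
+//
+// while Base's vector is
+//
+//   [ Base's vector, nullptr, nullptr ]
+//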
+/* static */
+const SuperTypeVector* SuperTypeVector::createMultipleForRecGroup(
+ RecGroup* recGroup) {
+ // Pre-size the amount of space needed for all the super type vectors in this
+ // recursion group.
+ CheckedUint32 totalBytes = 0;
+ for (uint32_t typeIndex = 0; typeIndex < recGroup->numTypes(); typeIndex++) {
+ totalBytes +=
+ SuperTypeVector::byteSizeForTypeDef(recGroup->type(typeIndex));
+ }
+ if (!totalBytes.isValid()) {
+ return nullptr;
+ }
+
+ // Allocate the batch, and retain reference to the first one.
+ SuperTypeVector* firstVector =
+ (SuperTypeVector*)js_malloc(totalBytes.value());
+ if (!firstVector) {
+ return nullptr;
+ }
+
+ // Initialize the vectors, one by one
+ SuperTypeVector* currentVector = firstVector;
+ for (uint32_t typeIndex = 0; typeIndex < recGroup->numTypes(); typeIndex++) {
+ TypeDef& typeDef = recGroup->type(typeIndex);
+
+ // Compute the size again to know where the next vector can be found.
+ size_t vectorByteSize = SuperTypeVector::byteSizeForTypeDef(typeDef);
+
+ // Make the typedef and the vector point at each other.
+ typeDef.setSuperTypeVector(currentVector);
+ currentVector->setTypeDef(&typeDef);
+
+ // Every vector stores all ancestor types and itself.
+ currentVector->setLength(SuperTypeVector::lengthForTypeDef(typeDef));
+
+ // Initialize the entries in the vector
+ const TypeDef* currentTypeDef = &typeDef;
+ for (uint32_t index = 0; index < currentVector->length(); index++) {
+ uint32_t reverseIndex = currentVector->length() - index - 1;
+
+ // If this entry is required just to hit the minimum size, then
+ // initialize it to null.
+ if (reverseIndex > typeDef.subTypingDepth()) {
+ currentVector->setType(reverseIndex, nullptr);
+ continue;
+ }
+
+ // Otherwise we should always be iterating at the same depth as our
+ // currentTypeDef.
+ MOZ_ASSERT(reverseIndex == currentTypeDef->subTypingDepth());
+
+ currentVector->setType(reverseIndex, currentTypeDef->superTypeVector());
+ currentTypeDef = currentTypeDef->superTypeDef();
+ }
+
+ // There should be no more super types left over
+ MOZ_ASSERT(currentTypeDef == nullptr);
+
+ // Advance to the next super type vector
+ currentVector =
+ (SuperTypeVector*)(((const char*)currentVector) + vectorByteSize);
+ }
+
+ return firstVector;
+}
+
+//=========================================================================
+// TypeIdSet and TypeContext
+
+struct RecGroupHashPolicy {
+ using Lookup = const SharedRecGroup&;
+
+ static HashNumber hash(Lookup lookup) { return lookup->hash(); }
+
+ static bool match(const SharedRecGroup& lhs, Lookup rhs) {
+ return RecGroup::matches(*rhs, *lhs);
+ }
+};
+
+// A global hash set of recursion groups for use in fast type equality checks.
+class TypeIdSet {
+ using Set = HashSet<SharedRecGroup, RecGroupHashPolicy, SystemAllocPolicy>;
+ Set set_;
+
+ public:
+ // Attempt to insert a recursion group into the set, returning an existing
+ // recursion group if there was one.
+ SharedRecGroup insert(SharedRecGroup recGroup) {
+ Set::AddPtr p = set_.lookupForAdd(recGroup);
+ if (p) {
+ // A canonical recursion group already existed, return it.
+ return *p;
+ }
+
+ // Insert this recursion group into the set, and return it as the canonical
+ // recursion group instance.
+ if (!set_.add(p, recGroup)) {
+ return nullptr;
+ }
+ return recGroup;
+ }
+
+ void purge() {
+ // TODO: this is not guaranteed to remove all types that are not referenced
+ // from outside the canonical set, as removing a type may make a previous
+ // type we've visited now only have one ref and be eligible to be freed.
+ //
+ // Solving this either involves iterating to a fixed point, or else a much
+ // more invasive change to the lifetime management of recursion groups.
+ for (auto iter = set_.modIter(); !iter.done(); iter.next()) {
+ if (iter.get()->hasOneRef()) {
+ iter.remove();
+ }
+ }
+ }
+
+ // Release the provided recursion group reference and remove it from the
+ // canonical set if it was the last reference. This is one unified method
+ // because we need to perform the lookup before releasing the reference, but
+ // need to release the reference in order to see if it was the last reference
+ // outside the canonical set.
+ void clearRecGroup(SharedRecGroup* recGroupCell) {
+ if (Set::Ptr p = set_.lookup(*recGroupCell)) {
+ *recGroupCell = nullptr;
+ if ((*p)->hasOneRef()) {
+ set_.remove(p);
+ }
+ } else {
+ *recGroupCell = nullptr;
+ }
+ }
+};
+
+ExclusiveData<TypeIdSet> typeIdSet(mutexid::WasmTypeIdSet);
+
+void wasm::PurgeCanonicalTypes() {
+ ExclusiveData<TypeIdSet>::Guard locked = typeIdSet.lock();
+ locked->purge();
+}
+
+SharedRecGroup TypeContext::canonicalizeGroup(SharedRecGroup recGroup) {
+ ExclusiveData<TypeIdSet>::Guard locked = typeIdSet.lock();
+ return locked->insert(recGroup);
+}
+
+TypeContext::~TypeContext() {
+ ExclusiveData<TypeIdSet>::Guard locked = typeIdSet.lock();
+
+ // Clear out the recursion groups in this module, freeing them from the
+ // canonical type set if needed.
+ //
+ // We iterate backwards here so that we free every previous recursion group
+ // that may be referring to the current recursion group we're freeing. This
+ // is possible due to recursion groups being ordered.
+ for (int32_t groupIndex = recGroups_.length() - 1; groupIndex >= 0;
+ groupIndex--) {
+ // Try to remove this entry from the canonical set if we have the last
+ // strong reference. The entry may not exist if canonicalization failed
+ // and this type context was aborted. This will clear the reference in the
+ // vector.
+ locked->clearRecGroup(&recGroups_[groupIndex]);
+ }
+}
diff --git a/js/src/wasm/WasmTypeDef.h b/js/src/wasm/WasmTypeDef.h
new file mode 100644
index 0000000000..126c04bc95
--- /dev/null
+++ b/js/src/wasm/WasmTypeDef.h
@@ -0,0 +1,1462 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_type_def_h
+#define wasm_type_def_h
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/HashTable.h"
+
+#include "js/RefCounted.h"
+
+#include "wasm/WasmCodegenConstants.h"
+#include "wasm/WasmCompileArgs.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmUtility.h"
+#include "wasm/WasmValType.h"
+
+namespace js {
+namespace wasm {
+
+using mozilla::CheckedInt32;
+using mozilla::MallocSizeOf;
+
+class RecGroup;
+
+//=========================================================================
+// Function types
+
+// The FuncType class represents a WebAssembly function signature which takes a
+// list of value types and returns an expression type. The engine uses two
+// in-memory representations of the argument Vector's memory (when elements do
+// not fit inline): normal malloc allocation (via SystemAllocPolicy) and
+// allocation in a LifoAlloc (via LifoAllocPolicy). The former FuncType objects
+// can have any lifetime since they own the memory. The latter FuncType objects
+// must not outlive the associated LifoAlloc mark/release interval (which is
+// currently the duration of module validation+compilation). Thus, long-lived
+// objects like WasmModule must use malloced allocation.
+
+class FuncType {
+ ValTypeVector args_;
+ ValTypeVector results_;
+ // A packed structural type identifier for use in the call_indirect type
+ // check in the prologue of functions. If this function type cannot fit in
+ // this immediate, it will be NO_IMMEDIATE_TYPE_ID.
+ uint32_t immediateTypeId_;
+
+ // This function type cannot be packed into an immediate for call_indirect
+ // signature checks.
+ static const uint32_t NO_IMMEDIATE_TYPE_ID = UINT32_MAX;
+
+ // Entry from JS to wasm via the JIT is currently unimplemented for
+ // functions that return multiple values.
+ bool temporarilyUnsupportedResultCountForJitEntry() const {
+ return results().length() > MaxResultsForJitEntry;
+ }
+  // Calls out from wasm to JS that return multiple values are currently
+ // unsupported.
+ bool temporarilyUnsupportedResultCountForJitExit() const {
+ return results().length() > MaxResultsForJitExit;
+ }
+ // For JS->wasm jit entries, temporarily disallow certain types until the
+ // stubs generator is improved.
+ // * ref params may be nullable externrefs
+ // * ref results may not be type indices
+ // V128 types are excluded per spec but are guarded against separately.
+ bool temporarilyUnsupportedReftypeForEntry() const {
+ for (ValType arg : args()) {
+ if (arg.isRefType() && (!arg.isExternRef() || !arg.isNullable())) {
+ return true;
+ }
+ }
+ for (ValType result : results()) {
+ if (result.isTypeRef()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ // For wasm->JS jit exits, temporarily disallow certain types until
+ // the stubs generator is improved.
+ // * ref results may be nullable externrefs
+ // Unexposable types must be guarded against separately.
+ bool temporarilyUnsupportedReftypeForExit() const {
+ for (ValType result : results()) {
+ if (result.isRefType() &&
+ (!result.isExternRef() || !result.isNullable())) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void initImmediateTypeId();
+
+ public:
+ FuncType() : args_(), results_() { initImmediateTypeId(); }
+
+ FuncType(ValTypeVector&& args, ValTypeVector&& results)
+ : args_(std::move(args)), results_(std::move(results)) {
+ initImmediateTypeId();
+ }
+
+ FuncType(FuncType&&) = default;
+ FuncType& operator=(FuncType&&) = default;
+
+ [[nodiscard]] bool clone(const FuncType& src) {
+ MOZ_ASSERT(args_.empty());
+ MOZ_ASSERT(results_.empty());
+ immediateTypeId_ = src.immediateTypeId_;
+ return args_.appendAll(src.args_) && results_.appendAll(src.results_);
+ }
+
+ ValType arg(unsigned i) const { return args_[i]; }
+ const ValTypeVector& args() const { return args_; }
+ ValType result(unsigned i) const { return results_[i]; }
+ const ValTypeVector& results() const { return results_; }
+
+ bool hasImmediateTypeId() const {
+ return immediateTypeId_ != NO_IMMEDIATE_TYPE_ID;
+ }
+ uint32_t immediateTypeId() const {
+ MOZ_ASSERT(hasImmediateTypeId());
+ return immediateTypeId_;
+ }
+
+ // The lsb for every immediate type id is set to distinguish an immediate type
+ // id from a type id represented by a pointer to the global hash type set.
+ static const uint32_t ImmediateBit = 0x1;
+
+ HashNumber hash(const RecGroup* recGroup) const {
+ HashNumber hn = 0;
+ for (const ValType& vt : args_) {
+ hn = mozilla::AddToHash(hn, vt.forMatch(recGroup).hash());
+ }
+ for (const ValType& vt : results_) {
+ hn = mozilla::AddToHash(hn, vt.forMatch(recGroup).hash());
+ }
+ return hn;
+ }
+
+ // Matches two function types for isorecursive equality. See
+ // "Matching type definitions" in WasmValType.h for more background.
+ static bool matches(const RecGroup* lhsRecGroup, const FuncType& lhs,
+ const RecGroup* rhsRecGroup, const FuncType& rhs) {
+ if (lhs.args_.length() != rhs.args_.length() ||
+ lhs.results_.length() != rhs.results_.length()) {
+ return false;
+ }
+ for (uint32_t i = 0; i < lhs.args_.length(); i++) {
+ if (lhs.args_[i].forMatch(lhsRecGroup) !=
+ rhs.args_[i].forMatch(rhsRecGroup)) {
+ return false;
+ }
+ }
+ for (uint32_t i = 0; i < lhs.results_.length(); i++) {
+ if (lhs.results_[i].forMatch(lhsRecGroup) !=
+ rhs.results_[i].forMatch(rhsRecGroup)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Checks if every arg and result of the specified function types are bitwise
+ // equal. Type references must therefore point to exactly the same type
+ // definition instance.
+ static bool strictlyEquals(const FuncType& lhs, const FuncType& rhs) {
+ return EqualContainers(lhs.args(), rhs.args()) &&
+ EqualContainers(lhs.results(), rhs.results());
+ }
+
+ // Checks if two function types are compatible in a given subtyping
+ // relationship.
+ static bool canBeSubTypeOf(const FuncType& subType,
+ const FuncType& superType) {
+ // A subtype must have exactly as many arguments as its supertype
+ if (subType.args().length() != superType.args().length()) {
+ return false;
+ }
+
+ // A subtype must have exactly as many returns as its supertype
+ if (subType.results().length() != superType.results().length()) {
+ return false;
+ }
+
+ // Function result types are covariant
+ for (uint32_t i = 0; i < superType.results().length(); i++) {
+ if (!ValType::isSubTypeOf(subType.results()[i], superType.results()[i])) {
+ return false;
+ }
+ }
+
+ // Function argument types are contravariant
+ for (uint32_t i = 0; i < superType.args().length(); i++) {
+ if (!ValType::isSubTypeOf(superType.args()[i], subType.args()[i])) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ bool canHaveJitEntry() const;
+ bool canHaveJitExit() const;
+
+ bool hasInt64Arg() const {
+ for (ValType arg : args()) {
+ if (arg.kind() == ValType::Kind::I64) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ bool hasUnexposableArgOrRet() const {
+ for (ValType arg : args()) {
+ if (!arg.isExposable()) {
+ return true;
+ }
+ }
+ for (ValType result : results()) {
+ if (!result.isExposable()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(FuncType);
+};
+
+//=========================================================================
+// Structure types
+
+// The Module owns a dense array of StructType values that represent the
+// structure types that the module knows about. It is created from the sparse
+// array of types in the ModuleEnvironment when the Module is created.
+
+struct StructField {
+ FieldType type;
+ uint32_t offset;
+ bool isMutable;
+
+ HashNumber hash(const RecGroup* recGroup) const {
+ HashNumber hn = 0;
+ hn = mozilla::AddToHash(hn, type.forMatch(recGroup).hash());
+ hn = mozilla::AddToHash(hn, HashNumber(isMutable));
+ return hn;
+ }
+
+ // Checks if two struct fields are compatible in a given subtyping
+ // relationship.
+ static bool canBeSubTypeOf(const StructField& subType,
+ const StructField& superType) {
+ // Mutable fields are invariant w.r.t. field types
+ if (subType.isMutable && superType.isMutable) {
+ return subType.type == superType.type;
+ }
+
+ // Immutable fields are covariant w.r.t. field types
+ if (!subType.isMutable && !superType.isMutable) {
+ return FieldType::isSubTypeOf(subType.type, superType.type);
+ }
+
+ return false;
+ }
+};
+
+using StructFieldVector = Vector<StructField, 0, SystemAllocPolicy>;
+
+using InlineTraceOffsetVector = Vector<uint32_t, 2, SystemAllocPolicy>;
+using OutlineTraceOffsetVector = Vector<uint32_t, 0, SystemAllocPolicy>;
+
+class StructType {
+ public:
+ StructFieldVector fields_; // Field type, offset, and mutability
+ uint32_t size_; // The size of the type in bytes.
+ InlineTraceOffsetVector inlineTraceOffsets_;
+ OutlineTraceOffsetVector outlineTraceOffsets_;
+
+ public:
+ StructType() : fields_(), size_(0) {}
+
+ explicit StructType(StructFieldVector&& fields)
+ : fields_(std::move(fields)), size_(0) {}
+
+ StructType(StructType&&) = default;
+ StructType& operator=(StructType&&) = default;
+
+ [[nodiscard]] bool clone(const StructType& src) {
+ if (!fields_.appendAll(src.fields_)) {
+ return false;
+ }
+ size_ = src.size_;
+ return true;
+ }
+
+ [[nodiscard]] bool init();
+
+ bool isDefaultable() const {
+ for (auto& field : fields_) {
+ if (!field.type.isDefaultable()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ HashNumber hash(const RecGroup* recGroup) const {
+ HashNumber hn = 0;
+ for (const StructField& field : fields_) {
+ hn = mozilla::AddToHash(hn, field.hash(recGroup));
+ }
+ return hn;
+ }
+
+ // Matches two struct types for isorecursive equality. See
+ // "Matching type definitions" in WasmValType.h for more background.
+ static bool matches(const RecGroup* lhsRecGroup, const StructType& lhs,
+ const RecGroup* rhsRecGroup, const StructType& rhs) {
+ if (lhs.fields_.length() != rhs.fields_.length()) {
+ return false;
+ }
+ for (uint32_t i = 0; i < lhs.fields_.length(); i++) {
+ const StructField& lhsField = lhs.fields_[i];
+ const StructField& rhsField = rhs.fields_[i];
+ if (lhsField.isMutable != rhsField.isMutable ||
+ lhsField.type.forMatch(lhsRecGroup) !=
+ rhsField.type.forMatch(rhsRecGroup)) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Checks if two struct types are compatible in a given subtyping
+ // relationship.
+ static bool canBeSubTypeOf(const StructType& subType,
+ const StructType& superType) {
+ // A subtype must have at least as many fields as its supertype
+ if (subType.fields_.length() < superType.fields_.length()) {
+ return false;
+ }
+
+ // Every field that is in both superType and subType must be compatible
+ for (uint32_t i = 0; i < superType.fields_.length(); i++) {
+ if (!StructField::canBeSubTypeOf(subType.fields_[i],
+ superType.fields_[i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(StructType);
+};
+
+using StructTypeVector = Vector<StructType, 0, SystemAllocPolicy>;
+
+// Utility for computing field offset and alignments, and total size for
+// structs and tags. This is complicated by the fact that a WasmStructObject
+// has an inline area, which is used first; if that fills up, an optional
+// C++-heap-allocated outline area is used. We need to be careful not to
+// split any data item across the boundary. This is ensured as follows:
+//
+// (1) the possible field sizes are 1, 2, 4, 8 and 16 only.
+// (2) each field is "naturally aligned" -- aligned to its size.
+// (3) MaxInlineBytes (the size of the inline area) % 16 == 0.
+//
+// From (1) and (2), it follows that all fields are placed so that their first
+// and last bytes fall within the same 16-byte chunk. That is,
+// offset_of_first_byte_of_field / 16 == offset_of_last_byte_of_field / 16.
+//
+// Given that, it follows from (3) that all fields fall completely within
+// either the inline or outline areas; no field crosses the boundary.
+class StructLayout {
+ CheckedInt32 sizeSoFar = 0;
+ uint32_t structAlignment = 1;
+
+ public:
+  // The field adders return the offset of the field.
+ CheckedInt32 addField(FieldType type);
+
+ // The close method rounds up the structure size to the appropriate
+ // alignment and returns that size.
+ CheckedInt32 close();
+};
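+
+// A worked sketch (illustrative only, assuming the natural sizes/alignments
+// of i32 = 4, i64 = 8 and i8 = 1 described above) of laying out the fields
+// (i32, i64, i8):
+//
+//   StructLayout layout;
+//   CheckedInt32 a = layout.addField(FieldType::I32);  // offset 0
+//   CheckedInt32 b = layout.addField(FieldType::I64);  // offset 8 (align 8)
+//   CheckedInt32 c = layout.addField(FieldType::I8);   // offset 16
+//   CheckedInt32 size = layout.close();                // 17 rounded up to 24
+//
+// No field straddles a 16-byte boundary, as guaranteed by rules (1)-(3).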
+
+//=========================================================================
+// Array types
+
+class ArrayType {
+ public:
+ FieldType elementType_; // field type
+ bool isMutable_; // mutability
+
+ public:
+ ArrayType() : isMutable_(false) {}
+ ArrayType(FieldType elementType, bool isMutable)
+ : elementType_(elementType), isMutable_(isMutable) {}
+
+ ArrayType(const ArrayType&) = default;
+ ArrayType& operator=(const ArrayType&) = default;
+
+ ArrayType(ArrayType&&) = default;
+ ArrayType& operator=(ArrayType&&) = default;
+
+ [[nodiscard]] bool clone(const ArrayType& src) {
+ elementType_ = src.elementType_;
+ isMutable_ = src.isMutable_;
+ return true;
+ }
+
+ bool isDefaultable() const { return elementType_.isDefaultable(); }
+
+ HashNumber hash(const RecGroup* recGroup) const {
+ HashNumber hn = 0;
+ hn = mozilla::AddToHash(hn, elementType_.forMatch(recGroup).hash());
+ hn = mozilla::AddToHash(hn, HashNumber(isMutable_));
+ return hn;
+ }
+
+ // Matches two array types for isorecursive equality. See
+ // "Matching type definitions" in WasmValType.h for more background.
+ static bool matches(const RecGroup* lhsRecGroup, const ArrayType& lhs,
+ const RecGroup* rhsRecGroup, const ArrayType& rhs) {
+ if (lhs.isMutable_ != rhs.isMutable_ ||
+ lhs.elementType_.forMatch(lhsRecGroup) !=
+ rhs.elementType_.forMatch(rhsRecGroup)) {
+ return false;
+ }
+ return true;
+ }
+
+ // Checks if two arrays are compatible in a given subtyping relationship.
+ static bool canBeSubTypeOf(const ArrayType& subType,
+ const ArrayType& superType) {
+ // Mutable fields are invariant w.r.t. field types
+ if (subType.isMutable_ && superType.isMutable_) {
+ return subType.elementType_ == superType.elementType_;
+ }
+
+ // Immutable fields are covariant w.r.t. field types
+ if (!subType.isMutable_ && !superType.isMutable_) {
+ return FieldType::isSubTypeOf(subType.elementType_,
+ superType.elementType_);
+ }
+
+ return true;
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+WASM_DECLARE_CACHEABLE_POD(ArrayType);
+
+using ArrayTypeVector = Vector<ArrayType, 0, SystemAllocPolicy>;
+
+//=========================================================================
+// SuperTypeVector
+
+// [SMDOC] Super type vector
+//
+// A super type vector is a vector representation of the linked list of super
+// types that a type definition has. Every element is a raw pointer to a type
+// definition. It's possible to form a vector here because type definitions
+// are trees, not DAGs, with every type having at most one super type.
+//
+// The first element in the vector is the 'root' type definition without a
+// super type. The last element is the type definition itself.
+//
+// ## Subtype checking
+//
+// The only purpose of a super type vector is to support constant time
+// subtyping checks. This is not free; it comes at the cost of worst-case N^2
+// metadata size growth. We limit the max subtyping depth to counter this.
+//
+// To perform a subtype check we rely on the following:
+// (1) a type A is a subtype (<:) of type B iff:
+// type A == type B OR
+// type B is reachable by following declared super types of type A
+// (2) we order super type vectors from least to most derived types
+// (3) the 'subtyping depth' of all type definitions is statically known
+//
+// With the above, we know that if type B is a super type of type A, that it
+// must be in A's super type vector at type B's subtyping depth. We can
+// therefore just do an index and comparison to determine if that's the case.
+//
+// ## Example
+//
+// For the following type section:
+// ..
+// 12: (type (struct))
+// ..
+// 34: (type (sub 12 (struct)))
+// ..
+// 56: (type (sub 34 (struct)))
+// ..
+// 78: (type (sub 56 (struct)))
+// ..
+//
+// (type 12) would have the following super type vector:
+// [(type 12)]
+//
+// (type 78) would have the following super type vector:
+// [(type 12), (type 34), (type 56), (type 78)]
+//
+// Checking that (type 78) <: (type 12) can use the fact that (type 12) will
+// always be present at depth 0 of any super type vector it is in, and
+// therefore check the vector at that index.
+//
+// ## Minimum sizing
+//
+// As a further optimization to avoid bounds checking, we guarantee that all
+// super type vectors are at least `MinSuperTypeVectorLength`. All checks
+// against indices that we know statically are at/below that can skip bounds
+// checking. Extra entries added to reach the minimum size are initialized to
+// null.
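+//
+// ## Sketch of the check
+//
+// As an illustrative sketch only (the real logic is TypeDef::isSubTypeOf
+// below, which also handles the case where the vector has not been built
+// yet), checking whether `a` is a subtype of `b` for two TypeDefs boils
+// down to:
+//
+//   const SuperTypeVector* vec = a->superTypeVector();
+//   uint32_t depth = b->subTypingDepth();
+//   bool isSub = (a == b) || (depth < vec->length() &&
+//                             vec->type(depth) == b->superTypeVector());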
+class SuperTypeVector {
+ SuperTypeVector() : typeDef_(nullptr), length_(0) {}
+
+ // The TypeDef for which this is the supertype vector. That TypeDef should
+ // point back to this SuperTypeVector.
+ const TypeDef* typeDef_;
+
+ // The length of types stored inline below.
+ uint32_t length_;
+
+ public:
+ // Raw pointers to the super types of this type definition. Ordered from
+ // least-derived to most-derived. Do not add any fields after this point.
+ const SuperTypeVector* types_[0];
+
+ // Batch allocate super type vectors for all the types in a recursion group.
+ // Returns a pointer to the first super type vector, which can be used to
+ // free all vectors.
+ [[nodiscard]] static const SuperTypeVector* createMultipleForRecGroup(
+ RecGroup* recGroup);
+
+ const TypeDef* typeDef() const { return typeDef_; }
+ void setTypeDef(const TypeDef* typeDef) { typeDef_ = typeDef; }
+
+ uint32_t length() const { return length_; }
+ void setLength(uint32_t length) { length_ = length; }
+
+ const SuperTypeVector* type(size_t index) const {
+ MOZ_ASSERT(index < length_);
+ return types_[index];
+ }
+ void setType(size_t index, const SuperTypeVector* type) {
+ MOZ_ASSERT(index < length_);
+ types_[index] = type;
+ }
+
+ // The length of a super type vector for a specific type def.
+ static size_t lengthForTypeDef(const TypeDef& typeDef);
+ // The byte size of a super type vector for a specific type def.
+ static size_t byteSizeForTypeDef(const TypeDef& typeDef);
+
+ static size_t offsetOfLength() { return offsetof(SuperTypeVector, length_); }
+ static size_t offsetOfSelfTypeDef() {
+ return offsetof(SuperTypeVector, typeDef_);
+ };
+ static size_t offsetOfTypeDefInVector(uint32_t typeDefDepth);
+};
+
+// Ensure it is safe to use `sizeof(SuperTypeVector)` to find the offset of
+// `types_[0]`.
+static_assert(offsetof(SuperTypeVector, types_) == sizeof(SuperTypeVector));
+
+//=========================================================================
+// TypeDef and supporting types
+
+// A tagged container for the various types that can be present in a wasm
+// module's type section.
+
+enum class TypeDefKind : uint8_t {
+ None = 0,
+ Func,
+ Struct,
+ Array,
+};
+
+class TypeDef {
+ uint32_t offsetToRecGroup_;
+
+ // The supertype vector for this TypeDef. That SuperTypeVector should point
+ // back to this TypeDef.
+ const SuperTypeVector* superTypeVector_;
+
+ const TypeDef* superTypeDef_;
+ uint16_t subTypingDepth_;
+ TypeDefKind kind_;
+ union {
+ FuncType funcType_;
+ StructType structType_;
+ ArrayType arrayType_;
+ };
+
+ void setRecGroup(RecGroup* recGroup) {
+ uintptr_t recGroupAddr = (uintptr_t)recGroup;
+ uintptr_t typeDefAddr = (uintptr_t)this;
+ MOZ_ASSERT(typeDefAddr > recGroupAddr);
+ MOZ_ASSERT(typeDefAddr - recGroupAddr <= UINT32_MAX);
+ offsetToRecGroup_ = typeDefAddr - recGroupAddr;
+ }
+
+ public:
+ explicit TypeDef(RecGroup* recGroup)
+ : offsetToRecGroup_(0),
+ superTypeVector_(nullptr),
+ superTypeDef_(nullptr),
+ subTypingDepth_(0),
+ kind_(TypeDefKind::None) {
+ setRecGroup(recGroup);
+ }
+
+ ~TypeDef() {
+ switch (kind_) {
+ case TypeDefKind::Func:
+ funcType_.~FuncType();
+ break;
+ case TypeDefKind::Struct:
+ structType_.~StructType();
+ break;
+ case TypeDefKind::Array:
+ arrayType_.~ArrayType();
+ break;
+ case TypeDefKind::None:
+ break;
+ }
+ }
+
+ TypeDef& operator=(FuncType&& that) noexcept {
+ MOZ_ASSERT(isNone());
+ kind_ = TypeDefKind::Func;
+ new (&funcType_) FuncType(std::move(that));
+ return *this;
+ }
+
+ TypeDef& operator=(StructType&& that) noexcept {
+ MOZ_ASSERT(isNone());
+ kind_ = TypeDefKind::Struct;
+ new (&structType_) StructType(std::move(that));
+ return *this;
+ }
+
+ TypeDef& operator=(ArrayType&& that) noexcept {
+ MOZ_ASSERT(isNone());
+ kind_ = TypeDefKind::Array;
+ new (&arrayType_) ArrayType(std::move(that));
+ return *this;
+ }
+
+ const SuperTypeVector* superTypeVector() const { return superTypeVector_; }
+
+ void setSuperTypeVector(const SuperTypeVector* superTypeVector) {
+ superTypeVector_ = superTypeVector;
+ }
+
+ static size_t offsetOfKind() { return offsetof(TypeDef, kind_); }
+
+ static size_t offsetOfSuperTypeVector() {
+ return offsetof(TypeDef, superTypeVector_);
+ }
+
+ const TypeDef* superTypeDef() const { return superTypeDef_; }
+
+ uint16_t subTypingDepth() const { return subTypingDepth_; }
+
+ const RecGroup& recGroup() const {
+ uintptr_t typeDefAddr = (uintptr_t)this;
+ uintptr_t recGroupAddr = typeDefAddr - offsetToRecGroup_;
+ return *(const RecGroup*)recGroupAddr;
+ }
+
+ TypeDefKind kind() const { return kind_; }
+
+ bool isNone() const { return kind_ == TypeDefKind::None; }
+
+ bool isFuncType() const { return kind_ == TypeDefKind::Func; }
+
+ bool isStructType() const { return kind_ == TypeDefKind::Struct; }
+
+ bool isArrayType() const { return kind_ == TypeDefKind::Array; }
+
+ const FuncType& funcType() const {
+ MOZ_ASSERT(isFuncType());
+ return funcType_;
+ }
+
+ FuncType& funcType() {
+ MOZ_ASSERT(isFuncType());
+ return funcType_;
+ }
+
+ const StructType& structType() const {
+ MOZ_ASSERT(isStructType());
+ return structType_;
+ }
+
+ StructType& structType() {
+ MOZ_ASSERT(isStructType());
+ return structType_;
+ }
+
+ const ArrayType& arrayType() const {
+ MOZ_ASSERT(isArrayType());
+ return arrayType_;
+ }
+
+ ArrayType& arrayType() {
+ MOZ_ASSERT(isArrayType());
+ return arrayType_;
+ }
+
+ // Get a value that can be used for matching type definitions across
+ // different recursion groups.
+ static inline uintptr_t forMatch(const TypeDef* typeDef,
+ const RecGroup* recGroup);
+
+ HashNumber hash() const {
+ HashNumber hn = HashNumber(kind_);
+ hn = mozilla::AddToHash(hn, TypeDef::forMatch(superTypeDef_, &recGroup()));
+ switch (kind_) {
+ case TypeDefKind::Func:
+ hn = mozilla::AddToHash(hn, funcType_.hash(&recGroup()));
+ break;
+ case TypeDefKind::Struct:
+ hn = mozilla::AddToHash(hn, structType_.hash(&recGroup()));
+ break;
+ case TypeDefKind::Array:
+ hn = mozilla::AddToHash(hn, arrayType_.hash(&recGroup()));
+ break;
+ case TypeDefKind::None:
+ break;
+ }
+ return hn;
+ }
+
+ // Matches two type definitions for isorecursive equality. See
+ // "Matching type definitions" in WasmValType.h for more background.
+ static bool matches(const TypeDef& lhs, const TypeDef& rhs) {
+ if (lhs.kind_ != rhs.kind_) {
+ return false;
+ }
+ if (TypeDef::forMatch(lhs.superTypeDef_, &lhs.recGroup()) !=
+ TypeDef::forMatch(rhs.superTypeDef_, &rhs.recGroup())) {
+ return false;
+ }
+ switch (lhs.kind_) {
+ case TypeDefKind::Func:
+ return FuncType::matches(&lhs.recGroup(), lhs.funcType_,
+ &rhs.recGroup(), rhs.funcType_);
+ case TypeDefKind::Struct:
+ return StructType::matches(&lhs.recGroup(), lhs.structType_,
+ &rhs.recGroup(), rhs.structType_);
+ case TypeDefKind::Array:
+ return ArrayType::matches(&lhs.recGroup(), lhs.arrayType_,
+ &rhs.recGroup(), rhs.arrayType_);
+ case TypeDefKind::None:
+ return true;
+ }
+ return false;
+ }
+
+ // Checks if two type definitions are compatible in a given subtyping
+ // relationship.
+ static bool canBeSubTypeOf(const TypeDef* subType, const TypeDef* superType) {
+ if (subType->kind() != superType->kind()) {
+ return false;
+ }
+
+ switch (subType->kind_) {
+ case TypeDefKind::Func:
+ return FuncType::canBeSubTypeOf(subType->funcType_,
+ superType->funcType_);
+ case TypeDefKind::Struct:
+ return StructType::canBeSubTypeOf(subType->structType_,
+ superType->structType_);
+ case TypeDefKind::Array:
+ return ArrayType::canBeSubTypeOf(subType->arrayType_,
+ superType->arrayType_);
+ case TypeDefKind::None:
+ MOZ_CRASH();
+ }
+ return false;
+ }
+
+ void setSuperTypeDef(const TypeDef* superTypeDef) {
+ superTypeDef_ = superTypeDef;
+ subTypingDepth_ = superTypeDef_->subTypingDepth_ + 1;
+ }
+
+ // Checks if `subTypeDef` is a declared sub type of `superTypeDef`.
+ static bool isSubTypeOf(const TypeDef* subTypeDef,
+ const TypeDef* superTypeDef) {
+ // Fast path for when the types are equal
+ if (MOZ_LIKELY(subTypeDef == superTypeDef)) {
+ return true;
+ }
+ const SuperTypeVector* subSuperTypeVector = subTypeDef->superTypeVector();
+
+ // During construction of a recursion group, the super type vector may not
+ // have been computed yet, in which case we need to fall back to a linear
+ // search.
+ if (!subSuperTypeVector) {
+ while (subTypeDef) {
+ if (subTypeDef == superTypeDef) {
+ return true;
+ }
+ subTypeDef = subTypeDef->superTypeDef();
+ }
+ return false;
+ }
+
+    // The super type vector exists, so check that it points back here.
+ MOZ_ASSERT(subSuperTypeVector->typeDef() == subTypeDef);
+
+ // We need to check if `superTypeDef` is one of `subTypeDef`s super types
+ // by checking in `subTypeDef`s super type vector. We can use the static
+ // information of the depth of `superTypeDef` to index directly into the
+ // vector.
+ uint32_t subTypingDepth = superTypeDef->subTypingDepth();
+ if (subTypingDepth >= subSuperTypeVector->length()) {
+ return false;
+ }
+
+ const SuperTypeVector* superSuperTypeVector =
+ superTypeDef->superTypeVector();
+ MOZ_ASSERT(superSuperTypeVector);
+ MOZ_ASSERT(superSuperTypeVector->typeDef() == superTypeDef);
+
+ return subSuperTypeVector->type(subTypingDepth) == superSuperTypeVector;
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+ WASM_DECLARE_FRIEND_SERIALIZE(TypeDef);
+};
+
+using SharedTypeDef = RefPtr<const TypeDef>;
+using MutableTypeDef = RefPtr<TypeDef>;
+
+using TypeDefVector = Vector<TypeDef, 0, SystemAllocPolicy>;
+using TypeDefPtrVector = Vector<const TypeDef*, 0, SystemAllocPolicy>;
+
+using TypeDefPtrToIndexMap =
+ HashMap<const TypeDef*, uint32_t, PointerHasher<const TypeDef*>,
+ SystemAllocPolicy>;
+
+//=========================================================================
+// RecGroup
+
+// A recursion group is a set of type definitions that may refer to each other
+// or to type definitions in another recursion group. There is an ordering
+// restriction on type references such that references across recursion groups
+// must be acyclic.
+//
+// Type definitions are stored inline in their containing recursion group, and
+// have an offset to their containing recursion group. Recursion groups are
+// atomically refcounted and hold strong references to other recursion groups
+// they depend on.
+//
+// Type equality is structural in WebAssembly, and we canonicalize recursion
+// groups while building them so that pointer equality of types implies
+// equality of types. There is a global hash set of weak pointers to recursion
+// groups that holds the current canonical instance of a recursion group.
+class RecGroup : public AtomicRefCounted<RecGroup> {
+ // Whether this recursion group has been finished and acquired strong
+ // references to external recursion groups.
+ bool finalizedTypes_;
+ // The number of types stored in this recursion group.
+ uint32_t numTypes_;
+ // The batch allocated super type vectors for all type definitions in this
+ // recursion group.
+ const SuperTypeVector* vectors_;
+ // The first type definition stored inline in this recursion group.
+ TypeDef types_[0];
+
+ friend class TypeContext;
+
+ explicit RecGroup(uint32_t numTypes)
+ : finalizedTypes_(false), numTypes_(numTypes), vectors_(nullptr) {}
+
+  // Compute the size in bytes of a recursion group with the specified number
+  // of types.
+ static constexpr size_t sizeOfRecGroup(uint32_t numTypes) {
+ static_assert(MaxTypes <= SIZE_MAX / sizeof(TypeDef));
+ return sizeof(RecGroup) + sizeof(TypeDef) * numTypes;
+ }
+
+  // Allocate a recursion group with the specified number of types. The type
+  // definitions will be ready to be filled in. Users must call
+  // `finalizeDefinitions` once type definitions are initialized so that
+  // strong references to external recursion groups are taken.
+ static RefPtr<RecGroup> allocate(uint32_t numTypes) {
+ // Allocate the recursion group with the correct size
+ RecGroup* recGroup = (RecGroup*)js_malloc(sizeOfRecGroup(numTypes));
+ if (!recGroup) {
+ return nullptr;
+ }
+
+ // Construct the recursion group and types that are stored inline
+ new (recGroup) RecGroup(numTypes);
+ for (uint32_t i = 0; i < numTypes; i++) {
+ new (recGroup->types_ + i) TypeDef(recGroup);
+ }
+ return recGroup;
+ }
+
+ // Finish initialization by acquiring strong references to groups referenced
+ // by type definitions.
+ [[nodiscard]] bool finalizeDefinitions() {
+ MOZ_ASSERT(!finalizedTypes_);
+ // Super type vectors are only needed for GC and have a size/time impact
+    // that we don't want to incur until we're ready for it. Only use them when
+ // GC is built into the binary.
+#ifdef ENABLE_WASM_GC
+ vectors_ = SuperTypeVector::createMultipleForRecGroup(this);
+ if (!vectors_) {
+ return false;
+ }
+#endif
+ visitReferencedGroups([](const RecGroup* recGroup) { recGroup->AddRef(); });
+ finalizedTypes_ = true;
+ return true;
+ }
+
+ // Visit every external recursion group that is referenced by the types in
+ // this recursion group.
+ template <typename Visitor>
+ void visitReferencedGroups(Visitor visitor) const {
+ auto visitValType = [this, visitor](ValType type) {
+ if (type.isTypeRef() && &type.typeDef()->recGroup() != this) {
+ visitor(&type.typeDef()->recGroup());
+ }
+ };
+ auto visitFieldType = [this, visitor](FieldType type) {
+ if (type.isTypeRef() && &type.typeDef()->recGroup() != this) {
+ visitor(&type.typeDef()->recGroup());
+ }
+ };
+
+ for (uint32_t i = 0; i < numTypes_; i++) {
+ const TypeDef& typeDef = types_[i];
+
+ if (typeDef.superTypeDef() &&
+ &typeDef.superTypeDef()->recGroup() != this) {
+ visitor(&typeDef.superTypeDef()->recGroup());
+ }
+
+ switch (typeDef.kind()) {
+ case TypeDefKind::Func: {
+ const FuncType& funcType = typeDef.funcType();
+ for (auto type : funcType.args()) {
+ visitValType(type);
+ }
+ for (auto type : funcType.results()) {
+ visitValType(type);
+ }
+ break;
+ }
+ case TypeDefKind::Struct: {
+ const StructType& structType = typeDef.structType();
+ for (const auto& field : structType.fields_) {
+ visitFieldType(field.type);
+ }
+ break;
+ }
+ case TypeDefKind::Array: {
+ const ArrayType& arrayType = typeDef.arrayType();
+ visitFieldType(arrayType.elementType_);
+ break;
+ }
+ case TypeDefKind::None: {
+ MOZ_CRASH();
+ }
+ }
+ }
+ }
+
+ public:
+ ~RecGroup() {
+ // Release the referenced recursion groups if we acquired references to
+ // them. Do this before the type definitions are destroyed below.
+ if (finalizedTypes_) {
+ finalizedTypes_ = false;
+ visitReferencedGroups(
+ [](const RecGroup* recGroup) { recGroup->Release(); });
+ }
+
+ if (vectors_) {
+ js_free((void*)vectors_);
+ vectors_ = nullptr;
+ }
+
+ // Call destructors on all the type definitions.
+ for (uint32_t i = 0; i < numTypes_; i++) {
+ type(i).~TypeDef();
+ }
+ }
+
+ // Recursion groups cannot be copied or moved
+ RecGroup& operator=(const RecGroup&) = delete;
+ RecGroup& operator=(RecGroup&&) = delete;
+
+ // Get the type definition at the group type index (not module type index).
+ TypeDef& type(uint32_t groupTypeIndex) {
+ // We cannot mutate type definitions after we've finalized them
+ MOZ_ASSERT(!finalizedTypes_);
+ return types_[groupTypeIndex];
+ }
+ const TypeDef& type(uint32_t groupTypeIndex) const {
+ return types_[groupTypeIndex];
+ }
+
+ // The number of types stored in this recursion group.
+ uint32_t numTypes() const { return numTypes_; }
+
+ // Get the index of a type definition that's in this recursion group.
+ uint32_t indexOf(const TypeDef* typeDef) const {
+ MOZ_ASSERT(typeDef >= types_);
+ size_t groupTypeIndex = (size_t)(typeDef - types_);
+ MOZ_ASSERT(groupTypeIndex < numTypes());
+ return (uint32_t)groupTypeIndex;
+ }
+
+ HashNumber hash() const {
+ HashNumber hn = 0;
+ for (uint32_t i = 0; i < numTypes(); i++) {
+ hn = mozilla::AddToHash(hn, types_[i].hash());
+ }
+ return hn;
+ }
+
+ // Matches two recursion groups for isorecursive equality. See
+ // "Matching type definitions" in WasmValType.h for more background.
+ static bool matches(const RecGroup& lhs, const RecGroup& rhs) {
+ if (lhs.numTypes() != rhs.numTypes()) {
+ return false;
+ }
+ for (uint32_t i = 0; i < lhs.numTypes(); i++) {
+ if (!TypeDef::matches(lhs.type(i), rhs.type(i))) {
+ return false;
+ }
+ }
+ return true;
+ }
+};
+
+// Remove all types from the canonical type set that are not referenced from
+// outside the type set.
+extern void PurgeCanonicalTypes();
+
+using SharedRecGroup = RefPtr<const RecGroup>;
+using MutableRecGroup = RefPtr<RecGroup>;
+using SharedRecGroupVector = Vector<SharedRecGroup, 0, SystemAllocPolicy>;
+
+//=========================================================================
+// TypeContext
+
+// A type context holds the recursion groups and corresponding type definitions
+// defined in a module.
+class TypeContext : public AtomicRefCounted<TypeContext> {
+ FeatureArgs features_;
+ // The pending recursion group that is currently being constructed
+ MutableRecGroup pendingRecGroup_;
+ // An in-order list of all the recursion groups defined in this module
+ SharedRecGroupVector recGroups_;
+ // An in-order list of the type definitions in the module. Each type is
+ // stored in a recursion group.
+ TypeDefPtrVector types_;
+ // A map from type definition to the original module index.
+ TypeDefPtrToIndexMap moduleIndices_;
+
+ static SharedRecGroup canonicalizeGroup(SharedRecGroup recGroup);
+
+ public:
+ TypeContext() = default;
+ explicit TypeContext(const FeatureArgs& features) : features_(features) {}
+ ~TypeContext();
+
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return types_.sizeOfExcludingThis(mallocSizeOf) +
+ moduleIndices_.shallowSizeOfExcludingThis(mallocSizeOf);
+ }
+
+  // Disallow copy and move
+ TypeContext(const TypeContext&) = delete;
+ TypeContext& operator=(const TypeContext&) = delete;
+ TypeContext(TypeContext&&) = delete;
+ TypeContext& operator=(TypeContext&&) = delete;
+
+ // Begin creating a recursion group with the specified number of types.
+ // Returns a recursion group to be filled in with type definitions. This must
+  // be paired with `endRecGroup`.
+ [[nodiscard]] MutableRecGroup startRecGroup(uint32_t numTypes) {
+ // We must not have a pending group
+ MOZ_ASSERT(!pendingRecGroup_);
+
+ // Create the group and add it to the list of groups
+ pendingRecGroup_ = RecGroup::allocate(numTypes);
+ if (!pendingRecGroup_ || !recGroups_.append(pendingRecGroup_)) {
+ return nullptr;
+ }
+
+ // Store the types of the group into our index space maps. These may get
+ // overwritten when we finish this group and canonicalize it. We need to do
+ // this before finishing, because these entries will be used by decoding
+ // and error printing.
+ for (uint32_t groupTypeIndex = 0; groupTypeIndex < numTypes;
+ groupTypeIndex++) {
+ const TypeDef* typeDef = &pendingRecGroup_->type(groupTypeIndex);
+ uint32_t typeIndex = types_.length();
+ if (!types_.append(typeDef) || !moduleIndices_.put(typeDef, typeIndex)) {
+ return nullptr;
+ }
+ }
+ return pendingRecGroup_;
+ }
+
+ // Finish creation of a recursion group after type definitions have been
+  // initialized. This must be paired with `startRecGroup`.
+ [[nodiscard]] bool endRecGroup() {
+ // We must have started a recursion group
+ MOZ_ASSERT(pendingRecGroup_);
+ MutableRecGroup recGroup = pendingRecGroup_;
+ pendingRecGroup_ = nullptr;
+
+ // Finalize the type definitions in the recursion group
+ if (!recGroup->finalizeDefinitions()) {
+ return false;
+ }
+
+ // Canonicalize the recursion group
+ SharedRecGroup canonicalRecGroup = canonicalizeGroup(recGroup);
+ if (!canonicalRecGroup) {
+ return false;
+ }
+
+ // Nothing left to do if this group became the canonical group
+ if (canonicalRecGroup == recGroup) {
+ return true;
+ }
+
+ // Store the canonical group into the list
+ recGroups_.back() = canonicalRecGroup;
+
+ // Overwrite all the entries we stored into the index space maps when we
+ // started this group.
+ MOZ_ASSERT(recGroup->numTypes() == canonicalRecGroup->numTypes());
+ for (uint32_t groupTypeIndex = 0; groupTypeIndex < recGroup->numTypes();
+ groupTypeIndex++) {
+ uint32_t typeIndex = length() - recGroup->numTypes() + groupTypeIndex;
+ const TypeDef* oldTypeDef = types_[typeIndex];
+ const TypeDef* newTypeDef = &canonicalRecGroup->type(groupTypeIndex);
+ types_[typeIndex] = newTypeDef;
+ moduleIndices_.remove(oldTypeDef);
+ if (!moduleIndices_.put(newTypeDef, typeIndex)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ template <typename T>
+ [[nodiscard]] bool addType(T&& type) {
+ MutableRecGroup recGroup = startRecGroup(1);
+ if (!recGroup) {
+ return false;
+ }
+ recGroup->type(0) = std::move(type);
+ return endRecGroup();
+ }
+
+ const TypeDef& type(uint32_t index) const { return *types_[index]; }
+ const TypeDef& operator[](uint32_t index) const { return *types_[index]; }
+
+ bool empty() const { return types_.empty(); }
+ uint32_t length() const { return types_.length(); }
+
+ const SharedRecGroupVector& groups() const { return recGroups_; }
+
+ // Map from type definition to index
+
+ uint32_t indexOf(const TypeDef& typeDef) const {
+ auto moduleIndex = moduleIndices_.readonlyThreadsafeLookup(&typeDef);
+ MOZ_RELEASE_ASSERT(moduleIndex.found());
+ return moduleIndex->value();
+ }
+};
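+
+// A minimal usage sketch (illustrative only; real callers also deal with
+// feature flags and richer error reporting): adding a single
+// `(func (param i32))` type creates a one-type recursion group and
+// canonicalizes it against the global TypeIdSet.
+//
+//   MutableTypeContext types = js_new<TypeContext>();
+//   ValTypeVector args;
+//   if (!types || !args.append(ValType::I32)) {
+//     return false;
+//   }
+//   if (!types->addType(FuncType(std::move(args), ValTypeVector()))) {
+//     return false;
+//   }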
+
+using SharedTypeContext = RefPtr<const TypeContext>;
+using MutableTypeContext = RefPtr<TypeContext>;
+
+//=========================================================================
+// TypeHandle
+
+// An unambiguous strong reference to a type definition in a specific type
+// context.
+class TypeHandle {
+ private:
+ SharedTypeContext context_;
+ uint32_t index_;
+
+ public:
+ TypeHandle(SharedTypeContext context, uint32_t index)
+ : context_(context), index_(index) {
+ MOZ_ASSERT(index_ < context_->length());
+ }
+ TypeHandle(SharedTypeContext context, const TypeDef& def)
+ : context_(context), index_(context->indexOf(def)) {}
+
+ TypeHandle(const TypeHandle&) = default;
+ TypeHandle& operator=(const TypeHandle&) = default;
+
+ const SharedTypeContext& context() const { return context_; }
+ uint32_t index() const { return index_; }
+ const TypeDef& def() const { return context_->type(index_); }
+};
+
+//=========================================================================
+// misc
+
+/* static */
+inline uintptr_t TypeDef::forMatch(const TypeDef* typeDef,
+ const RecGroup* recGroup) {
+ // TypeDef is aligned sufficiently to allow a tag to distinguish a local type
+ // reference (index) from a non-local type reference (pointer).
+ static_assert(alignof(TypeDef) > 1);
+ MOZ_ASSERT((uintptr_t(typeDef) & 0x1) == 0);
+
+ // Return a tagged index for local type references
+ if (typeDef && &typeDef->recGroup() == recGroup) {
+ return uintptr_t(recGroup->indexOf(typeDef)) | 0x1;
+ }
+
+ // Return an untagged pointer for non-local type references
+ return uintptr_t(typeDef);
+}
+
+/* static */
+inline MatchTypeCode MatchTypeCode::forMatch(PackedTypeCode ptc,
+ const RecGroup* recGroup) {
+ MatchTypeCode mtc = {};
+ mtc.typeCode = PackedRepr(ptc.typeCode());
+ mtc.typeRef = TypeDef::forMatch(ptc.typeDef(), recGroup);
+ mtc.nullable = ptc.isNullable();
+ return mtc;
+}
+
+inline RefTypeHierarchy RefType::hierarchy() const {
+ switch (kind()) {
+ case RefType::Func:
+ case RefType::NoFunc:
+ return RefTypeHierarchy::Func;
+ case RefType::Extern:
+ case RefType::NoExtern:
+ return RefTypeHierarchy::Extern;
+ case RefType::Any:
+ case RefType::None:
+ case RefType::Eq:
+ case RefType::Struct:
+ case RefType::Array:
+ return RefTypeHierarchy::Any;
+ case RefType::TypeRef:
+ switch (typeDef()->kind()) {
+ case TypeDefKind::Struct:
+ case TypeDefKind::Array:
+ return RefTypeHierarchy::Any;
+ case TypeDefKind::Func:
+ return RefTypeHierarchy::Func;
+ case TypeDefKind::None:
+ MOZ_CRASH();
+ }
+ }
+ MOZ_CRASH("switch is exhaustive");
+}
+
+inline TableRepr RefType::tableRepr() const {
+ switch (hierarchy()) {
+ case RefTypeHierarchy::Any:
+ case RefTypeHierarchy::Extern:
+ return TableRepr::Ref;
+ case RefTypeHierarchy::Func:
+ return TableRepr::Func;
+ }
+ MOZ_CRASH("switch is exhaustive");
+}
+
+inline bool RefType::isFuncHierarchy() const {
+ return hierarchy() == RefTypeHierarchy::Func;
+}
+inline bool RefType::isExternHierarchy() const {
+ return hierarchy() == RefTypeHierarchy::Extern;
+}
+inline bool RefType::isAnyHierarchy() const {
+ return hierarchy() == RefTypeHierarchy::Any;
+}
+
+/* static */
+inline bool RefType::isSubTypeOf(RefType subType, RefType superType) {
+ // Anything is a subtype of itself.
+ if (subType == superType) {
+ return true;
+ }
+
+ // A subtype must have the same nullability as the supertype or the
+ // supertype must be nullable.
+ if (subType.isNullable() && !superType.isNullable()) {
+ return false;
+ }
+
+ // Non type-index references are subtypes if they have the same kind
+ if (!subType.isTypeRef() && !superType.isTypeRef() &&
+ subType.kind() == superType.kind()) {
+ return true;
+ }
+
+ // eqref is a subtype of anyref
+ if (subType.isEq() && superType.isAny()) {
+ return true;
+ }
+
+ // structref/arrayref are subtypes of eqref and anyref
+ if ((subType.isStruct() || subType.isArray()) &&
+ (superType.isAny() || superType.isEq())) {
+ return true;
+ }
+
+ // Structs are subtypes of structref, eqref and anyref
+ if (subType.isTypeRef() && subType.typeDef()->isStructType() &&
+ (superType.isAny() || superType.isEq() || superType.isStruct())) {
+ return true;
+ }
+
+ // Arrays are subtypes of arrayref, eqref and anyref
+ if (subType.isTypeRef() && subType.typeDef()->isArrayType() &&
+ (superType.isAny() || superType.isEq() || superType.isArray())) {
+ return true;
+ }
+
+ // Funcs are subtypes of funcref
+ if (subType.isTypeRef() && subType.typeDef()->isFuncType() &&
+ superType.isFunc()) {
+ return true;
+ }
+
+ // Type references can be subtypes
+ if (subType.isTypeRef() && superType.isTypeRef()) {
+ return TypeDef::isSubTypeOf(subType.typeDef(), superType.typeDef());
+ }
+
+ // No func is the bottom type of the func hierarchy
+ if (subType.isNoFunc() && superType.hierarchy() == RefTypeHierarchy::Func) {
+ return true;
+ }
+
+ // No extern is the bottom type of the extern hierarchy
+ if (subType.isNoExtern() &&
+ superType.hierarchy() == RefTypeHierarchy::Extern) {
+ return true;
+ }
+
+ // None is the bottom type of the any hierarchy
+ if (subType.isNone() && superType.hierarchy() == RefTypeHierarchy::Any) {
+ return true;
+ }
+
+ return false;
+}
+
+/* static */
+inline bool RefType::castPossible(RefType sourceType, RefType destType) {
+ // Nullable types always have null in common.
+ if (sourceType.isNullable() && destType.isNullable()) {
+ return true;
+ }
+
+  // At least one of the types is non-nullable, so any common values must be
+  // non-null. Therefore, if either type is a bottom type, common values are
+ // impossible.
+ if (sourceType.isRefBottom() || destType.isRefBottom()) {
+ return false;
+ }
+
+ // After excluding bottom types, our type hierarchy is a tree, and after
+ // excluding nulls, subtype relationships are sufficient to tell if the types
+ // share any values. If neither type is a subtype of the other, then they are
+ // on different branches of the tree and completely disjoint.
+ RefType sourceNonNull = sourceType.withIsNullable(false);
+ RefType destNonNull = destType.withIsNullable(false);
+ return RefType::isSubTypeOf(sourceNonNull, destNonNull) ||
+ RefType::isSubTypeOf(destNonNull, sourceNonNull);
+}
+
+//=========================================================================
+// [SMDOC] Signatures and runtime types
+//
+// TypeIdDesc describes the runtime representation of a TypeDef suitable for
+// type equality checks. The kind of representation depends on whether the type
+// is a function or a GC type. This design is in flux and will evolve.
+//
+// # Function types
+//
+// For functions in the general case, a FuncType is allocated and stored in a
+// process-wide hash table, so that pointer equality implies structural
+// equality. This process does not correctly handle type references (which would
+// require hash-consing of infinite-trees), but that's okay while
+// function-references and gc-types are experimental.
+//
+// A pointer to the hash table entry is stored in the global data
+// area for each instance, and TypeIdDesc gives the offset to this entry.
+//
+// ## Immediate function types
+//
+// As an optimization for the 99% case where the FuncType has a small number of
+// parameters, the FuncType is bit-packed into a uint32 immediate value so that
+// integer equality implies structural equality. Both cases can be handled with
+// a single comparison by always setting the LSB for the immediates
+// (the LSB is necessarily 0 for allocated FuncType pointers due to alignment).
+//
+// # GC types
+//
+// For GC types, an entry is always created in the global data area and a
+// unique RttValue (see wasm/TypedObject.h) is stored there. This RttValue
+// is the value given by 'rtt.canon $t' for each type definition. As each entry
+// is given a unique value and no canonicalization is done (which would require
+// hash-consing of infinite-trees), this is not yet spec compliant.
+//
+// # wasm::Instance and the global type context
+//
+// As GC objects (aka TypedObject) may outlive the module they are created in,
+// types are additionally transferred to a wasm::Context (which is part of
+// JSContext) upon instantiation. This wasm::Context contains the
+// 'global type context' that RTTValues refer to by type index. Types are never
+// freed from the global type context as that would shift the index space. In
+// the future, this will be fixed.
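+//
+// As an illustrative sketch (not the exact generated code), the signature
+// check in a call_indirect prologue can therefore always be a single word
+// comparison, regardless of which representation a given FuncType uses:
+//
+//   // `expected` is baked into the caller, `actual` is loaded for the
+//   // callee; each is either a tagged immediate (LSB set) or a pointer.
+//   if (actual != expected) {
+//     Trap();  // hypothetical trap path for a signature mismatch
+//   }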
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_type_def_h
diff --git a/js/src/wasm/WasmUtility.h b/js/src/wasm/WasmUtility.h
new file mode 100644
index 0000000000..fc9f0e54b7
--- /dev/null
+++ b/js/src/wasm/WasmUtility.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef wasm_utility_h
+#define wasm_utility_h
+
+#include <algorithm>
+namespace js {
+namespace wasm {
+
+template <class Container1, class Container2>
+static inline bool EqualContainers(const Container1& lhs,
+ const Container2& rhs) {
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+}
+
+#define WASM_DECLARE_POD_VECTOR(Type, VectorName) \
+ } \
+ } \
+ static_assert(std::is_trivially_copyable<js::wasm::Type>::value, \
+ "Must be trivially copyable"); \
+ static_assert(std::is_trivially_destructible<js::wasm::Type>::value, \
+ "Must be trivially destructible"); \
+ namespace js { \
+ namespace wasm { \
+ typedef Vector<Type, 0, SystemAllocPolicy> VectorName;
+
+using mozilla::MallocSizeOf;
+
+template <class T>
+static inline size_t SizeOfVectorElementExcludingThis(
+ const T& elem, MallocSizeOf mallocSizeOf) {
+ return elem.sizeOfExcludingThis(mallocSizeOf);
+}
+
+template <class T>
+static inline size_t SizeOfVectorElementExcludingThis(
+ const RefPtr<T>& elem, MallocSizeOf mallocSizeOf) {
+ return elem->sizeOfExcludingThis(mallocSizeOf);
+}
+
+template <class T, size_t N>
+static inline size_t SizeOfVectorExcludingThis(
+ const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
+ MallocSizeOf mallocSizeOf) {
+ size_t size = vec.sizeOfExcludingThis(mallocSizeOf);
+ for (const T& t : vec) {
+ size += SizeOfVectorElementExcludingThis(t, mallocSizeOf);
+ }
+ return size;
+}
+
+template <class T>
+static inline size_t SizeOfMaybeExcludingThis(const mozilla::Maybe<T>& maybe,
+ MallocSizeOf mallocSizeOf) {
+ return maybe ? maybe->sizeOfExcludingThis(mallocSizeOf) : 0;
+}
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_utility_h
diff --git a/js/src/wasm/WasmValType.cpp b/js/src/wasm/WasmValType.cpp
new file mode 100644
index 0000000000..3265dae27f
--- /dev/null
+++ b/js/src/wasm/WasmValType.cpp
@@ -0,0 +1,398 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmValType.h"
+
+#include "js/Conversions.h"
+#include "js/ErrorReport.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/Printf.h"
+#include "js/Value.h"
+
+#include "vm/JSAtom.h"
+#include "vm/JSObject.h"
+#include "vm/StringType.h"
+#include "wasm/WasmJS.h"
+
+#include "vm/JSAtom-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::wasm;
+
+RefType RefType::topType() const {
+ switch (kind()) {
+ case RefType::Any:
+ case RefType::Eq:
+ case RefType::Array:
+ case RefType::Struct:
+ case RefType::None:
+ return RefType::any();
+ case RefType::Func:
+ case RefType::NoFunc:
+ return RefType::func();
+ case RefType::Extern:
+ case RefType::NoExtern:
+ return RefType::extern_();
+ case RefType::TypeRef:
+ switch (typeDef()->kind()) {
+ case TypeDefKind::Array:
+ case TypeDefKind::Struct:
+ return RefType::any();
+ case TypeDefKind::Func:
+ return RefType::func();
+ case TypeDefKind::None:
+ MOZ_CRASH("should not see TypeDefKind::None at this point");
+ }
+ }
+ MOZ_CRASH("switch is exhaustive");
+}
+
+TypeDefKind RefType::typeDefKind() const {
+ switch (kind()) {
+ case RefType::Struct:
+ return TypeDefKind::Struct;
+ case RefType::Array:
+ return TypeDefKind::Array;
+ case RefType::Func:
+ return TypeDefKind::Func;
+ default:
+ return TypeDefKind::None;
+ }
+ MOZ_CRASH("switch is exhaustive");
+}
+
+static bool ToRefType(JSContext* cx, JSLinearString* typeLinearStr,
+ RefType* out) {
+ if (StringEqualsLiteral(typeLinearStr, "anyfunc") ||
+ StringEqualsLiteral(typeLinearStr, "funcref")) {
+ // The JS API uses "anyfunc" uniformly as the external name of funcref. We
+ // also allow "funcref" for compatibility with code we've already shipped.
+ *out = RefType::func();
+ return true;
+ }
+ if (StringEqualsLiteral(typeLinearStr, "externref")) {
+ *out = RefType::extern_();
+ return true;
+ }
+#ifdef ENABLE_WASM_GC
+ if (GcAvailable(cx)) {
+ if (StringEqualsLiteral(typeLinearStr, "anyref")) {
+ *out = RefType::any();
+ return true;
+ }
+ if (StringEqualsLiteral(typeLinearStr, "eqref")) {
+ *out = RefType::eq();
+ return true;
+ }
+ if (StringEqualsLiteral(typeLinearStr, "structref")) {
+ *out = RefType::struct_();
+ return true;
+ }
+ if (StringEqualsLiteral(typeLinearStr, "arrayref")) {
+ *out = RefType::array();
+ return true;
+ }
+ if (StringEqualsLiteral(typeLinearStr, "nullfuncref")) {
+ *out = RefType::nofunc();
+ return true;
+ }
+ if (StringEqualsLiteral(typeLinearStr, "nullexternref")) {
+ *out = RefType::noextern();
+ return true;
+ }
+ if (StringEqualsLiteral(typeLinearStr, "nullref")) {
+ *out = RefType::none();
+ return true;
+ }
+ }
+#endif
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_STRING_VAL_TYPE);
+ return false;
+}
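+
+// A minimal usage sketch (illustrative only; the exact JS entry points that
+// funnel these strings into this helper are assumed, not shown here). From
+// script, reference type names arrive as plain strings, e.g.
+//
+//   new WebAssembly.Table({ element: "anyfunc", initial: 1 });  // funcref
+//   new WebAssembly.Global({ value: "externref" }, null);       // externref
+//
+// "anyfunc"/"funcref" map to RefType::func(), "externref" maps to
+// RefType::extern_(), and the GC shorthands ("anyref", "eqref", "structref",
+// "arrayref", "nullfuncref", "nullexternref", "nullref") are accepted only
+// when GC is available in the current context.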
+
+enum class RefTypeResult {
+ Failure,
+ Parsed,
+ Unparsed,
+};
+
+static RefTypeResult MaybeToRefType(JSContext* cx, HandleObject obj,
+ RefType* out) {
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ if (!wasm::FunctionReferencesAvailable(cx)) {
+ return RefTypeResult::Unparsed;
+ }
+
+ JSAtom* refAtom = Atomize(cx, "ref", strlen("ref"));
+ if (!refAtom) {
+ return RefTypeResult::Failure;
+ }
+ RootedId refId(cx, AtomToId(refAtom));
+
+ RootedValue refVal(cx);
+ if (!GetProperty(cx, obj, obj, refId, &refVal)) {
+ return RefTypeResult::Failure;
+ }
+
+ RootedString typeStr(cx, ToString(cx, refVal));
+ if (!typeStr) {
+ return RefTypeResult::Failure;
+ }
+
+ Rooted<JSLinearString*> typeLinearStr(cx, typeStr->ensureLinear(cx));
+ if (!typeLinearStr) {
+ return RefTypeResult::Failure;
+ }
+
+ if (StringEqualsLiteral(typeLinearStr, "func")) {
+ *out = RefType::func();
+ } else if (StringEqualsLiteral(typeLinearStr, "extern")) {
+ *out = RefType::extern_();
+# ifdef ENABLE_WASM_GC
+ } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "any")) {
+ *out = RefType::any();
+ } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "eq")) {
+ *out = RefType::eq();
+ } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "struct")) {
+ *out = RefType::struct_();
+ } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "array")) {
+ *out = RefType::array();
+# endif
+ } else {
+ return RefTypeResult::Unparsed;
+ }
+
+ JSAtom* nullableAtom = Atomize(cx, "nullable", strlen("nullable"));
+ if (!nullableAtom) {
+ return RefTypeResult::Failure;
+ }
+ RootedId nullableId(cx, AtomToId(nullableAtom));
+ RootedValue nullableVal(cx);
+ if (!GetProperty(cx, obj, obj, nullableId, &nullableVal)) {
+ return RefTypeResult::Failure;
+ }
+
+ bool nullable = ToBoolean(nullableVal);
+ if (!nullable) {
+ *out = out->asNonNullable();
+ }
+ MOZ_ASSERT(out->isNullable() == nullable);
+ return RefTypeResult::Parsed;
+#else
+ return RefTypeResult::Unparsed;
+#endif
+}
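+
+// Illustrative sketch of the object form accepted above (assuming the
+// function-references JS API is enabled; the property names follow the code
+// above). An object such as
+//
+//   { ref: "extern", nullable: false }
+//
+// parses to RefType::extern_().asNonNullable(). Omitting "nullable" (or
+// passing any falsy value) likewise yields a non-nullable reference, since
+// ToBoolean(undefined) is false.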
+
+bool wasm::ToValType(JSContext* cx, HandleValue v, ValType* out) {
+ if (v.isObject()) {
+ RootedObject obj(cx, &v.toObject());
+ RefType refType;
+ switch (MaybeToRefType(cx, obj, &refType)) {
+ case RefTypeResult::Failure:
+ return false;
+ case RefTypeResult::Parsed:
+ *out = ValType(refType);
+ return true;
+ case RefTypeResult::Unparsed:
+ break;
+ }
+ }
+
+ RootedString typeStr(cx, ToString(cx, v));
+ if (!typeStr) {
+ return false;
+ }
+
+ Rooted<JSLinearString*> typeLinearStr(cx, typeStr->ensureLinear(cx));
+ if (!typeLinearStr) {
+ return false;
+ }
+
+ if (StringEqualsLiteral(typeLinearStr, "i32")) {
+ *out = ValType::I32;
+ } else if (StringEqualsLiteral(typeLinearStr, "i64")) {
+ *out = ValType::I64;
+ } else if (StringEqualsLiteral(typeLinearStr, "f32")) {
+ *out = ValType::F32;
+ } else if (StringEqualsLiteral(typeLinearStr, "f64")) {
+ *out = ValType::F64;
+#ifdef ENABLE_WASM_SIMD
+ } else if (SimdAvailable(cx) && StringEqualsLiteral(typeLinearStr, "v128")) {
+ *out = ValType::V128;
+#endif
+ } else {
+ RefType rt;
+ if (ToRefType(cx, typeLinearStr, &rt)) {
+ *out = ValType(rt);
+ } else {
+      // ToRefType will have reported an error if it failed, so just return
+      // false.
+ return false;
+ }
+ }
+
+ return true;
+}
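+
+// Rough summary of the accepted inputs (a sketch derived from the code above
+// rather than from a spec): object descriptors are tried first via
+// MaybeToRefType; otherwise the value is converted to a string, so
+//
+//   "i32" | "i64" | "f32" | "f64"         -> numeric ValTypes
+//   "v128"                                -> ValType::V128 (when SIMD is
+//                                            available)
+//   "funcref", "externref", "anyref", ... -> reference ValTypes via ToRefType
+//
+// Anything else reports JSMSG_WASM_BAD_STRING_VAL_TYPE and returns false.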
+
+bool wasm::ToRefType(JSContext* cx, HandleValue v, RefType* out) {
+ if (v.isObject()) {
+ RootedObject obj(cx, &v.toObject());
+ switch (MaybeToRefType(cx, obj, out)) {
+ case RefTypeResult::Failure:
+ return false;
+ case RefTypeResult::Parsed:
+ return true;
+ case RefTypeResult::Unparsed:
+ break;
+ }
+ }
+
+ RootedString typeStr(cx, ToString(cx, v));
+ if (!typeStr) {
+ return false;
+ }
+
+ Rooted<JSLinearString*> typeLinearStr(cx, typeStr->ensureLinear(cx));
+ if (!typeLinearStr) {
+ return false;
+ }
+
+ return ToRefType(cx, typeLinearStr, out);
+}
+
+UniqueChars wasm::ToString(RefType type, const TypeContext* types) {
+ // Try to emit a shorthand version first
+ if (type.isNullable() && !type.isTypeRef()) {
+ const char* literal = nullptr;
+ switch (type.kind()) {
+ case RefType::Func:
+ literal = "funcref";
+ break;
+ case RefType::Extern:
+ literal = "externref";
+ break;
+ case RefType::Any:
+ literal = "anyref";
+ break;
+ case RefType::NoFunc:
+ literal = "nullfuncref";
+ break;
+ case RefType::NoExtern:
+ literal = "nullexternref";
+ break;
+ case RefType::None:
+ literal = "nullref";
+ break;
+ case RefType::Eq:
+ literal = "eqref";
+ break;
+ case RefType::Struct:
+ literal = "structref";
+ break;
+ case RefType::Array:
+ literal = "arrayref";
+ break;
+ case RefType::TypeRef: {
+ MOZ_CRASH("type ref should not be possible here");
+ }
+ }
+ return DuplicateString(literal);
+ }
+
+ // Emit the full reference type with heap type
+ const char* heapType = nullptr;
+ switch (type.kind()) {
+ case RefType::Func:
+ heapType = "func";
+ break;
+ case RefType::Extern:
+ heapType = "extern";
+ break;
+ case RefType::Any:
+ heapType = "any";
+ break;
+ case RefType::NoFunc:
+ heapType = "nofunc";
+ break;
+ case RefType::NoExtern:
+ heapType = "noextern";
+ break;
+ case RefType::None:
+ heapType = "none";
+ break;
+ case RefType::Eq:
+ heapType = "eq";
+ break;
+ case RefType::Struct:
+ heapType = "struct";
+ break;
+ case RefType::Array:
+ heapType = "array";
+ break;
+ case RefType::TypeRef: {
+ if (types) {
+ uint32_t typeIndex = types->indexOf(*type.typeDef());
+ return JS_smprintf("(ref %s%d)", type.isNullable() ? "null " : "",
+ typeIndex);
+ }
+ return JS_smprintf("(ref %s?)", type.isNullable() ? "null " : "");
+ }
+ }
+ return JS_smprintf("(ref %s%s)", type.isNullable() ? "null " : "", heapType);
+}
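+
+// A few example renderings (illustrative; `t3` is assumed to be the
+// `const TypeDef*` stored at index 3 of the given TypeContext):
+//
+//   RefType::func()                  -> "funcref"
+//   RefType::func().asNonNullable()  -> "(ref func)"
+//   RefType::fromTypeDef(t3, true)   -> "(ref null 3)"  (with `types`)
+//   RefType::fromTypeDef(t3, false)  -> "(ref ?)"       (with `types` null)
+//
+// The `?` form is the fallback used when no TypeContext is supplied to name
+// the type index.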
+
+UniqueChars wasm::ToString(ValType type, const TypeContext* types) {
+ return ToString(type.fieldType(), types);
+}
+
+UniqueChars wasm::ToString(FieldType type, const TypeContext* types) {
+ const char* literal = nullptr;
+ switch (type.kind()) {
+ case FieldType::I8:
+ literal = "i8";
+ break;
+ case FieldType::I16:
+ literal = "i16";
+ break;
+ case FieldType::I32:
+ literal = "i32";
+ break;
+ case FieldType::I64:
+ literal = "i64";
+ break;
+ case FieldType::V128:
+ literal = "v128";
+ break;
+ case FieldType::F32:
+ literal = "f32";
+ break;
+ case FieldType::F64:
+ literal = "f64";
+ break;
+ case FieldType::Ref:
+ return ToString(type.refType(), types);
+ }
+ return DuplicateString(literal);
+}
+
+UniqueChars wasm::ToString(const Maybe<ValType>& type,
+ const TypeContext* types) {
+ return type ? ToString(type.ref(), types) : JS_smprintf("%s", "void");
+}
diff --git a/js/src/wasm/WasmValType.h b/js/src/wasm/WasmValType.h
new file mode 100644
index 0000000000..e0fdd43249
--- /dev/null
+++ b/js/src/wasm/WasmValType.h
@@ -0,0 +1,890 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_valtype_h
+#define wasm_valtype_h
+
+#include "mozilla/HashTable.h"
+#include "mozilla/Maybe.h"
+
+#include <type_traits>
+
+#include "jit/IonTypes.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace wasm {
+
+using mozilla::Maybe;
+
+class RecGroup;
+class TypeDef;
+class TypeContext;
+enum class TypeDefKind : uint8_t;
+
+// A PackedTypeCode represents any value type.
+union PackedTypeCode {
+ public:
+ using PackedRepr = uint64_t;
+
+ private:
+ static constexpr size_t NullableBits = 1;
+ static constexpr size_t TypeCodeBits = 8;
+ static constexpr size_t TypeDefBits = 48;
+ static constexpr size_t PointerTagBits = 2;
+
+ static_assert(NullableBits + TypeCodeBits + TypeDefBits + PointerTagBits <=
+ (sizeof(PackedRepr) * 8),
+ "enough bits");
+
+ PackedRepr bits_;
+ struct {
+ PackedRepr nullable_ : NullableBits;
+ PackedRepr typeCode_ : TypeCodeBits;
+    // A pointer to the TypeDef this type references. We use 48 bits for this
+    // and rely on system memory allocators not allocating outside of this
+    // range. This is also assumed by JS::Value, and so should be safe here.
+ PackedRepr typeDef_ : TypeDefBits;
+ // Reserve the bottom two bits for use as a tagging scheme for BlockType
+ // and ResultType, which can encode a ValType inside themselves in special
+ // cases.
+ PackedRepr pointerTag_ : PointerTagBits;
+ };
+
+ public:
+ static constexpr PackedRepr NoTypeCode = ((uint64_t)1 << TypeCodeBits) - 1;
+
+ static PackedTypeCode invalid() {
+ PackedTypeCode ptc = {};
+ ptc.typeCode_ = NoTypeCode;
+ return ptc;
+ }
+
+ static constexpr PackedTypeCode fromBits(PackedRepr bits) {
+ PackedTypeCode ptc = {};
+ ptc.bits_ = bits;
+ return ptc;
+ }
+
+ static PackedTypeCode pack(TypeCode tc, const TypeDef* typeDef,
+ bool isNullable) {
+ MOZ_ASSERT(uint32_t(tc) <= ((1 << TypeCodeBits) - 1));
+ MOZ_ASSERT_IF(tc != AbstractTypeRefCode, typeDef == nullptr);
+ MOZ_ASSERT_IF(tc == AbstractTypeRefCode, typeDef != nullptr);
+ // Double check that the type definition was allocated within 48-bits, as
+ // noted above.
+ MOZ_ASSERT((uint64_t)typeDef <= ((uint64_t)1 << TypeDefBits) - 1);
+ PackedTypeCode ptc = {};
+ ptc.typeCode_ = PackedRepr(tc);
+ ptc.typeDef_ = (uintptr_t)typeDef;
+ ptc.nullable_ = isNullable;
+ return ptc;
+ }
+
+ static PackedTypeCode pack(TypeCode tc, bool nullable) {
+ return pack(tc, nullptr, nullable);
+ }
+
+ static PackedTypeCode pack(TypeCode tc) { return pack(tc, nullptr, false); }
+
+ bool isValid() const { return typeCode_ != NoTypeCode; }
+
+ PackedRepr bits() const { return bits_; }
+
+ TypeCode typeCode() const {
+ MOZ_ASSERT(isValid());
+ return TypeCode(typeCode_);
+ }
+
+ // Return the TypeCode, but return AbstractReferenceTypeCode for any reference
+ // type.
+ //
+  // This function is very, very hot, hence what would normally be a switch on
+  // the type code to map the reference types to AbstractReferenceTypeCode has
+  // been distilled into a simple comparison; this is fastest. Should type
+  // codes become too complicated for this to work, a lookup table would still
+  // outperform a switch.
+ //
+ // An alternative is for the PackedTypeCode to represent something closer to
+ // what ValType needs, so that this decoding step is not necessary, but that
+ // moves complexity elsewhere, and the perf gain here would be only about 1%
+ // for baseline compilation throughput.
+ TypeCode typeCodeAbstracted() const {
+ TypeCode tc = typeCode();
+ return tc < LowestPrimitiveTypeCode ? AbstractReferenceTypeCode : tc;
+ }
+
+ // Return whether this type is a reference type.
+ bool isRefType() const {
+ return typeCodeAbstracted() == AbstractReferenceTypeCode;
+ }
+
+ // Return whether this type is represented by a reference at runtime.
+ bool isRefRepr() const { return typeCode() < LowestPrimitiveTypeCode; }
+
+ const TypeDef* typeDef() const {
+ MOZ_ASSERT(isValid());
+ return (const TypeDef*)(uintptr_t)typeDef_;
+ }
+
+ bool isNullable() const {
+ MOZ_ASSERT(isValid());
+ return bool(nullable_);
+ }
+
+ PackedTypeCode withIsNullable(bool nullable) const {
+ MOZ_ASSERT(isRefType());
+ PackedTypeCode mutated = *this;
+ mutated.nullable_ = (PackedRepr)nullable;
+ return mutated;
+ }
+
+ bool operator==(const PackedTypeCode& rhs) const {
+ return bits_ == rhs.bits_;
+ }
+ bool operator!=(const PackedTypeCode& rhs) const {
+ return bits_ != rhs.bits_;
+ }
+};
+
+static_assert(sizeof(PackedTypeCode) == sizeof(uint64_t), "packed");
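+
+// A minimal round-trip sketch (illustrative only, not exercised in this
+// header): packing a nullable funcref and reading the pieces back.
+//
+//   PackedTypeCode ptc = PackedTypeCode::pack(TypeCode::FuncRef,
+//                                             /* isNullable */ true);
+//   MOZ_ASSERT(ptc.isValid());
+//   MOZ_ASSERT(ptc.typeCode() == TypeCode::FuncRef);
+//   MOZ_ASSERT(ptc.isRefType() && ptc.isNullable());
+//   MOZ_ASSERT(!ptc.withIsNullable(false).isNullable());
+//
+// The TypeDef* payload is only meaningful for AbstractTypeRefCode; for the
+// concrete codes used above it stays null.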
+
+// A SerializableTypeCode represents any value type in a form that can be
+// serialized and deserialized.
+union SerializableTypeCode {
+ using PackedRepr = uintptr_t;
+
+ static constexpr size_t NullableBits = 1;
+ static constexpr size_t TypeCodeBits = 8;
+ static constexpr size_t TypeIndexBits = 20;
+
+ PackedRepr bits;
+ struct {
+ PackedRepr nullable : NullableBits;
+ PackedRepr typeCode : TypeCodeBits;
+ PackedRepr typeIndex : TypeIndexBits;
+ };
+
+ WASM_CHECK_CACHEABLE_POD(bits);
+
+ static constexpr PackedRepr NoTypeIndex = (1 << TypeIndexBits) - 1;
+
+ static_assert(NullableBits + TypeCodeBits + TypeIndexBits <=
+ (sizeof(PackedRepr) * 8),
+ "enough bits");
+ static_assert(NoTypeIndex < (1 << TypeIndexBits), "enough bits");
+ static_assert(MaxTypes < NoTypeIndex, "enough bits");
+
+ // Defined in WasmSerialize.cpp
+ static inline SerializableTypeCode serialize(PackedTypeCode ptc,
+ const TypeContext& types);
+ inline PackedTypeCode deserialize(const TypeContext& types);
+};
+
+WASM_DECLARE_CACHEABLE_POD(SerializableTypeCode);
+static_assert(sizeof(SerializableTypeCode) == sizeof(uintptr_t), "packed");
+
+// [SMDOC] Matching type definitions
+//
+// WebAssembly type equality is structural, and we implement canonicalization
+// such that equality of pointers to type definitions means that the type
+// definitions are structurally equal.
+//
+// 'Matching' is the algorithm used to determine if two types are equal while
+// canonicalizing types.
+//
+// A match type code encodes a type code for use in equality and hashing
+// matching. It normalizes type references that are local to a recursion group
+// so that they can be bitwise compared to type references from other recursion
+// groups.
+//
+// This is useful for the following example:
+//   (rec (func $a))
+//   (rec
+//     (func $b)
+//     (struct
+//       (field (ref $a))
+//       (field (ref $b)))
+//   )
+//   (rec
+//     (func $c)
+//     (struct
+//       (field (ref $a))
+//       (field (ref $c)))
+//   )
+//
+// The last two recursion groups are identical and should canonicalize to the
+// same instance. However, they will initially be represented as two separate
+// recursion group instances, each containing a struct type instance whose
+// field types point at the function type instance defined just before it. A
+// bitwise comparison of those field type pointers would therefore fail.
+//
+// To solve this, we use `MatchTypeCode` to convert the example to:
+//   (rec (func $a))
+//   (rec
+//     (func $b)
+//     (struct
+//       (field (ref nonlocal $a))
+//       (field (ref local 0)))
+//   )
+//   (rec
+//     (func $c)
+//     (struct
+//       (field (ref nonlocal $a))
+//       (field (ref local 0)))
+//   )
+//
+// Now a comparison of the field types sees the same non-local reference to $a
+// and the same local type reference in both groups. `MatchTypeCode` implements
+// the same mechanism as `tie` in the MVP presentation of type equality [1].
+//
+// [1]
+// https://github.com/WebAssembly/gc/blob/main/proposals/gc/MVP.md#equivalence
+union MatchTypeCode {
+ using PackedRepr = uint64_t;
+
+ static constexpr size_t NullableBits = 1;
+ static constexpr size_t TypeCodeBits = 8;
+ static constexpr size_t TypeRefBits = 48;
+
+ PackedRepr bits;
+ struct {
+ PackedRepr nullable : NullableBits;
+ PackedRepr typeCode : TypeCodeBits;
+ PackedRepr typeRef : TypeRefBits;
+ };
+
+ WASM_CHECK_CACHEABLE_POD(bits);
+
+ static_assert(NullableBits + TypeCodeBits + TypeRefBits <=
+ (sizeof(PackedRepr) * 8),
+ "enough bits");
+
+ // Defined in WasmTypeDef.h to avoid a cycle while allowing inlining
+ static inline MatchTypeCode forMatch(PackedTypeCode ptc,
+ const RecGroup* recGroup);
+
+ bool operator==(MatchTypeCode other) const { return bits == other.bits; }
+ bool operator!=(MatchTypeCode other) const { return bits != other.bits; }
+ HashNumber hash() const { return HashNumber(bits); }
+};
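+
+// Sketch of the intended use (the actual canonicalization driver lives
+// elsewhere; `fieldA`, `fieldB` and the rec group pointers are assumed
+// names): two structurally identical field types from different recursion
+// groups compare equal once each is normalized relative to its own group.
+//
+//   MatchTypeCode a = fieldA.forMatch(recGroupA);
+//   MatchTypeCode b = fieldB.forMatch(recGroupB);
+//   if (a == b) {
+//     // same structure; a.hash() == b.hash() as well
+//   }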
+
+// An enum that describes the representation classes for tables; the table
+// element type is mapped into this by Table::repr().
+
+enum class TableRepr { Ref, Func };
+
+// An enum that describes the different type hierarchies.
+
+enum class RefTypeHierarchy { Func, Extern, Any };
+
+// The RefType carries more information about types t for which t.isRefType()
+// is true.
+
+class RefType {
+ public:
+ enum Kind {
+ Func = uint8_t(TypeCode::FuncRef),
+ Extern = uint8_t(TypeCode::ExternRef),
+ Any = uint8_t(TypeCode::AnyRef),
+ NoFunc = uint8_t(TypeCode::NullFuncRef),
+ NoExtern = uint8_t(TypeCode::NullExternRef),
+ None = uint8_t(TypeCode::NullAnyRef),
+ Eq = uint8_t(TypeCode::EqRef),
+ Struct = uint8_t(TypeCode::StructRef),
+ Array = uint8_t(TypeCode::ArrayRef),
+ TypeRef = uint8_t(AbstractTypeRefCode)
+ };
+
+ private:
+ PackedTypeCode ptc_;
+
+ RefType(Kind kind, bool nullable)
+ : ptc_(PackedTypeCode::pack(TypeCode(kind), nullable)) {
+ MOZ_ASSERT(isValid());
+ }
+
+ RefType(const TypeDef* typeDef, bool nullable)
+ : ptc_(PackedTypeCode::pack(AbstractTypeRefCode, typeDef, nullable)) {
+ MOZ_ASSERT(isValid());
+ }
+
+ public:
+ RefType() : ptc_(PackedTypeCode::invalid()) {}
+ explicit RefType(PackedTypeCode ptc) : ptc_(ptc) { MOZ_ASSERT(isValid()); }
+
+ static RefType fromTypeCode(TypeCode tc, bool nullable) {
+ MOZ_ASSERT(tc != AbstractTypeRefCode);
+ return RefType(Kind(tc), nullable);
+ }
+
+ static RefType fromTypeDef(const TypeDef* typeDef, bool nullable) {
+ return RefType(typeDef, nullable);
+ }
+
+ Kind kind() const { return Kind(ptc_.typeCode()); }
+
+ const TypeDef* typeDef() const { return ptc_.typeDef(); }
+
+ PackedTypeCode packed() const { return ptc_; }
+ PackedTypeCode* addressOfPacked() { return &ptc_; }
+ const PackedTypeCode* addressOfPacked() const { return &ptc_; }
+
+#ifdef DEBUG
+ bool isValid() const {
+ MOZ_ASSERT((ptc_.typeCode() == AbstractTypeRefCode) ==
+ (ptc_.typeDef() != nullptr));
+ switch (ptc_.typeCode()) {
+ case TypeCode::FuncRef:
+ case TypeCode::ExternRef:
+ case TypeCode::AnyRef:
+ case TypeCode::EqRef:
+ case TypeCode::StructRef:
+ case TypeCode::ArrayRef:
+ case TypeCode::NullFuncRef:
+ case TypeCode::NullExternRef:
+ case TypeCode::NullAnyRef:
+ case AbstractTypeRefCode:
+ return true;
+ default:
+ return false;
+ }
+ }
+#endif
+
+ static RefType func() { return RefType(Func, true); }
+ static RefType extern_() { return RefType(Extern, true); }
+ static RefType any() { return RefType(Any, true); }
+ static RefType nofunc() { return RefType(NoFunc, true); }
+ static RefType noextern() { return RefType(NoExtern, true); }
+ static RefType none() { return RefType(None, true); }
+ static RefType eq() { return RefType(Eq, true); }
+ static RefType struct_() { return RefType(Struct, true); }
+ static RefType array() { return RefType(Array, true); }
+
+ bool isFunc() const { return kind() == RefType::Func; }
+ bool isExtern() const { return kind() == RefType::Extern; }
+ bool isAny() const { return kind() == RefType::Any; }
+ bool isNoFunc() const { return kind() == RefType::NoFunc; }
+ bool isNoExtern() const { return kind() == RefType::NoExtern; }
+ bool isNone() const { return kind() == RefType::None; }
+ bool isEq() const { return kind() == RefType::Eq; }
+ bool isStruct() const { return kind() == RefType::Struct; }
+ bool isArray() const { return kind() == RefType::Array; }
+ bool isTypeRef() const { return kind() == RefType::TypeRef; }
+
+ bool isNullable() const { return bool(ptc_.isNullable()); }
+ RefType asNonNullable() const { return withIsNullable(false); }
+ RefType withIsNullable(bool nullable) const {
+ return RefType(ptc_.withIsNullable(nullable));
+ }
+
+ bool isRefBottom() const { return isNone() || isNoFunc() || isNoExtern(); }
+
+ // These methods are defined in WasmTypeDef.h to avoid a cycle while allowing
+ // inlining.
+ inline RefTypeHierarchy hierarchy() const;
+ inline TableRepr tableRepr() const;
+ inline bool isFuncHierarchy() const;
+ inline bool isExternHierarchy() const;
+ inline bool isAnyHierarchy() const;
+ static bool isSubTypeOf(RefType subType, RefType superType);
+ static bool castPossible(RefType sourceType, RefType destType);
+
+  // Gets the top of the given type's hierarchy, e.g. Any for structs and
+  // arrays, and Func for funcs.
+ RefType topType() const;
+
+ // Gets the TypeDefKind associated with this RefType, e.g. TypeDefKind::Struct
+ // for RefType::Struct.
+ TypeDefKind typeDefKind() const;
+
+ bool operator==(const RefType& that) const { return ptc_ == that.ptc_; }
+ bool operator!=(const RefType& that) const { return ptc_ != that.ptc_; }
+};
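+
+// Small usage sketch (illustrative; it relies only on members declared
+// above):
+//
+//   RefType anyNull = RefType::any();            // shorthand "anyref"
+//   RefType anyExact = anyNull.asNonNullable();  // "(ref any)"
+//   MOZ_ASSERT(anyNull.isNullable() && !anyExact.isNullable());
+//   MOZ_ASSERT(RefType::struct_().topType() == RefType::any());
+//   MOZ_ASSERT(RefType::nofunc().isRefBottom());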
+
+class FieldTypeTraits {
+ public:
+ enum Kind {
+ I8 = uint8_t(TypeCode::I8),
+ I16 = uint8_t(TypeCode::I16),
+ I32 = uint8_t(TypeCode::I32),
+ I64 = uint8_t(TypeCode::I64),
+ F32 = uint8_t(TypeCode::F32),
+ F64 = uint8_t(TypeCode::F64),
+ V128 = uint8_t(TypeCode::V128),
+ Ref = uint8_t(AbstractReferenceTypeCode),
+ };
+
+ static bool isValidTypeCode(TypeCode tc) {
+ switch (tc) {
+#ifdef ENABLE_WASM_GC
+ case TypeCode::I8:
+ case TypeCode::I16:
+#endif
+ case TypeCode::I32:
+ case TypeCode::I64:
+ case TypeCode::F32:
+ case TypeCode::F64:
+#ifdef ENABLE_WASM_SIMD
+ case TypeCode::V128:
+#endif
+ case TypeCode::FuncRef:
+ case TypeCode::ExternRef:
+#ifdef ENABLE_WASM_GC
+ case TypeCode::AnyRef:
+ case TypeCode::EqRef:
+ case TypeCode::StructRef:
+ case TypeCode::ArrayRef:
+ case TypeCode::NullFuncRef:
+ case TypeCode::NullExternRef:
+ case TypeCode::NullAnyRef:
+#endif
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case AbstractTypeRefCode:
+#endif
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isNumberTypeCode(TypeCode tc) {
+ switch (tc) {
+ case TypeCode::I32:
+ case TypeCode::I64:
+ case TypeCode::F32:
+ case TypeCode::F64:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isPackedTypeCode(TypeCode tc) {
+ switch (tc) {
+#ifdef ENABLE_WASM_GC
+ case TypeCode::I8:
+ case TypeCode::I16:
+ return true;
+#endif
+ default:
+ return false;
+ }
+ }
+
+ static bool isVectorTypeCode(TypeCode tc) {
+ switch (tc) {
+#ifdef ENABLE_WASM_SIMD
+ case TypeCode::V128:
+ return true;
+#endif
+ default:
+ return false;
+ }
+ }
+};
+
+class ValTypeTraits {
+ public:
+ enum Kind {
+ I32 = uint8_t(TypeCode::I32),
+ I64 = uint8_t(TypeCode::I64),
+ F32 = uint8_t(TypeCode::F32),
+ F64 = uint8_t(TypeCode::F64),
+ V128 = uint8_t(TypeCode::V128),
+ Ref = uint8_t(AbstractReferenceTypeCode),
+ };
+
+ static bool isValidTypeCode(TypeCode tc) {
+ switch (tc) {
+ case TypeCode::I32:
+ case TypeCode::I64:
+ case TypeCode::F32:
+ case TypeCode::F64:
+#ifdef ENABLE_WASM_SIMD
+ case TypeCode::V128:
+#endif
+ case TypeCode::FuncRef:
+ case TypeCode::ExternRef:
+#ifdef ENABLE_WASM_GC
+ case TypeCode::AnyRef:
+ case TypeCode::EqRef:
+ case TypeCode::StructRef:
+ case TypeCode::ArrayRef:
+ case TypeCode::NullFuncRef:
+ case TypeCode::NullExternRef:
+ case TypeCode::NullAnyRef:
+#endif
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case AbstractTypeRefCode:
+#endif
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isNumberTypeCode(TypeCode tc) {
+ switch (tc) {
+ case TypeCode::I32:
+ case TypeCode::I64:
+ case TypeCode::F32:
+ case TypeCode::F64:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static bool isPackedTypeCode(TypeCode tc) { return false; }
+
+ static bool isVectorTypeCode(TypeCode tc) {
+ switch (tc) {
+#ifdef ENABLE_WASM_SIMD
+ case TypeCode::V128:
+ return true;
+#endif
+ default:
+ return false;
+ }
+ }
+};
+
+// The PackedType represents the storage type of a WebAssembly location, whether
+// parameter, local, field, or global. See specializations below for ValType and
+// FieldType.
+
+template <class T>
+class PackedType : public T {
+ public:
+ using Kind = typename T::Kind;
+
+ protected:
+ PackedTypeCode tc_;
+
+ explicit PackedType(TypeCode c) : tc_(PackedTypeCode::pack(c)) {
+ MOZ_ASSERT(c != AbstractTypeRefCode);
+ MOZ_ASSERT(isValid());
+ }
+
+ TypeCode typeCode() const {
+ MOZ_ASSERT(isValid());
+ return tc_.typeCode();
+ }
+
+ public:
+ PackedType() : tc_(PackedTypeCode::invalid()) {}
+
+ MOZ_IMPLICIT PackedType(Kind c) : tc_(PackedTypeCode::pack(TypeCode(c))) {
+ MOZ_ASSERT(c != Kind::Ref);
+ MOZ_ASSERT(isValid());
+ }
+
+ MOZ_IMPLICIT PackedType(RefType rt) : tc_(rt.packed()) {
+ MOZ_ASSERT(isValid());
+ }
+
+ explicit PackedType(PackedTypeCode ptc) : tc_(ptc) { MOZ_ASSERT(isValid()); }
+
+  static PackedType fromMIRType(jit::MIRType mty) {
+    switch (mty) {
+      case jit::MIRType::Int32:
+        return PackedType::I32;
+      case jit::MIRType::Int64:
+        return PackedType::I64;
+      case jit::MIRType::Float32:
+        return PackedType::F32;
+      case jit::MIRType::Double:
+        return PackedType::F64;
+      case jit::MIRType::Simd128:
+        return PackedType::V128;
+      case jit::MIRType::RefOrNull:
+        return PackedType::Ref;
+      default:
+        MOZ_CRASH("fromMIRType: unexpected type");
+    }
+  }
+
+ static PackedType fromNonRefTypeCode(TypeCode tc) {
+#ifdef DEBUG
+ switch (tc) {
+ case TypeCode::I8:
+ case TypeCode::I16:
+ case TypeCode::I32:
+ case TypeCode::I64:
+ case TypeCode::F32:
+ case TypeCode::F64:
+ case TypeCode::V128:
+ break;
+ default:
+ MOZ_CRASH("Bad type code");
+ }
+#endif
+ return PackedType(tc);
+ }
+
+ static PackedType fromBitsUnsafe(PackedTypeCode::PackedRepr bits) {
+ return PackedType(PackedTypeCode::fromBits(bits));
+ }
+
+ bool isValid() const {
+ if (!tc_.isValid()) {
+ return false;
+ }
+ return T::isValidTypeCode(tc_.typeCode());
+ }
+
+ MatchTypeCode forMatch(const RecGroup* recGroup) const {
+ return MatchTypeCode::forMatch(tc_, recGroup);
+ }
+
+ PackedTypeCode packed() const {
+ MOZ_ASSERT(isValid());
+ return tc_;
+ }
+ PackedTypeCode* addressOfPacked() { return &tc_; }
+ const PackedTypeCode* addressOfPacked() const { return &tc_; }
+
+ PackedTypeCode::PackedRepr bitsUnsafe() const {
+ MOZ_ASSERT(isValid());
+ return tc_.bits();
+ }
+
+ bool isNumber() const { return T::isNumberTypeCode(tc_.typeCode()); }
+
+ bool isPacked() const { return T::isPackedTypeCode(tc_.typeCode()); }
+
+ bool isVector() const { return T::isVectorTypeCode(tc_.typeCode()); }
+
+ bool isRefType() const { return tc_.isRefType(); }
+
+ bool isFuncRef() const { return tc_.typeCode() == TypeCode::FuncRef; }
+
+ bool isExternRef() const { return tc_.typeCode() == TypeCode::ExternRef; }
+
+ bool isAnyRef() const { return tc_.typeCode() == TypeCode::AnyRef; }
+
+ bool isNoFunc() const { return tc_.typeCode() == TypeCode::NullFuncRef; }
+
+ bool isNoExtern() const { return tc_.typeCode() == TypeCode::NullExternRef; }
+
+ bool isNone() const { return tc_.typeCode() == TypeCode::NullAnyRef; }
+
+ bool isEqRef() const { return tc_.typeCode() == TypeCode::EqRef; }
+
+ bool isStructRef() const { return tc_.typeCode() == TypeCode::StructRef; }
+
+ bool isArrayRef() const { return tc_.typeCode() == TypeCode::ArrayRef; }
+
+ bool isTypeRef() const { return tc_.typeCode() == AbstractTypeRefCode; }
+
+ bool isRefRepr() const { return tc_.isRefRepr(); }
+
+ // Returns whether the type has a default value.
+ bool isDefaultable() const { return !(isRefType() && !isNullable()); }
+
+ // Returns whether the type has a representation in JS.
+ bool isExposable() const {
+#if defined(ENABLE_WASM_SIMD)
+ return kind() != Kind::V128;
+#else
+ return true;
+#endif
+ }
+
+ bool isNullable() const { return tc_.isNullable(); }
+
+ const TypeDef* typeDef() const { return tc_.typeDef(); }
+
+ Kind kind() const { return Kind(tc_.typeCodeAbstracted()); }
+
+ RefType refType() const {
+ MOZ_ASSERT(isRefType());
+ return RefType(tc_);
+ }
+
+ RefType::Kind refTypeKind() const {
+ MOZ_ASSERT(isRefType());
+ return RefType(tc_).kind();
+ }
+
+ // Some types are encoded as JS::Value when they escape from Wasm (when passed
+ // as parameters to imports or returned from exports). For ExternRef the
+ // Value encoding is pretty much a requirement. For other types it's a choice
+ // that may (temporarily) simplify some code.
+ bool isEncodedAsJSValueOnEscape() const { return isRefType(); }
+
+ uint32_t size() const {
+ switch (tc_.typeCodeAbstracted()) {
+ case TypeCode::I8:
+ return 1;
+ case TypeCode::I16:
+ return 2;
+ case TypeCode::I32:
+ return 4;
+ case TypeCode::I64:
+ return 8;
+ case TypeCode::F32:
+ return 4;
+ case TypeCode::F64:
+ return 8;
+ case TypeCode::V128:
+ return 16;
+ case AbstractReferenceTypeCode:
+ return sizeof(void*);
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ return 0;
+ }
+ }
+ uint32_t alignmentInStruct() const { return size(); }
+ uint32_t indexingShift() const {
+ switch (size()) {
+ case 1:
+ return 0;
+ case 2:
+ return 1;
+ case 4:
+ return 2;
+ case 8:
+ return 3;
+ case 16:
+ return 4;
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ return 0;
+ }
+ }
+
+ PackedType<ValTypeTraits> widenToValType() const {
+ switch (tc_.typeCodeAbstracted()) {
+ case TypeCode::I8:
+ case TypeCode::I16:
+ return PackedType<ValTypeTraits>::I32;
+ default:
+ return PackedType<ValTypeTraits>(tc_);
+ }
+ }
+
+ PackedType<ValTypeTraits> valType() const {
+ MOZ_ASSERT(isValType());
+ return PackedType<ValTypeTraits>(tc_);
+ }
+
+ // Note, ToMIRType is only correct within Wasm, where an AnyRef is represented
+ // as a pointer. At the JS/wasm boundary, an AnyRef can be represented as a
+ // JS::Value, and the type translation may have to be handled specially and on
+ // a case-by-case basis.
+ jit::MIRType toMIRType() const {
+ switch (tc_.typeCodeAbstracted()) {
+ case TypeCode::I32:
+ return jit::MIRType::Int32;
+ case TypeCode::I64:
+ return jit::MIRType::Int64;
+ case TypeCode::F32:
+ return jit::MIRType::Float32;
+ case TypeCode::F64:
+ return jit::MIRType::Double;
+ case TypeCode::V128:
+ return jit::MIRType::Simd128;
+ case AbstractReferenceTypeCode:
+ return jit::MIRType::RefOrNull;
+ default:
+ MOZ_CRASH("bad type");
+ }
+ }
+
+ bool isValType() const {
+ switch (tc_.typeCode()) {
+ case TypeCode::I8:
+ case TypeCode::I16:
+ return false;
+ default:
+ return true;
+ }
+ }
+
+ PackedType<FieldTypeTraits> fieldType() const {
+ MOZ_ASSERT(isValid());
+ return PackedType<FieldTypeTraits>(tc_);
+ }
+
+ static bool isSubTypeOf(PackedType subType, PackedType superType) {
+ // Anything is a subtype of itself.
+ if (subType == superType) {
+ return true;
+ }
+
+ // A reference may be a subtype of another reference
+ if (subType.isRefType() && superType.isRefType()) {
+ return RefType::isSubTypeOf(subType.refType(), superType.refType());
+ }
+
+ return false;
+ }
+
+ bool operator==(const PackedType& that) const {
+ MOZ_ASSERT(isValid() && that.isValid());
+ return tc_ == that.tc_;
+ }
+
+ bool operator!=(const PackedType& that) const {
+ MOZ_ASSERT(isValid() && that.isValid());
+ return tc_ != that.tc_;
+ }
+
+ bool operator==(Kind that) const {
+ MOZ_ASSERT(isValid());
+ MOZ_ASSERT(that != Kind::Ref);
+ return Kind(typeCode()) == that;
+ }
+
+ bool operator!=(Kind that) const { return !(*this == that); }
+};
+
+using ValType = PackedType<ValTypeTraits>;
+using FieldType = PackedType<FieldTypeTraits>;
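+
+// Usage sketch for the two specializations (illustrative only):
+//
+//   ValType t = ValType::F64;
+//   MOZ_ASSERT(t.isNumber() && t.size() == 8);
+//   MOZ_ASSERT(t.toMIRType() == jit::MIRType::Double);
+//
+//   ValType r = RefType::extern_();  // implicit RefType -> ValType
+//   MOZ_ASSERT(r.isRefType() && r.isExternRef() && r.isNullable());
+//
+// FieldType additionally admits the packed i8/i16 storage types (when GC is
+// enabled); widenToValType() maps those to I32 for value-level operations.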
+
+// The dominant use of this data type is for locals and args, and profiling
+// with ZenGarden and Tanks suggests an initial size of 16 minimises heap
+// allocation, both in terms of blocks and bytes.
+using ValTypeVector = Vector<ValType, 16, SystemAllocPolicy>;
+
+// ValType utilities
+
+extern bool ToValType(JSContext* cx, HandleValue v, ValType* out);
+extern bool ToRefType(JSContext* cx, HandleValue v, RefType* out);
+
+extern UniqueChars ToString(RefType type, const TypeContext* types);
+extern UniqueChars ToString(ValType type, const TypeContext* types);
+extern UniqueChars ToString(FieldType type, const TypeContext* types);
+extern UniqueChars ToString(const Maybe<ValType>& type,
+ const TypeContext* types);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_valtype_h
diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
new file mode 100644
index 0000000000..eb7d7f04e0
--- /dev/null
+++ b/js/src/wasm/WasmValidate.cpp
@@ -0,0 +1,3223 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmValidate.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Span.h"
+#include "mozilla/Utf8.h"
+
+#include "js/Printf.h"
+#include "js/String.h" // JS::MaxStringLength
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "wasm/WasmOpIter.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::AsChars;
+using mozilla::CheckedInt;
+using mozilla::CheckedInt32;
+using mozilla::IsUtf8;
+using mozilla::Span;
+
+// Misc helpers.
+
+bool wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals) {
+ if (locals.length() > MaxLocals) {
+ return false;
+ }
+
+ uint32_t numLocalEntries = 0;
+ if (locals.length()) {
+ ValType prev = locals[0];
+ numLocalEntries++;
+ for (ValType t : locals) {
+ if (t != prev) {
+ numLocalEntries++;
+ prev = t;
+ }
+ }
+ }
+
+ if (!e.writeVarU32(numLocalEntries)) {
+ return false;
+ }
+
+ if (numLocalEntries) {
+ ValType prev = locals[0];
+ uint32_t count = 1;
+ for (uint32_t i = 1; i < locals.length(); i++, count++) {
+ if (prev != locals[i]) {
+ if (!e.writeVarU32(count)) {
+ return false;
+ }
+ if (!e.writeValType(prev)) {
+ return false;
+ }
+ prev = locals[i];
+ count = 0;
+ }
+ }
+ if (!e.writeVarU32(count)) {
+ return false;
+ }
+ if (!e.writeValType(prev)) {
+ return false;
+ }
+ }
+
+ return true;
+}
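+
+// Encoding sketch (illustrative): locals are run-length encoded as
+// (count, type) pairs, matching the binary format's local declarations. For
+// example, the vector [i32, i32, f64, f64, f64] is written as
+//
+//   numLocalEntries = 2
+//   (2, i32) (3, f64)
+//
+// and DecodeLocalEntries below reverses this by appending `count` copies of
+// each decoded type.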
+
+bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
+ const FeatureArgs& features,
+ ValTypeVector* locals) {
+ uint32_t numLocalEntries;
+ if (!d.readVarU32(&numLocalEntries)) {
+ return d.fail("failed to read number of local entries");
+ }
+
+ for (uint32_t i = 0; i < numLocalEntries; i++) {
+ uint32_t count;
+ if (!d.readVarU32(&count)) {
+ return d.fail("failed to read local entry count");
+ }
+
+ if (MaxLocals - locals->length() < count) {
+ return d.fail("too many locals");
+ }
+
+ ValType type;
+ if (!d.readValType(types, features, &type)) {
+ return false;
+ }
+
+ if (!locals->appendN(type, count)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool wasm::DecodeValidatedLocalEntries(const TypeContext& types, Decoder& d,
+ ValTypeVector* locals) {
+ uint32_t numLocalEntries;
+ MOZ_ALWAYS_TRUE(d.readVarU32(&numLocalEntries));
+
+ for (uint32_t i = 0; i < numLocalEntries; i++) {
+ uint32_t count = d.uncheckedReadVarU32();
+ MOZ_ASSERT(MaxLocals - locals->length() >= count);
+ if (!locals->appendN(d.uncheckedReadValType(types), count)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool wasm::CheckIsSubtypeOf(Decoder& d, const ModuleEnvironment& env,
+ size_t opcodeOffset, FieldType subType,
+ FieldType superType) {
+ if (FieldType::isSubTypeOf(subType, superType)) {
+ return true;
+ }
+
+ UniqueChars subText = ToString(subType, env.types);
+ if (!subText) {
+ return false;
+ }
+
+ UniqueChars superText = ToString(superType, env.types);
+ if (!superText) {
+ return false;
+ }
+
+ UniqueChars error(
+ JS_smprintf("type mismatch: expression has type %s but expected %s",
+ subText.get(), superText.get()));
+ if (!error) {
+ return false;
+ }
+
+ return d.fail(opcodeOffset, error.get());
+}
+
+// Function body validation.
+
+static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
+ uint32_t funcIndex,
+ const ValTypeVector& locals,
+ const uint8_t* bodyEnd, Decoder* d) {
+ ValidatingOpIter iter(env, *d);
+
+ if (!iter.startFunction(funcIndex, locals)) {
+ return false;
+ }
+
+#define CHECK(c) \
+ if (!(c)) return false; \
+ break
+
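+  // Note on the CHECK macro above: each opcode case below expands to
+  // "validate this operand form, bail out on failure, otherwise break out of
+  // the switch", so the cases read as a flat opcode -> reader-method table
+  // without explicit break statements.
+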
+ while (true) {
+ OpBytes op;
+ if (!iter.readOp(&op)) {
+ return false;
+ }
+
+ Nothing nothing;
+ NothingVector nothings{};
+ ResultType unusedType;
+
+ switch (op.b0) {
+ case uint16_t(Op::End): {
+ LabelKind unusedKind;
+ if (!iter.readEnd(&unusedKind, &unusedType, &nothings, &nothings)) {
+ return false;
+ }
+ iter.popEnd();
+ if (iter.controlStackEmpty()) {
+ return iter.endFunction(bodyEnd);
+ }
+ break;
+ }
+ case uint16_t(Op::Nop):
+ CHECK(iter.readNop());
+ case uint16_t(Op::Drop):
+ CHECK(iter.readDrop());
+ case uint16_t(Op::Call): {
+ uint32_t unusedIndex;
+ NothingVector unusedArgs{};
+ CHECK(iter.readCall(&unusedIndex, &unusedArgs));
+ }
+ case uint16_t(Op::CallIndirect): {
+ uint32_t unusedIndex, unusedIndex2;
+ NothingVector unusedArgs{};
+ CHECK(iter.readCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
+ &unusedArgs));
+ }
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint16_t(Op::CallRef): {
+ if (!env.functionReferencesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ const FuncType* unusedType;
+ NothingVector unusedArgs{};
+ CHECK(iter.readCallRef(&unusedType, &nothing, &unusedArgs));
+ }
+#endif
+ case uint16_t(Op::I32Const): {
+ int32_t unused;
+ CHECK(iter.readI32Const(&unused));
+ }
+ case uint16_t(Op::I64Const): {
+ int64_t unused;
+ CHECK(iter.readI64Const(&unused));
+ }
+ case uint16_t(Op::F32Const): {
+ float unused;
+ CHECK(iter.readF32Const(&unused));
+ }
+ case uint16_t(Op::F64Const): {
+ double unused;
+ CHECK(iter.readF64Const(&unused));
+ }
+ case uint16_t(Op::LocalGet): {
+ uint32_t unused;
+ CHECK(iter.readGetLocal(locals, &unused));
+ }
+ case uint16_t(Op::LocalSet): {
+ uint32_t unused;
+ CHECK(iter.readSetLocal(locals, &unused, &nothing));
+ }
+ case uint16_t(Op::LocalTee): {
+ uint32_t unused;
+ CHECK(iter.readTeeLocal(locals, &unused, &nothing));
+ }
+ case uint16_t(Op::GlobalGet): {
+ uint32_t unused;
+ CHECK(iter.readGetGlobal(&unused));
+ }
+ case uint16_t(Op::GlobalSet): {
+ uint32_t unused;
+ CHECK(iter.readSetGlobal(&unused, &nothing));
+ }
+ case uint16_t(Op::TableGet): {
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableGet(&unusedTableIndex, &nothing));
+ }
+ case uint16_t(Op::TableSet): {
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableSet(&unusedTableIndex, &nothing, &nothing));
+ }
+ case uint16_t(Op::SelectNumeric): {
+ StackType unused;
+ CHECK(iter.readSelect(/*typed*/ false, &unused, &nothing, &nothing,
+ &nothing));
+ }
+ case uint16_t(Op::SelectTyped): {
+ StackType unused;
+ CHECK(iter.readSelect(/*typed*/ true, &unused, &nothing, &nothing,
+ &nothing));
+ }
+ case uint16_t(Op::Block):
+ CHECK(iter.readBlock(&unusedType));
+ case uint16_t(Op::Loop):
+ CHECK(iter.readLoop(&unusedType));
+ case uint16_t(Op::If):
+ CHECK(iter.readIf(&unusedType, &nothing));
+ case uint16_t(Op::Else):
+ CHECK(iter.readElse(&unusedType, &unusedType, &nothings));
+ case uint16_t(Op::I32Clz):
+ case uint16_t(Op::I32Ctz):
+ case uint16_t(Op::I32Popcnt):
+ CHECK(iter.readUnary(ValType::I32, &nothing));
+ case uint16_t(Op::I64Clz):
+ case uint16_t(Op::I64Ctz):
+ case uint16_t(Op::I64Popcnt):
+ CHECK(iter.readUnary(ValType::I64, &nothing));
+ case uint16_t(Op::F32Abs):
+ case uint16_t(Op::F32Neg):
+ case uint16_t(Op::F32Ceil):
+ case uint16_t(Op::F32Floor):
+ case uint16_t(Op::F32Sqrt):
+ case uint16_t(Op::F32Trunc):
+ case uint16_t(Op::F32Nearest):
+ CHECK(iter.readUnary(ValType::F32, &nothing));
+ case uint16_t(Op::F64Abs):
+ case uint16_t(Op::F64Neg):
+ case uint16_t(Op::F64Ceil):
+ case uint16_t(Op::F64Floor):
+ case uint16_t(Op::F64Sqrt):
+ case uint16_t(Op::F64Trunc):
+ case uint16_t(Op::F64Nearest):
+ CHECK(iter.readUnary(ValType::F64, &nothing));
+ case uint16_t(Op::I32Add):
+ case uint16_t(Op::I32Sub):
+ case uint16_t(Op::I32Mul):
+ case uint16_t(Op::I32DivS):
+ case uint16_t(Op::I32DivU):
+ case uint16_t(Op::I32RemS):
+ case uint16_t(Op::I32RemU):
+ case uint16_t(Op::I32And):
+ case uint16_t(Op::I32Or):
+ case uint16_t(Op::I32Xor):
+ case uint16_t(Op::I32Shl):
+ case uint16_t(Op::I32ShrS):
+ case uint16_t(Op::I32ShrU):
+ case uint16_t(Op::I32Rotl):
+ case uint16_t(Op::I32Rotr):
+ CHECK(iter.readBinary(ValType::I32, &nothing, &nothing));
+ case uint16_t(Op::I64Add):
+ case uint16_t(Op::I64Sub):
+ case uint16_t(Op::I64Mul):
+ case uint16_t(Op::I64DivS):
+ case uint16_t(Op::I64DivU):
+ case uint16_t(Op::I64RemS):
+ case uint16_t(Op::I64RemU):
+ case uint16_t(Op::I64And):
+ case uint16_t(Op::I64Or):
+ case uint16_t(Op::I64Xor):
+ case uint16_t(Op::I64Shl):
+ case uint16_t(Op::I64ShrS):
+ case uint16_t(Op::I64ShrU):
+ case uint16_t(Op::I64Rotl):
+ case uint16_t(Op::I64Rotr):
+ CHECK(iter.readBinary(ValType::I64, &nothing, &nothing));
+ case uint16_t(Op::F32Add):
+ case uint16_t(Op::F32Sub):
+ case uint16_t(Op::F32Mul):
+ case uint16_t(Op::F32Div):
+ case uint16_t(Op::F32Min):
+ case uint16_t(Op::F32Max):
+ case uint16_t(Op::F32CopySign):
+ CHECK(iter.readBinary(ValType::F32, &nothing, &nothing));
+ case uint16_t(Op::F64Add):
+ case uint16_t(Op::F64Sub):
+ case uint16_t(Op::F64Mul):
+ case uint16_t(Op::F64Div):
+ case uint16_t(Op::F64Min):
+ case uint16_t(Op::F64Max):
+ case uint16_t(Op::F64CopySign):
+ CHECK(iter.readBinary(ValType::F64, &nothing, &nothing));
+ case uint16_t(Op::I32Eq):
+ case uint16_t(Op::I32Ne):
+ case uint16_t(Op::I32LtS):
+ case uint16_t(Op::I32LtU):
+ case uint16_t(Op::I32LeS):
+ case uint16_t(Op::I32LeU):
+ case uint16_t(Op::I32GtS):
+ case uint16_t(Op::I32GtU):
+ case uint16_t(Op::I32GeS):
+ case uint16_t(Op::I32GeU):
+ CHECK(iter.readComparison(ValType::I32, &nothing, &nothing));
+ case uint16_t(Op::I64Eq):
+ case uint16_t(Op::I64Ne):
+ case uint16_t(Op::I64LtS):
+ case uint16_t(Op::I64LtU):
+ case uint16_t(Op::I64LeS):
+ case uint16_t(Op::I64LeU):
+ case uint16_t(Op::I64GtS):
+ case uint16_t(Op::I64GtU):
+ case uint16_t(Op::I64GeS):
+ case uint16_t(Op::I64GeU):
+ CHECK(iter.readComparison(ValType::I64, &nothing, &nothing));
+ case uint16_t(Op::F32Eq):
+ case uint16_t(Op::F32Ne):
+ case uint16_t(Op::F32Lt):
+ case uint16_t(Op::F32Le):
+ case uint16_t(Op::F32Gt):
+ case uint16_t(Op::F32Ge):
+ CHECK(iter.readComparison(ValType::F32, &nothing, &nothing));
+ case uint16_t(Op::F64Eq):
+ case uint16_t(Op::F64Ne):
+ case uint16_t(Op::F64Lt):
+ case uint16_t(Op::F64Le):
+ case uint16_t(Op::F64Gt):
+ case uint16_t(Op::F64Ge):
+ CHECK(iter.readComparison(ValType::F64, &nothing, &nothing));
+ case uint16_t(Op::I32Eqz):
+ CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
+ case uint16_t(Op::I64Eqz):
+ case uint16_t(Op::I32WrapI64):
+ CHECK(iter.readConversion(ValType::I64, ValType::I32, &nothing));
+ case uint16_t(Op::I32TruncF32S):
+ case uint16_t(Op::I32TruncF32U):
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
+ case uint16_t(Op::I32TruncF64S):
+ case uint16_t(Op::I32TruncF64U):
+ CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
+ case uint16_t(Op::I64ExtendI32S):
+ case uint16_t(Op::I64ExtendI32U):
+ CHECK(iter.readConversion(ValType::I32, ValType::I64, &nothing));
+ case uint16_t(Op::I64TruncF32S):
+ case uint16_t(Op::I64TruncF32U):
+ CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
+ case uint16_t(Op::I64TruncF64S):
+ case uint16_t(Op::I64TruncF64U):
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
+ case uint16_t(Op::F32ConvertI32S):
+ case uint16_t(Op::F32ConvertI32U):
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK(iter.readConversion(ValType::I32, ValType::F32, &nothing));
+ case uint16_t(Op::F32ConvertI64S):
+ case uint16_t(Op::F32ConvertI64U):
+ CHECK(iter.readConversion(ValType::I64, ValType::F32, &nothing));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK(iter.readConversion(ValType::F64, ValType::F32, &nothing));
+ case uint16_t(Op::F64ConvertI32S):
+ case uint16_t(Op::F64ConvertI32U):
+ CHECK(iter.readConversion(ValType::I32, ValType::F64, &nothing));
+ case uint16_t(Op::F64ConvertI64S):
+ case uint16_t(Op::F64ConvertI64U):
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK(iter.readConversion(ValType::I64, ValType::F64, &nothing));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK(iter.readConversion(ValType::F32, ValType::F64, &nothing));
+ case uint16_t(Op::I32Extend8S):
+ case uint16_t(Op::I32Extend16S):
+ CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
+ case uint16_t(Op::I64Extend8S):
+ case uint16_t(Op::I64Extend16S):
+ case uint16_t(Op::I64Extend32S):
+ CHECK(iter.readConversion(ValType::I64, ValType::I64, &nothing));
+ case uint16_t(Op::I32Load8S):
+ case uint16_t(Op::I32Load8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I32, 1, &addr));
+ }
+ case uint16_t(Op::I32Load16S):
+ case uint16_t(Op::I32Load16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I32, 2, &addr));
+ }
+ case uint16_t(Op::I32Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I32, 4, &addr));
+ }
+ case uint16_t(Op::I64Load8S):
+ case uint16_t(Op::I64Load8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 1, &addr));
+ }
+ case uint16_t(Op::I64Load16S):
+ case uint16_t(Op::I64Load16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 2, &addr));
+ }
+ case uint16_t(Op::I64Load32S):
+ case uint16_t(Op::I64Load32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 4, &addr));
+ }
+ case uint16_t(Op::I64Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 8, &addr));
+ }
+ case uint16_t(Op::F32Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::F32, 4, &addr));
+ }
+ case uint16_t(Op::F64Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::F64, 8, &addr));
+ }
+ case uint16_t(Op::I32Store8): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I32, 1, &addr, &nothing));
+ }
+ case uint16_t(Op::I32Store16): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I32, 2, &addr, &nothing));
+ }
+ case uint16_t(Op::I32Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I32, 4, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store8): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 1, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store16): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 2, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store32): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 4, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 8, &addr, &nothing));
+ }
+ case uint16_t(Op::F32Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::F32, 4, &addr, &nothing));
+ }
+ case uint16_t(Op::F64Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::F64, 8, &addr, &nothing));
+ }
+ case uint16_t(Op::MemoryGrow):
+ CHECK(iter.readMemoryGrow(&nothing));
+ case uint16_t(Op::MemorySize):
+ CHECK(iter.readMemorySize());
+ case uint16_t(Op::Br): {
+ uint32_t unusedDepth;
+ CHECK(iter.readBr(&unusedDepth, &unusedType, &nothings));
+ }
+ case uint16_t(Op::BrIf): {
+ uint32_t unusedDepth;
+ CHECK(iter.readBrIf(&unusedDepth, &unusedType, &nothings, &nothing));
+ }
+ case uint16_t(Op::BrTable): {
+ Uint32Vector unusedDepths;
+ uint32_t unusedDefault;
+ CHECK(iter.readBrTable(&unusedDepths, &unusedDefault, &unusedType,
+ &nothings, &nothing));
+ }
+ case uint16_t(Op::Return):
+ CHECK(iter.readReturn(&nothings));
+ case uint16_t(Op::Unreachable):
+ CHECK(iter.readUnreachable());
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::GcPrefix): {
+ if (!env.gcEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew): {
+ uint32_t unusedUint;
+ NothingVector unusedArgs{};
+ CHECK(iter.readStructNew(&unusedUint, &unusedArgs));
+ }
+ case uint32_t(GcOp::StructNewDefault): {
+ uint32_t unusedUint;
+ CHECK(iter.readStructNewDefault(&unusedUint));
+ }
+ case uint32_t(GcOp::StructGet): {
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
+ FieldWideningOp::None, &nothing));
+ }
+ case uint32_t(GcOp::StructGetS): {
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
+ FieldWideningOp::Signed, &nothing));
+ }
+ case uint32_t(GcOp::StructGetU): {
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readStructGet(&unusedUint1, &unusedUint2,
+ FieldWideningOp::Unsigned, &nothing));
+ }
+ case uint32_t(GcOp::StructSet): {
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readStructSet(&unusedUint1, &unusedUint2, &nothing,
+ &nothing));
+ }
+ case uint32_t(GcOp::ArrayNew): {
+ uint32_t unusedUint;
+ CHECK(iter.readArrayNew(&unusedUint, &nothing, &nothing));
+ }
+ case uint32_t(GcOp::ArrayNewFixed): {
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(
+ iter.readArrayNewFixed(&unusedUint1, &unusedUint2, &nothings));
+ }
+ case uint32_t(GcOp::ArrayNewDefault): {
+ uint32_t unusedUint;
+ CHECK(iter.readArrayNewDefault(&unusedUint, &nothing));
+ }
+ case uint32_t(GcOp::ArrayNewData): {
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readArrayNewData(&unusedUint1, &unusedUint2, &nothing,
+ &nothing));
+ }
+ case uint32_t(GcOp::ArrayInitFromElemStaticV5):
+ case uint32_t(GcOp::ArrayNewElem): {
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readArrayNewElem(&unusedUint1, &unusedUint2, &nothing,
+ &nothing));
+ }
+ case uint32_t(GcOp::ArrayGet): {
+ uint32_t unusedUint1;
+ CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::None,
+ &nothing, &nothing));
+ }
+ case uint32_t(GcOp::ArrayGetS): {
+ uint32_t unusedUint1;
+ CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::Signed,
+ &nothing, &nothing));
+ }
+ case uint32_t(GcOp::ArrayGetU): {
+ uint32_t unusedUint1;
+ CHECK(iter.readArrayGet(&unusedUint1, FieldWideningOp::Unsigned,
+ &nothing, &nothing));
+ }
+ case uint32_t(GcOp::ArraySet): {
+ uint32_t unusedUint1;
+ CHECK(
+ iter.readArraySet(&unusedUint1, &nothing, &nothing, &nothing));
+ }
+ case uint32_t(GcOp::ArrayLenWithTypeIndex): {
+ CHECK(iter.readArrayLen(/*decodeIgnoredTypeIndex=*/true, &nothing));
+ }
+ case uint32_t(GcOp::ArrayLen): {
+ CHECK(
+ iter.readArrayLen(/*decodeIgnoredTypeIndex=*/false, &nothing));
+ }
+ case uint32_t(GcOp::ArrayCopy): {
+ int32_t unusedInt;
+ bool unusedBool;
+ CHECK(iter.readArrayCopy(&unusedInt, &unusedBool, &nothing,
+ &nothing, &nothing, &nothing, &nothing));
+ }
+ case uint16_t(GcOp::RefTestV5): {
+ RefType unusedSourceType;
+ uint32_t unusedTypeIndex;
+ CHECK(iter.readRefTestV5(&unusedSourceType, &unusedTypeIndex,
+ &nothing));
+ }
+ case uint16_t(GcOp::RefCastV5): {
+ RefType unusedSourceType;
+ uint32_t unusedTypeIndex;
+ CHECK(iter.readRefCastV5(&unusedSourceType, &unusedTypeIndex,
+ &nothing));
+ }
+ case uint16_t(GcOp::RefTest): {
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readRefTest(false, &unusedSourceType, &unusedDestType,
+ &nothing));
+ }
+ case uint16_t(GcOp::RefTestNull): {
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readRefTest(true, &unusedSourceType, &unusedDestType,
+ &nothing));
+ }
+ case uint16_t(GcOp::RefCast): {
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readRefCast(false, &unusedSourceType, &unusedDestType,
+ &nothing));
+ }
+ case uint16_t(GcOp::RefCastNull): {
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readRefCast(true, &unusedSourceType, &unusedDestType,
+ &nothing));
+ }
+ case uint16_t(GcOp::BrOnCast): {
+ bool unusedOnSuccess;
+ uint32_t unusedRelativeDepth;
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readBrOnCast(&unusedOnSuccess, &unusedRelativeDepth,
+ &unusedSourceType, &unusedDestType,
+ &unusedType, &nothings));
+ }
+ case uint16_t(GcOp::BrOnCastV5): {
+ uint32_t unusedRelativeDepth;
+ RefType unusedSourceType;
+ uint32_t typeIndex;
+ CHECK(iter.readBrOnCastV5(&unusedRelativeDepth, &unusedSourceType,
+ &typeIndex, &unusedType, &nothings));
+ }
+ case uint16_t(GcOp::BrOnCastFailV5): {
+ uint32_t unusedRelativeDepth;
+ RefType unusedSourceType;
+ uint32_t typeIndex;
+ CHECK(iter.readBrOnCastFailV5(&unusedRelativeDepth,
+ &unusedSourceType, &typeIndex,
+ &unusedType, &nothings));
+ }
+ case uint16_t(GcOp::BrOnCastHeapV5): {
+ uint32_t unusedRelativeDepth;
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readBrOnCastHeapV5(false, &unusedRelativeDepth,
+ &unusedSourceType, &unusedDestType,
+ &unusedType, &nothings));
+ }
+ case uint16_t(GcOp::BrOnCastHeapNullV5): {
+ uint32_t unusedRelativeDepth;
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readBrOnCastHeapV5(true, &unusedRelativeDepth,
+ &unusedSourceType, &unusedDestType,
+ &unusedType, &nothings));
+ }
+ case uint16_t(GcOp::BrOnCastFailHeapV5): {
+ uint32_t unusedRelativeDepth;
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readBrOnCastFailHeapV5(
+ false, &unusedRelativeDepth, &unusedSourceType, &unusedDestType,
+ &unusedType, &nothings));
+ }
+ case uint16_t(GcOp::BrOnCastFailHeapNullV5): {
+ uint32_t unusedRelativeDepth;
+ RefType unusedSourceType;
+ RefType unusedDestType;
+ CHECK(iter.readBrOnCastFailHeapV5(
+ true, &unusedRelativeDepth, &unusedSourceType, &unusedDestType,
+ &unusedType, &nothings));
+ }
+ case uint16_t(GcOp::RefAsStructV5): {
+ CHECK(iter.readConversion(
+ ValType(RefType::any()),
+ ValType(RefType::struct_().asNonNullable()), &nothing));
+ }
+ case uint16_t(GcOp::BrOnNonStructV5): {
+ uint32_t unusedRelativeDepth;
+ CHECK(iter.readBrOnNonStructV5(&unusedRelativeDepth, &unusedType,
+ &nothings));
+ }
+ case uint16_t(GcOp::ExternInternalize): {
+ CHECK(iter.readRefConversion(RefType::extern_(), RefType::any(),
+ &nothing));
+ }
+ case uint16_t(GcOp::ExternExternalize): {
+ CHECK(iter.readRefConversion(RefType::any(), RefType::extern_(),
+ &nothing));
+ }
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ case uint16_t(Op::SimdPrefix): {
+ if (!env.simdAvailable()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t noIndex;
+ switch (op.b1) {
+ case uint32_t(SimdOp::I8x16ExtractLaneS):
+ case uint32_t(SimdOp::I8x16ExtractLaneU):
+ CHECK(iter.readExtractLane(ValType::I32, 16, &noIndex, &nothing));
+ case uint32_t(SimdOp::I16x8ExtractLaneS):
+ case uint32_t(SimdOp::I16x8ExtractLaneU):
+ CHECK(iter.readExtractLane(ValType::I32, 8, &noIndex, &nothing));
+ case uint32_t(SimdOp::I32x4ExtractLane):
+ CHECK(iter.readExtractLane(ValType::I32, 4, &noIndex, &nothing));
+ case uint32_t(SimdOp::I64x2ExtractLane):
+ CHECK(iter.readExtractLane(ValType::I64, 2, &noIndex, &nothing));
+ case uint32_t(SimdOp::F32x4ExtractLane):
+ CHECK(iter.readExtractLane(ValType::F32, 4, &noIndex, &nothing));
+ case uint32_t(SimdOp::F64x2ExtractLane):
+ CHECK(iter.readExtractLane(ValType::F64, 2, &noIndex, &nothing));
+
+ case uint32_t(SimdOp::I8x16Splat):
+ case uint32_t(SimdOp::I16x8Splat):
+ case uint32_t(SimdOp::I32x4Splat):
+ CHECK(iter.readConversion(ValType::I32, ValType::V128, &nothing));
+ case uint32_t(SimdOp::I64x2Splat):
+ CHECK(iter.readConversion(ValType::I64, ValType::V128, &nothing));
+ case uint32_t(SimdOp::F32x4Splat):
+ CHECK(iter.readConversion(ValType::F32, ValType::V128, &nothing));
+ case uint32_t(SimdOp::F64x2Splat):
+ CHECK(iter.readConversion(ValType::F64, ValType::V128, &nothing));
+
+ case uint32_t(SimdOp::V128AnyTrue):
+ case uint32_t(SimdOp::I8x16AllTrue):
+ case uint32_t(SimdOp::I16x8AllTrue):
+ case uint32_t(SimdOp::I32x4AllTrue):
+ case uint32_t(SimdOp::I64x2AllTrue):
+ case uint32_t(SimdOp::I8x16Bitmask):
+ case uint32_t(SimdOp::I16x8Bitmask):
+ case uint32_t(SimdOp::I32x4Bitmask):
+ case uint32_t(SimdOp::I64x2Bitmask):
+ CHECK(iter.readConversion(ValType::V128, ValType::I32, &nothing));
+
+ case uint32_t(SimdOp::I8x16ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I32, 16, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::I16x8ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I32, 8, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::I32x4ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I32, 4, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::I64x2ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I64, 2, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::F32x4ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::F32, 4, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::F64x2ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::F64, 2, &noIndex, &nothing,
+ &nothing));
+
+ case uint32_t(SimdOp::I8x16Eq):
+ case uint32_t(SimdOp::I8x16Ne):
+ case uint32_t(SimdOp::I8x16LtS):
+ case uint32_t(SimdOp::I8x16LtU):
+ case uint32_t(SimdOp::I8x16GtS):
+ case uint32_t(SimdOp::I8x16GtU):
+ case uint32_t(SimdOp::I8x16LeS):
+ case uint32_t(SimdOp::I8x16LeU):
+ case uint32_t(SimdOp::I8x16GeS):
+ case uint32_t(SimdOp::I8x16GeU):
+ case uint32_t(SimdOp::I16x8Eq):
+ case uint32_t(SimdOp::I16x8Ne):
+ case uint32_t(SimdOp::I16x8LtS):
+ case uint32_t(SimdOp::I16x8LtU):
+ case uint32_t(SimdOp::I16x8GtS):
+ case uint32_t(SimdOp::I16x8GtU):
+ case uint32_t(SimdOp::I16x8LeS):
+ case uint32_t(SimdOp::I16x8LeU):
+ case uint32_t(SimdOp::I16x8GeS):
+ case uint32_t(SimdOp::I16x8GeU):
+ case uint32_t(SimdOp::I32x4Eq):
+ case uint32_t(SimdOp::I32x4Ne):
+ case uint32_t(SimdOp::I32x4LtS):
+ case uint32_t(SimdOp::I32x4LtU):
+ case uint32_t(SimdOp::I32x4GtS):
+ case uint32_t(SimdOp::I32x4GtU):
+ case uint32_t(SimdOp::I32x4LeS):
+ case uint32_t(SimdOp::I32x4LeU):
+ case uint32_t(SimdOp::I32x4GeS):
+ case uint32_t(SimdOp::I32x4GeU):
+ case uint32_t(SimdOp::I64x2Eq):
+ case uint32_t(SimdOp::I64x2Ne):
+ case uint32_t(SimdOp::I64x2LtS):
+ case uint32_t(SimdOp::I64x2GtS):
+ case uint32_t(SimdOp::I64x2LeS):
+ case uint32_t(SimdOp::I64x2GeS):
+ case uint32_t(SimdOp::F32x4Eq):
+ case uint32_t(SimdOp::F32x4Ne):
+ case uint32_t(SimdOp::F32x4Lt):
+ case uint32_t(SimdOp::F32x4Gt):
+ case uint32_t(SimdOp::F32x4Le):
+ case uint32_t(SimdOp::F32x4Ge):
+ case uint32_t(SimdOp::F64x2Eq):
+ case uint32_t(SimdOp::F64x2Ne):
+ case uint32_t(SimdOp::F64x2Lt):
+ case uint32_t(SimdOp::F64x2Gt):
+ case uint32_t(SimdOp::F64x2Le):
+ case uint32_t(SimdOp::F64x2Ge):
+ case uint32_t(SimdOp::V128And):
+ case uint32_t(SimdOp::V128Or):
+ case uint32_t(SimdOp::V128Xor):
+ case uint32_t(SimdOp::V128AndNot):
+ case uint32_t(SimdOp::I8x16AvgrU):
+ case uint32_t(SimdOp::I16x8AvgrU):
+ case uint32_t(SimdOp::I8x16Add):
+ case uint32_t(SimdOp::I8x16AddSatS):
+ case uint32_t(SimdOp::I8x16AddSatU):
+ case uint32_t(SimdOp::I8x16Sub):
+ case uint32_t(SimdOp::I8x16SubSatS):
+ case uint32_t(SimdOp::I8x16SubSatU):
+ case uint32_t(SimdOp::I8x16MinS):
+ case uint32_t(SimdOp::I8x16MinU):
+ case uint32_t(SimdOp::I8x16MaxS):
+ case uint32_t(SimdOp::I8x16MaxU):
+ case uint32_t(SimdOp::I16x8Add):
+ case uint32_t(SimdOp::I16x8AddSatS):
+ case uint32_t(SimdOp::I16x8AddSatU):
+ case uint32_t(SimdOp::I16x8Sub):
+ case uint32_t(SimdOp::I16x8SubSatS):
+ case uint32_t(SimdOp::I16x8SubSatU):
+ case uint32_t(SimdOp::I16x8Mul):
+ case uint32_t(SimdOp::I16x8MinS):
+ case uint32_t(SimdOp::I16x8MinU):
+ case uint32_t(SimdOp::I16x8MaxS):
+ case uint32_t(SimdOp::I16x8MaxU):
+ case uint32_t(SimdOp::I32x4Add):
+ case uint32_t(SimdOp::I32x4Sub):
+ case uint32_t(SimdOp::I32x4Mul):
+ case uint32_t(SimdOp::I32x4MinS):
+ case uint32_t(SimdOp::I32x4MinU):
+ case uint32_t(SimdOp::I32x4MaxS):
+ case uint32_t(SimdOp::I32x4MaxU):
+ case uint32_t(SimdOp::I64x2Add):
+ case uint32_t(SimdOp::I64x2Sub):
+ case uint32_t(SimdOp::I64x2Mul):
+ case uint32_t(SimdOp::F32x4Add):
+ case uint32_t(SimdOp::F32x4Sub):
+ case uint32_t(SimdOp::F32x4Mul):
+ case uint32_t(SimdOp::F32x4Div):
+ case uint32_t(SimdOp::F32x4Min):
+ case uint32_t(SimdOp::F32x4Max):
+ case uint32_t(SimdOp::F64x2Add):
+ case uint32_t(SimdOp::F64x2Sub):
+ case uint32_t(SimdOp::F64x2Mul):
+ case uint32_t(SimdOp::F64x2Div):
+ case uint32_t(SimdOp::F64x2Min):
+ case uint32_t(SimdOp::F64x2Max):
+ case uint32_t(SimdOp::I8x16NarrowI16x8S):
+ case uint32_t(SimdOp::I8x16NarrowI16x8U):
+ case uint32_t(SimdOp::I16x8NarrowI32x4S):
+ case uint32_t(SimdOp::I16x8NarrowI32x4U):
+ case uint32_t(SimdOp::I8x16Swizzle):
+ case uint32_t(SimdOp::F32x4PMax):
+ case uint32_t(SimdOp::F32x4PMin):
+ case uint32_t(SimdOp::F64x2PMax):
+ case uint32_t(SimdOp::F64x2PMin):
+ case uint32_t(SimdOp::I32x4DotI16x8S):
+ case uint32_t(SimdOp::I16x8ExtmulLowI8x16S):
+ case uint32_t(SimdOp::I16x8ExtmulHighI8x16S):
+ case uint32_t(SimdOp::I16x8ExtmulLowI8x16U):
+ case uint32_t(SimdOp::I16x8ExtmulHighI8x16U):
+ case uint32_t(SimdOp::I32x4ExtmulLowI16x8S):
+ case uint32_t(SimdOp::I32x4ExtmulHighI16x8S):
+ case uint32_t(SimdOp::I32x4ExtmulLowI16x8U):
+ case uint32_t(SimdOp::I32x4ExtmulHighI16x8U):
+ case uint32_t(SimdOp::I64x2ExtmulLowI32x4S):
+ case uint32_t(SimdOp::I64x2ExtmulHighI32x4S):
+ case uint32_t(SimdOp::I64x2ExtmulLowI32x4U):
+ case uint32_t(SimdOp::I64x2ExtmulHighI32x4U):
+ case uint32_t(SimdOp::I16x8Q15MulrSatS):
+ CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
+
+ case uint32_t(SimdOp::I8x16Neg):
+ case uint32_t(SimdOp::I16x8Neg):
+ case uint32_t(SimdOp::I16x8ExtendLowI8x16S):
+ case uint32_t(SimdOp::I16x8ExtendHighI8x16S):
+ case uint32_t(SimdOp::I16x8ExtendLowI8x16U):
+ case uint32_t(SimdOp::I16x8ExtendHighI8x16U):
+ case uint32_t(SimdOp::I32x4Neg):
+ case uint32_t(SimdOp::I32x4ExtendLowI16x8S):
+ case uint32_t(SimdOp::I32x4ExtendHighI16x8S):
+ case uint32_t(SimdOp::I32x4ExtendLowI16x8U):
+ case uint32_t(SimdOp::I32x4ExtendHighI16x8U):
+ case uint32_t(SimdOp::I32x4TruncSatF32x4S):
+ case uint32_t(SimdOp::I32x4TruncSatF32x4U):
+ case uint32_t(SimdOp::I64x2Neg):
+ case uint32_t(SimdOp::I64x2ExtendLowI32x4S):
+ case uint32_t(SimdOp::I64x2ExtendHighI32x4S):
+ case uint32_t(SimdOp::I64x2ExtendLowI32x4U):
+ case uint32_t(SimdOp::I64x2ExtendHighI32x4U):
+ case uint32_t(SimdOp::F32x4Abs):
+ case uint32_t(SimdOp::F32x4Neg):
+ case uint32_t(SimdOp::F32x4Sqrt):
+ case uint32_t(SimdOp::F32x4ConvertI32x4S):
+ case uint32_t(SimdOp::F32x4ConvertI32x4U):
+ case uint32_t(SimdOp::F64x2Abs):
+ case uint32_t(SimdOp::F64x2Neg):
+ case uint32_t(SimdOp::F64x2Sqrt):
+ case uint32_t(SimdOp::V128Not):
+ case uint32_t(SimdOp::I8x16Popcnt):
+ case uint32_t(SimdOp::I8x16Abs):
+ case uint32_t(SimdOp::I16x8Abs):
+ case uint32_t(SimdOp::I32x4Abs):
+ case uint32_t(SimdOp::I64x2Abs):
+ case uint32_t(SimdOp::F32x4Ceil):
+ case uint32_t(SimdOp::F32x4Floor):
+ case uint32_t(SimdOp::F32x4Trunc):
+ case uint32_t(SimdOp::F32x4Nearest):
+ case uint32_t(SimdOp::F64x2Ceil):
+ case uint32_t(SimdOp::F64x2Floor):
+ case uint32_t(SimdOp::F64x2Trunc):
+ case uint32_t(SimdOp::F64x2Nearest):
+ case uint32_t(SimdOp::F32x4DemoteF64x2Zero):
+ case uint32_t(SimdOp::F64x2PromoteLowF32x4):
+ case uint32_t(SimdOp::F64x2ConvertLowI32x4S):
+ case uint32_t(SimdOp::F64x2ConvertLowI32x4U):
+ case uint32_t(SimdOp::I32x4TruncSatF64x2SZero):
+ case uint32_t(SimdOp::I32x4TruncSatF64x2UZero):
+ case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16S):
+ case uint32_t(SimdOp::I16x8ExtaddPairwiseI8x16U):
+ case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8S):
+ case uint32_t(SimdOp::I32x4ExtaddPairwiseI16x8U):
+ CHECK(iter.readUnary(ValType::V128, &nothing));
+
+ case uint32_t(SimdOp::I8x16Shl):
+ case uint32_t(SimdOp::I8x16ShrS):
+ case uint32_t(SimdOp::I8x16ShrU):
+ case uint32_t(SimdOp::I16x8Shl):
+ case uint32_t(SimdOp::I16x8ShrS):
+ case uint32_t(SimdOp::I16x8ShrU):
+ case uint32_t(SimdOp::I32x4Shl):
+ case uint32_t(SimdOp::I32x4ShrS):
+ case uint32_t(SimdOp::I32x4ShrU):
+ case uint32_t(SimdOp::I64x2Shl):
+ case uint32_t(SimdOp::I64x2ShrS):
+ case uint32_t(SimdOp::I64x2ShrU):
+ CHECK(iter.readVectorShift(&nothing, &nothing));
+
+ case uint32_t(SimdOp::V128Bitselect):
+ CHECK(
+ iter.readTernary(ValType::V128, &nothing, &nothing, &nothing));
+
+ case uint32_t(SimdOp::I8x16Shuffle): {
+ V128 mask;
+ CHECK(iter.readVectorShuffle(&nothing, &nothing, &mask));
+ }
+
+ case uint32_t(SimdOp::V128Const): {
+ V128 noVector;
+ CHECK(iter.readV128Const(&noVector));
+ }
+
+ case uint32_t(SimdOp::V128Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::V128, 16, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load8Splat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(1, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load16Splat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(2, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load32Splat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(4, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load64Splat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(8, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load8x8S):
+ case uint32_t(SimdOp::V128Load8x8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadExtend(&addr));
+ }
+
+ case uint32_t(SimdOp::V128Load16x4S):
+ case uint32_t(SimdOp::V128Load16x4U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadExtend(&addr));
+ }
+
+ case uint32_t(SimdOp::V128Load32x2S):
+ case uint32_t(SimdOp::V128Load32x2U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadExtend(&addr));
+ }
+
+ case uint32_t(SimdOp::V128Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::V128, 16, &addr, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Load32Zero): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(4, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load64Zero): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(8, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load8Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadLane(1, &addr, &noIndex, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Load16Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadLane(2, &addr, &noIndex, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Load32Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadLane(4, &addr, &noIndex, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Load64Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadLane(8, &addr, &noIndex, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Store8Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStoreLane(1, &addr, &noIndex, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Store16Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStoreLane(2, &addr, &noIndex, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Store32Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStoreLane(4, &addr, &noIndex, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Store64Lane): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStoreLane(8, &addr, &noIndex, &nothing));
+ }
+
+# ifdef ENABLE_WASM_RELAXED_SIMD
+ case uint32_t(SimdOp::F32x4RelaxedFma):
+ case uint32_t(SimdOp::F32x4RelaxedFnma):
+ case uint32_t(SimdOp::F64x2RelaxedFma):
+ case uint32_t(SimdOp::F64x2RelaxedFnma):
+ case uint32_t(SimdOp::I8x16RelaxedLaneSelect):
+ case uint32_t(SimdOp::I16x8RelaxedLaneSelect):
+ case uint32_t(SimdOp::I32x4RelaxedLaneSelect):
+ case uint32_t(SimdOp::I64x2RelaxedLaneSelect):
+ case uint32_t(SimdOp::I32x4DotI8x16I7x16AddS): {
+ if (!env.v128RelaxedEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(
+ iter.readTernary(ValType::V128, &nothing, &nothing, &nothing));
+ }
+ case uint32_t(SimdOp::F32x4RelaxedMin):
+ case uint32_t(SimdOp::F32x4RelaxedMax):
+ case uint32_t(SimdOp::F64x2RelaxedMin):
+ case uint32_t(SimdOp::F64x2RelaxedMax):
+ case uint32_t(SimdOp::I16x8RelaxedQ15MulrS):
+ case uint32_t(SimdOp::I16x8DotI8x16I7x16S): {
+ if (!env.v128RelaxedEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
+ }
+ case uint32_t(SimdOp::I32x4RelaxedTruncF32x4S):
+ case uint32_t(SimdOp::I32x4RelaxedTruncF32x4U):
+ case uint32_t(SimdOp::I32x4RelaxedTruncF64x2SZero):
+ case uint32_t(SimdOp::I32x4RelaxedTruncF64x2UZero): {
+ if (!env.v128RelaxedEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readUnary(ValType::V128, &nothing));
+ }
+ case uint32_t(SimdOp::I8x16RelaxedSwizzle): {
+ if (!env.v128RelaxedEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
+ }
+# endif
+
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+#endif // ENABLE_WASM_SIMD
+
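+ // The "miscellaneous" prefix covers the saturating float-to-int
+ // conversions and the bulk memory/table operations (memory.copy,
+ // memory.init, table.copy, table.init, and friends).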
+ case uint16_t(Op::MiscPrefix): {
+ switch (op.b1) {
+ case uint32_t(MiscOp::I32TruncSatF32S):
+ case uint32_t(MiscOp::I32TruncSatF32U):
+ CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
+ case uint32_t(MiscOp::I32TruncSatF64S):
+ case uint32_t(MiscOp::I32TruncSatF64U):
+ CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
+ case uint32_t(MiscOp::I64TruncSatF32S):
+ case uint32_t(MiscOp::I64TruncSatF32U):
+ CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
+ case uint32_t(MiscOp::I64TruncSatF64S):
+ case uint32_t(MiscOp::I64TruncSatF64U):
+ CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
+ case uint32_t(MiscOp::MemoryCopy): {
+ uint32_t unusedDestMemIndex;
+ uint32_t unusedSrcMemIndex;
+ CHECK(iter.readMemOrTableCopy(/*isMem=*/true, &unusedDestMemIndex,
+ &nothing, &unusedSrcMemIndex,
+ &nothing, &nothing));
+ }
+ case uint32_t(MiscOp::DataDrop): {
+ uint32_t unusedSegIndex;
+ CHECK(iter.readDataOrElemDrop(/*isData=*/true, &unusedSegIndex));
+ }
+ case uint32_t(MiscOp::MemoryFill):
+ CHECK(iter.readMemFill(&nothing, &nothing, &nothing));
+ case uint32_t(MiscOp::MemoryInit): {
+ uint32_t unusedSegIndex;
+ uint32_t unusedTableIndex;
+ CHECK(iter.readMemOrTableInit(/*isMem=*/true, &unusedSegIndex,
+ &unusedTableIndex, &nothing, &nothing,
+ &nothing));
+ }
+ case uint32_t(MiscOp::TableCopy): {
+ uint32_t unusedDestTableIndex;
+ uint32_t unusedSrcTableIndex;
+ CHECK(iter.readMemOrTableCopy(
+ /*isMem=*/false, &unusedDestTableIndex, &nothing,
+ &unusedSrcTableIndex, &nothing, &nothing));
+ }
+ case uint32_t(MiscOp::ElemDrop): {
+ uint32_t unusedSegIndex;
+ CHECK(iter.readDataOrElemDrop(/*isData=*/false, &unusedSegIndex));
+ }
+ case uint32_t(MiscOp::TableInit): {
+ uint32_t unusedSegIndex;
+ uint32_t unusedTableIndex;
+ CHECK(iter.readMemOrTableInit(/*isMem=*/false, &unusedSegIndex,
+ &unusedTableIndex, &nothing, &nothing,
+ &nothing));
+ }
+ case uint32_t(MiscOp::TableFill): {
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableFill(&unusedTableIndex, &nothing, &nothing,
+ &nothing));
+ }
+#ifdef ENABLE_WASM_MEMORY_CONTROL
+ case uint32_t(MiscOp::MemoryDiscard): {
+ if (!env.memoryControlEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readMemDiscard(&nothing, &nothing));
+ }
+#endif
+ case uint32_t(MiscOp::TableGrow): {
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableGrow(&unusedTableIndex, &nothing, &nothing));
+ }
+ case uint32_t(MiscOp::TableSize): {
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableSize(&unusedTableIndex));
+ }
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint16_t(Op::RefAsNonNull): {
+ if (!env.functionReferencesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readRefAsNonNull(&nothing));
+ }
+ case uint16_t(Op::BrOnNull): {
+ if (!env.functionReferencesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedDepth;
+ CHECK(
+ iter.readBrOnNull(&unusedDepth, &unusedType, &nothings, &nothing));
+ }
+ case uint16_t(Op::BrOnNonNull): {
+ if (!env.functionReferencesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedDepth;
+ CHECK(iter.readBrOnNonNull(&unusedDepth, &unusedType, &nothings,
+ &nothing));
+ }
+#endif
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::RefEq): {
+ if (!env.gcEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readComparison(RefType::eq(), &nothing, &nothing));
+ }
+#endif
+ case uint16_t(Op::RefFunc): {
+ uint32_t unusedIndex;
+ CHECK(iter.readRefFunc(&unusedIndex));
+ }
+ case uint16_t(Op::RefNull): {
+ RefType type;
+ CHECK(iter.readRefNull(&type));
+ }
+ case uint16_t(Op::RefIsNull): {
+ Nothing nothing;
+ CHECK(iter.readRefIsNull(&nothing));
+ }
+ case uint16_t(Op::Try):
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readTry(&unusedType));
+ case uint16_t(Op::Catch): {
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ LabelKind unusedKind;
+ uint32_t unusedIndex;
+ CHECK(iter.readCatch(&unusedKind, &unusedIndex, &unusedType,
+ &unusedType, &nothings));
+ }
+ case uint16_t(Op::CatchAll): {
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ LabelKind unusedKind;
+ CHECK(iter.readCatchAll(&unusedKind, &unusedType, &unusedType,
+ &nothings));
+ }
+ case uint16_t(Op::Delegate): {
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedDepth;
+ if (!iter.readDelegate(&unusedDepth, &unusedType, &nothings)) {
+ return false;
+ }
+ iter.popDelegate();
+ break;
+ }
+ case uint16_t(Op::Throw): {
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedIndex;
+ CHECK(iter.readThrow(&unusedIndex, &nothings));
+ }
+ case uint16_t(Op::Rethrow): {
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedDepth;
+ CHECK(iter.readRethrow(&unusedDepth));
+ }
+ case uint16_t(Op::ThreadPrefix): {
+      // Though thread ops can be used on nonshared memories, we make them
+      // unavailable if shared memory has been disabled in the prefs, for
+      // maximum predictability, safety, and consistency with JS.
+ if (env.sharedMemoryEnabled() == Shareable::False) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(ThreadOp::Wake): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readWake(&addr, &nothing));
+ }
+ case uint32_t(ThreadOp::I32Wait): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readWait(&addr, ValType::I32, 4, &nothing, &nothing));
+ }
+ case uint32_t(ThreadOp::I64Wait): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readWait(&addr, ValType::I64, 8, &nothing, &nothing));
+ }
+ case uint32_t(ThreadOp::Fence): {
+ CHECK(iter.readFence());
+ }
+ case uint32_t(ThreadOp::I32AtomicLoad): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I32, 4));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 8));
+ }
+ case uint32_t(ThreadOp::I32AtomicLoad8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I32, 1));
+ }
+ case uint32_t(ThreadOp::I32AtomicLoad16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I32, 2));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 1));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 2));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 4));
+ }
+ case uint32_t(ThreadOp::I32AtomicStore): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I32, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 8, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicStore8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I32, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicStore16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I32, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicAdd):
+ case uint32_t(ThreadOp::I32AtomicSub):
+ case uint32_t(ThreadOp::I32AtomicAnd):
+ case uint32_t(ThreadOp::I32AtomicOr):
+ case uint32_t(ThreadOp::I32AtomicXor):
+ case uint32_t(ThreadOp::I32AtomicXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I32, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd):
+ case uint32_t(ThreadOp::I64AtomicSub):
+ case uint32_t(ThreadOp::I64AtomicAnd):
+ case uint32_t(ThreadOp::I64AtomicOr):
+ case uint32_t(ThreadOp::I64AtomicXor):
+ case uint32_t(ThreadOp::I64AtomicXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 8, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicAdd8U):
+ case uint32_t(ThreadOp::I32AtomicSub8U):
+ case uint32_t(ThreadOp::I32AtomicAnd8U):
+ case uint32_t(ThreadOp::I32AtomicOr8U):
+ case uint32_t(ThreadOp::I32AtomicXor8U):
+ case uint32_t(ThreadOp::I32AtomicXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I32, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicAdd16U):
+ case uint32_t(ThreadOp::I32AtomicSub16U):
+ case uint32_t(ThreadOp::I32AtomicAnd16U):
+ case uint32_t(ThreadOp::I32AtomicOr16U):
+ case uint32_t(ThreadOp::I32AtomicXor16U):
+ case uint32_t(ThreadOp::I32AtomicXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I32, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd8U):
+ case uint32_t(ThreadOp::I64AtomicSub8U):
+ case uint32_t(ThreadOp::I64AtomicAnd8U):
+ case uint32_t(ThreadOp::I64AtomicOr8U):
+ case uint32_t(ThreadOp::I64AtomicXor8U):
+ case uint32_t(ThreadOp::I64AtomicXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd16U):
+ case uint32_t(ThreadOp::I64AtomicSub16U):
+ case uint32_t(ThreadOp::I64AtomicAnd16U):
+ case uint32_t(ThreadOp::I64AtomicOr16U):
+ case uint32_t(ThreadOp::I64AtomicXor16U):
+ case uint32_t(ThreadOp::I64AtomicXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd32U):
+ case uint32_t(ThreadOp::I64AtomicSub32U):
+ case uint32_t(ThreadOp::I64AtomicAnd32U):
+ case uint32_t(ThreadOp::I64AtomicOr32U):
+ case uint32_t(ThreadOp::I64AtomicXor32U):
+ case uint32_t(ThreadOp::I64AtomicXchg32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicCmpXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 4, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 8, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicCmpXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 1, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicCmpXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 2, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 1, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 2, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 4, &nothing,
+ &nothing));
+ }
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+ case uint16_t(Op::MozPrefix):
+ return iter.unrecognizedOpcode(&op);
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+
+#undef CHECK
+}
+
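+// Validate a single function body: seed the locals with the function's
+// parameter types, decode the local declarations, and then validate the
+// instruction stream against the module environment.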
+bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
+ uint32_t funcIndex, uint32_t bodySize,
+ Decoder& d) {
+ ValTypeVector locals;
+ if (!locals.appendAll(env.funcs[funcIndex].type->args())) {
+ return false;
+ }
+
+ const uint8_t* bodyBegin = d.currentPosition();
+
+ if (!DecodeLocalEntries(d, *env.types, env.features, &locals)) {
+ return false;
+ }
+
+ if (!DecodeFunctionBodyExprs(env, funcIndex, locals, bodyBegin + bodySize,
+ &d)) {
+ return false;
+ }
+
+ return true;
+}
+
+// Section macros.
+
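+// Decode the module preamble: after a sanity check on the total module size,
+// read the magic number ("\0asm" as a fixed 32-bit value) and the binary
+// encoding version, failing if either does not match.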
+static bool DecodePreamble(Decoder& d) {
+ if (d.bytesRemain() > MaxModuleBytes) {
+ return d.fail("module too big");
+ }
+
+ uint32_t u32;
+ if (!d.readFixedU32(&u32) || u32 != MagicNumber) {
+ return d.fail("failed to match magic number");
+ }
+
+ if (!d.readFixedU32(&u32) || u32 != EncodingVersion) {
+ return d.failf("binary version 0x%" PRIx32
+ " does not match expected version 0x%" PRIx32,
+ u32, EncodingVersion);
+ }
+
+ return true;
+}
+
+static bool DecodeValTypeVector(Decoder& d, ModuleEnvironment* env,
+ uint32_t count, ValTypeVector* valTypes) {
+ if (!valTypes->resize(count)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < count; i++) {
+ if (!d.readValType(*env->types, env->features, &(*valTypes)[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
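+// Decode a function signature: a vector of parameter types followed by a
+// vector of result types, bounded by MaxParams and MaxResults.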
+static bool DecodeFuncType(Decoder& d, ModuleEnvironment* env,
+ FuncType* funcType) {
+ uint32_t numArgs;
+ if (!d.readVarU32(&numArgs)) {
+ return d.fail("bad number of function args");
+ }
+ if (numArgs > MaxParams) {
+ return d.fail("too many arguments in signature");
+ }
+ ValTypeVector args;
+ if (!DecodeValTypeVector(d, env, numArgs, &args)) {
+ return false;
+ }
+
+ uint32_t numResults;
+ if (!d.readVarU32(&numResults)) {
+ return d.fail("bad number of function returns");
+ }
+ if (numResults > MaxResults) {
+ return d.fail("too many returns in signature");
+ }
+ ValTypeVector results;
+ if (!DecodeValTypeVector(d, env, numResults, &results)) {
+ return false;
+ }
+
+ *funcType = FuncType(std::move(args), std::move(results));
+ return true;
+}
+
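+// Decode a struct type: a field count followed by a (field type, flags) pair
+// per field, where the only allowed flag bit is mutability. The layout is
+// computed by StructType::init(), which also rejects oversized structs.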
+static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
+ StructType* structType) {
+ if (!env->gcEnabled()) {
+ return d.fail("Structure types not enabled");
+ }
+
+ uint32_t numFields;
+ if (!d.readVarU32(&numFields)) {
+ return d.fail("Bad number of fields");
+ }
+
+ if (numFields > MaxStructFields) {
+ return d.fail("too many fields in struct");
+ }
+
+ StructFieldVector fields;
+ if (!fields.resize(numFields)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numFields; i++) {
+ if (!d.readFieldType(*env->types, env->features, &fields[i].type)) {
+ return false;
+ }
+
+ uint8_t flags;
+ if (!d.readFixedU8(&flags)) {
+ return d.fail("expected flag");
+ }
+ if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
+ return d.fail("garbage flag bits");
+ }
+ fields[i].isMutable = flags & uint8_t(FieldFlags::Mutable);
+ }
+
+ *structType = StructType(std::move(fields));
+
+  // Compute the struct layout, and fail if the struct is too large.
+ if (!structType->init()) {
+ return d.fail("too many fields in struct");
+ }
+ return true;
+}
+
+static bool DecodeArrayType(Decoder& d, ModuleEnvironment* env,
+ ArrayType* arrayType) {
+ if (!env->gcEnabled()) {
+ return d.fail("gc types not enabled");
+ }
+
+ FieldType elementType;
+ if (!d.readFieldType(*env->types, env->features, &elementType)) {
+ return false;
+ }
+
+ uint8_t flags;
+ if (!d.readFixedU8(&flags)) {
+ return d.fail("expected flag");
+ }
+ if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
+ return d.fail("garbage flag bits");
+ }
+ bool isMutable = flags & uint8_t(FieldFlags::Mutable);
+
+ *arrayType = ArrayType(elementType, isMutable);
+ return true;
+}
+
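+// Decode the type section. Each entry is either a single type definition or,
+// when the GC proposal is enabled, a recursion group of several definitions.
+// Declared super types are recorded in a first pass and checked for
+// compatibility in a second pass, after which the group is canonicalized.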
+static bool DecodeTypeSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Type, env, &range, "type")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numRecGroups;
+ if (!d.readVarU32(&numRecGroups)) {
+ return d.fail("expected number of types");
+ }
+
+  // Check if we've reached our implementation-defined limit of recursion
+  // groups.
+ if (numRecGroups > MaxRecGroups) {
+ return d.fail("too many types");
+ }
+
+ for (uint32_t recGroupIndex = 0; recGroupIndex < numRecGroups;
+ recGroupIndex++) {
+ uint32_t recGroupLength = 1;
+
+ // Decode an optional recursion group length, if the GC proposal is
+ // enabled.
+ if (env->gcEnabled()) {
+ uint8_t firstTypeCode;
+ if (!d.peekByte(&firstTypeCode)) {
+ return d.fail("expected type form");
+ }
+
+ if (firstTypeCode == (uint8_t)TypeCode::RecGroup ||
+ firstTypeCode == (uint8_t)TypeCode::RecGroupOld) {
+ // Skip over the prefix byte that was peeked.
+ d.uncheckedReadFixedU8();
+
+ // Read the number of types in this recursion group
+ if (!d.readVarU32(&recGroupLength)) {
+ return d.fail("expected recursion group length");
+ }
+ }
+ }
+
+ // Start a recursion group. This will extend the type context with empty
+ // type definitions to be filled.
+ MutableRecGroup recGroup = env->types->startRecGroup(recGroupLength);
+ if (!recGroup) {
+ return false;
+ }
+
+    // First, iterate over the types, validate them, and set their super
+    // types. Subtyping relationships will be checked in a second iteration.
+ for (uint32_t recGroupTypeIndex = 0; recGroupTypeIndex < recGroupLength;
+ recGroupTypeIndex++) {
+ uint32_t typeIndex =
+ env->types->length() - recGroupLength + recGroupTypeIndex;
+
+      // Check if we've reached our implementation-defined limit of type
+      // definitions.
+ if (typeIndex > MaxTypes) {
+ return d.fail("too many types");
+ }
+
+ uint8_t form;
+ const TypeDef* superTypeDef = nullptr;
+
+ // Decode an optional declared super type index, if the GC proposal is
+ // enabled.
+ if (env->gcEnabled() && d.peekByte(&form) &&
+ form == (uint8_t)TypeCode::SubType) {
+ // Skip over the `sub` prefix byte we peeked.
+ d.uncheckedReadFixedU8();
+
+ // Decode the number of super types, which is currently limited to at
+ // most one.
+ uint32_t numSuperTypes;
+ if (!d.readVarU32(&numSuperTypes)) {
+ return d.fail("expected number of super types");
+ }
+ if (numSuperTypes > 1) {
+ return d.fail("too many super types");
+ }
+
+ // Decode the super type, if any.
+ if (numSuperTypes == 1) {
+ uint32_t superTypeDefIndex;
+ if (!d.readVarU32(&superTypeDefIndex)) {
+ return d.fail("expected super type index");
+ }
+
+ // A super type index must be strictly less than the current type
+ // index in order to avoid cycles.
+ if (superTypeDefIndex >= typeIndex) {
+ return d.fail("invalid super type index");
+ }
+
+ superTypeDef = &env->types->type(superTypeDefIndex);
+ }
+ }
+
+ // Decode the kind of type definition
+ if (!d.readFixedU8(&form)) {
+ return d.fail("expected type form");
+ }
+
+ TypeDef* typeDef = &recGroup->type(recGroupTypeIndex);
+ switch (form) {
+ case uint8_t(TypeCode::Func): {
+ FuncType funcType;
+ if (!DecodeFuncType(d, env, &funcType)) {
+ return false;
+ }
+ *typeDef = std::move(funcType);
+ break;
+ }
+ case uint8_t(TypeCode::Struct): {
+ StructType structType;
+ if (!DecodeStructType(d, env, &structType)) {
+ return false;
+ }
+ *typeDef = std::move(structType);
+ break;
+ }
+ case uint8_t(TypeCode::Array): {
+ ArrayType arrayType;
+ if (!DecodeArrayType(d, env, &arrayType)) {
+ return false;
+ }
+ *typeDef = std::move(arrayType);
+ break;
+ }
+ default:
+ return d.fail("expected type form");
+ }
+
+ if (superTypeDef) {
+        // Check that we aren't creating too deep a subtyping chain.
+ if (superTypeDef->subTypingDepth() >= MaxSubTypingDepth) {
+ return d.fail("type is too deep");
+ }
+
+ typeDef->setSuperTypeDef(superTypeDef);
+ }
+ }
+
+    // Check the super types to make sure they are compatible with their
+    // subtypes. This is done in a second iteration to avoid dealing with
+    // types that have not been loaded yet.
+ for (uint32_t recGroupTypeIndex = 0; recGroupTypeIndex < recGroupLength;
+ recGroupTypeIndex++) {
+ TypeDef* typeDef = &recGroup->type(recGroupTypeIndex);
+ if (typeDef->superTypeDef()) {
+ // Check that the super type is compatible with this type
+ if (!TypeDef::canBeSubTypeOf(typeDef, typeDef->superTypeDef())) {
+ return d.fail("incompatible super type");
+ }
+ }
+ }
+
+ // Finish the recursion group, which will canonicalize the types.
+ if (!env->types->endRecGroup()) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "type");
+}
+
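+// Decode a length-prefixed name (at most MaxStringBytes bytes), check that it
+// is valid UTF-8, and copy it into a CacheableName.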
+[[nodiscard]] static bool DecodeName(Decoder& d, CacheableName* name) {
+ uint32_t numBytes;
+ if (!d.readVarU32(&numBytes)) {
+ return false;
+ }
+
+ if (numBytes > MaxStringBytes) {
+ return false;
+ }
+
+ const uint8_t* bytes;
+ if (!d.readBytes(numBytes, &bytes)) {
+ return false;
+ }
+
+ if (!IsUtf8(AsChars(Span(bytes, numBytes)))) {
+ return false;
+ }
+
+ UTF8Bytes utf8Bytes;
+ if (!utf8Bytes.resizeUninitialized(numBytes)) {
+ return false;
+ }
+ memcpy(utf8Bytes.begin(), bytes, numBytes);
+
+ *name = CacheableName(std::move(utf8Bytes));
+ return true;
+}
+
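+// Decode a type index and check that it is in range and refers to a function
+// type.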
+static bool DecodeFuncTypeIndex(Decoder& d, const SharedTypeContext& types,
+ uint32_t* funcTypeIndex) {
+ if (!d.readVarU32(funcTypeIndex)) {
+ return d.fail("expected signature index");
+ }
+
+ if (*funcTypeIndex >= types->length()) {
+ return d.fail("signature index out of range");
+ }
+
+ const TypeDef& def = (*types)[*funcTypeIndex];
+
+ if (!def.isFuncType()) {
+ return d.fail("signature index references non-signature");
+ }
+
+ return true;
+}
+
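+// Decode a limits structure: a flags byte followed by the initial size and,
+// if LimitsFlags::HasMaximum is set, a maximum size. For memories the flags
+// may additionally request sharing (LimitsFlags::IsShared) and, when memory64
+// is compiled in, a 64-bit index type (LimitsFlags::IsI64).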
+static bool DecodeLimits(Decoder& d, LimitsKind kind, Limits* limits) {
+ uint8_t flags;
+ if (!d.readFixedU8(&flags)) {
+ return d.fail("expected flags");
+ }
+
+ uint8_t mask = kind == LimitsKind::Memory ? uint8_t(LimitsMask::Memory)
+ : uint8_t(LimitsMask::Table);
+
+ if (flags & ~uint8_t(mask)) {
+ return d.failf("unexpected bits set in flags: %" PRIu32,
+ uint32_t(flags & ~uint8_t(mask)));
+ }
+
+ uint64_t initial;
+ if (!d.readVarU64(&initial)) {
+ return d.fail("expected initial length");
+ }
+ limits->initial = initial;
+
+ if (flags & uint8_t(LimitsFlags::HasMaximum)) {
+ uint64_t maximum;
+ if (!d.readVarU64(&maximum)) {
+ return d.fail("expected maximum length");
+ }
+
+ if (limits->initial > maximum) {
+ return d.failf(
+ "memory size minimum must not be greater than maximum; "
+ "maximum length %" PRIu64 " is less than initial length %" PRIu64,
+ maximum, limits->initial);
+ }
+
+ limits->maximum.emplace(maximum);
+ }
+
+ limits->shared = Shareable::False;
+ limits->indexType = IndexType::I32;
+
+ // Memory limits may be shared or specify an alternate index type
+ if (kind == LimitsKind::Memory) {
+ if ((flags & uint8_t(LimitsFlags::IsShared)) &&
+ !(flags & uint8_t(LimitsFlags::HasMaximum))) {
+ return d.fail("maximum length required for shared memory");
+ }
+
+ limits->shared = (flags & uint8_t(LimitsFlags::IsShared))
+ ? Shareable::True
+ : Shareable::False;
+
+#ifdef ENABLE_WASM_MEMORY64
+ limits->indexType =
+ (flags & uint8_t(LimitsFlags::IsI64)) ? IndexType::I64 : IndexType::I32;
+#else
+ if (flags & uint8_t(LimitsFlags::IsI64)) {
+ return d.fail("i64 is not supported for memory limits");
+ }
+#endif
+ }
+
+ return true;
+}
+
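+// Decode a table definition: an optional initializer-expression prefix, the
+// element reference type, and 32-bit limits. Tables with a non-nullable
+// element type must provide an initializer expression.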
+static bool DecodeTableTypeAndLimits(Decoder& d, ModuleEnvironment* env) {
+ bool initExprPresent = false;
+ uint8_t typeCode;
+ if (!d.peekByte(&typeCode)) {
+ return d.fail("expected type code");
+ }
+ if (typeCode == (uint8_t)TypeCode::TableHasInitExpr) {
+ d.uncheckedReadFixedU8();
+ uint8_t flags;
+ if (!d.readFixedU8(&flags) || flags != 0) {
+ return d.fail("expected reserved byte to be 0");
+ }
+ initExprPresent = true;
+ }
+
+ RefType tableElemType;
+ if (!d.readRefType(*env->types, env->features, &tableElemType)) {
+ return false;
+ }
+
+ Limits limits;
+ if (!DecodeLimits(d, LimitsKind::Table, &limits)) {
+ return false;
+ }
+
+  // Decoding limits for a table only supports the i32 index type.
+ MOZ_ASSERT(limits.indexType == IndexType::I32);
+
+ // If there's a maximum, check it is in range. The check to exclude
+ // initial > maximum is carried out by the DecodeLimits call above, so
+ // we don't repeat it here.
+ if (limits.initial > MaxTableLimitField ||
+ ((limits.maximum.isSome() &&
+ limits.maximum.value() > MaxTableLimitField))) {
+ return d.fail("too many table elements");
+ }
+
+ if (env->tables.length() >= MaxTables) {
+ return d.fail("too many tables");
+ }
+
+ // The rest of the runtime expects table limits to be within a 32-bit range.
+ static_assert(MaxTableLimitField <= UINT32_MAX, "invariant");
+ uint32_t initialLength = uint32_t(limits.initial);
+ Maybe<uint32_t> maximumLength;
+ if (limits.maximum) {
+ maximumLength = Some(uint32_t(*limits.maximum));
+ }
+
+ Maybe<InitExpr> initExpr;
+ if (initExprPresent) {
+ InitExpr initializer;
+ if (!InitExpr::decodeAndValidate(d, env, tableElemType,
+ env->globals.length(), &initializer)) {
+ return false;
+ }
+ initExpr = Some(std::move(initializer));
+ } else {
+ if (!tableElemType.isNullable()) {
+ return d.fail("table with non-nullable references requires initializer");
+ }
+ }
+
+ return env->tables.emplaceBack(tableElemType, initialLength, maximumLength,
+ std::move(initExpr), /* isAsmJS */ false);
+}
+
+static bool DecodeGlobalType(Decoder& d, const SharedTypeContext& types,
+ const FeatureArgs& features, ValType* type,
+ bool* isMutable) {
+ if (!d.readValType(*types, features, type)) {
+ return d.fail("expected global type");
+ }
+
+ uint8_t flags;
+ if (!d.readFixedU8(&flags)) {
+ return d.fail("expected global flags");
+ }
+
+ if (flags & ~uint8_t(GlobalTypeImmediate::AllowedMask)) {
+ return d.fail("unexpected bits set in global flags");
+ }
+
+ *isMutable = flags & uint8_t(GlobalTypeImmediate::IsMutable);
+ return true;
+}
+
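+// Decode the (single) memory definition: reject a second memory, then check
+// the limits against the maximum for the chosen index type and against the
+// shared-memory and memory64 feature settings.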
+static bool DecodeMemoryTypeAndLimits(Decoder& d, ModuleEnvironment* env) {
+ if (env->usesMemory()) {
+ return d.fail("already have default memory");
+ }
+
+ Limits limits;
+ if (!DecodeLimits(d, LimitsKind::Memory, &limits)) {
+ return false;
+ }
+
+ uint64_t maxField = MaxMemoryLimitField(limits.indexType);
+
+ if (limits.initial > maxField) {
+ return d.fail("initial memory size too big");
+ }
+
+ if (limits.maximum && *limits.maximum > maxField) {
+ return d.fail("maximum memory size too big");
+ }
+
+ if (limits.shared == Shareable::True &&
+ env->sharedMemoryEnabled() == Shareable::False) {
+ return d.fail("shared memory is disabled");
+ }
+
+ if (limits.indexType == IndexType::I64 && !env->memory64Enabled()) {
+ return d.fail("memory64 is disabled");
+ }
+
+ env->memory = Some(MemoryDesc(limits));
+ return true;
+}
+
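+// Decode a tag definition. Only exception tags are currently allowed, and the
+// referenced function type must not return any results.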
+static bool DecodeTag(Decoder& d, ModuleEnvironment* env, TagKind* tagKind,
+ uint32_t* funcTypeIndex) {
+ uint32_t tagCode;
+ if (!d.readVarU32(&tagCode)) {
+ return d.fail("expected tag kind");
+ }
+
+ if (TagKind(tagCode) != TagKind::Exception) {
+ return d.fail("illegal tag kind");
+ }
+ *tagKind = TagKind(tagCode);
+
+ if (!d.readVarU32(funcTypeIndex)) {
+ return d.fail("expected function index in tag");
+ }
+ if (*funcTypeIndex >= env->numTypes()) {
+ return d.fail("function type index in tag out of bounds");
+ }
+ if (!(*env->types)[*funcTypeIndex].isFuncType()) {
+ return d.fail("function type index must index a function type");
+ }
+ if ((*env->types)[*funcTypeIndex].funcType().results().length() != 0) {
+ return d.fail("tag function types must not return anything");
+ }
+ return true;
+}
+
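+// Decode a single import: the module name, the field name, and a
+// kind-specific description of the imported function, table, memory, global,
+// or tag, enforcing the per-kind count limits.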
+static bool DecodeImport(Decoder& d, ModuleEnvironment* env) {
+ CacheableName moduleName;
+ if (!DecodeName(d, &moduleName)) {
+ return d.fail("expected valid import module name");
+ }
+
+ CacheableName funcName;
+ if (!DecodeName(d, &funcName)) {
+ return d.fail("expected valid import field name");
+ }
+
+ uint8_t rawImportKind;
+ if (!d.readFixedU8(&rawImportKind)) {
+ return d.fail("failed to read import kind");
+ }
+
+ DefinitionKind importKind = DefinitionKind(rawImportKind);
+
+ switch (importKind) {
+ case DefinitionKind::Function: {
+ uint32_t funcTypeIndex;
+ if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
+ return false;
+ }
+ if (!env->funcs.append(FuncDesc(
+ &env->types->type(funcTypeIndex).funcType(), funcTypeIndex))) {
+ return false;
+ }
+ if (env->funcs.length() > MaxFuncs) {
+ return d.fail("too many functions");
+ }
+ break;
+ }
+ case DefinitionKind::Table: {
+ if (!DecodeTableTypeAndLimits(d, env)) {
+ return false;
+ }
+ env->tables.back().isImported = true;
+ break;
+ }
+ case DefinitionKind::Memory: {
+ if (!DecodeMemoryTypeAndLimits(d, env)) {
+ return false;
+ }
+ break;
+ }
+ case DefinitionKind::Global: {
+ ValType type;
+ bool isMutable;
+ if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
+ return false;
+ }
+ if (!env->globals.append(
+ GlobalDesc(type, isMutable, env->globals.length()))) {
+ return false;
+ }
+ if (env->globals.length() > MaxGlobals) {
+ return d.fail("too many globals");
+ }
+ break;
+ }
+ case DefinitionKind::Tag: {
+ TagKind tagKind;
+ uint32_t funcTypeIndex;
+ if (!DecodeTag(d, env, &tagKind, &funcTypeIndex)) {
+ return false;
+ }
+ ValTypeVector args;
+ if (!args.appendAll((*env->types)[funcTypeIndex].funcType().args())) {
+ return false;
+ }
+ MutableTagType tagType = js_new<TagType>();
+ if (!tagType || !tagType->initialize(std::move(args))) {
+ return false;
+ }
+ if (!env->tags.emplaceBack(tagKind, tagType)) {
+ return false;
+ }
+ if (env->tags.length() > MaxTags) {
+ return d.fail("too many tags");
+ }
+ break;
+ }
+ default:
+ return d.fail("unsupported import kind");
+ }
+
+ return env->imports.emplaceBack(std::move(moduleName), std::move(funcName),
+ importKind);
+}
+
+static bool DecodeImportSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Import, env, &range, "import")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numImports;
+ if (!d.readVarU32(&numImports)) {
+ return d.fail("failed to read number of imports");
+ }
+
+ if (numImports > MaxImports) {
+ return d.fail("too many imports");
+ }
+
+ for (uint32_t i = 0; i < numImports; i++) {
+ if (!DecodeImport(d, env)) {
+ return false;
+ }
+ }
+
+ if (!d.finishSection(*range, "import")) {
+ return false;
+ }
+
+ env->numFuncImports = env->funcs.length();
+ return true;
+}
+
+static bool DecodeFunctionSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Function, env, &range, "function")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs)) {
+ return d.fail("expected number of function definitions");
+ }
+
+ CheckedInt<uint32_t> numFuncs = env->funcs.length();
+ numFuncs += numDefs;
+ if (!numFuncs.isValid() || numFuncs.value() > MaxFuncs) {
+ return d.fail("too many functions");
+ }
+
+ if (!env->funcs.reserve(numFuncs.value())) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ uint32_t funcTypeIndex;
+ if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
+ return false;
+ }
+ env->funcs.infallibleAppend(
+ FuncDesc(&env->types->type(funcTypeIndex).funcType(), funcTypeIndex));
+ }
+
+ return d.finishSection(*range, "function");
+}
+
+static bool DecodeTableSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Table, env, &range, "table")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numTables;
+ if (!d.readVarU32(&numTables)) {
+ return d.fail("failed to read number of tables");
+ }
+
+ for (uint32_t i = 0; i < numTables; ++i) {
+ if (!DecodeTableTypeAndLimits(d, env)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "table");
+}
+
+static bool DecodeMemorySection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Memory, env, &range, "memory")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numMemories;
+ if (!d.readVarU32(&numMemories)) {
+ return d.fail("failed to read number of memories");
+ }
+
+ if (numMemories > 1) {
+ return d.fail("the number of memories must be at most one");
+ }
+
+ for (uint32_t i = 0; i < numMemories; ++i) {
+ if (!DecodeMemoryTypeAndLimits(d, env)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "memory");
+}
+
+static bool DecodeGlobalSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Global, env, &range, "global")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs)) {
+ return d.fail("expected number of globals");
+ }
+
+ CheckedInt<uint32_t> numGlobals = env->globals.length();
+ numGlobals += numDefs;
+ if (!numGlobals.isValid() || numGlobals.value() > MaxGlobals) {
+ return d.fail("too many globals");
+ }
+
+ if (!env->globals.reserve(numGlobals.value())) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ ValType type;
+ bool isMutable;
+ if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
+ return false;
+ }
+
+ InitExpr initializer;
+ if (!InitExpr::decodeAndValidate(d, env, type, i, &initializer)) {
+ return false;
+ }
+
+ env->globals.infallibleAppend(
+ GlobalDesc(std::move(initializer), isMutable));
+ }
+
+ return d.finishSection(*range, "global");
+}
+
+static bool DecodeTagSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Tag, env, &range, "tag")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ if (!env->exceptionsEnabled()) {
+ return d.fail("exceptions not enabled");
+ }
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs)) {
+ return d.fail("expected number of tags");
+ }
+
+ CheckedInt<uint32_t> numTags = env->tags.length();
+ numTags += numDefs;
+ if (!numTags.isValid() || numTags.value() > MaxTags) {
+ return d.fail("too many tags");
+ }
+
+ if (!env->tags.reserve(numTags.value())) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ TagKind tagKind;
+ uint32_t funcTypeIndex;
+ if (!DecodeTag(d, env, &tagKind, &funcTypeIndex)) {
+ return false;
+ }
+ ValTypeVector args;
+ if (!args.appendAll((*env->types)[funcTypeIndex].funcType().args())) {
+ return false;
+ }
+ MutableTagType tagType = js_new<TagType>();
+ if (!tagType || !tagType->initialize(std::move(args))) {
+ return false;
+ }
+ env->tags.infallibleEmplaceBack(tagKind, tagType);
+ }
+
+ return d.finishSection(*range, "tag");
+}
+
+using NameSet = HashSet<Span<char>, NameHasher, SystemAllocPolicy>;
+
+[[nodiscard]] static bool DecodeExportName(Decoder& d, NameSet* dupSet,
+ CacheableName* exportName) {
+ if (!DecodeName(d, exportName)) {
+ d.fail("expected valid export name");
+ return false;
+ }
+
+ NameSet::AddPtr p = dupSet->lookupForAdd(exportName->utf8Bytes());
+ if (p) {
+ d.fail("duplicate export");
+ return false;
+ }
+
+ return dupSet->add(p, exportName->utf8Bytes());
+}
+
+static bool DecodeExport(Decoder& d, ModuleEnvironment* env, NameSet* dupSet) {
+ CacheableName fieldName;
+ if (!DecodeExportName(d, dupSet, &fieldName)) {
+ return false;
+ }
+
+ uint8_t exportKind;
+ if (!d.readFixedU8(&exportKind)) {
+ return d.fail("failed to read export kind");
+ }
+
+ switch (DefinitionKind(exportKind)) {
+ case DefinitionKind::Function: {
+ uint32_t funcIndex;
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("expected function index");
+ }
+
+ if (funcIndex >= env->numFuncs()) {
+ return d.fail("exported function index out of bounds");
+ }
+
+ env->declareFuncExported(funcIndex, /* eager */ true,
+ /* canRefFunc */ true);
+ return env->exports.emplaceBack(std::move(fieldName), funcIndex,
+ DefinitionKind::Function);
+ }
+ case DefinitionKind::Table: {
+ uint32_t tableIndex;
+ if (!d.readVarU32(&tableIndex)) {
+ return d.fail("expected table index");
+ }
+
+ if (tableIndex >= env->tables.length()) {
+ return d.fail("exported table index out of bounds");
+ }
+ env->tables[tableIndex].isExported = true;
+ return env->exports.emplaceBack(std::move(fieldName), tableIndex,
+ DefinitionKind::Table);
+ }
+ case DefinitionKind::Memory: {
+ uint32_t memoryIndex;
+ if (!d.readVarU32(&memoryIndex)) {
+ return d.fail("expected memory index");
+ }
+
+ if (memoryIndex > 0 || !env->usesMemory()) {
+ return d.fail("exported memory index out of bounds");
+ }
+
+ return env->exports.emplaceBack(std::move(fieldName),
+ DefinitionKind::Memory);
+ }
+ case DefinitionKind::Global: {
+ uint32_t globalIndex;
+ if (!d.readVarU32(&globalIndex)) {
+ return d.fail("expected global index");
+ }
+
+ if (globalIndex >= env->globals.length()) {
+ return d.fail("exported global index out of bounds");
+ }
+
+ GlobalDesc* global = &env->globals[globalIndex];
+ global->setIsExport();
+
+ return env->exports.emplaceBack(std::move(fieldName), globalIndex,
+ DefinitionKind::Global);
+ }
+ case DefinitionKind::Tag: {
+ uint32_t tagIndex;
+ if (!d.readVarU32(&tagIndex)) {
+ return d.fail("expected tag index");
+ }
+ if (tagIndex >= env->tags.length()) {
+ return d.fail("exported tag index out of bounds");
+ }
+
+ env->tags[tagIndex].isExport = true;
+ return env->exports.emplaceBack(std::move(fieldName), tagIndex,
+ DefinitionKind::Tag);
+ }
+ default:
+ return d.fail("unexpected export kind");
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+static bool DecodeExportSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Export, env, &range, "export")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ NameSet dupSet;
+
+ uint32_t numExports;
+ if (!d.readVarU32(&numExports)) {
+ return d.fail("failed to read number of exports");
+ }
+
+ if (numExports > MaxExports) {
+ return d.fail("too many exports");
+ }
+
+ for (uint32_t i = 0; i < numExports; i++) {
+ if (!DecodeExport(d, env, &dupSet)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "export");
+}
+
+static bool DecodeStartSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Start, env, &range, "start")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t funcIndex;
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("failed to read start func index");
+ }
+
+ if (funcIndex >= env->numFuncs()) {
+ return d.fail("unknown start function");
+ }
+
+ const FuncType& funcType = *env->funcs[funcIndex].type;
+ if (funcType.results().length() > 0) {
+ return d.fail("start function must not return anything");
+ }
+
+ if (funcType.args().length()) {
+ return d.fail("start function must be nullary");
+ }
+
+ env->declareFuncExported(funcIndex, /* eager */ true, /* canFuncRef */ false);
+ env->startFuncIndex = Some(funcIndex);
+
+ return d.finishSection(*range, "start");
+}
+
+static inline ElemSegment::Kind NormalizeElemSegmentKind(
+ ElemSegmentKind decodedKind) {
+ switch (decodedKind) {
+ case ElemSegmentKind::Active:
+ case ElemSegmentKind::ActiveWithTableIndex: {
+ return ElemSegment::Kind::Active;
+ }
+ case ElemSegmentKind::Passive: {
+ return ElemSegment::Kind::Passive;
+ }
+ case ElemSegmentKind::Declared: {
+ return ElemSegment::Kind::Declared;
+ }
+ }
+ MOZ_CRASH("unexpected elem segment kind");
+}
+
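+// Decode the element section. Active segments carry an offset initializer
+// (and optionally an explicit table index); passive and declared segments
+// carry neither. Payloads are either plain function indices or element
+// expressions (ref.func / ref.null), each checked against the segment's
+// element type.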
+static bool DecodeElemSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Elem, env, &range, "elem")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numSegments;
+ if (!d.readVarU32(&numSegments)) {
+ return d.fail("failed to read number of elem segments");
+ }
+
+ if (numSegments > MaxElemSegments) {
+ return d.fail("too many elem segments");
+ }
+
+ if (!env->elemSegments.reserve(numSegments)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numSegments; i++) {
+ uint32_t segmentFlags;
+ if (!d.readVarU32(&segmentFlags)) {
+ return d.fail("expected elem segment flags field");
+ }
+
+ Maybe<ElemSegmentFlags> flags = ElemSegmentFlags::construct(segmentFlags);
+ if (!flags) {
+ return d.fail("invalid elem segment flags field");
+ }
+
+ MutableElemSegment seg = js_new<ElemSegment>();
+ if (!seg) {
+ return false;
+ }
+
+ ElemSegmentKind kind = flags->kind();
+ seg->kind = NormalizeElemSegmentKind(kind);
+
+ if (kind == ElemSegmentKind::Active ||
+ kind == ElemSegmentKind::ActiveWithTableIndex) {
+ if (env->tables.length() == 0) {
+ return d.fail("active elem segment requires a table");
+ }
+
+ uint32_t tableIndex = 0;
+ if (kind == ElemSegmentKind::ActiveWithTableIndex &&
+ !d.readVarU32(&tableIndex)) {
+ return d.fail("expected table index");
+ }
+ if (tableIndex >= env->tables.length()) {
+ return d.fail("table index out of range for element segment");
+ }
+ seg->tableIndex = tableIndex;
+
+ InitExpr offset;
+ if (!InitExpr::decodeAndValidate(d, env, ValType::I32,
+ env->globals.length(), &offset)) {
+ return false;
+ }
+ seg->offsetIfActive.emplace(std::move(offset));
+ } else {
+ // Too many bugs result from keeping this value zero. For passive
+ // or declared segments, there really is no table index, and we should
+ // never touch the field.
+ MOZ_ASSERT(kind == ElemSegmentKind::Passive ||
+ kind == ElemSegmentKind::Declared);
+ seg->tableIndex = (uint32_t)-1;
+ }
+
+ ElemSegmentPayload payload = flags->payload();
+ RefType elemType;
+
+ // `ActiveWithTableIndex`, `Declared`, and `Passive` element segments encode
+ // the type or definition kind of the payload. `Active` element segments are
+ // restricted to MVP behavior, which assumes only function indices.
+ if (kind == ElemSegmentKind::Active) {
+ elemType = RefType::func();
+ } else {
+ switch (payload) {
+ case ElemSegmentPayload::ElemExpression: {
+ if (!d.readRefType(*env->types, env->features, &elemType)) {
+ return false;
+ }
+ break;
+ }
+ case ElemSegmentPayload::ExternIndex: {
+ uint8_t form;
+ if (!d.readFixedU8(&form)) {
+ return d.fail("expected type or extern kind");
+ }
+
+ if (form != uint8_t(DefinitionKind::Function)) {
+ return d.fail(
+ "segments with extern indices can only contain function "
+ "references");
+ }
+ elemType = RefType::func();
+ }
+ }
+ }
+
+ // Check constraints on the element type.
+ switch (kind) {
+ case ElemSegmentKind::Active:
+ case ElemSegmentKind::ActiveWithTableIndex: {
+ RefType tblElemType = env->tables[seg->tableIndex].elemType;
+ if (!CheckIsSubtypeOf(d, *env, d.currentOffset(),
+ ValType(elemType).fieldType(),
+ ValType(tblElemType).fieldType())) {
+ return false;
+ }
+ break;
+ }
+ case ElemSegmentKind::Declared:
+ case ElemSegmentKind::Passive: {
+ // Passive segment element types are checked when used with a
+ // `table.init` instruction.
+ break;
+ }
+ }
+ seg->elemType = elemType;
+
+ uint32_t numElems;
+ if (!d.readVarU32(&numElems)) {
+ return d.fail("expected segment size");
+ }
+
+ if (numElems > MaxElemSegmentLength) {
+ return d.fail("too many table elements");
+ }
+
+ if (!seg->elemFuncIndices.reserve(numElems)) {
+ return false;
+ }
+
+ bool isAsmJS = seg->active() && env->tables[seg->tableIndex].isAsmJS;
+
+    // For passive segments we should use InitExpr, but we don't really want
+    // to generalize the ElemSection data structure yet, so instead read the
+    // required Ref.Func and End here.
+
+ for (uint32_t i = 0; i < numElems; i++) {
+ bool needIndex = true;
+
+ if (payload == ElemSegmentPayload::ElemExpression) {
+ OpBytes op;
+ if (!d.readOp(&op)) {
+ return d.fail("failed to read initializer operation");
+ }
+
+ RefType initType = RefType::extern_();
+ switch (op.b0) {
+ case uint16_t(Op::RefFunc):
+ initType = RefType::func();
+ break;
+ case uint16_t(Op::RefNull):
+ if (!d.readHeapType(*env->types, env->features, true, &initType)) {
+ return false;
+ }
+ needIndex = false;
+ break;
+ default:
+ return d.fail("failed to read initializer operation");
+ }
+ if (!CheckIsSubtypeOf(d, *env, d.currentOffset(),
+ ValType(initType).fieldType(),
+ ValType(elemType).fieldType())) {
+ return false;
+ }
+ }
+
+ uint32_t funcIndex = NullFuncIndex;
+ if (needIndex) {
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("failed to read element function index");
+ }
+ if (funcIndex >= env->numFuncs()) {
+ return d.fail("table element out of range");
+ }
+ }
+
+ if (payload == ElemSegmentPayload::ElemExpression) {
+ OpBytes end;
+ if (!d.readOp(&end) || end.b0 != uint16_t(Op::End)) {
+ return d.fail("failed to read end of initializer expression");
+ }
+ }
+
+ seg->elemFuncIndices.infallibleAppend(funcIndex);
+ if (funcIndex != NullFuncIndex && !isAsmJS) {
+ env->declareFuncExported(funcIndex, /* eager */ false,
+ /* canRefFunc */ true);
+ }
+ }
+
+ env->elemSegments.infallibleAppend(std::move(seg));
+ }
+
+ return d.finishSection(*range, "elem");
+}
+
+static bool DecodeDataCountSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::DataCount, env, &range, "datacount")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t dataCount;
+ if (!d.readVarU32(&dataCount)) {
+ return d.fail("expected data segment count");
+ }
+
+ env->dataCount.emplace(dataCount);
+
+ return d.finishSection(*range, "datacount");
+}
+
+bool wasm::StartsCodeSection(const uint8_t* begin, const uint8_t* end,
+ SectionRange* codeSection) {
+ UniqueChars unused;
+ Decoder d(begin, end, 0, &unused);
+
+ if (!DecodePreamble(d)) {
+ return false;
+ }
+
+ while (!d.done()) {
+ uint8_t id;
+ SectionRange range;
+ if (!d.readSectionHeader(&id, &range)) {
+ return false;
+ }
+
+ if (id == uint8_t(SectionId::Code)) {
+ *codeSection = range;
+ return true;
+ }
+
+ if (!d.readBytes(range.size)) {
+ return false;
+ }
+ }
+
+ return false;
+}
+
+bool wasm::DecodeModuleEnvironment(Decoder& d, ModuleEnvironment* env) {
+ if (!DecodePreamble(d)) {
+ return false;
+ }
+
+ if (!DecodeTypeSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeImportSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeFunctionSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeTableSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeMemorySection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeTagSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeGlobalSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeExportSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeStartSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeElemSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeDataCountSection(d, env)) {
+ return false;
+ }
+
+ if (!d.startSection(SectionId::Code, env, &env->codeSection, "code")) {
+ return false;
+ }
+
+ if (env->codeSection && env->codeSection->size > MaxCodeSectionBytes) {
+ return d.fail("code section too big");
+ }
+
+ return true;
+}
+
+static bool DecodeFunctionBody(Decoder& d, const ModuleEnvironment& env,
+ uint32_t funcIndex) {
+ uint32_t bodySize;
+ if (!d.readVarU32(&bodySize)) {
+ return d.fail("expected number of function body bytes");
+ }
+
+ if (bodySize > MaxFunctionBytes) {
+ return d.fail("function body too big");
+ }
+
+ if (d.bytesRemain() < bodySize) {
+ return d.fail("function body length too big");
+ }
+
+ return ValidateFunctionBody(env, funcIndex, bodySize, d);
+}
+
+static bool DecodeCodeSection(Decoder& d, ModuleEnvironment* env) {
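+ // A module may omit the code section only when it defines no functions;
+ // otherwise the section is mandatory.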
+ if (!env->codeSection) {
+ if (env->numFuncDefs() != 0) {
+ return d.fail("expected code section");
+ }
+ return true;
+ }
+
+ uint32_t numFuncDefs;
+ if (!d.readVarU32(&numFuncDefs)) {
+ return d.fail("expected function body count");
+ }
+
+ if (numFuncDefs != env->numFuncDefs()) {
+ return d.fail(
+ "function body count does not match function signature count");
+ }
+
+ for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
+ if (!DecodeFunctionBody(d, *env, env->numFuncImports + funcDefIndex)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*env->codeSection, "code");
+}
+
+static bool DecodeDataSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Data, env, &range, "data")) {
+ return false;
+ }
+ if (!range) {
+ if (env->dataCount.isSome() && *env->dataCount > 0) {
+ return d.fail("number of data segments does not match declared count");
+ }
+ return true;
+ }
+
+ uint32_t numSegments;
+ if (!d.readVarU32(&numSegments)) {
+ return d.fail("failed to read number of data segments");
+ }
+
+ if (numSegments > MaxDataSegments) {
+ return d.fail("too many data segments");
+ }
+
+ if (env->dataCount.isSome() && numSegments != *env->dataCount) {
+ return d.fail("number of data segments does not match declared count");
+ }
+
+ for (uint32_t i = 0; i < numSegments; i++) {
+ uint32_t initializerKindVal;
+ if (!d.readVarU32(&initializerKindVal)) {
+ return d.fail("expected data initializer-kind field");
+ }
+
+ switch (initializerKindVal) {
+ case uint32_t(DataSegmentKind::Active):
+ case uint32_t(DataSegmentKind::Passive):
+ case uint32_t(DataSegmentKind::ActiveWithMemoryIndex):
+ break;
+ default:
+ return d.fail("invalid data initializer-kind field");
+ }
+
+ DataSegmentKind initializerKind = DataSegmentKind(initializerKindVal);
+
+ if (initializerKind != DataSegmentKind::Passive && !env->usesMemory()) {
+ return d.fail("active data segment requires a memory section");
+ }
+
+ uint32_t memIndex = 0;
+ if (initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
+ if (!d.readVarU32(&memIndex)) {
+ return d.fail("expected memory index");
+ }
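+ // Only a single memory (index 0) is supported here, so any explicit
+ // memory index other than zero is rejected.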
+ if (memIndex > 0) {
+ return d.fail("memory index must be zero");
+ }
+ }
+
+ DataSegmentEnv seg;
+ if (initializerKind == DataSegmentKind::Active ||
+ initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
+ InitExpr segOffset;
+ ValType exprType = ToValType(env->memory->indexType());
+ if (!InitExpr::decodeAndValidate(d, env, exprType, env->globals.length(),
+ &segOffset)) {
+ return false;
+ }
+ seg.offsetIfActive.emplace(std::move(segOffset));
+ }
+
+ if (!d.readVarU32(&seg.length)) {
+ return d.fail("expected segment size");
+ }
+
+ if (seg.length > MaxDataSegmentLengthPages * PageSize) {
+ return d.fail("segment size too big");
+ }
+
+ seg.bytecodeOffset = d.currentOffset();
+
+ if (!d.readBytes(seg.length)) {
+ return d.fail("data segment shorter than declared");
+ }
+
+ if (!env->dataSegments.append(std::move(seg))) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "data");
+}
+
+static bool DecodeModuleNameSubsection(Decoder& d,
+ const CustomSectionEnv& nameSection,
+ ModuleEnvironment* env) {
+ Maybe<uint32_t> endOffset;
+ if (!d.startNameSubsection(NameType::Module, &endOffset)) {
+ return false;
+ }
+ if (!endOffset) {
+ return true;
+ }
+
+ Name moduleName;
+ if (!d.readVarU32(&moduleName.length)) {
+ return d.fail("failed to read module name length");
+ }
+
+ MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
+ moduleName.offsetInNamePayload =
+ d.currentOffset() - nameSection.payloadOffset;
+
+ const uint8_t* bytes;
+ if (!d.readBytes(moduleName.length, &bytes)) {
+ return d.fail("failed to read module name bytes");
+ }
+
+ if (!d.finishNameSubsection(*endOffset)) {
+ return false;
+ }
+
+ // Only save the module name if the whole subsection validates.
+ env->moduleName.emplace(moduleName);
+ return true;
+}
+
+static bool DecodeFunctionNameSubsection(Decoder& d,
+ const CustomSectionEnv& nameSection,
+ ModuleEnvironment* env) {
+ Maybe<uint32_t> endOffset;
+ if (!d.startNameSubsection(NameType::Function, &endOffset)) {
+ return false;
+ }
+ if (!endOffset) {
+ return true;
+ }
+
+ uint32_t nameCount = 0;
+ if (!d.readVarU32(&nameCount) || nameCount > MaxFuncs) {
+ return d.fail("bad function name count");
+ }
+
+ NameVector funcNames;
+
+ for (uint32_t i = 0; i < nameCount; ++i) {
+ uint32_t funcIndex = 0;
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("unable to read function index");
+ }
+
+ // Names must refer to real functions and be given in ascending order.
+ if (funcIndex >= env->numFuncs() || funcIndex < funcNames.length()) {
+ return d.fail("invalid function index");
+ }
+
+ Name funcName;
+ if (!d.readVarU32(&funcName.length) ||
+ funcName.length > JS::MaxStringLength) {
+ return d.fail("unable to read function name length");
+ }
+
+ if (!funcName.length) {
+ continue;
+ }
+
+ if (!funcNames.resize(funcIndex + 1)) {
+ return false;
+ }
+
+ MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
+ funcName.offsetInNamePayload =
+ d.currentOffset() - nameSection.payloadOffset;
+
+ if (!d.readBytes(funcName.length)) {
+ return d.fail("unable to read function name bytes");
+ }
+
+ funcNames[funcIndex] = funcName;
+ }
+
+ if (!d.finishNameSubsection(*endOffset)) {
+ return false;
+ }
+
+ // To encourage fully valid function name subsections, only save names if
+ // the entire subsection decoded correctly.
+ env->funcNames = std::move(funcNames);
+ return true;
+}
+
+static bool DecodeNameSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startCustomSection(NameSectionName, env, &range)) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ env->nameCustomSectionIndex = Some(env->customSections.length() - 1);
+ const CustomSectionEnv& nameSection = env->customSections.back();
+
+ // Once started, custom sections do not report validation errors.
+
+ if (!DecodeModuleNameSubsection(d, nameSection, env)) {
+ goto finish;
+ }
+
+ if (!DecodeFunctionNameSubsection(d, nameSection, env)) {
+ goto finish;
+ }
+
+ while (d.currentOffset() < range->end()) {
+ if (!d.skipNameSubsection()) {
+ goto finish;
+ }
+ }
+
+finish:
+ d.finishCustomSection(NameSectionName, *range);
+ return true;
+}
+
+bool wasm::DecodeModuleTail(Decoder& d, ModuleEnvironment* env) {
+ if (!DecodeDataSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeNameSection(d, env)) {
+ return false;
+ }
+
+ while (!d.done()) {
+ if (!d.skipCustomSection(env)) {
+ if (d.resilientMode()) {
+ d.clearError();
+ return true;
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Validate algorithm.
+
+bool wasm::Validate(JSContext* cx, const ShareableBytes& bytecode,
+ const FeatureOptions& options, UniqueChars* error) {
+ Decoder d(bytecode.bytes, 0, error);
+
+ FeatureArgs features = FeatureArgs::build(cx, options);
+ ModuleEnvironment env(features);
+ if (!env.init()) {
+ return false;
+ }
+
+ if (!DecodeModuleEnvironment(d, &env)) {
+ return false;
+ }
+
+ if (!DecodeCodeSection(d, &env)) {
+ return false;
+ }
+
+ if (!DecodeModuleTail(d, &env)) {
+ return false;
+ }
+
+ MOZ_ASSERT(!*error, "unreported error in decoding");
+ return true;
+}
diff --git a/js/src/wasm/WasmValidate.h b/js/src/wasm/WasmValidate.h
new file mode 100644
index 0000000000..8bb2720fad
--- /dev/null
+++ b/js/src/wasm/WasmValidate.h
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_validate_h
+#define wasm_validate_h
+
+#include <type_traits>
+
+#include "js/Utility.h"
+#include "js/WasmFeatures.h"
+
+#include "wasm/WasmBinary.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmCompileArgs.h"
+#include "wasm/WasmModuleTypes.h"
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmTypeDef.h"
+
+namespace js {
+namespace wasm {
+
+using mozilla::Some;
+
+// ModuleEnvironment contains all the state necessary to process or render
+// functions, and all of the state necessary to validate all aspects of the
+// functions.
+//
+// A ModuleEnvironment is created by decoding all the sections before the wasm
+// code section and then used immutably during compilation. When compiling a
+// module using a ModuleGenerator, the ModuleEnvironment holds state shared
+// between the ModuleGenerator thread and the background compile threads. All
+// of the threads are given a read-only view of the ModuleEnvironment, thus
+// preventing race conditions.
+
+struct ModuleEnvironment {
+ // Constant parameters for the entire compilation:
+ const ModuleKind kind;
+ const FeatureArgs features;
+
+ // Module fields decoded from the module environment (or initialized while
+ // validating an asm.js module) and immutable during compilation:
+ Maybe<uint32_t> dataCount;
+ Maybe<MemoryDesc> memory;
+ MutableTypeContext types;
+ FuncDescVector funcs;
+ uint32_t numFuncImports;
+ GlobalDescVector globals;
+ TagDescVector tags;
+ TableDescVector tables;
+ Uint32Vector asmJSSigToTableIndex;
+ ImportVector imports;
+ ExportVector exports;
+ Maybe<uint32_t> startFuncIndex;
+ ElemSegmentVector elemSegments;
+ MaybeSectionRange codeSection;
+
+ // The start offset of the FuncImportInstanceData[] section of the instance
+ // data. There is one entry for every imported function.
+ uint32_t funcImportsOffsetStart;
+ // The start offset of the TypeDefInstanceData[] section of the instance
+ // data. There is one entry for every type.
+ uint32_t typeDefsOffsetStart;
+ // The start offset of the TableInstanceData[] section of the instance data.
+ // There is one entry for every table.
+ uint32_t tablesOffsetStart;
+ // The start offset of the tag section of the instance data. There is one
+ // entry for every tag.
+ uint32_t tagsOffsetStart;
+
+ // Fields decoded as part of the wasm module tail:
+ DataSegmentEnvVector dataSegments;
+ CustomSectionEnvVector customSections;
+ Maybe<uint32_t> nameCustomSectionIndex;
+ Maybe<Name> moduleName;
+ NameVector funcNames;
+
+ explicit ModuleEnvironment(FeatureArgs features,
+ ModuleKind kind = ModuleKind::Wasm)
+ : kind(kind),
+ features(features),
+ memory(Nothing()),
+ numFuncImports(0),
+ funcImportsOffsetStart(UINT32_MAX),
+ typeDefsOffsetStart(UINT32_MAX),
+ tablesOffsetStart(UINT32_MAX),
+ tagsOffsetStart(UINT32_MAX) {}
+
+ [[nodiscard]] bool init() {
+ types = js_new<TypeContext>(features);
+ return types;
+ }
+
+ size_t numTables() const { return tables.length(); }
+ size_t numTypes() const { return types->length(); }
+ size_t numFuncs() const { return funcs.length(); }
+ size_t numFuncDefs() const { return funcs.length() - numFuncImports; }
+
+ bool funcIsImport(uint32_t funcIndex) const {
+ return funcIndex < numFuncImports;
+ }
+
+#define WASM_FEATURE(NAME, SHORT_NAME, ...) \
+ bool SHORT_NAME##Enabled() const { return features.SHORT_NAME; }
+ JS_FOR_WASM_FEATURES(WASM_FEATURE, WASM_FEATURE, WASM_FEATURE)
+#undef WASM_FEATURE
+ Shareable sharedMemoryEnabled() const { return features.sharedMemory; }
+ bool hugeMemoryEnabled() const {
+ return !isAsmJS() && usesMemory() &&
+ IsHugeMemoryEnabled(memory->indexType());
+ }
+ bool simdAvailable() const { return features.simd; }
+ bool intrinsicsEnabled() const { return features.intrinsics; }
+
+ bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
+
+ bool usesMemory() const { return memory.isSome(); }
+ bool usesSharedMemory() const {
+ return memory.isSome() && memory->isShared();
+ }
+
+ void declareFuncExported(uint32_t funcIndex, bool eager, bool canRefFunc) {
+ FuncFlags flags = funcs[funcIndex].flags;
+
+ // Set the `Exported` flag, if not set.
+ flags = FuncFlags(uint8_t(flags) | uint8_t(FuncFlags::Exported));
+
+ // Merge in the `Eager` and `CanRefFunc` flags, if they're set. Be sure
+ // to not unset them if they've already been set.
+ if (eager) {
+ flags = FuncFlags(uint8_t(flags) | uint8_t(FuncFlags::Eager));
+ }
+ if (canRefFunc) {
+ flags = FuncFlags(uint8_t(flags) | uint8_t(FuncFlags::CanRefFunc));
+ }
+
+ funcs[funcIndex].flags = flags;
+ }
+
+ uint32_t offsetOfFuncImportInstanceData(uint32_t funcIndex) const {
+ MOZ_ASSERT(funcIndex < numFuncImports);
+ return funcImportsOffsetStart + funcIndex * sizeof(FuncImportInstanceData);
+ }
+
+ uint32_t offsetOfTypeDefInstanceData(uint32_t typeIndex) const {
+ MOZ_ASSERT(typeIndex < types->length());
+ return typeDefsOffsetStart + typeIndex * sizeof(TypeDefInstanceData);
+ }
+
+ uint32_t offsetOfTypeDef(uint32_t typeIndex) const {
+ return offsetOfTypeDefInstanceData(typeIndex) +
+ offsetof(TypeDefInstanceData, typeDef);
+ }
+ uint32_t offsetOfSuperTypeVector(uint32_t typeIndex) const {
+ return offsetOfTypeDefInstanceData(typeIndex) +
+ offsetof(TypeDefInstanceData, superTypeVector);
+ }
+
+ uint32_t offsetOfTableInstanceData(uint32_t tableIndex) const {
+ MOZ_ASSERT(tableIndex < tables.length());
+ return tablesOffsetStart + tableIndex * sizeof(TableInstanceData);
+ }
+
+ uint32_t offsetOfTagInstanceData(uint32_t tagIndex) const {
+ MOZ_ASSERT(tagIndex < tags.length());
+ return tagsOffsetStart + tagIndex * sizeof(TagInstanceData);
+ }
+};
+
+// ElemSegmentFlags provides methods for decoding and encoding the flags field
+// of an element segment. This is needed as the flags field has a non-trivial
+// encoding that is effectively split into independent `kind` and `payload`
+// enums.
+class ElemSegmentFlags {
+ enum class Flags : uint32_t {
+ Passive = 0x1,
+ WithIndexOrDeclared = 0x2,
+ ElemExpression = 0x4,
+ // Below this line are convenient combinations of flags
+ KindMask = Passive | WithIndexOrDeclared,
+ PayloadMask = ElemExpression,
+ AllFlags = Passive | WithIndexOrDeclared | ElemExpression,
+ };
+ uint32_t encoded_;
+
+ explicit ElemSegmentFlags(uint32_t encoded) : encoded_(encoded) {}
+
+ public:
+ ElemSegmentFlags(ElemSegmentKind kind, ElemSegmentPayload payload) {
+ encoded_ = uint32_t(kind) | uint32_t(payload);
+ }
+
+ static Maybe<ElemSegmentFlags> construct(uint32_t encoded) {
+ if (encoded > uint32_t(Flags::AllFlags)) {
+ return Nothing();
+ }
+ return Some(ElemSegmentFlags(encoded));
+ }
+
+ uint32_t encoded() const { return encoded_; }
+
+ ElemSegmentKind kind() const {
+ return static_cast<ElemSegmentKind>(encoded_ & uint32_t(Flags::KindMask));
+ }
+ ElemSegmentPayload payload() const {
+ return static_cast<ElemSegmentPayload>(encoded_ &
+ uint32_t(Flags::PayloadMask));
+ }
+};
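+
+// A minimal decoding sketch (illustrative only; `encodedField` stands for the
+// u32 flags value read from the segment, and the ElemSegmentKind and
+// ElemSegmentPayload enumerators are assumed to line up with the Flags bits
+// above):
+//
+//   Maybe<ElemSegmentFlags> flags = ElemSegmentFlags::construct(encodedField);
+//   if (!flags) {
+//     return d.fail("invalid elem segment flags");
+//   }
+//   ElemSegmentKind kind = flags->kind();
+//   ElemSegmentPayload payload = flags->payload();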
+
+// OpIter specialized for validation.
+
+class NothingVector {
+ Nothing unused_;
+
+ public:
+ bool resize(size_t length) { return true; }
+ Nothing& operator[](size_t) { return unused_; }
+ Nothing& back() { return unused_; }
+ size_t length() const { return 0; }
+ bool append(Nothing& nothing) { return true; }
+};
+
+struct ValidatingPolicy {
+ using Value = Nothing;
+ using ValueVector = NothingVector;
+ using ControlItem = Nothing;
+};
+
+template <typename Policy>
+class OpIter;
+
+using ValidatingOpIter = OpIter<ValidatingPolicy>;
+
+// Shared subtyping function across validation.
+
+[[nodiscard]] bool CheckIsSubtypeOf(Decoder& d, const ModuleEnvironment& env,
+ size_t opcodeOffset, FieldType subType,
+ FieldType superType);
+
+// The local entries are part of function bodies and thus serialized by both
+// wasm and asm.js and decoded as part of both validation and compilation.
+
+[[nodiscard]] bool EncodeLocalEntries(Encoder& e, const ValTypeVector& locals);
+
+// This performs no validation; the local entries must already have been
+// validated by an earlier pass.
+
+[[nodiscard]] bool DecodeValidatedLocalEntries(const TypeContext& types,
+ Decoder& d,
+ ValTypeVector* locals);
+
+// This validates the entries.
+
+[[nodiscard]] bool DecodeLocalEntries(Decoder& d, const TypeContext& types,
+ const FeatureArgs& features,
+ ValTypeVector* locals);
+
+// Returns whether the given [begin, end) prefix of a module's bytecode starts a
+// code section and, if so, returns the SectionRange of that code section.
+// Note that, even if this function returns 'false', [begin, end) may still be
+// a valid module in the special case where there are no function defs and the
+// code section is absent; the caller must handle that case.
+
+[[nodiscard]] bool StartsCodeSection(const uint8_t* begin, const uint8_t* end,
+ SectionRange* codeSection);
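+
+// A minimal usage sketch (illustrative only; `begin` and `end` are assumed to
+// delimit a module image held by the caller):
+//
+//   SectionRange codeSection;
+//   if (StartsCodeSection(begin, end, &codeSection)) {
+//     // codeSection now gives the location and size of the code section.
+//   }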
+
+// Calling DecodeModuleEnvironment decodes all sections up to the code section
+// and performs full validation of all those sections. The client must then
+// decode the code section itself, reusing ValidateFunctionBody if necessary,
+// and finally call DecodeModuleTail to decode all remaining sections after the
+// code section (again, performing full validation).
+
+[[nodiscard]] bool DecodeModuleEnvironment(Decoder& d, ModuleEnvironment* env);
+
+[[nodiscard]] bool ValidateFunctionBody(const ModuleEnvironment& env,
+ uint32_t funcIndex, uint32_t bodySize,
+ Decoder& d);
+
+[[nodiscard]] bool DecodeModuleTail(Decoder& d, ModuleEnvironment* env);
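+
+// A minimal driver sketch for the pipeline described above (illustrative
+// only; it mirrors what wasm::Validate() does, with the per-function body
+// loop elided):
+//
+//   ModuleEnvironment env(features);
+//   if (!env.init() || !DecodeModuleEnvironment(d, &env)) {
+//     return false;
+//   }
+//   // ... ValidateFunctionBody() for each body in the code section ...
+//   return DecodeModuleTail(d, &env);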
+
+// Validate an entire module, returning true if the module was validated
+// successfully. If Validate returns false:
+// - if *error is null, the caller should report out-of-memory
+// - otherwise, there was a legitimate error described by *error
+
+[[nodiscard]] bool Validate(JSContext* cx, const ShareableBytes& bytecode,
+ const FeatureOptions& options, UniqueChars* error);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_validate_h
diff --git a/js/src/wasm/WasmValue.cpp b/js/src/wasm/WasmValue.cpp
new file mode 100644
index 0000000000..f05070d1b9
--- /dev/null
+++ b/js/src/wasm/WasmValue.cpp
@@ -0,0 +1,927 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmValue.h"
+
+#include "jsmath.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/Printf.h"
+#include "js/Value.h"
+#include "vm/BigIntType.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSContext.h"
+#include "vm/JSObject.h"
+#include "vm/StringType.h"
+#include "wasm/WasmGcObject.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmLog.h"
+
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::wasm;
+
+Val::Val(const LitVal& val) {
+ type_ = val.type();
+ switch (type_.kind()) {
+ case ValType::I32:
+ cell_.i32_ = val.i32();
+ return;
+ case ValType::F32:
+ cell_.f32_ = val.f32();
+ return;
+ case ValType::I64:
+ cell_.i64_ = val.i64();
+ return;
+ case ValType::F64:
+ cell_.f64_ = val.f64();
+ return;
+ case ValType::V128:
+ cell_.v128_ = val.v128();
+ return;
+ case ValType::Ref:
+ cell_.ref_ = val.ref();
+ return;
+ }
+ MOZ_CRASH();
+}
+
+void Val::readFromRootedLocation(const void* loc) {
+ memset(&cell_, 0, sizeof(Cell));
+ memcpy(&cell_, loc, type_.size());
+}
+
+void Val::initFromRootedLocation(ValType type, const void* loc) {
+ MOZ_ASSERT(!type_.isValid());
+ type_ = type;
+ memset(&cell_, 0, sizeof(Cell));
+ memcpy(&cell_, loc, type_.size());
+}
+
+void Val::initFromHeapLocation(ValType type, const void* loc) {
+ MOZ_ASSERT(!type_.isValid());
+ type_ = type;
+ memset(&cell_, 0, sizeof(Cell));
+ readFromHeapLocation(loc);
+}
+
+void Val::writeToRootedLocation(void* loc, bool mustWrite64) const {
+ memcpy(loc, &cell_, type_.size());
+ if (mustWrite64 && type_.size() == 4) {
+ memset((uint8_t*)(loc) + 4, 0, 4);
+ }
+}
+
+void Val::readFromHeapLocation(const void* loc) {
+ memcpy(&cell_, loc, type_.size());
+}
+
+void Val::writeToHeapLocation(void* loc) const {
+ if (type_.isRefRepr()) {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ *((GCPtr<JSObject*>*)loc) = cell_.ref_.asJSObject();
+ return;
+ }
+ memcpy(loc, &cell_, type_.size());
+}
+
+bool Val::fromJSValue(JSContext* cx, ValType targetType, HandleValue val,
+ MutableHandleVal rval) {
+ rval.get().type_ = targetType;
+ // No pre/post barrier needed as rval is rooted
+ return ToWebAssemblyValue(cx, val, targetType, &rval.get().cell_,
+ targetType.size() == 8);
+}
+
+bool Val::toJSValue(JSContext* cx, MutableHandleValue rval) const {
+ return ToJSValue(cx, &cell_, type_, rval);
+}
+
+void Val::trace(JSTracer* trc) const {
+ if (isJSObject()) {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ TraceManuallyBarrieredEdge(trc, asJSObjectAddress(), "wasm val");
+ }
+}
+
+bool wasm::CheckRefType(JSContext* cx, RefType targetType, HandleValue v,
+ MutableHandleFunction fnval,
+ MutableHandleAnyRef refval) {
+ if (!targetType.isNullable() && v.isNull()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_REF_NONNULLABLE_VALUE);
+ return false;
+ }
+
+ switch (targetType.kind()) {
+ case RefType::Func:
+ return CheckFuncRefValue(cx, v, fnval);
+ case RefType::Extern:
+ return BoxAnyRef(cx, v, refval);
+ case RefType::Any:
+ return CheckAnyRefValue(cx, v, refval);
+ case RefType::NoFunc:
+ return CheckNullFuncRefValue(cx, v, fnval);
+ case RefType::NoExtern:
+ return CheckNullExternRefValue(cx, v, refval);
+ case RefType::None:
+ return CheckNullRefValue(cx, v, refval);
+ case RefType::Eq:
+ return CheckEqRefValue(cx, v, refval);
+ case RefType::Struct:
+ return CheckStructRefValue(cx, v, refval);
+ case RefType::Array:
+ return CheckArrayRefValue(cx, v, refval);
+ case RefType::TypeRef:
+ return CheckTypeRefValue(cx, targetType.typeDef(), v, refval);
+ }
+
+ MOZ_ASSERT(!ValType(targetType).isExposable());
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+}
+
+bool wasm::CheckFuncRefValue(JSContext* cx, HandleValue v,
+ MutableHandleFunction fun) {
+ if (v.isNull()) {
+ MOZ_ASSERT(!fun);
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<JSFunction>()) {
+ JSFunction* f = &obj.as<JSFunction>();
+ if (IsWasmExportedFunction(f)) {
+ fun.set(f);
+ return true;
+ }
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_FUNCREF_VALUE);
+ return false;
+}
+
+bool wasm::CheckAnyRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp) {
+ if (v.isNull()) {
+ vp.set(AnyRef::null());
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<WasmGcObject>()) {
+ vp.set(AnyRef::fromJSObject(&obj.as<WasmGcObject>()));
+ return true;
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ANYREF_VALUE);
+ return false;
+}
+
+bool wasm::CheckNullFuncRefValue(JSContext* cx, HandleValue v,
+ MutableHandleFunction fun) {
+ if (!v.isNull()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_NULL_FUNCREF_VALUE);
+ return false;
+ }
+ MOZ_ASSERT(!fun);
+ return true;
+}
+
+bool wasm::CheckNullExternRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp) {
+ if (!v.isNull()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_NULL_EXTERNREF_VALUE);
+ return false;
+ }
+
+ vp.set(AnyRef::null());
+ return true;
+}
+
+bool wasm::CheckNullRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp) {
+ if (!v.isNull()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_NULL_ANYREF_VALUE);
+ return false;
+ }
+
+ vp.set(AnyRef::null());
+ return true;
+}
+
+bool wasm::CheckEqRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp) {
+ if (v.isNull()) {
+ vp.set(AnyRef::null());
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<WasmGcObject>()) {
+ vp.set(AnyRef::fromJSObject(&obj.as<WasmGcObject>()));
+ return true;
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EQREF_VALUE);
+ return false;
+}
+
+bool wasm::CheckStructRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp) {
+ if (v.isNull()) {
+ vp.set(AnyRef::null());
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<WasmStructObject>()) {
+ vp.set(AnyRef::fromJSObject(&obj.as<WasmStructObject>()));
+ return true;
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_STRUCTREF_VALUE);
+ return false;
+}
+
+bool wasm::CheckArrayRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp) {
+ if (v.isNull()) {
+ vp.set(AnyRef::null());
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<WasmArrayObject>()) {
+ vp.set(AnyRef::fromJSObject(&obj.as<WasmArrayObject>()));
+ return true;
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ARRAYREF_VALUE);
+ return false;
+}
+
+bool wasm::CheckTypeRefValue(JSContext* cx, const TypeDef* typeDef,
+ HandleValue v, MutableHandleAnyRef vp) {
+ if (v.isNull()) {
+ vp.set(AnyRef::null());
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<WasmGcObject>() &&
+ obj.as<WasmGcObject>().isRuntimeSubtypeOf(typeDef)) {
+ vp.set(AnyRef::fromJSObject(&obj.as<WasmGcObject>()));
+ return true;
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_TYPEREF_VALUE);
+ return false;
+}
+
+class wasm::NoDebug {
+ public:
+ template <typename T>
+ static void print(T v) {}
+};
+
+class wasm::DebugCodegenVal {
+ template <typename T>
+ static void print(const char* fmt, T v) {
+ DebugCodegen(DebugChannel::Function, fmt, v);
+ }
+
+ public:
+ static void print(int32_t v) { print(" i32(%d)", v); }
+ static void print(int64_t v) { print(" i64(%" PRId64 ")", v); }
+ static void print(float v) { print(" f32(%f)", v); }
+ static void print(double v) { print(" f64(%lf)", v); }
+ static void print(void* v) { print(" ptr(%p)", v); }
+};
+
+template bool wasm::ToWebAssemblyValue<NoDebug>(JSContext* cx, HandleValue val,
+ FieldType type, void* loc,
+ bool mustWrite64,
+ CoercionLevel level);
+template bool wasm::ToWebAssemblyValue<DebugCodegenVal>(
+ JSContext* cx, HandleValue val, FieldType type, void* loc, bool mustWrite64,
+ CoercionLevel level);
+template bool wasm::ToJSValue<NoDebug>(JSContext* cx, const void* src,
+ FieldType type, MutableHandleValue dst,
+ CoercionLevel level);
+template bool wasm::ToJSValue<DebugCodegenVal>(JSContext* cx, const void* src,
+ FieldType type,
+ MutableHandleValue dst,
+ CoercionLevel level);
+template bool wasm::ToJSValueMayGC<NoDebug>(FieldType type);
+template bool wasm::ToJSValueMayGC<DebugCodegenVal>(FieldType type);
+
+template bool wasm::ToWebAssemblyValue<NoDebug>(JSContext* cx, HandleValue val,
+ ValType type, void* loc,
+ bool mustWrite64,
+ CoercionLevel level);
+template bool wasm::ToWebAssemblyValue<DebugCodegenVal>(JSContext* cx,
+ HandleValue val,
+ ValType type, void* loc,
+ bool mustWrite64,
+ CoercionLevel level);
+template bool wasm::ToJSValue<NoDebug>(JSContext* cx, const void* src,
+ ValType type, MutableHandleValue dst,
+ CoercionLevel level);
+template bool wasm::ToJSValue<DebugCodegenVal>(JSContext* cx, const void* src,
+ ValType type,
+ MutableHandleValue dst,
+ CoercionLevel level);
+template bool wasm::ToJSValueMayGC<NoDebug>(ValType type);
+template bool wasm::ToJSValueMayGC<DebugCodegenVal>(ValType type);
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_i8(JSContext* cx, HandleValue val, int8_t* loc) {
+ bool ok = ToInt8(cx, val, loc);
+ Debug::print(*loc);
+ return ok;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_i16(JSContext* cx, HandleValue val, int16_t* loc) {
+ bool ok = ToInt16(cx, val, loc);
+ Debug::print(*loc);
+ return ok;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_i32(JSContext* cx, HandleValue val, int32_t* loc,
+ bool mustWrite64) {
+ bool ok = ToInt32(cx, val, loc);
+ if (ok && mustWrite64) {
+#if defined(JS_CODEGEN_MIPS64)
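+ // MIPS64 keeps 32-bit values sign-extended in 64-bit slots, so replicate
+ // the sign bit of the low word into the high word.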
+ loc[1] = loc[0] >> 31;
+#else
+ loc[1] = 0;
+#endif
+ }
+ Debug::print(*loc);
+ return ok;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_i64(JSContext* cx, HandleValue val, int64_t* loc,
+ bool mustWrite64) {
+ MOZ_ASSERT(mustWrite64);
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, *loc, ToBigInt64(cx, val));
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_f32(JSContext* cx, HandleValue val, float* loc,
+ bool mustWrite64) {
+ bool ok = RoundFloat32(cx, val, loc);
+ if (ok && mustWrite64) {
+ loc[1] = 0.0;
+ }
+ Debug::print(*loc);
+ return ok;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_f64(JSContext* cx, HandleValue val, double* loc,
+ bool mustWrite64) {
+ MOZ_ASSERT(mustWrite64);
+ bool ok = ToNumber(cx, val, loc);
+ Debug::print(*loc);
+ return ok;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_externref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!BoxAnyRef(cx, val, &result)) {
+ return false;
+ }
+ loc[0] = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_nullexternref(JSContext* cx, HandleValue val,
+ void** loc, bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!CheckNullExternRefValue(cx, val, &result)) {
+ return false;
+ }
+ loc[0] = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_funcref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedFunction fun(cx);
+ if (!CheckFuncRefValue(cx, val, &fun)) {
+ return false;
+ }
+ loc[0] = fun;
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_nullfuncref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedFunction fun(cx);
+ if (!CheckNullFuncRefValue(cx, val, &fun)) {
+ return false;
+ }
+ loc[0] = fun;
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_anyref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!CheckAnyRefValue(cx, val, &result)) {
+ return false;
+ }
+ loc[0] = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_nullref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!CheckNullRefValue(cx, val, &result)) {
+ return false;
+ }
+ loc[0] = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_eqref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ return ToWebAssemblyValue_anyref(cx, val, loc, mustWrite64);
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_structref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!CheckStructRefValue(cx, val, &result)) {
+ return false;
+ }
+ loc[0] = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_arrayref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!CheckArrayRefValue(cx, val, &result)) {
+ return false;
+ }
+ loc[0] = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_typeref(JSContext* cx, const TypeDef* typeDef,
+ HandleValue val, void** loc, bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!CheckTypeRefValue(cx, typeDef, val, &result)) {
+ return false;
+ }
+ loc[0] = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+bool ToWebAssemblyValue_lossless(JSContext* cx, HandleValue val, ValType type,
+ void* loc, bool mustWrite64) {
+ if (!val.isObject() || !val.toObject().is<WasmGlobalObject>()) {
+ return false;
+ }
+ Rooted<WasmGlobalObject*> srcVal(cx, &val.toObject().as<WasmGlobalObject>());
+
+ if (srcVal->type() != type) {
+ return false;
+ }
+
+ srcVal->val().get().writeToRootedLocation(loc, mustWrite64);
+ return true;
+}
+
+template <typename Debug>
+bool wasm::ToWebAssemblyValue(JSContext* cx, HandleValue val, FieldType type,
+ void* loc, bool mustWrite64,
+ CoercionLevel level) {
+ if (level == CoercionLevel::Lossless &&
+ ToWebAssemblyValue_lossless(cx, val, type.valType(), (void*)loc,
+ mustWrite64)) {
+ return true;
+ }
+
+ switch (type.kind()) {
+ case FieldType::I8:
+ return ToWebAssemblyValue_i8<Debug>(cx, val, (int8_t*)loc);
+ case FieldType::I16:
+ return ToWebAssemblyValue_i16<Debug>(cx, val, (int16_t*)loc);
+ case FieldType::I32:
+ return ToWebAssemblyValue_i32<Debug>(cx, val, (int32_t*)loc, mustWrite64);
+ case FieldType::I64:
+ return ToWebAssemblyValue_i64<Debug>(cx, val, (int64_t*)loc, mustWrite64);
+ case FieldType::F32:
+ return ToWebAssemblyValue_f32<Debug>(cx, val, (float*)loc, mustWrite64);
+ case FieldType::F64:
+ return ToWebAssemblyValue_f64<Debug>(cx, val, (double*)loc, mustWrite64);
+ case FieldType::V128:
+ break;
+ case FieldType::Ref:
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ if (!type.isNullable() && val.isNull()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_REF_NONNULLABLE_VALUE);
+ return false;
+ }
+#else
+ MOZ_ASSERT(type.isNullable());
+#endif
+ switch (type.refTypeKind()) {
+ case RefType::Func:
+ return ToWebAssemblyValue_funcref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::Extern:
+ return ToWebAssemblyValue_externref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::Any:
+ return ToWebAssemblyValue_anyref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::NoFunc:
+ return ToWebAssemblyValue_nullfuncref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::NoExtern:
+ return ToWebAssemblyValue_nullexternref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::None:
+ return ToWebAssemblyValue_nullref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::Eq:
+ return ToWebAssemblyValue_eqref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::Struct:
+ return ToWebAssemblyValue_structref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::Array:
+ return ToWebAssemblyValue_arrayref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::TypeRef:
+ return ToWebAssemblyValue_typeref<Debug>(cx, type.typeDef(), val,
+ (void**)loc, mustWrite64);
+ }
+ }
+
+ MOZ_ASSERT(!type.isExposable());
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+}
+
+template <typename Debug>
+bool wasm::ToWebAssemblyValue(JSContext* cx, HandleValue val, ValType type,
+ void* loc, bool mustWrite64,
+ CoercionLevel level) {
+ return wasm::ToWebAssemblyValue(cx, val, FieldType(type.packed()), loc,
+ mustWrite64, level);
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_i8(JSContext* cx, int8_t src, MutableHandleValue dst) {
+ dst.set(Int32Value(src));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_i16(JSContext* cx, int16_t src, MutableHandleValue dst) {
+ dst.set(Int32Value(src));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_i32(JSContext* cx, int32_t src, MutableHandleValue dst) {
+ dst.set(Int32Value(src));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_i64(JSContext* cx, int64_t src, MutableHandleValue dst) {
+ // If bi were used for anything beyond the null test and storing it into
+ // dst, it would need to be rooted here.
+ BigInt* bi = BigInt::createFromInt64(cx, src);
+ if (!bi) {
+ return false;
+ }
+ dst.set(BigIntValue(bi));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_f32(JSContext* cx, float src, MutableHandleValue dst) {
+ dst.set(JS::CanonicalizedDoubleValue(src));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_f64(JSContext* cx, double src, MutableHandleValue dst) {
+ dst.set(JS::CanonicalizedDoubleValue(src));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_funcref(JSContext* cx, void* src, MutableHandleValue dst) {
+ dst.set(UnboxFuncRef(FuncRef::fromCompiledCode(src)));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_externref(JSContext* cx, void* src, MutableHandleValue dst) {
+ dst.set(UnboxAnyRef(AnyRef::fromCompiledCode(src)));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_anyref(JSContext* cx, void* src, MutableHandleValue dst) {
+ dst.set(UnboxAnyRef(AnyRef::fromCompiledCode(src)));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_lossless(JSContext* cx, const void* src, MutableHandleValue dst,
+ ValType type) {
+ RootedVal srcVal(cx);
+ srcVal.get().initFromRootedLocation(type, src);
+ RootedObject prototype(
+ cx, GlobalObject::getOrCreatePrototype(cx, JSProto_WasmGlobal));
+ if (!prototype) {
+ return false;
+ }
+ Rooted<WasmGlobalObject*> srcGlobal(
+ cx, WasmGlobalObject::create(cx, srcVal, false, prototype));
+ if (!srcGlobal) {
+ return false;
+ }
+ dst.set(ObjectValue(*srcGlobal.get()));
+ return true;
+}
+
+template <typename Debug>
+bool wasm::ToJSValue(JSContext* cx, const void* src, FieldType type,
+ MutableHandleValue dst, CoercionLevel level) {
+ if (level == CoercionLevel::Lossless) {
+ MOZ_ASSERT(type.isValType());
+ return ToJSValue_lossless(cx, src, dst, type.valType());
+ }
+
+ switch (type.kind()) {
+ case FieldType::I8:
+ return ToJSValue_i8<Debug>(cx, *reinterpret_cast<const int8_t*>(src),
+ dst);
+ case FieldType::I16:
+ return ToJSValue_i16<Debug>(cx, *reinterpret_cast<const int16_t*>(src),
+ dst);
+ case FieldType::I32:
+ return ToJSValue_i32<Debug>(cx, *reinterpret_cast<const int32_t*>(src),
+ dst);
+ case FieldType::I64:
+ return ToJSValue_i64<Debug>(cx, *reinterpret_cast<const int64_t*>(src),
+ dst);
+ case FieldType::F32:
+ return ToJSValue_f32<Debug>(cx, *reinterpret_cast<const float*>(src),
+ dst);
+ case FieldType::F64:
+ return ToJSValue_f64<Debug>(cx, *reinterpret_cast<const double*>(src),
+ dst);
+ case FieldType::V128:
+ break;
+ case FieldType::Ref:
+ switch (type.refType().hierarchy()) {
+ case RefTypeHierarchy::Func:
+ return ToJSValue_funcref<Debug>(
+ cx, *reinterpret_cast<void* const*>(src), dst);
+ case RefTypeHierarchy::Extern:
+ return ToJSValue_externref<Debug>(
+ cx, *reinterpret_cast<void* const*>(src), dst);
+ case RefTypeHierarchy::Any:
+ return ToJSValue_anyref<Debug>(
+ cx, *reinterpret_cast<void* const*>(src), dst);
+ break;
+ }
+ }
+ MOZ_ASSERT(!type.isExposable());
+ Debug::print(nullptr);
+ dst.setUndefined();
+ return true;
+}
+
+template <typename Debug>
+bool wasm::ToJSValueMayGC(FieldType type) {
+ return type.kind() == FieldType::I64;
+}
+
+template <typename Debug>
+bool wasm::ToJSValue(JSContext* cx, const void* src, ValType type,
+ MutableHandleValue dst, CoercionLevel level) {
+ return wasm::ToJSValue(cx, src, FieldType(type.packed()), dst, level);
+}
+
+template <typename Debug>
+bool wasm::ToJSValueMayGC(ValType type) {
+ return wasm::ToJSValueMayGC(FieldType(type.packed()));
+}
+
+void AnyRef::trace(JSTracer* trc) {
+ if (value_) {
+ TraceManuallyBarrieredEdge(trc, &value_, "wasm anyref referent");
+ }
+}
+
+const JSClass WasmValueBox::class_ = {
+ "WasmValueBox", JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS)};
+
+WasmValueBox* WasmValueBox::create(JSContext* cx, HandleValue val) {
+ WasmValueBox* obj = NewObjectWithGivenProto<WasmValueBox>(cx, nullptr);
+ if (!obj) {
+ return nullptr;
+ }
+ obj->setFixedSlot(VALUE_SLOT, val);
+ return obj;
+}
+
+bool wasm::BoxAnyRef(JSContext* cx, HandleValue val,
+ MutableHandleAnyRef result) {
+ if (val.isNull()) {
+ result.set(AnyRef::null());
+ return true;
+ }
+
+ if (val.isObject()) {
+ JSObject* obj = &val.toObject();
+ MOZ_ASSERT(!obj->is<WasmValueBox>());
+ MOZ_ASSERT(obj->compartment() == cx->compartment());
+ result.set(AnyRef::fromJSObject(obj));
+ return true;
+ }
+
+ WasmValueBox* box = WasmValueBox::create(cx, val);
+ if (!box) {
+ return false;
+ }
+ result.set(AnyRef::fromJSObject(box));
+ return true;
+}
+
+JSObject* wasm::BoxBoxableValue(JSContext* cx, HandleValue val) {
+ MOZ_ASSERT(!val.isNull() && !val.isObject());
+ return WasmValueBox::create(cx, val);
+}
+
+Value wasm::UnboxAnyRef(AnyRef val) {
+ // If UnboxAnyRef needs to allocate then we need a more complicated API, and
+ // we need to root the value in the callers; see comments in callExport().
+ JSObject* obj = val.asJSObject();
+ Value result;
+ if (obj == nullptr) {
+ result.setNull();
+ } else if (obj->is<WasmValueBox>()) {
+ result = obj->as<WasmValueBox>().value();
+ } else {
+ result.setObjectOrNull(obj);
+ }
+ return result;
+}
+
+/* static */
+wasm::FuncRef wasm::FuncRef::fromAnyRefUnchecked(AnyRef p) {
+#ifdef DEBUG
+ Value v = UnboxAnyRef(p);
+ if (v.isNull()) {
+ return FuncRef(nullptr);
+ }
+ if (v.toObject().is<JSFunction>()) {
+ return FuncRef(&v.toObject().as<JSFunction>());
+ }
+ MOZ_CRASH("Bad value");
+#else
+ return FuncRef(&p.asJSObject()->as<JSFunction>());
+#endif
+}
+
+void wasm::FuncRef::trace(JSTracer* trc) const {
+ if (value_) {
+ TraceManuallyBarrieredEdge(trc, &value_, "wasm funcref referent");
+ }
+}
+
+Value wasm::UnboxFuncRef(FuncRef val) {
+ JSFunction* fn = val.asJSFunction();
+ Value result;
+ MOZ_ASSERT_IF(fn, fn->is<JSFunction>());
+ result.setObjectOrNull(fn);
+ return result;
+}
diff --git a/js/src/wasm/WasmValue.h b/js/src/wasm/WasmValue.h
new file mode 100644
index 0000000000..26ff65e327
--- /dev/null
+++ b/js/src/wasm/WasmValue.h
@@ -0,0 +1,652 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2021 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_val_h
+#define wasm_val_h
+
+#include "js/Class.h" // JSClassOps, ClassSpec
+#include "vm/JSObject.h"
+#include "vm/NativeObject.h" // NativeObject
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmTypeDef.h"
+
+namespace js {
+namespace wasm {
+
+// A V128 value.
+
+struct V128 {
+ uint8_t bytes[16] = {}; // Little-endian
+
+ WASM_CHECK_CACHEABLE_POD(bytes);
+
+ V128() = default;
+
+ explicit V128(uint8_t splatValue) {
+ memset(bytes, int(splatValue), sizeof(bytes));
+ }
+
+ template <typename T>
+ void extractLane(unsigned lane, T* result) const {
+ MOZ_ASSERT(lane < 16 / sizeof(T));
+ memcpy(result, bytes + sizeof(T) * lane, sizeof(T));
+ }
+
+ template <typename T>
+ void insertLane(unsigned lane, T value) {
+ MOZ_ASSERT(lane < 16 / sizeof(T));
+ memcpy(bytes + sizeof(T) * lane, &value, sizeof(T));
+ }
+
+ bool operator==(const V128& rhs) const {
+ for (size_t i = 0; i < sizeof(bytes); i++) {
+ if (bytes[i] != rhs.bytes[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const V128& rhs) const { return !(*this == rhs); }
+};
+
+WASM_DECLARE_CACHEABLE_POD(V128);
+
+static_assert(sizeof(V128) == 16, "Invariant");
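+
+// Illustrative example of the lane accessors above: lane i of element type T
+// occupies bytes [i * sizeof(T), (i + 1) * sizeof(T)) of `bytes`.
+//
+//   V128 v;                          // all sixteen bytes are zero
+//   v.insertLane<uint32_t>(1, 42);   // writes bytes 4..7
+//   uint32_t lane;
+//   v.extractLane<uint32_t>(1, &lane);
+//   MOZ_ASSERT(lane == 42);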
+
+// An AnyRef is a boxed value that can represent any wasm reference type and any
+// host type that the host system allows to flow into and out of wasm
+// transparently. It is a pointer-sized datum that has the same representation
+// as all its subtypes (funcref, externref, eqref, (ref T), et al) due to the
+// non-coercive subtyping of the wasm type system. Its current representation
+// is a plain JSObject*, and the private JSObject subtype WasmValueBox is used
+// to box non-object non-null JS values.
+//
+// The C++/wasm boundary always uses a 'void*' type to express AnyRef values, to
+// emphasize the pointer-ness of the value. The C++ code must transform the
+// void* into an AnyRef by calling AnyRef::fromCompiledCode(), and transform an
+// AnyRef into a void* by calling AnyRef::forCompiledCode(). Once in C++, we
+// use AnyRef everywhere. A JS Value is transformed into an AnyRef by calling
+// BoxAnyRef(), and the AnyRef is transformed into a JS Value by calling
+// UnboxAnyRef().
+//
+// NOTE that AnyRef values may point to GC'd storage and as such need to be
+// rooted if they are kept live in boxed form across code that may cause GC!
+// Use RootedAnyRef / HandleAnyRef / MutableHandleAnyRef where necessary.
+//
+// The lowest bits of the pointer value are used for tagging, to allow for some
+// representation optimizations and to distinguish various types.
+
+// For version 0, we simply equate AnyRef and JSObject* (this means that there
+// are technically no tags at all yet). We use a simple boxing scheme that
+// wraps a JS value that is not already JSObject in a distinguishable JSObject
+// that holds the value; see WasmValue.cpp for details. Knowledge of this
+// mapping is embedded in CodeGenerator.cpp (in WasmBoxValue and
+// WasmAnyRefFromJSObject) and in WasmStubs.cpp (in functions Box* and Unbox*).
+
+class AnyRef {
+ // mutable so that tracing may access a JSObject* from a `const Val` or
+ // `const AnyRef`.
+ mutable JSObject* value_;
+
+ explicit AnyRef() : value_((JSObject*)-1) {}
+ explicit AnyRef(JSObject* p) : value_(p) {
+ MOZ_ASSERT(((uintptr_t)p & 0x03) == 0);
+ }
+
+ public:
+ // An invalid AnyRef cannot arise naturally from wasm and so can be used as
+ // a sentinel value to indicate failure from an AnyRef-returning function.
+ static AnyRef invalid() { return AnyRef(); }
+
+ // Given a void* that comes from compiled wasm code, turn it into AnyRef.
+ static AnyRef fromCompiledCode(void* p) { return AnyRef((JSObject*)p); }
+
+ // Given a JSObject* that comes from JS, turn it into AnyRef.
+ static AnyRef fromJSObject(JSObject* p) { return AnyRef(p); }
+
+ // Generate an AnyRef null pointer.
+ static AnyRef null() { return AnyRef(nullptr); }
+
+ bool isNull() const { return value_ == nullptr; }
+
+ bool operator==(const AnyRef& rhs) const {
+ return this->value_ == rhs.value_;
+ }
+
+ bool operator!=(const AnyRef& rhs) const { return !(*this == rhs); }
+
+ void* forCompiledCode() const { return value_; }
+
+ JSObject* asJSObject() const { return value_; }
+
+ JSObject** asJSObjectAddress() const { return &value_; }
+
+ void trace(JSTracer* trc);
+
+ // Tags (to be developed further)
+ static constexpr uintptr_t AnyRefTagMask = 1;
+ static constexpr uintptr_t AnyRefObjTag = 0;
+};
+
+using RootedAnyRef = Rooted<AnyRef>;
+using HandleAnyRef = Handle<AnyRef>;
+using MutableHandleAnyRef = MutableHandle<AnyRef>;
+
+// TODO/AnyRef-boxing: With boxed immediates and strings, these will be defined
+// as MOZ_CRASH or similar so that we can find all locations that need to be
+// fixed.
+
+#define ASSERT_ANYREF_IS_JSOBJECT (void)(0)
+#define STATIC_ASSERT_ANYREF_IS_JSOBJECT static_assert(1, "AnyRef is JSObject")
+
+// Given any JS value, box it as an AnyRef and store it in *result. Returns
+// false on OOM.
+
+bool BoxAnyRef(JSContext* cx, HandleValue val, MutableHandleAnyRef result);
+
+// Given a JS value that requires an object box, box it as an AnyRef and return
+// it, returning nullptr on OOM.
+//
+// Currently the values requiring a box are those other than JSObject* or
+// nullptr, but in the future more values will be represented without an
+// allocation.
+JSObject* BoxBoxableValue(JSContext* cx, HandleValue val);
+
+// Given any AnyRef, unbox it as a JS Value. If it is a reference to a wasm
+// object it will be reflected as a JSObject* representing some TypedObject
+// instance.
+
+Value UnboxAnyRef(AnyRef val);
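+
+// A minimal round-trip sketch (illustrative only; `cx` and `v` stand for a
+// JSContext* and a HandleValue supplied by the caller):
+//
+//   RootedAnyRef ref(cx, AnyRef::null());
+//   if (!BoxAnyRef(cx, v, &ref)) {
+//     return false;  // OOM
+//   }
+//   RootedValue unboxed(cx, UnboxAnyRef(ref.get()));
+//   // `unboxed` equals the original `v`; non-object, non-null inputs made
+//   // the trip through a freshly allocated WasmValueBox.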
+
+class WasmValueBox : public NativeObject {
+ static const unsigned VALUE_SLOT = 0;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const JSClass class_;
+
+ static WasmValueBox* create(JSContext* cx, HandleValue val);
+ Value value() const { return getFixedSlot(VALUE_SLOT); }
+ static size_t offsetOfValue() {
+ return NativeObject::getFixedSlotOffset(VALUE_SLOT);
+ }
+};
+
+// A FuncRef is a JSFunction* and is hence also an AnyRef, and the remarks above
+// about AnyRef apply also to FuncRef. When 'funcref' is used as a value type
+// in wasm code, the value that is held is "the canonical function value", which
+// is a function for which IsWasmExportedFunction() is true, and which has the
+// correct identity wrt reference equality of functions. Notably, if a function
+// is imported then its ref.func value compares === in JS to the function that
+// was passed as an import when the instance was created.
+//
+// These rules ensure that casts from funcref to anyref are non-converting
+// (generate no code), and that no wrapping or unwrapping needs to happen when a
+// funcref or anyref flows across the JS/wasm boundary, and that functions have
+// the necessary identity when observed from JS, and in the future, from wasm.
+//
+// Functions stored in tables, whether wasm tables or internal tables, can be
+// stored in a form that optimizes for eg call speed, however.
+//
+// Reading a funcref from a funcref table, writing a funcref to a funcref table,
+// and generating the value for a ref.func instruction are therefore nontrivial
+// operations that require mapping between the canonical JSFunction and the
+// optimized table representation. Once we get an instruction to call a
+// ref.func directly it too will require such a mapping.
+
+// In many cases, a FuncRef is exactly the same as AnyRef and we can use AnyRef
+// functionality on funcref values. The FuncRef class exists mostly to add more
+// checks and to make it clear, when we need to, that we're manipulating funcref
+// values. FuncRef does not currently subclass AnyRef because there's been no
+// need to, but it probably could.
+
+class FuncRef {
+ // mutable so that tracing may access a JSFunction* from a `const FuncRef`
+ mutable JSFunction* value_;
+
+ explicit FuncRef() : value_((JSFunction*)-1) {}
+ explicit FuncRef(JSFunction* p) : value_(p) {
+ MOZ_ASSERT(((uintptr_t)p & 0x03) == 0);
+ }
+
+ public:
+ // Given a void* that comes from compiled wasm code, turn it into FuncRef.
+ static FuncRef fromCompiledCode(void* p) { return FuncRef((JSFunction*)p); }
+
+ // Given a JSFunction* that comes from JS, turn it into FuncRef.
+ static FuncRef fromJSFunction(JSFunction* p) { return FuncRef(p); }
+
+ // Given an AnyRef that represents a possibly-null funcref, turn it into a
+ // FuncRef.
+ static FuncRef fromAnyRefUnchecked(AnyRef p);
+
+ AnyRef asAnyRef() { return AnyRef::fromJSObject((JSObject*)value_); }
+
+ void* forCompiledCode() const { return value_; }
+
+ JSFunction* asJSFunction() { return value_; }
+
+ bool isNull() const { return value_ == nullptr; }
+
+ void trace(JSTracer* trc) const;
+};
+
+using RootedFuncRef = Rooted<FuncRef>;
+using HandleFuncRef = Handle<FuncRef>;
+using MutableHandleFuncRef = MutableHandle<FuncRef>;
+
+// Given any FuncRef, unbox it as a JS Value -- always a JSFunction*.
+
+Value UnboxFuncRef(FuncRef val);
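+
+// A minimal usage sketch (illustrative only; `cx` and `v` stand for a
+// JSContext* and a HandleValue supplied by the caller):
+//
+//   RootedFunction fun(cx);
+//   if (!CheckFuncRefValue(cx, v, &fun)) {
+//     return false;  // neither null nor a wasm-exported function
+//   }
+//   FuncRef ref = FuncRef::fromJSFunction(fun);
+//   RootedValue unboxed(cx, UnboxFuncRef(ref));  // the same function, or null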
+
+// The LitVal class represents a single WebAssembly value of a given value
+// type, mostly for the purpose of numeric literals and initializers. A LitVal
+// does not directly map to a JS value since there is not (currently) a precise
+// representation of i64 values. A LitVal may contain non-canonical NaNs since,
+// within WebAssembly, floats are not canonicalized. Canonicalization must
+// happen at the JS boundary.
+
+class LitVal {
+ public:
+ union Cell {
+ uint32_t i32_;
+ uint64_t i64_;
+ float f32_;
+ double f64_;
+ wasm::V128 v128_;
+ wasm::AnyRef ref_;
+
+ Cell() : v128_() {}
+ ~Cell() = default;
+
+ WASM_CHECK_CACHEABLE_POD(i32_, i64_, f32_, f64_, v128_);
+ WASM_ALLOW_NON_CACHEABLE_POD_FIELD(
+ ref_,
+ "The pointer value in ref_ is guaranteed to always be null in a "
+ "LitVal.");
+ };
+
+ protected:
+ ValType type_;
+ Cell cell_;
+
+ public:
+ LitVal() : type_(ValType()), cell_{} {}
+
+ explicit LitVal(ValType type) : type_(type) {
+ switch (type.kind()) {
+ case ValType::Kind::I32: {
+ cell_.i32_ = 0;
+ break;
+ }
+ case ValType::Kind::I64: {
+ cell_.i64_ = 0;
+ break;
+ }
+ case ValType::Kind::F32: {
+ cell_.f32_ = 0;
+ break;
+ }
+ case ValType::Kind::F64: {
+ cell_.f64_ = 0;
+ break;
+ }
+ case ValType::Kind::V128: {
+ new (&cell_.v128_) V128();
+ break;
+ }
+ case ValType::Kind::Ref: {
+ cell_.ref_ = AnyRef::null();
+ break;
+ }
+ }
+ }
+
+ explicit LitVal(uint32_t i32) : type_(ValType::I32) { cell_.i32_ = i32; }
+ explicit LitVal(uint64_t i64) : type_(ValType::I64) { cell_.i64_ = i64; }
+
+ explicit LitVal(float f32) : type_(ValType::F32) { cell_.f32_ = f32; }
+ explicit LitVal(double f64) : type_(ValType::F64) { cell_.f64_ = f64; }
+
+ explicit LitVal(V128 v128) : type_(ValType::V128) { cell_.v128_ = v128; }
+
+ explicit LitVal(ValType type, AnyRef any) : type_(type) {
+ MOZ_ASSERT(type.isRefRepr());
+ MOZ_ASSERT(any.isNull(),
+ "use Val for non-nullptr ref types to get tracing");
+ cell_.ref_ = any;
+ }
+
+ ValType type() const { return type_; }
+ static constexpr size_t sizeofLargestValue() { return sizeof(cell_); }
+
+ Cell& cell() { return cell_; }
+ const Cell& cell() const { return cell_; }
+
+ uint32_t i32() const {
+ MOZ_ASSERT(type_ == ValType::I32);
+ return cell_.i32_;
+ }
+ uint64_t i64() const {
+ MOZ_ASSERT(type_ == ValType::I64);
+ return cell_.i64_;
+ }
+ const float& f32() const {
+ MOZ_ASSERT(type_ == ValType::F32);
+ return cell_.f32_;
+ }
+ const double& f64() const {
+ MOZ_ASSERT(type_ == ValType::F64);
+ return cell_.f64_;
+ }
+ AnyRef ref() const {
+ MOZ_ASSERT(type_.isRefRepr());
+ return cell_.ref_;
+ }
+ const V128& v128() const {
+ MOZ_ASSERT(type_ == ValType::V128);
+ return cell_.v128_;
+ }
+
+ WASM_DECLARE_FRIEND_SERIALIZE(LitVal);
+};
+
+WASM_DECLARE_CACHEABLE_POD(LitVal::Cell);
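+
+// Illustrative sketch (not part of this header): constructing and reading
+// numeric literals; each accessor asserts that the stored type matches.
+//
+//   LitVal a(uint32_t(7));          // i32
+//   LitVal b(uint64_t(1) << 40);    // i64
+//   LitVal c(0.5f);                 // f32
+//   MOZ_ASSERT(a.type() == ValType::I32 && a.i32() == 7);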
+
+// A Val is a LitVal that can contain (non-null) pointers to GC things. All Vals
+// must be used with the rooting APIs as they may contain JS objects.
+
+class MOZ_NON_PARAM Val : public LitVal {
+ public:
+ Val() : LitVal() {}
+ explicit Val(ValType type) : LitVal(type) {}
+ explicit Val(const LitVal& val);
+ explicit Val(uint32_t i32) : LitVal(i32) {}
+ explicit Val(uint64_t i64) : LitVal(i64) {}
+ explicit Val(float f32) : LitVal(f32) {}
+ explicit Val(double f64) : LitVal(f64) {}
+ explicit Val(V128 v128) : LitVal(v128) {}
+ explicit Val(ValType type, AnyRef val) : LitVal(type, AnyRef::null()) {
+ MOZ_ASSERT(type.isRefRepr());
+ cell_.ref_ = val;
+ }
+ explicit Val(ValType type, FuncRef val) : LitVal(type, AnyRef::null()) {
+ MOZ_ASSERT(type.refType().isFuncHierarchy());
+ cell_.ref_ = val.asAnyRef();
+ }
+
+ Val(const Val&) = default;
+ Val& operator=(const Val&) = default;
+
+ bool operator==(const Val& rhs) const {
+ if (type_ != rhs.type_) {
+ return false;
+ }
+ switch (type_.kind()) {
+ case ValType::I32:
+ return cell_.i32_ == rhs.cell_.i32_;
+ case ValType::I64:
+ return cell_.i64_ == rhs.cell_.i64_;
+ case ValType::F32:
+ return cell_.f32_ == rhs.cell_.f32_;
+ case ValType::F64:
+ return cell_.f64_ == rhs.cell_.f64_;
+ case ValType::V128:
+ return cell_.v128_ == rhs.cell_.v128_;
+ case ValType::Ref:
+ return cell_.ref_ == rhs.cell_.ref_;
+ }
+ MOZ_ASSERT_UNREACHABLE();
+ return false;
+ }
+ bool operator!=(const Val& rhs) const { return !(*this == rhs); }
+
+ bool isJSObject() const {
+ return type_.isValid() && type_.isRefRepr() && !cell_.ref_.isNull();
+ }
+
+ JSObject* asJSObject() const {
+ MOZ_ASSERT(isJSObject());
+ return cell_.ref_.asJSObject();
+ }
+
+ JSObject** asJSObjectAddress() const {
+ return cell_.ref_.asJSObjectAddress();
+ }
+
+ // Read from `loc` which is a rooted location and needs no barriers.
+ void readFromRootedLocation(const void* loc);
+
+ // Initialize from `loc` which is a rooted location and needs no barriers.
+ void initFromRootedLocation(ValType type, const void* loc);
+ void initFromHeapLocation(ValType type, const void* loc);
+
+ // Write to `loc` which is a rooted location and needs no barriers.
+ void writeToRootedLocation(void* loc, bool mustWrite64) const;
+
+ // Read from `loc` which is in the heap.
+ void readFromHeapLocation(const void* loc);
+ // Write to `loc` which is in the heap and must be barriered.
+ void writeToHeapLocation(void* loc) const;
+
+ // See the comment for `ToWebAssemblyValue` below.
+ static bool fromJSValue(JSContext* cx, ValType targetType, HandleValue val,
+ MutableHandle<Val> rval);
+ // See the comment for `ToJSValue` below.
+ bool toJSValue(JSContext* cx, MutableHandleValue rval) const;
+
+ void trace(JSTracer* trc) const;
+};
+
+using GCPtrVal = GCPtr<Val>;
+using RootedVal = Rooted<Val>;
+using HandleVal = Handle<Val>;
+using MutableHandleVal = MutableHandle<Val>;
+
+using ValVector = GCVector<Val, 0, SystemAllocPolicy>;
+using RootedValVector = Rooted<ValVector>;
+using HandleValVector = Handle<ValVector>;
+using MutableHandleValVector = MutableHandle<ValVector>;
+
+template <int N>
+using ValVectorN = GCVector<Val, N, SystemAllocPolicy>;
+template <int N>
+using RootedValVectorN = Rooted<ValVectorN<N>>;
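+
+// Illustrative sketch (not part of this header): coercing a JS value into a
+// rooted Val, assuming `cx` is a JSContext*, `v` is a HandleValue, and
+// `refType` is some reference-typed ValType:
+//
+//   RootedVal val(cx);
+//   if (!Val::fromJSValue(cx, refType, v, &val)) {
+//     return false;  // type check failed or OOM; an error has been set
+//   }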
+
+// Check a value against the given reference type. If the targetType
+// is RefType::Extern then the test always passes, but the value may be boxed.
+// If the test passes then the value is stored either in fnval (for
+// RefType::Func) or in refval (for other types); this split is not strictly
+// necessary but is convenient for the users of this function.
+//
+// This can return false if the type check fails, or if a boxing into AnyRef
+// throws an OOM.
+[[nodiscard]] extern bool CheckRefType(JSContext* cx, RefType targetType,
+ HandleValue v,
+ MutableHandleFunction fnval,
+ MutableHandleAnyRef refval);
+
+// The same as above for when the target type is 'funcref'.
+[[nodiscard]] extern bool CheckFuncRefValue(JSContext* cx, HandleValue v,
+ MutableHandleFunction fun);
+
+// The same as above for when the target type is 'anyref'.
+[[nodiscard]] extern bool CheckAnyRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp);
+
+// The same as above for when the target type is 'nullexternref'.
+[[nodiscard]] extern bool CheckNullExternRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp);
+
+// The same as above for when the target type is 'nullfuncref'.
+[[nodiscard]] extern bool CheckNullFuncRefValue(JSContext* cx, HandleValue v,
+ MutableHandleFunction fun);
+
+// The same as above for when the target type is 'nullref'.
+[[nodiscard]] extern bool CheckNullRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp);
+
+// The same as above for when the target type is 'eqref'.
+[[nodiscard]] extern bool CheckEqRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp);
+
+// The same as above for when the target type is 'structref'.
+[[nodiscard]] extern bool CheckStructRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp);
+
+// The same as above for when the target type is 'arrayref'.
+[[nodiscard]] extern bool CheckArrayRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp);
+
+// The same as above for when the target type is '(ref T)'.
+[[nodiscard]] extern bool CheckTypeRefValue(JSContext* cx,
+ const TypeDef* typeDef,
+ HandleValue v,
+ MutableHandleAnyRef vp);
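+
+// Illustrative sketch (not part of this header): a plausible use of
+// CheckRefType when a JS value is about to be stored into a funcref slot,
+// assuming `cx` is a JSContext*, `v` is a HandleValue, and the usual
+// RootedFunction alias and RefType::func() helper:
+//
+//   RootedFunction fun(cx);
+//   Rooted<AnyRef> any(cx, AnyRef::null());
+//   if (!CheckRefType(cx, RefType::func(), v, &fun, &any)) {
+//     return false;  // type check failed, or OOM while boxing into AnyRef
+//   }
+//   // For RefType::func() the result is in `fun`; other ref types fill `any`.
+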
+class NoDebug;
+class DebugCodegenVal;
+
+// The level of coercion to apply in `ToWebAssemblyValue` and `ToJSValue`.
+enum class CoercionLevel {
+ // The default coercions given by the JS-API specification.
+ Spec,
+ // Allow for the coercions given by `Spec` but also use WebAssembly.Global
+ // as a container for lossless conversions. This is only available through
+ // the wasmLosslessInvoke testing function and is used in tests.
+ Lossless,
+};
+
+// Coercion function from a JS value to a WebAssembly value [1].
+//
+// This function may fail for any of the following reasons:
+// * The input value has an incorrect type for the targetType
+// * The targetType is not exposable
+// * An OOM occurred
+// An error will be set upon failure.
+//
+// [1] https://webassembly.github.io/spec/js-api/index.html#towebassemblyvalue
+template <typename Debug = NoDebug>
+extern bool ToWebAssemblyValue(JSContext* cx, HandleValue val, FieldType type,
+ void* loc, bool mustWrite64,
+ CoercionLevel level = CoercionLevel::Spec);
+template <typename Debug = NoDebug>
+extern bool ToWebAssemblyValue(JSContext* cx, HandleValue val, ValType type,
+ void* loc, bool mustWrite64,
+ CoercionLevel level = CoercionLevel::Spec);
+
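+// Illustrative sketch (not part of this header): coercing a JS number into a
+// caller-owned i32 slot, assuming `cx` is a JSContext* and `v` is a
+// HandleValue:
+//
+//   uint32_t slot = 0;
+//   if (!ToWebAssemblyValue(cx, v, ValType::I32, &slot,
+//                           /* mustWrite64 = */ false)) {
+//     return false;  // wrong type, non-exposable type, or OOM
+//   }
+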
+// Coercion function from a WebAssembly value to a JS value [1].
+//
+// This function will only fail if an OOM occurred. If the type of WebAssembly
+// value being coerced is not exposable to JS, then it will be coerced to
+// 'undefined'. Callers are responsible for guarding against this if this is
+// not desirable.
+//
+// [1] https://webassembly.github.io/spec/js-api/index.html#tojsvalue
+template <typename Debug = NoDebug>
+extern bool ToJSValue(JSContext* cx, const void* src, FieldType type,
+ MutableHandleValue dst,
+ CoercionLevel level = CoercionLevel::Spec);
+template <typename Debug = NoDebug>
+extern bool ToJSValueMayGC(FieldType type);
+template <typename Debug = NoDebug>
+extern bool ToJSValue(JSContext* cx, const void* src, ValType type,
+ MutableHandleValue dst,
+ CoercionLevel level = CoercionLevel::Spec);
+template <typename Debug = NoDebug>
+extern bool ToJSValueMayGC(ValType type);
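+
+// Illustrative sketch (not part of this header): reading an i32 slot `slot`
+// (as in the ToWebAssemblyValue sketch above) back out as a JS value:
+//
+//   RootedValue out(cx);
+//   if (!ToJSValue(cx, &slot, ValType::I32, &out)) {
+//     return false;  // OOM
+//   }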
+} // namespace wasm
+
+template <>
+struct InternalBarrierMethods<wasm::Val> {
+ STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+
+ static bool isMarkable(const wasm::Val& v) { return v.isJSObject(); }
+
+ static void preBarrier(const wasm::Val& v) {
+ if (v.isJSObject()) {
+ gc::PreWriteBarrier(v.asJSObject());
+ }
+ }
+
+ static MOZ_ALWAYS_INLINE void postBarrier(wasm::Val* vp,
+ const wasm::Val& prev,
+ const wasm::Val& next) {
+ MOZ_RELEASE_ASSERT(!prev.type().isValid() || prev.type() == next.type());
+ JSObject* prevObj = prev.isJSObject() ? prev.asJSObject() : nullptr;
+ JSObject* nextObj = next.isJSObject() ? next.asJSObject() : nullptr;
+ if (nextObj) {
+ JSObject::postWriteBarrier(vp->asJSObjectAddress(), prevObj, nextObj);
+ }
+ }
+
+ static void readBarrier(const wasm::Val& v) {
+ if (v.isJSObject()) {
+ gc::ReadBarrier(v.asJSObject());
+ }
+ }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(const wasm::Val& v) {
+ if (v.isJSObject()) {
+ JS::AssertObjectIsNotGray(v.asJSObject());
+ }
+ }
+#endif
+};
+
+template <>
+struct InternalBarrierMethods<wasm::AnyRef> {
+ STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+
+ static bool isMarkable(const wasm::AnyRef v) { return !v.isNull(); }
+
+ static void preBarrier(const wasm::AnyRef v) {
+ if (!v.isNull()) {
+ gc::PreWriteBarrier(v.asJSObject());
+ }
+ }
+
+ static MOZ_ALWAYS_INLINE void postBarrier(wasm::AnyRef* vp,
+ const wasm::AnyRef prev,
+ const wasm::AnyRef next) {
+ JSObject* prevObj = !prev.isNull() ? prev.asJSObject() : nullptr;
+ JSObject* nextObj = !next.isNull() ? next.asJSObject() : nullptr;
+ if (nextObj) {
+ JSObject::postWriteBarrier(vp->asJSObjectAddress(), prevObj, nextObj);
+ }
+ }
+
+ static void readBarrier(const wasm::AnyRef v) {
+ if (!v.isNull()) {
+ gc::ReadBarrier(v.asJSObject());
+ }
+ }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(const wasm::AnyRef v) {
+ if (!v.isNull()) {
+ JS::AssertObjectIsNotGray(v.asJSObject());
+ }
+ }
+#endif
+};
+
+} // namespace js
+
+#endif // wasm_val_h
diff --git a/js/src/wasm/moz.build b/js/src/wasm/moz.build
new file mode 100644
index 0000000000..3899fbf41b
--- /dev/null
+++ b/js/src/wasm/moz.build
@@ -0,0 +1,66 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+FINAL_LIBRARY = "js"
+
+# Includes should be relative to parent path
+LOCAL_INCLUDES += ["!..", ".."]
+
+include("../js-config.mozbuild")
+include("../js-cxxflags.mozbuild")
+
+UNIFIED_SOURCES += [
+ "AsmJS.cpp",
+ "WasmBaselineCompile.cpp",
+ "WasmBCFrame.cpp",
+ "WasmBCMemory.cpp",
+ "WasmBinary.cpp",
+ "WasmBuiltins.cpp",
+ "WasmCode.cpp",
+ "WasmCodegenTypes.cpp",
+ "WasmCompile.cpp",
+ "WasmDebug.cpp",
+ "WasmDebugFrame.cpp",
+ "WasmFrameIter.cpp",
+ "WasmGC.cpp",
+ "WasmGcObject.cpp",
+ "WasmGenerator.cpp",
+ "WasmInitExpr.cpp",
+ "WasmInstance.cpp",
+ "WasmIntrinsic.cpp",
+ "WasmIonCompile.cpp",
+ "WasmJS.cpp",
+ "WasmLog.cpp",
+ "WasmMemory.cpp",
+ "WasmModule.cpp",
+ "WasmModuleTypes.cpp",
+ "WasmOpIter.cpp",
+ "WasmProcess.cpp",
+ "WasmRealm.cpp",
+ "WasmSerialize.cpp",
+ "WasmStubs.cpp",
+ "WasmTable.cpp",
+ "WasmTypeDef.cpp",
+ "WasmValidate.cpp",
+ "WasmValType.cpp",
+ "WasmValue.cpp",
+]
+
+# Generate wasm/WasmIntrinsicGenerated.h from wasm/WasmIntrinsic.yaml
+GeneratedFile(
+ "WasmIntrinsicGenerated.h",
+ script="GenerateIntrinsics.py",
+ inputs=["WasmIntrinsic.yaml"],
+)
+
+# We don't support signals for WASI yet.
+if CONFIG["OS_ARCH"] != "WASI":
+ UNIFIED_SOURCES += ["WasmSignalHandlers.cpp"]
+
+# Make sure all WebAssembly code is built with libfuzzer
+# coverage instrumentation in FUZZING mode.
+if CONFIG["FUZZING_INTERFACES"] and CONFIG["LIBFUZZER"]:
+ include("/tools/fuzzing/libfuzzer-config.mozbuild")