Diffstat (limited to '')
-rw-r--r--  js/src/wasm/AsmJS.cpp | 7275
-rw-r--r--  js/src/wasm/AsmJS.h | 112
-rw-r--r--  js/src/wasm/TypedObject-inl.h | 23
-rw-r--r--  js/src/wasm/TypedObject.cpp | 755
-rw-r--r--  js/src/wasm/TypedObject.h | 290
-rw-r--r--  js/src/wasm/WasmBaselineCompile.cpp | 15908
-rw-r--r--  js/src/wasm/WasmBaselineCompile.h | 103
-rw-r--r--  js/src/wasm/WasmBuiltins.cpp | 1576
-rw-r--r--  js/src/wasm/WasmBuiltins.h | 120
-rw-r--r--  js/src/wasm/WasmCode.cpp | 1510
-rw-r--r--  js/src/wasm/WasmCode.h | 767
-rw-r--r--  js/src/wasm/WasmCompile.cpp | 790
-rw-r--r--  js/src/wasm/WasmCompile.h | 145
-rw-r--r--  js/src/wasm/WasmConstants.h | 1008
-rw-r--r--  js/src/wasm/WasmContext.cpp | 38
-rw-r--r--  js/src/wasm/WasmContext.h | 62
-rw-r--r--  js/src/wasm/WasmCraneliftCompile.cpp | 768
-rw-r--r--  js/src/wasm/WasmCraneliftCompile.h | 57
-rw-r--r--  js/src/wasm/WasmDebug.cpp | 496
-rw-r--r--  js/src/wasm/WasmDebug.h | 158
-rw-r--r--  js/src/wasm/WasmFrameIter.cpp | 1539
-rw-r--r--  js/src/wasm/WasmFrameIter.h | 270
-rw-r--r--  js/src/wasm/WasmGC.cpp | 261
-rw-r--r--  js/src/wasm/WasmGC.h | 406
-rw-r--r--  js/src/wasm/WasmGenerator.cpp | 1362
-rw-r--r--  js/src/wasm/WasmGenerator.h | 270
-rw-r--r--  js/src/wasm/WasmInstance.cpp | 2099
-rw-r--r--  js/src/wasm/WasmInstance.h | 236
-rw-r--r--  js/src/wasm/WasmIonCompile.cpp | 5593
-rw-r--r--  js/src/wasm/WasmIonCompile.h | 43
-rw-r--r--  js/src/wasm/WasmJS.cpp | 4485
-rw-r--r--  js/src/wasm/WasmJS.h | 527
-rw-r--r--  js/src/wasm/WasmModule.cpp | 1360
-rw-r--r--  js/src/wasm/WasmModule.h | 257
-rw-r--r--  js/src/wasm/WasmOpIter.cpp | 702
-rw-r--r--  js/src/wasm/WasmOpIter.h | 2827
-rw-r--r--  js/src/wasm/WasmProcess.cpp | 407
-rw-r--r--  js/src/wasm/WasmProcess.h | 73
-rw-r--r--  js/src/wasm/WasmRealm.cpp | 142
-rw-r--r--  js/src/wasm/WasmRealm.h | 79
-rw-r--r--  js/src/wasm/WasmSerialize.h | 198
-rw-r--r--  js/src/wasm/WasmSignalHandlers.cpp | 1221
-rw-r--r--  js/src/wasm/WasmSignalHandlers.h | 65
-rw-r--r--  js/src/wasm/WasmStubs.cpp | 3037
-rw-r--r--  js/src/wasm/WasmStubs.h | 364
-rw-r--r--  js/src/wasm/WasmTable.cpp | 401
-rw-r--r--  js/src/wasm/WasmTable.h | 126
-rw-r--r--  js/src/wasm/WasmTypes.cpp | 1554
-rw-r--r--  js/src/wasm/WasmTypes.h | 4000
-rw-r--r--  js/src/wasm/WasmUtility.h | 23
-rw-r--r--  js/src/wasm/WasmValidate.cpp | 3382
-rw-r--r--  js/src/wasm/WasmValidate.h | 960
-rw-r--r--  js/src/wasm/cranelift/Cargo.toml | 37
-rw-r--r--  js/src/wasm/cranelift/baldrapi.h | 283
-rw-r--r--  js/src/wasm/cranelift/build.rs | 101
-rw-r--r--  js/src/wasm/cranelift/clifapi.h | 77
-rw-r--r--  js/src/wasm/cranelift/rustfmt.toml | 0
-rw-r--r--  js/src/wasm/cranelift/src/bindings/low_level.rs | 27
-rw-r--r--  js/src/wasm/cranelift/src/bindings/mod.rs | 528
-rw-r--r--  js/src/wasm/cranelift/src/compile.rs | 538
-rw-r--r--  js/src/wasm/cranelift/src/isa.rs | 253
-rw-r--r--  js/src/wasm/cranelift/src/lib.rs | 272
-rw-r--r--  js/src/wasm/cranelift/src/utils.rs | 55
-rw-r--r--  js/src/wasm/cranelift/src/wasm2clif.rs | 1433
-rw-r--r--  js/src/wasm/moz.build | 49
65 files changed, 73883 insertions(+), 0 deletions(-)
diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
new file mode 100644
index 0000000000..9c1d7f3edb
--- /dev/null
+++ b/js/src/wasm/AsmJS.cpp
@@ -0,0 +1,7275 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/AsmJS.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Compression.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/Sprintf.h" // SprintfLiteral
+#include "mozilla/Unused.h"
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+#include "mozilla/Variant.h"
+
+#include <algorithm>
+#include <new>
+
+#include "jsmath.h"
+
+#include "frontend/FunctionSyntaxKind.h" // FunctionSyntaxKind
+#include "frontend/ParseNode.h"
+#include "frontend/Parser.h"
+#include "frontend/ParserAtom.h"
+#include "frontend/SharedContext.h" // TopLevelFunction
+#include "gc/Policy.h"
+#include "js/BuildId.h" // JS::BuildIdCharVector
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/MemoryMetrics.h"
+#include "js/Printf.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/SourceText.h"
+#include "js/StableStringChars.h"
+#include "js/Wrapper.h"
+#include "util/DifferentialTesting.h"
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/ErrorReporting.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GeneratorAndAsyncKind.h" // js::GeneratorKind, js::FunctionAsyncKind
+#include "vm/SelfHosting.h"
+#include "vm/Time.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/Warnings.h" // js::WarnNumberASCII
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmValidate.h"
+
+#include "frontend/SharedContext-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::frontend;
+using namespace js::jit;
+using namespace js::wasm;
+
+using JS::AsmJSOption;
+using JS::AutoStableStringChars;
+using JS::GenericNaN;
+using JS::SourceOwnership;
+using JS::SourceText;
+using mozilla::Abs;
+using mozilla::AsVariant;
+using mozilla::CeilingLog2;
+using mozilla::HashGeneric;
+using mozilla::IsNaN;
+using mozilla::IsNegativeZero;
+using mozilla::IsPositiveZero;
+using mozilla::IsPowerOfTwo;
+using mozilla::PodZero;
+using mozilla::PositiveInfinity;
+using mozilla::Unused;
+using mozilla::Utf8Unit;
+using mozilla::Compression::LZ4;
+
+/*****************************************************************************/
+
+// The valid asm.js heap lengths are precisely the valid wasm heap lengths for
+// ARM that are greater than or equal to MinHeapLength.
+static const size_t MinHeapLength = PageSize;
+
+static uint64_t RoundUpToNextValidAsmJSHeapLength(uint64_t length) {
+ if (length <= MinHeapLength) {
+ return MinHeapLength;
+ }
+
+ return wasm::RoundUpToNextValidARMImmediate(length);
+}
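+
+// Illustrative examples (not part of the original source): any requested
+// length of at most MinHeapLength (one wasm page) yields MinHeapLength
+// itself; anything larger is rounded up by RoundUpToNextValidARMImmediate.
+//
+//   RoundUpToNextValidAsmJSHeapLength(1);              // == MinHeapLength
+//   RoundUpToNextValidAsmJSHeapLength(MinHeapLength);  // == MinHeapLength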
+
+/*****************************************************************************/
+// asm.js module object
+
+// The asm.js spec recognizes this set of builtin Math functions.
+enum AsmJSMathBuiltinFunction {
+ AsmJSMathBuiltin_sin,
+ AsmJSMathBuiltin_cos,
+ AsmJSMathBuiltin_tan,
+ AsmJSMathBuiltin_asin,
+ AsmJSMathBuiltin_acos,
+ AsmJSMathBuiltin_atan,
+ AsmJSMathBuiltin_ceil,
+ AsmJSMathBuiltin_floor,
+ AsmJSMathBuiltin_exp,
+ AsmJSMathBuiltin_log,
+ AsmJSMathBuiltin_pow,
+ AsmJSMathBuiltin_sqrt,
+ AsmJSMathBuiltin_abs,
+ AsmJSMathBuiltin_atan2,
+ AsmJSMathBuiltin_imul,
+ AsmJSMathBuiltin_fround,
+ AsmJSMathBuiltin_min,
+ AsmJSMathBuiltin_max,
+ AsmJSMathBuiltin_clz32
+};
+
+// LitValPOD is a restricted version of LitVal suitable for asm.js that is
+// always POD.
+
+struct LitValPOD {
+ PackedTypeCode valType_;
+ union U {
+ uint32_t u32_;
+ uint64_t u64_;
+ float f32_;
+ double f64_;
+ } u;
+
+ LitValPOD() = default;
+
+ explicit LitValPOD(uint32_t u32) : valType_(ValType(ValType::I32).packed()) {
+ u.u32_ = u32;
+ }
+ explicit LitValPOD(uint64_t u64) : valType_(ValType(ValType::I64).packed()) {
+ u.u64_ = u64;
+ }
+
+ explicit LitValPOD(float f32) : valType_(ValType(ValType::F32).packed()) {
+ u.f32_ = f32;
+ }
+ explicit LitValPOD(double f64) : valType_(ValType(ValType::F64).packed()) {
+ u.f64_ = f64;
+ }
+
+ LitVal asLitVal() const {
+ switch (UnpackTypeCodeType(valType_)) {
+ case TypeCode::I32:
+ return LitVal(u.u32_);
+ case TypeCode::I64:
+ return LitVal(u.u64_);
+ case TypeCode::F32:
+ return LitVal(u.f32_);
+ case TypeCode::F64:
+ return LitVal(u.f64_);
+ default:
+ MOZ_CRASH("Can't happen");
+ }
+ }
+};
+
+static_assert(std::is_pod_v<LitValPOD>,
+ "must be POD to be simply serialized/deserialized");
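+
+// Usage sketch (illustrative, not part of the original source): an i32
+// literal round-trips through LitValPOD as follows.
+//
+//   LitValPOD pod(uint32_t(42));   // valType_ holds the packed ValType::I32
+//   LitVal val = pod.asLitVal();   // reconstructs LitVal(uint32_t(42))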
+
+// An AsmJSGlobal represents a JS global variable in the asm.js module function.
+class AsmJSGlobal {
+ public:
+ enum Which {
+ Variable,
+ FFI,
+ ArrayView,
+ ArrayViewCtor,
+ MathBuiltinFunction,
+ Constant
+ };
+ enum VarInitKind { InitConstant, InitImport };
+ enum ConstantKind { GlobalConstant, MathConstant };
+
+ private:
+ struct CacheablePod {
+ Which which_;
+ union V {
+ struct {
+ VarInitKind initKind_;
+ union U {
+ PackedTypeCode importValType_;
+ LitValPOD val_;
+ } u;
+ } var;
+ uint32_t ffiIndex_;
+ Scalar::Type viewType_;
+ AsmJSMathBuiltinFunction mathBuiltinFunc_;
+ struct {
+ ConstantKind kind_;
+ double value_;
+ } constant;
+ } u;
+ } pod;
+ CacheableChars field_;
+
+ friend class ModuleValidatorShared;
+ template <typename Unit>
+ friend class ModuleValidator;
+
+ public:
+ AsmJSGlobal() = default;
+ AsmJSGlobal(Which which, UniqueChars field) {
+ mozilla::PodZero(&pod); // zero padding for Valgrind
+ pod.which_ = which;
+ field_ = std::move(field);
+ }
+ const char* field() const { return field_.get(); }
+ Which which() const { return pod.which_; }
+ VarInitKind varInitKind() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ return pod.u.var.initKind_;
+ }
+ LitValPOD varInitVal() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ MOZ_ASSERT(pod.u.var.initKind_ == InitConstant);
+ return pod.u.var.u.val_;
+ }
+ ValType varInitImportType() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ MOZ_ASSERT(pod.u.var.initKind_ == InitImport);
+ return ValType(pod.u.var.u.importValType_);
+ }
+ uint32_t ffiIndex() const {
+ MOZ_ASSERT(pod.which_ == FFI);
+ return pod.u.ffiIndex_;
+ }
+ // When a view is created from an imported constructor:
+ // var I32 = stdlib.Int32Array;
+ // var i32 = new I32(buffer);
+ // the second import has nothing to validate and thus has a null field.
+ Scalar::Type viewType() const {
+ MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == ArrayViewCtor);
+ return pod.u.viewType_;
+ }
+ AsmJSMathBuiltinFunction mathBuiltinFunction() const {
+ MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
+ return pod.u.mathBuiltinFunc_;
+ }
+ ConstantKind constantKind() const {
+ MOZ_ASSERT(pod.which_ == Constant);
+ return pod.u.constant.kind_;
+ }
+ double constantValue() const {
+ MOZ_ASSERT(pod.which_ == Constant);
+ return pod.u.constant.value_;
+ }
+};
+
+typedef Vector<AsmJSGlobal, 0, SystemAllocPolicy> AsmJSGlobalVector;
+
+// An AsmJSImport is slightly different from an asm.js FFI function: a single
+// asm.js FFI function can be called with many different signatures. When
+// compiled to wasm, each unique pairing of FFI function and signature
+// generates a separate wasm import.
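+//
+// For example (illustrative, not from the original source): a single FFI
+// function 'g' called both as 'g(x|0)|0' and as '+g(+y)' is recorded as one
+// FFI index but two AsmJSImports, and therefore two wasm imports, one per
+// distinct signature.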
+class AsmJSImport {
+ uint32_t ffiIndex_;
+
+ public:
+ AsmJSImport() = default;
+ explicit AsmJSImport(uint32_t ffiIndex) : ffiIndex_(ffiIndex) {}
+ uint32_t ffiIndex() const { return ffiIndex_; }
+};
+
+typedef Vector<AsmJSImport, 0, SystemAllocPolicy> AsmJSImportVector;
+
+// An AsmJSExport logically extends Export with the extra information needed
+// for an asm.js exported function, viz., the offsets into the module's source
+// chars in case the function is toString()ed.
+class AsmJSExport {
+ uint32_t funcIndex_ = 0;
+
+ // All fields are treated as cacheable POD:
+ uint32_t startOffsetInModule_ = 0; // Store module-start-relative offsets
+ uint32_t endOffsetInModule_ = 0; // so preserved by serialization.
+
+ public:
+ AsmJSExport() = default;
+ AsmJSExport(uint32_t funcIndex, uint32_t startOffsetInModule,
+ uint32_t endOffsetInModule)
+ : funcIndex_(funcIndex),
+ startOffsetInModule_(startOffsetInModule),
+ endOffsetInModule_(endOffsetInModule) {}
+ uint32_t funcIndex() const { return funcIndex_; }
+ uint32_t startOffsetInModule() const { return startOffsetInModule_; }
+ uint32_t endOffsetInModule() const { return endOffsetInModule_; }
+};
+
+typedef Vector<AsmJSExport, 0, SystemAllocPolicy> AsmJSExportVector;
+
+// Holds the immutable guts of an AsmJSModule.
+//
+// AsmJSMetadata is built incrementally by ModuleValidator and then shared
+// immutably between AsmJSModules.
+
+struct AsmJSMetadataCacheablePod {
+ uint32_t numFFIs = 0;
+ uint32_t srcLength = 0;
+ uint32_t srcLengthWithRightBrace = 0;
+
+ AsmJSMetadataCacheablePod() = default;
+};
+
+struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod {
+ AsmJSGlobalVector asmJSGlobals;
+ AsmJSImportVector asmJSImports;
+ AsmJSExportVector asmJSExports;
+ CacheableCharsVector asmJSFuncNames;
+ CacheableChars globalArgumentName;
+ CacheableChars importArgumentName;
+ CacheableChars bufferArgumentName;
+
+ // These values are not serialized since they are relative to the
+ // containing script which can be different between serialization and
+ // deserialization contexts. Thus, they must be set explicitly using the
+ // ambient Parser/ScriptSource after deserialization.
+ //
+ // srcStart refers to the offset in the ScriptSource to the beginning of
+ // the asm.js module function. If the function has been created with the
+ // Function constructor, this will be the first character in the function
+ // source. Otherwise, it will be the opening parenthesis of the arguments
+ // list.
+ uint32_t toStringStart;
+ uint32_t srcStart;
+ bool strict;
+ ScriptSourceHolder scriptSource;
+
+ uint32_t srcEndBeforeCurly() const { return srcStart + srcLength; }
+ uint32_t srcEndAfterCurly() const {
+ return srcStart + srcLengthWithRightBrace;
+ }
+
+ AsmJSMetadata()
+ : Metadata(ModuleKind::AsmJS),
+ toStringStart(0),
+ srcStart(0),
+ strict(false) {}
+ ~AsmJSMetadata() override = default;
+
+ const AsmJSExport& lookupAsmJSExport(uint32_t funcIndex) const {
+    // The AsmJSExportVector isn't stored in sorted order, so do a linear
+    // search. This is fine for the super-cold and already-expensive
+    // toString() path, since the number of exports is generally small.
+ for (const AsmJSExport& exp : asmJSExports) {
+ if (exp.funcIndex() == funcIndex) {
+ return exp;
+ }
+ }
+ MOZ_CRASH("missing asm.js func export");
+ }
+
+ bool mutedErrors() const override {
+ return scriptSource.get()->mutedErrors();
+ }
+ const char16_t* displayURL() const override {
+ return scriptSource.get()->hasDisplayURL()
+ ? scriptSource.get()->displayURL()
+ : nullptr;
+ }
+ ScriptSource* maybeScriptSource() const override {
+ return scriptSource.get();
+ }
+ bool getFuncName(NameContext ctx, uint32_t funcIndex,
+ UTF8Bytes* name) const override {
+ const char* p = asmJSFuncNames[funcIndex].get();
+ if (!p) {
+ return true;
+ }
+ return name->append(p, strlen(p));
+ }
+
+ AsmJSMetadataCacheablePod& pod() { return *this; }
+ const AsmJSMetadataCacheablePod& pod() const { return *this; }
+};
+
+using MutableAsmJSMetadata = RefPtr<AsmJSMetadata>;
+
+/*****************************************************************************/
+// ParseNode utilities
+
+static inline ParseNode* NextNode(ParseNode* pn) { return pn->pn_next; }
+
+static inline ParseNode* UnaryKid(ParseNode* pn) {
+ return pn->as<UnaryNode>().kid();
+}
+
+static inline ParseNode* BinaryRight(ParseNode* pn) {
+ return pn->as<BinaryNode>().right();
+}
+
+static inline ParseNode* BinaryLeft(ParseNode* pn) {
+ return pn->as<BinaryNode>().left();
+}
+
+static inline ParseNode* ReturnExpr(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::ReturnStmt));
+ return UnaryKid(pn);
+}
+
+static inline ParseNode* TernaryKid1(ParseNode* pn) {
+ return pn->as<TernaryNode>().kid1();
+}
+
+static inline ParseNode* TernaryKid2(ParseNode* pn) {
+ return pn->as<TernaryNode>().kid2();
+}
+
+static inline ParseNode* TernaryKid3(ParseNode* pn) {
+ return pn->as<TernaryNode>().kid3();
+}
+
+static inline ParseNode* ListHead(ParseNode* pn) {
+ return pn->as<ListNode>().head();
+}
+
+static inline unsigned ListLength(ParseNode* pn) {
+ return pn->as<ListNode>().count();
+}
+
+static inline ParseNode* CallCallee(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::CallExpr));
+ return BinaryLeft(pn);
+}
+
+static inline unsigned CallArgListLength(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::CallExpr));
+ return ListLength(BinaryRight(pn));
+}
+
+static inline ParseNode* CallArgList(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::CallExpr));
+ return ListHead(BinaryRight(pn));
+}
+
+static inline ParseNode* VarListHead(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::VarStmt) ||
+ pn->isKind(ParseNodeKind::ConstDecl));
+ return ListHead(pn);
+}
+
+static inline bool IsDefaultCase(ParseNode* pn) {
+ return pn->as<CaseClause>().isDefault();
+}
+
+static inline ParseNode* CaseExpr(ParseNode* pn) {
+ return pn->as<CaseClause>().caseExpression();
+}
+
+static inline ParseNode* CaseBody(ParseNode* pn) {
+ return pn->as<CaseClause>().statementList();
+}
+
+static inline ParseNode* BinaryOpLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isBinaryOperation());
+ MOZ_ASSERT(pn->as<ListNode>().count() == 2);
+ return ListHead(pn);
+}
+
+static inline ParseNode* BinaryOpRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isBinaryOperation());
+ MOZ_ASSERT(pn->as<ListNode>().count() == 2);
+ return NextNode(ListHead(pn));
+}
+
+static inline ParseNode* BitwiseLeft(ParseNode* pn) { return BinaryOpLeft(pn); }
+
+static inline ParseNode* BitwiseRight(ParseNode* pn) {
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* MultiplyLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::MulExpr));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* MultiplyRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::MulExpr));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* AddSubLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::AddExpr) ||
+ pn->isKind(ParseNodeKind::SubExpr));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* AddSubRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::AddExpr) ||
+ pn->isKind(ParseNodeKind::SubExpr));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* DivOrModLeft(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::DivExpr) ||
+ pn->isKind(ParseNodeKind::ModExpr));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* DivOrModRight(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::DivExpr) ||
+ pn->isKind(ParseNodeKind::ModExpr));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode* ComparisonLeft(ParseNode* pn) {
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode* ComparisonRight(ParseNode* pn) {
+ return BinaryOpRight(pn);
+}
+
+static inline bool IsExpressionStatement(ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::ExpressionStmt);
+}
+
+static inline ParseNode* ExpressionStatementExpr(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::ExpressionStmt));
+ return UnaryKid(pn);
+}
+
+static inline const ParserName* LoopControlMaybeLabel(ParseNode* pn) {
+ MOZ_ASSERT(pn->isKind(ParseNodeKind::BreakStmt) ||
+ pn->isKind(ParseNodeKind::ContinueStmt));
+ return pn->as<LoopControlStatement>().label();
+}
+
+static inline const ParserName* LabeledStatementLabel(ParseNode* pn) {
+ return pn->as<LabeledStatement>().label();
+}
+
+static inline ParseNode* LabeledStatementStatement(ParseNode* pn) {
+ return pn->as<LabeledStatement>().statement();
+}
+
+static double NumberNodeValue(ParseNode* pn) {
+ return pn->as<NumericLiteral>().value();
+}
+
+static bool NumberNodeHasFrac(ParseNode* pn) {
+ return pn->as<NumericLiteral>().decimalPoint() == HasDecimal;
+}
+
+static ParseNode* DotBase(ParseNode* pn) {
+ return &pn->as<PropertyAccess>().expression();
+}
+
+static const ParserName* DotMember(ParseNode* pn) {
+ return pn->as<PropertyAccess>().name();
+}
+
+static ParseNode* ElemBase(ParseNode* pn) {
+ return &pn->as<PropertyByValue>().expression();
+}
+
+static ParseNode* ElemIndex(ParseNode* pn) {
+ return &pn->as<PropertyByValue>().key();
+}
+
+static inline const ParserName* FunctionName(FunctionNode* funNode) {
+ if (const ParserAtom* name = funNode->funbox()->explicitName()) {
+ return name->asName();
+ }
+ return nullptr;
+}
+
+static inline ParseNode* FunctionStatementList(FunctionNode* funNode) {
+ MOZ_ASSERT(funNode->body()->isKind(ParseNodeKind::ParamsBody));
+ LexicalScopeNode* last =
+ &funNode->body()->as<ListNode>().last()->as<LexicalScopeNode>();
+ MOZ_ASSERT(last->isEmptyScope());
+ ParseNode* body = last->scopeBody();
+ MOZ_ASSERT(body->isKind(ParseNodeKind::StatementList));
+ return body;
+}
+
+static inline bool IsNormalObjectField(ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::PropertyDefinition) &&
+ pn->as<PropertyDefinition>().accessorType() == AccessorType::None &&
+ BinaryLeft(pn)->isKind(ParseNodeKind::ObjectPropertyName);
+}
+
+static inline const ParserName* ObjectNormalFieldName(ParseNode* pn) {
+ MOZ_ASSERT(IsNormalObjectField(pn));
+ MOZ_ASSERT(BinaryLeft(pn)->isKind(ParseNodeKind::ObjectPropertyName));
+ return BinaryLeft(pn)->as<NameNode>().atom()->asName();
+}
+
+static inline ParseNode* ObjectNormalFieldInitializer(ParseNode* pn) {
+ MOZ_ASSERT(IsNormalObjectField(pn));
+ return BinaryRight(pn);
+}
+
+static inline bool IsUseOfName(ParseNode* pn, const ParserName* name) {
+ return pn->isName(name);
+}
+
+static inline bool IsIgnoredDirectiveName(JSContext* cx,
+ const ParserAtom* atom) {
+ return atom != cx->parserNames().useStrict;
+}
+
+static inline bool IsIgnoredDirective(JSContext* cx, ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::ExpressionStmt) &&
+ UnaryKid(pn)->isKind(ParseNodeKind::StringExpr) &&
+ IsIgnoredDirectiveName(cx, UnaryKid(pn)->as<NameNode>().atom());
+}
+
+static inline bool IsEmptyStatement(ParseNode* pn) {
+ return pn->isKind(ParseNodeKind::EmptyStmt);
+}
+
+static inline ParseNode* SkipEmptyStatements(ParseNode* pn) {
+ while (pn && IsEmptyStatement(pn)) {
+ pn = pn->pn_next;
+ }
+ return pn;
+}
+
+static inline ParseNode* NextNonEmptyStatement(ParseNode* pn) {
+ return SkipEmptyStatements(pn->pn_next);
+}
+
+template <typename Unit>
+static bool GetToken(AsmJSParser<Unit>& parser, TokenKind* tkp) {
+ auto& ts = parser.tokenStream;
+ TokenKind tk;
+ while (true) {
+ if (!ts.getToken(&tk, TokenStreamShared::SlashIsRegExp)) {
+ return false;
+ }
+ if (tk != TokenKind::Semi) {
+ break;
+ }
+ }
+ *tkp = tk;
+ return true;
+}
+
+template <typename Unit>
+static bool PeekToken(AsmJSParser<Unit>& parser, TokenKind* tkp) {
+ auto& ts = parser.tokenStream;
+ TokenKind tk;
+ while (true) {
+ if (!ts.peekToken(&tk, TokenStream::SlashIsRegExp)) {
+ return false;
+ }
+ if (tk != TokenKind::Semi) {
+ break;
+ }
+ ts.consumeKnownToken(TokenKind::Semi, TokenStreamShared::SlashIsRegExp);
+ }
+ *tkp = tk;
+ return true;
+}
+
+template <typename Unit>
+static bool ParseVarOrConstStatement(AsmJSParser<Unit>& parser,
+ ParseNode** var) {
+ TokenKind tk;
+ if (!PeekToken(parser, &tk)) {
+ return false;
+ }
+ if (tk != TokenKind::Var && tk != TokenKind::Const) {
+ *var = nullptr;
+ return true;
+ }
+
+ *var = parser.statementListItem(YieldIsName);
+ if (!*var) {
+ return false;
+ }
+
+ MOZ_ASSERT((*var)->isKind(ParseNodeKind::VarStmt) ||
+ (*var)->isKind(ParseNodeKind::ConstDecl));
+ return true;
+}
+
+/*****************************************************************************/
+
+// Represents the type and value of an asm.js numeric literal.
+//
+// A literal is a double iff the literal contains a decimal point (even if the
+// fractional part is 0). Otherwise, integers may be classified:
+// fixnum: [0, 2^31)
+// negative int: [-2^31, 0)
+// big unsigned: [2^31, 2^32)
+// out of range: otherwise
+// Lastly, a literal may be a float literal, which is any double or integer
+// literal coerced with Math.fround.
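+//
+// For example (illustrative, not from the original source):
+//   3            classifies as fixnum
+//   -8           classifies as negative int
+//   0x80000000   classifies as big unsigned
+//   2.5          classifies as double (it has a decimal point)
+//   fround(1)    classifies as float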
+class NumLit {
+ public:
+ enum Which {
+ Fixnum,
+ NegativeInt,
+ BigUnsigned,
+ Double,
+ Float,
+ OutOfRangeInt = -1
+ };
+
+ private:
+ Which which_;
+ JS::Value value_;
+
+ public:
+ NumLit() = default;
+
+ NumLit(Which w, const Value& v) : which_(w), value_(v) {}
+
+ Which which() const { return which_; }
+
+ int32_t toInt32() const {
+ MOZ_ASSERT(which_ == Fixnum || which_ == NegativeInt ||
+ which_ == BigUnsigned);
+ return value_.toInt32();
+ }
+
+ uint32_t toUint32() const { return (uint32_t)toInt32(); }
+
+ double toDouble() const {
+ MOZ_ASSERT(which_ == Double);
+ return value_.toDouble();
+ }
+
+ float toFloat() const {
+ MOZ_ASSERT(which_ == Float);
+ return float(value_.toDouble());
+ }
+
+ Value scalarValue() const {
+ MOZ_ASSERT(which_ != OutOfRangeInt);
+ return value_;
+ }
+
+ bool valid() const { return which_ != OutOfRangeInt; }
+
+ bool isZeroBits() const {
+ MOZ_ASSERT(valid());
+ switch (which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return toInt32() == 0;
+ case NumLit::Double:
+ return IsPositiveZero(toDouble());
+ case NumLit::Float:
+ return IsPositiveZero(toFloat());
+ case NumLit::OutOfRangeInt:
+ MOZ_CRASH("can't be here because of valid() check above");
+ }
+ return false;
+ }
+
+ LitValPOD value() const {
+ switch (which_) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return LitValPOD(toUint32());
+ case NumLit::Float:
+ return LitValPOD(toFloat());
+ case NumLit::Double:
+ return LitValPOD(toDouble());
+ case NumLit::OutOfRangeInt:;
+ }
+ MOZ_CRASH("bad literal");
+ }
+};
+
+// Represents the type of a general asm.js expression.
+//
+// A canonical subset of types representing the coercion targets: Int, Float,
+// Double.
+//
+// Void is also part of the canonical subset.
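+//
+// The subtype relations checked by operator<= below form short chains
+// (illustrative summary derived from the predicates in this class):
+//   Fixnum <: Signed <: Int <: Intish       Fixnum <: Unsigned <: Int
+//   DoubleLit <: Double <: MaybeDouble      Float <: MaybeFloat <: Floatish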
+
+class Type {
+ public:
+ enum Which {
+ Fixnum = NumLit::Fixnum,
+ Signed = NumLit::NegativeInt,
+ Unsigned = NumLit::BigUnsigned,
+ DoubleLit = NumLit::Double,
+ Float = NumLit::Float,
+ Double,
+ MaybeDouble,
+ MaybeFloat,
+ Floatish,
+ Int,
+ Intish,
+ Void
+ };
+
+ private:
+ Which which_;
+
+ public:
+ Type() = default;
+ MOZ_IMPLICIT Type(Which w) : which_(w) {}
+
+ // Map an already canonicalized Type to the return type of a function call.
+ static Type ret(Type t) {
+ MOZ_ASSERT(t.isCanonical());
+ // The 32-bit external type is Signed, not Int.
+ return t.isInt() ? Signed : t;
+ }
+
+ static Type lit(const NumLit& lit) {
+ MOZ_ASSERT(lit.valid());
+ Which which = Type::Which(lit.which());
+ MOZ_ASSERT(which >= Fixnum && which <= Float);
+ Type t;
+ t.which_ = which;
+ return t;
+ }
+
+ // Map |t| to one of the canonical vartype representations of a
+ // wasm::ValType.
+ static Type canonicalize(Type t) {
+ switch (t.which()) {
+ case Fixnum:
+ case Signed:
+ case Unsigned:
+ case Int:
+ return Int;
+
+ case Float:
+ return Float;
+
+ case DoubleLit:
+ case Double:
+ return Double;
+
+ case Void:
+ return Void;
+
+ case MaybeDouble:
+ case MaybeFloat:
+ case Floatish:
+ case Intish:
+        // These types need some kind of coercion; they can't be mapped
+        // to a ValType.
+ break;
+ }
+ MOZ_CRASH("Invalid vartype");
+ }
+
+ Which which() const { return which_; }
+
+ bool operator==(Type rhs) const { return which_ == rhs.which_; }
+ bool operator!=(Type rhs) const { return which_ != rhs.which_; }
+
+ bool operator<=(Type rhs) const {
+ switch (rhs.which_) {
+ case Signed:
+ return isSigned();
+ case Unsigned:
+ return isUnsigned();
+ case DoubleLit:
+ return isDoubleLit();
+ case Double:
+ return isDouble();
+ case Float:
+ return isFloat();
+ case MaybeDouble:
+ return isMaybeDouble();
+ case MaybeFloat:
+ return isMaybeFloat();
+ case Floatish:
+ return isFloatish();
+ case Int:
+ return isInt();
+ case Intish:
+ return isIntish();
+ case Fixnum:
+ return isFixnum();
+ case Void:
+ return isVoid();
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected rhs type");
+ }
+
+ bool isFixnum() const { return which_ == Fixnum; }
+
+ bool isSigned() const { return which_ == Signed || which_ == Fixnum; }
+
+ bool isUnsigned() const { return which_ == Unsigned || which_ == Fixnum; }
+
+ bool isInt() const { return isSigned() || isUnsigned() || which_ == Int; }
+
+ bool isIntish() const { return isInt() || which_ == Intish; }
+
+ bool isDoubleLit() const { return which_ == DoubleLit; }
+
+ bool isDouble() const { return isDoubleLit() || which_ == Double; }
+
+ bool isMaybeDouble() const { return isDouble() || which_ == MaybeDouble; }
+
+ bool isFloat() const { return which_ == Float; }
+
+ bool isMaybeFloat() const { return isFloat() || which_ == MaybeFloat; }
+
+ bool isFloatish() const { return isMaybeFloat() || which_ == Floatish; }
+
+ bool isVoid() const { return which_ == Void; }
+
+ bool isExtern() const { return isDouble() || isSigned(); }
+
+ // Check if this is one of the valid types for a function argument.
+ bool isArgType() const { return isInt() || isFloat() || isDouble(); }
+
+ // Check if this is one of the valid types for a function return value.
+ bool isReturnType() const {
+ return isSigned() || isFloat() || isDouble() || isVoid();
+ }
+
+ // Check if this is one of the valid types for a global variable.
+ bool isGlobalVarType() const { return isArgType(); }
+
+ // Check if this is one of the canonical vartype representations of a
+ // wasm::ValType, or is void. See Type::canonicalize().
+ bool isCanonical() const {
+ switch (which()) {
+ case Int:
+ case Float:
+ case Double:
+ case Void:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ // Check if this is a canonical representation of a wasm::ValType.
+ bool isCanonicalValType() const { return !isVoid() && isCanonical(); }
+
+ // Convert this canonical type to a wasm::ValType.
+ ValType canonicalToValType() const {
+ switch (which()) {
+ case Int:
+ return ValType::I32;
+ case Float:
+ return ValType::F32;
+ case Double:
+ return ValType::F64;
+ default:
+ MOZ_CRASH("Need canonical type");
+ }
+ }
+
+ Maybe<ValType> canonicalToReturnType() const {
+ return isVoid() ? Nothing() : Some(canonicalToValType());
+ }
+
+ // Convert this type to a wasm::TypeCode for use in a wasm
+ // block signature. This works for all types, including non-canonical
+ // ones. Consequently, the type isn't valid for subsequent asm.js
+ // validation; it's only valid for use in producing wasm.
+ TypeCode toWasmBlockSignatureType() const {
+ switch (which()) {
+ case Fixnum:
+ case Signed:
+ case Unsigned:
+ case Int:
+ case Intish:
+ return TypeCode::I32;
+
+ case Float:
+ case MaybeFloat:
+ case Floatish:
+ return TypeCode::F32;
+
+ case DoubleLit:
+ case Double:
+ case MaybeDouble:
+ return TypeCode::F64;
+
+ case Void:
+ return TypeCode::BlockVoid;
+ }
+ MOZ_CRASH("Invalid Type");
+ }
+
+ const char* toChars() const {
+ switch (which_) {
+ case Double:
+ return "double";
+ case DoubleLit:
+ return "doublelit";
+ case MaybeDouble:
+ return "double?";
+ case Float:
+ return "float";
+ case Floatish:
+ return "floatish";
+ case MaybeFloat:
+ return "float?";
+ case Fixnum:
+ return "fixnum";
+ case Int:
+ return "int";
+ case Signed:
+ return "signed";
+ case Unsigned:
+ return "unsigned";
+ case Intish:
+ return "intish";
+ case Void:
+ return "void";
+ }
+ MOZ_CRASH("Invalid Type");
+ }
+};
+
+static const unsigned VALIDATION_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+
+class MOZ_STACK_CLASS ModuleValidatorShared {
+ public:
+ class Func {
+ const ParserName* name_;
+ uint32_t sigIndex_;
+ uint32_t firstUse_;
+ uint32_t funcDefIndex_;
+
+ bool defined_;
+
+ // Available when defined:
+ uint32_t srcBegin_;
+ uint32_t srcEnd_;
+ uint32_t line_;
+ Bytes bytes_;
+ Uint32Vector callSiteLineNums_;
+
+ public:
+ Func(const ParserName* name, uint32_t sigIndex, uint32_t firstUse,
+ uint32_t funcDefIndex)
+ : name_(name),
+ sigIndex_(sigIndex),
+ firstUse_(firstUse),
+ funcDefIndex_(funcDefIndex),
+ defined_(false),
+ srcBegin_(0),
+ srcEnd_(0),
+ line_(0) {}
+
+ const ParserName* name() const { return name_; }
+ uint32_t sigIndex() const { return sigIndex_; }
+ uint32_t firstUse() const { return firstUse_; }
+ bool defined() const { return defined_; }
+ uint32_t funcDefIndex() const { return funcDefIndex_; }
+
+ void define(ParseNode* fn, uint32_t line, Bytes&& bytes,
+ Uint32Vector&& callSiteLineNums) {
+ MOZ_ASSERT(!defined_);
+ defined_ = true;
+ srcBegin_ = fn->pn_pos.begin;
+ srcEnd_ = fn->pn_pos.end;
+ line_ = line;
+ bytes_ = std::move(bytes);
+ callSiteLineNums_ = std::move(callSiteLineNums);
+ }
+
+ uint32_t srcBegin() const {
+ MOZ_ASSERT(defined_);
+ return srcBegin_;
+ }
+ uint32_t srcEnd() const {
+ MOZ_ASSERT(defined_);
+ return srcEnd_;
+ }
+ uint32_t line() const {
+ MOZ_ASSERT(defined_);
+ return line_;
+ }
+ const Bytes& bytes() const {
+ MOZ_ASSERT(defined_);
+ return bytes_;
+ }
+ Uint32Vector& callSiteLineNums() {
+ MOZ_ASSERT(defined_);
+ return callSiteLineNums_;
+ }
+ };
+
+ using ConstFuncVector = Vector<const Func*>;
+ using FuncVector = Vector<Func>;
+
+ class Table {
+ uint32_t sigIndex_;
+ const ParserName* name_;
+ uint32_t firstUse_;
+ uint32_t mask_;
+ bool defined_;
+
+ Table(Table&& rhs) = delete;
+
+ public:
+ Table(uint32_t sigIndex, const ParserName* name, uint32_t firstUse,
+ uint32_t mask)
+ : sigIndex_(sigIndex),
+ name_(name),
+ firstUse_(firstUse),
+ mask_(mask),
+ defined_(false) {}
+
+ uint32_t sigIndex() const { return sigIndex_; }
+ const ParserName* name() const { return name_; }
+ uint32_t firstUse() const { return firstUse_; }
+ unsigned mask() const { return mask_; }
+ bool defined() const { return defined_; }
+ void define() {
+ MOZ_ASSERT(!defined_);
+ defined_ = true;
+ }
+ };
+
+ using TableVector = Vector<Table*>;
+
+ class Global {
+ public:
+ enum Which {
+ Variable,
+ ConstantLiteral,
+ ConstantImport,
+ Function,
+ Table,
+ FFI,
+ ArrayView,
+ ArrayViewCtor,
+ MathBuiltinFunction
+ };
+
+ private:
+ Which which_;
+ union U {
+ struct VarOrConst {
+ Type::Which type_;
+ unsigned index_;
+ NumLit literalValue_;
+
+ VarOrConst(unsigned index, const NumLit& lit)
+ : type_(Type::lit(lit).which()),
+ index_(index),
+ literalValue_(lit) // copies |lit|
+ {}
+
+ VarOrConst(unsigned index, Type::Which which)
+ : type_(which), index_(index) {
+ // The |literalValue_| field remains unused and
+ // uninitialized for non-constant variables.
+ }
+
+ explicit VarOrConst(double constant)
+ : type_(Type::Double),
+ literalValue_(NumLit::Double, DoubleValue(constant)) {
+ // The index_ field is unused and uninitialized for
+ // constant doubles.
+ }
+ } varOrConst;
+ uint32_t funcDefIndex_;
+ uint32_t tableIndex_;
+ uint32_t ffiIndex_;
+ Scalar::Type viewType_;
+ AsmJSMathBuiltinFunction mathBuiltinFunc_;
+
+ // |varOrConst|, through |varOrConst.literalValue_|, has a
+ // non-trivial constructor and therefore MUST be placement-new'd
+ // into existence.
+ MOZ_PUSH_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ U() : funcDefIndex_(0) {}
+ MOZ_POP_DISABLE_NONTRIVIAL_UNION_WARNINGS
+ } u;
+
+ friend class ModuleValidatorShared;
+ template <typename Unit>
+ friend class ModuleValidator;
+ friend class js::LifoAlloc;
+
+ explicit Global(Which which) : which_(which) {}
+
+ public:
+ Which which() const { return which_; }
+ Type varOrConstType() const {
+ MOZ_ASSERT(which_ == Variable || which_ == ConstantLiteral ||
+ which_ == ConstantImport);
+ return u.varOrConst.type_;
+ }
+ unsigned varOrConstIndex() const {
+ MOZ_ASSERT(which_ == Variable || which_ == ConstantImport);
+ return u.varOrConst.index_;
+ }
+ bool isConst() const {
+ return which_ == ConstantLiteral || which_ == ConstantImport;
+ }
+ NumLit constLiteralValue() const {
+ MOZ_ASSERT(which_ == ConstantLiteral);
+ return u.varOrConst.literalValue_;
+ }
+ uint32_t funcDefIndex() const {
+ MOZ_ASSERT(which_ == Function);
+ return u.funcDefIndex_;
+ }
+ uint32_t tableIndex() const {
+ MOZ_ASSERT(which_ == Table);
+ return u.tableIndex_;
+ }
+ unsigned ffiIndex() const {
+ MOZ_ASSERT(which_ == FFI);
+ return u.ffiIndex_;
+ }
+ Scalar::Type viewType() const {
+ MOZ_ASSERT(which_ == ArrayView || which_ == ArrayViewCtor);
+ return u.viewType_;
+ }
+ bool isMathFunction() const { return which_ == MathBuiltinFunction; }
+ AsmJSMathBuiltinFunction mathBuiltinFunction() const {
+ MOZ_ASSERT(which_ == MathBuiltinFunction);
+ return u.mathBuiltinFunc_;
+ }
+ };
+
+ struct MathBuiltin {
+ enum Kind { Function, Constant };
+ Kind kind;
+
+ union {
+ double cst;
+ AsmJSMathBuiltinFunction func;
+ } u;
+
+ MathBuiltin() : kind(Kind(-1)), u{} {}
+ explicit MathBuiltin(double cst) : kind(Constant) { u.cst = cst; }
+ explicit MathBuiltin(AsmJSMathBuiltinFunction func) : kind(Function) {
+ u.func = func;
+ }
+ };
+
+ struct ArrayView {
+ ArrayView(const ParserName* name, Scalar::Type type)
+ : name(name), type(type) {}
+
+ const ParserName* name;
+ Scalar::Type type;
+ };
+
+ protected:
+ class HashableSig {
+ uint32_t sigIndex_;
+ const TypeContext& types_;
+
+ public:
+ HashableSig(uint32_t sigIndex, const TypeContext& types)
+ : sigIndex_(sigIndex), types_(types) {}
+ uint32_t sigIndex() const { return sigIndex_; }
+ const FuncType& funcType() const { return types_[sigIndex_].funcType(); }
+
+ // Implement HashPolicy:
+ using Lookup = const FuncType&;
+ static HashNumber hash(Lookup l) { return l.hash(); }
+ static bool match(HashableSig lhs, Lookup rhs) {
+ return lhs.funcType() == rhs;
+ }
+ };
+
+ class NamedSig : public HashableSig {
+ const ParserName* name_;
+
+ public:
+ NamedSig(const ParserName* name, uint32_t sigIndex,
+ const TypeContext& types)
+ : HashableSig(sigIndex, types), name_(name) {}
+ const ParserName* name() const { return name_; }
+
+ // Implement HashPolicy:
+ struct Lookup {
+ const ParserName* name;
+ const FuncType& funcType;
+ Lookup(const ParserName* name, const FuncType& funcType)
+ : name(name), funcType(funcType) {}
+ };
+ static HashNumber hash(Lookup l) {
+ return HashGeneric(l.name, l.funcType.hash());
+ }
+ static bool match(NamedSig lhs, Lookup rhs) {
+ return lhs.name() == rhs.name && lhs.funcType() == rhs.funcType;
+ }
+ };
+
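+  // Note (illustrative, not from the original source): SigSet de-duplicates
+  // signatures across the module, while FuncImportMap maps each
+  // (FFI name, signature) pair to its wasm import index, giving one import
+  // per distinct pairing, as described for AsmJSImport above.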
+ using SigSet = HashSet<HashableSig, HashableSig>;
+ using FuncImportMap = HashMap<NamedSig, uint32_t, NamedSig>;
+ using GlobalMap = HashMap<const ParserName*, Global*>;
+ using MathNameMap = HashMap<const ParserName*, MathBuiltin>;
+ using ArrayViewVector = Vector<ArrayView>;
+
+ protected:
+ JSContext* cx_;
+ ParserAtomsTable& parserAtoms_;
+ FunctionNode* moduleFunctionNode_;
+ const ParserName* moduleFunctionName_;
+ const ParserName* globalArgumentName_ = nullptr;
+ const ParserName* importArgumentName_ = nullptr;
+ const ParserName* bufferArgumentName_ = nullptr;
+ MathNameMap standardLibraryMathNames_;
+
+ // Validation-internal state:
+ LifoAlloc validationLifo_;
+ FuncVector funcDefs_;
+ TableVector tables_;
+ GlobalMap globalMap_;
+ SigSet sigSet_;
+ FuncImportMap funcImportMap_;
+ ArrayViewVector arrayViews_;
+
+ // State used to build the AsmJSModule in finish():
+ CompilerEnvironment compilerEnv_;
+ ModuleEnvironment moduleEnv_;
+ MutableAsmJSMetadata asmJSMetadata_;
+
+ // Error reporting:
+ UniqueChars errorString_ = nullptr;
+ uint32_t errorOffset_ = UINT32_MAX;
+ bool errorOverRecursed_ = false;
+
+ protected:
+ ModuleValidatorShared(JSContext* cx, ParserAtomsTable& parserAtoms,
+ FunctionNode* moduleFunctionNode)
+ : cx_(cx),
+ parserAtoms_(parserAtoms),
+ moduleFunctionNode_(moduleFunctionNode),
+ moduleFunctionName_(FunctionName(moduleFunctionNode)),
+ standardLibraryMathNames_(cx),
+ validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
+ funcDefs_(cx),
+ tables_(cx),
+ globalMap_(cx),
+ sigSet_(cx),
+ funcImportMap_(cx),
+ arrayViews_(cx),
+ compilerEnv_(CompileMode::Once, Tier::Optimized, OptimizedBackend::Ion,
+ DebugEnabled::False),
+ moduleEnv_(FeatureArgs(), ModuleKind::AsmJS) {
+ compilerEnv_.computeParameters();
+ moduleEnv_.minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
+ }
+
+ protected:
+ [[nodiscard]] bool addStandardLibraryMathInfo() {
+ static constexpr struct {
+ const char* name;
+ AsmJSMathBuiltinFunction func;
+ } functions[] = {
+ {"sin", AsmJSMathBuiltin_sin}, {"cos", AsmJSMathBuiltin_cos},
+ {"tan", AsmJSMathBuiltin_tan}, {"asin", AsmJSMathBuiltin_asin},
+ {"acos", AsmJSMathBuiltin_acos}, {"atan", AsmJSMathBuiltin_atan},
+ {"ceil", AsmJSMathBuiltin_ceil}, {"floor", AsmJSMathBuiltin_floor},
+ {"exp", AsmJSMathBuiltin_exp}, {"log", AsmJSMathBuiltin_log},
+ {"pow", AsmJSMathBuiltin_pow}, {"sqrt", AsmJSMathBuiltin_sqrt},
+ {"abs", AsmJSMathBuiltin_abs}, {"atan2", AsmJSMathBuiltin_atan2},
+ {"imul", AsmJSMathBuiltin_imul}, {"clz32", AsmJSMathBuiltin_clz32},
+ {"fround", AsmJSMathBuiltin_fround}, {"min", AsmJSMathBuiltin_min},
+ {"max", AsmJSMathBuiltin_max},
+ };
+
+ auto AddMathFunction = [this](const char* name,
+ AsmJSMathBuiltinFunction func) {
+ const ParserAtom* atom =
+ parserAtoms_.internAscii(cx_, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ MathBuiltin builtin(func);
+ return this->standardLibraryMathNames_.putNew(atom->asName(), builtin);
+ };
+
+ for (const auto& info : functions) {
+ if (!AddMathFunction(info.name, info.func)) {
+ return false;
+ }
+ }
+
+ static constexpr struct {
+ const char* name;
+ double value;
+ } constants[] = {
+ {"E", M_E},
+ {"LN10", M_LN10},
+ {"LN2", M_LN2},
+ {"LOG2E", M_LOG2E},
+ {"LOG10E", M_LOG10E},
+ {"PI", M_PI},
+ {"SQRT1_2", M_SQRT1_2},
+ {"SQRT2", M_SQRT2},
+ };
+
+ auto AddMathConstant = [this](const char* name, double cst) {
+ const ParserAtom* atom =
+ parserAtoms_.internAscii(cx_, name, strlen(name));
+ if (!atom) {
+ return false;
+ }
+ MathBuiltin builtin(cst);
+ return this->standardLibraryMathNames_.putNew(atom->asName(), builtin);
+ };
+
+ for (const auto& info : constants) {
+ if (!AddMathConstant(info.name, info.value)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ public:
+ JSContext* cx() const { return cx_; }
+ const ParserName* moduleFunctionName() const { return moduleFunctionName_; }
+ const ParserName* globalArgumentName() const { return globalArgumentName_; }
+ const ParserName* importArgumentName() const { return importArgumentName_; }
+ const ParserName* bufferArgumentName() const { return bufferArgumentName_; }
+ const ModuleEnvironment& env() { return moduleEnv_; }
+
+ uint64_t minMemoryLength() const { return moduleEnv_.minMemoryLength; }
+
+ void initModuleFunctionName(const ParserName* name) {
+ MOZ_ASSERT(!moduleFunctionName_);
+ moduleFunctionName_ = name;
+ }
+ [[nodiscard]] bool initGlobalArgumentName(const ParserName* n) {
+ globalArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->globalArgumentName = ParserAtomToNewUTF8CharsZ(cx_, n);
+ if (!asmJSMetadata_->globalArgumentName) {
+ return false;
+ }
+ }
+ return true;
+ }
+ [[nodiscard]] bool initImportArgumentName(const ParserName* n) {
+ importArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->importArgumentName = ParserAtomToNewUTF8CharsZ(cx_, n);
+ if (!asmJSMetadata_->importArgumentName) {
+ return false;
+ }
+ }
+ return true;
+ }
+ [[nodiscard]] bool initBufferArgumentName(const ParserName* n) {
+ bufferArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->bufferArgumentName = ParserAtomToNewUTF8CharsZ(cx_, n);
+ if (!asmJSMetadata_->bufferArgumentName) {
+ return false;
+ }
+ }
+ return true;
+ }
+ bool addGlobalVarInit(const ParserName* var, const NumLit& lit, Type type,
+ bool isConst) {
+ MOZ_ASSERT(type.isGlobalVarType());
+ MOZ_ASSERT(type == Type::canonicalize(Type::lit(lit)));
+
+ uint32_t index = moduleEnv_.globals.length();
+ if (!moduleEnv_.globals.emplaceBack(type.canonicalToValType(), !isConst,
+ index, ModuleKind::AsmJS)) {
+ return false;
+ }
+
+ Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable;
+ Global* global = validationLifo_.new_<Global>(which);
+ if (!global) {
+ return false;
+ }
+ if (isConst) {
+ new (&global->u.varOrConst) Global::U::VarOrConst(index, lit);
+ } else {
+ new (&global->u.varOrConst) Global::U::VarOrConst(index, type.which());
+ }
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Variable, nullptr);
+ g.pod.u.var.initKind_ = AsmJSGlobal::InitConstant;
+ g.pod.u.var.u.val_ = lit.value();
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addGlobalVarImport(const ParserName* var, const ParserName* field,
+ Type type, bool isConst) {
+ MOZ_ASSERT(type.isGlobalVarType());
+
+ UniqueChars fieldChars = ParserAtomToNewUTF8CharsZ(cx_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ uint32_t index = moduleEnv_.globals.length();
+ ValType valType = type.canonicalToValType();
+ if (!moduleEnv_.globals.emplaceBack(valType, !isConst, index,
+ ModuleKind::AsmJS)) {
+ return false;
+ }
+
+ Global::Which which = isConst ? Global::ConstantImport : Global::Variable;
+ Global* global = validationLifo_.new_<Global>(which);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.varOrConst) Global::U::VarOrConst(index, type.which());
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Variable, std::move(fieldChars));
+ g.pod.u.var.initKind_ = AsmJSGlobal::InitImport;
+ g.pod.u.var.u.importValType_ = valType.packed();
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addArrayView(const ParserName* var, Scalar::Type vt,
+ const ParserName* maybeField) {
+ UniqueChars fieldChars;
+ if (maybeField) {
+ fieldChars = ParserAtomToNewUTF8CharsZ(cx_, maybeField);
+ if (!fieldChars) {
+ return false;
+ }
+ }
+
+ if (!arrayViews_.append(ArrayView(var, vt))) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::ArrayView);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.viewType_) Scalar::Type(vt);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::ArrayView, std::move(fieldChars));
+ g.pod.u.viewType_ = vt;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addMathBuiltinFunction(const ParserName* var,
+ AsmJSMathBuiltinFunction func,
+ const ParserName* field) {
+ UniqueChars fieldChars = ParserAtomToNewUTF8CharsZ(cx_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::MathBuiltinFunction);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.mathBuiltinFunc_) AsmJSMathBuiltinFunction(func);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::MathBuiltinFunction, std::move(fieldChars));
+ g.pod.u.mathBuiltinFunc_ = func;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+
+ private:
+ bool addGlobalDoubleConstant(const ParserName* var, double constant) {
+ Global* global = validationLifo_.new_<Global>(Global::ConstantLiteral);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.varOrConst) Global::U::VarOrConst(constant);
+ return globalMap_.putNew(var, global);
+ }
+
+ public:
+ bool addMathBuiltinConstant(const ParserName* var, double constant,
+ const ParserName* field) {
+ UniqueChars fieldChars = ParserAtomToNewUTF8CharsZ(cx_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ if (!addGlobalDoubleConstant(var, constant)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Constant, std::move(fieldChars));
+ g.pod.u.constant.value_ = constant;
+ g.pod.u.constant.kind_ = AsmJSGlobal::MathConstant;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addGlobalConstant(const ParserName* var, double constant,
+ const ParserName* field) {
+ UniqueChars fieldChars = ParserAtomToNewUTF8CharsZ(cx_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ if (!addGlobalDoubleConstant(var, constant)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::Constant, std::move(fieldChars));
+ g.pod.u.constant.value_ = constant;
+ g.pod.u.constant.kind_ = AsmJSGlobal::GlobalConstant;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addArrayViewCtor(const ParserName* var, Scalar::Type vt,
+ const ParserName* field) {
+ UniqueChars fieldChars = ParserAtomToNewUTF8CharsZ(cx_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.viewType_) Scalar::Type(vt);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::ArrayViewCtor, std::move(fieldChars));
+ g.pod.u.viewType_ = vt;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addFFI(const ParserName* var, const ParserName* field) {
+ UniqueChars fieldChars = ParserAtomToNewUTF8CharsZ(cx_, field);
+ if (!fieldChars) {
+ return false;
+ }
+
+ if (asmJSMetadata_->numFFIs == UINT32_MAX) {
+ return false;
+ }
+ uint32_t ffiIndex = asmJSMetadata_->numFFIs++;
+
+ Global* global = validationLifo_.new_<Global>(Global::FFI);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.ffiIndex_) uint32_t(ffiIndex);
+ if (!globalMap_.putNew(var, global)) {
+ return false;
+ }
+
+ AsmJSGlobal g(AsmJSGlobal::FFI, std::move(fieldChars));
+ g.pod.u.ffiIndex_ = ffiIndex;
+ return asmJSMetadata_->asmJSGlobals.append(std::move(g));
+ }
+ bool addExportField(const Func& func, const ParserName* maybeField) {
+ // Record the field name of this export.
+ CacheableChars fieldChars;
+ if (maybeField) {
+ fieldChars = ParserAtomToNewUTF8CharsZ(cx_, maybeField);
+ } else {
+ fieldChars = DuplicateString("");
+ }
+ if (!fieldChars) {
+ return false;
+ }
+
+    // Declare which function is exported, which gives us an index into the
+    // module's ExportVector.
+ uint32_t funcIndex = funcImportMap_.count() + func.funcDefIndex();
+ if (!moduleEnv_.exports.emplaceBack(std::move(fieldChars), funcIndex,
+ DefinitionKind::Function)) {
+ return false;
+ }
+
+    // The exported function might have already been exported, in which case
+    // the index will refer into the range of AsmJSExports.
+ return asmJSMetadata_->asmJSExports.emplaceBack(
+ funcIndex, func.srcBegin() - asmJSMetadata_->srcStart,
+ func.srcEnd() - asmJSMetadata_->srcStart);
+ }
+
+ bool defineFuncPtrTable(uint32_t tableIndex, Uint32Vector&& elems) {
+ Table& table = *tables_[tableIndex];
+ if (table.defined()) {
+ return false;
+ }
+
+ table.define();
+
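+    // In the generated wasm module, imported functions occupy the low end of
+    // the function index space and definitions follow them (see finish()), so
+    // the table entries, which were collected as function-definition indices,
+    // are shifted by the number of imports here.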
+ for (uint32_t& index : elems) {
+ index += funcImportMap_.count();
+ }
+
+ MutableElemSegment seg = js_new<ElemSegment>();
+ if (!seg) {
+ return false;
+ }
+ seg->elemType = RefType::func();
+ seg->tableIndex = tableIndex;
+ seg->offsetIfActive = Some(InitExpr::fromConstant(LitVal(uint32_t(0))));
+ seg->elemFuncIndices = std::move(elems);
+ return moduleEnv_.elemSegments.append(std::move(seg));
+ }
+
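+  // Grow the declared minimum heap length (rounded up to a valid asm.js heap
+  // length) so that it covers a heap access at the given constant range.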
+ bool tryConstantAccess(uint64_t start, uint64_t width) {
+ MOZ_ASSERT(UINT64_MAX - start > width);
+ uint64_t len = start + width;
+ if (len > uint64_t(INT32_MAX) + 1) {
+ return false;
+ }
+ len = RoundUpToNextValidAsmJSHeapLength(len);
+ if (len > moduleEnv_.minMemoryLength) {
+ moduleEnv_.minMemoryLength = len;
+ }
+ return true;
+ }
+
+ // Error handling.
+ bool hasAlreadyFailed() const { return !!errorString_; }
+
+ bool failOffset(uint32_t offset, const char* str) {
+ MOZ_ASSERT(!hasAlreadyFailed());
+ MOZ_ASSERT(errorOffset_ == UINT32_MAX);
+ MOZ_ASSERT(str);
+ errorOffset_ = offset;
+ errorString_ = DuplicateString(str);
+ return false;
+ }
+
+ bool fail(ParseNode* pn, const char* str) {
+ return failOffset(pn->pn_pos.begin, str);
+ }
+
+ bool failfVAOffset(uint32_t offset, const char* fmt, va_list ap)
+ MOZ_FORMAT_PRINTF(3, 0) {
+ MOZ_ASSERT(!hasAlreadyFailed());
+ MOZ_ASSERT(errorOffset_ == UINT32_MAX);
+ MOZ_ASSERT(fmt);
+ errorOffset_ = offset;
+ errorString_ = JS_vsmprintf(fmt, ap);
+ return false;
+ }
+
+ bool failfOffset(uint32_t offset, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ failfVAOffset(offset, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failf(ParseNode* pn, const char* fmt, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ failfVAOffset(pn->pn_pos.begin, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failNameOffset(uint32_t offset, const char* fmt,
+ const ParserName* name) {
+ // This function is invoked without the caller properly rooting its locals.
+ gc::AutoSuppressGC suppress(cx_);
+ if (UniqueChars bytes = ParserAtomToPrintableString(cx_, name)) {
+ failfOffset(offset, fmt, bytes.get());
+ }
+ return false;
+ }
+
+ bool failName(ParseNode* pn, const char* fmt, const ParserName* name) {
+ return failNameOffset(pn->pn_pos.begin, fmt, name);
+ }
+
+ bool failOverRecursed() {
+ errorOverRecursed_ = true;
+ return false;
+ }
+
+ unsigned numArrayViews() const { return arrayViews_.length(); }
+ const ArrayView& arrayView(unsigned i) const { return arrayViews_[i]; }
+ unsigned numFuncDefs() const { return funcDefs_.length(); }
+ const Func& funcDef(unsigned i) const { return funcDefs_[i]; }
+ unsigned numFuncPtrTables() const { return tables_.length(); }
+ Table& table(unsigned i) const { return *tables_[i]; }
+
+ const Global* lookupGlobal(const ParserName* name) const {
+ if (GlobalMap::Ptr p = globalMap_.lookup(name)) {
+ return p->value();
+ }
+ return nullptr;
+ }
+
+ Func* lookupFuncDef(const ParserName* name) {
+ if (GlobalMap::Ptr p = globalMap_.lookup(name)) {
+ Global* value = p->value();
+ if (value->which() == Global::Function) {
+ return &funcDefs_[value->funcDefIndex()];
+ }
+ }
+ return nullptr;
+ }
+
+ bool lookupStandardLibraryMathName(const ParserName* name,
+ MathBuiltin* mathBuiltin) const {
+ if (MathNameMap::Ptr p = standardLibraryMathNames_.lookup(name)) {
+ *mathBuiltin = p->value();
+ return true;
+ }
+ return false;
+ }
+
+ bool startFunctionBodies() {
+ if (!arrayViews_.empty()) {
+ moduleEnv_.memoryUsage = MemoryUsage::Unshared;
+ } else {
+ moduleEnv_.memoryUsage = MemoryUsage::None;
+ }
+ return true;
+ }
+};
+
+// The ModuleValidator encapsulates the entire validation of an asm.js module.
+// Its lifetime spans the validation of the top-level components of an asm.js
+// module (all the globals), the emission of bytecode for all the functions in
+// the module, and the validation of the function pointer tables. It also
+// finishes the compilation of all the module's stubs.
+template <typename Unit>
+class MOZ_STACK_CLASS ModuleValidator : public ModuleValidatorShared {
+ private:
+ AsmJSParser<Unit>& parser_;
+
+ public:
+ ModuleValidator(JSContext* cx, ParserAtomsTable& parserAtoms,
+ AsmJSParser<Unit>& parser, FunctionNode* moduleFunctionNode)
+ : ModuleValidatorShared(cx, parserAtoms, moduleFunctionNode),
+ parser_(parser) {}
+
+ ~ModuleValidator() {
+ if (errorString_) {
+ MOZ_ASSERT(errorOffset_ != UINT32_MAX);
+ typeFailure(errorOffset_, errorString_.get());
+ }
+ if (errorOverRecursed_) {
+ ReportOverRecursed(cx_);
+ }
+ }
+
+ private:
+ // Helpers:
+ bool newSig(FuncType&& sig, uint32_t* sigIndex) {
+ if (moduleEnv_.types.length() >= MaxTypes) {
+ return failCurrentOffset("too many signatures");
+ }
+
+ *sigIndex = moduleEnv_.types.length();
+ return moduleEnv_.types.append(std::move(sig)) &&
+ moduleEnv_.typeIds.append(TypeIdDesc());
+ }
+ bool declareSig(FuncType&& sig, uint32_t* sigIndex) {
+ SigSet::AddPtr p = sigSet_.lookupForAdd(sig);
+ if (p) {
+ *sigIndex = p->sigIndex();
+ MOZ_ASSERT(moduleEnv_.types.funcType(*sigIndex) == sig);
+ return true;
+ }
+
+ return newSig(std::move(sig), sigIndex) &&
+ sigSet_.add(p, HashableSig(*sigIndex, moduleEnv_.types));
+ }
+
+ private:
+ void typeFailure(uint32_t offset, ...) {
+ va_list args;
+ va_start(args, offset);
+
+ auto& ts = tokenStream();
+ ErrorMetadata metadata;
+ if (ts.computeErrorMetadata(&metadata, AsVariant(offset))) {
+ if (ts.anyCharsAccess().options().throwOnAsmJSValidationFailureOption) {
+ ReportCompileErrorLatin1(cx_, std::move(metadata), nullptr,
+ JSMSG_USE_ASM_TYPE_FAIL, &args);
+ } else {
+ // asm.js type failure is indicated by calling one of the fail*
+ // functions below. These functions always return false to
+        // halt asm.js parsing. Whether normal parsing is attempted as a
+        // fallback depends on whether an exception is also set.
+ //
+ // If warning succeeds, no exception is set. If warning fails,
+ // an exception is set and execution will halt. Thus it's safe
+ // and correct to ignore the return value here.
+ Unused << ts.compileWarning(std::move(metadata), nullptr,
+ JSMSG_USE_ASM_TYPE_FAIL, &args);
+ }
+ }
+
+ va_end(args);
+ }
+
+ public:
+ bool init() {
+ asmJSMetadata_ = cx_->new_<AsmJSMetadata>();
+ if (!asmJSMetadata_) {
+ return false;
+ }
+
+ asmJSMetadata_->toStringStart =
+ moduleFunctionNode_->funbox()->extent().toStringStart;
+ asmJSMetadata_->srcStart = moduleFunctionNode_->body()->pn_pos.begin;
+ asmJSMetadata_->strict = parser_.pc_->sc()->strict() &&
+ !parser_.pc_->sc()->hasExplicitUseStrict();
+ asmJSMetadata_->scriptSource.reset(parser_.ss);
+
+ if (!addStandardLibraryMathInfo()) {
+ return false;
+ }
+
+ return true;
+ }
+
+ AsmJSParser<Unit>& parser() const { return parser_; }
+
+ auto& tokenStream() const { return parser_.tokenStream; }
+
+ public:
+ bool addFuncDef(const ParserName* name, uint32_t firstUse, FuncType&& sig,
+ Func** func) {
+ uint32_t sigIndex;
+ if (!declareSig(std::move(sig), &sigIndex)) {
+ return false;
+ }
+
+ uint32_t funcDefIndex = funcDefs_.length();
+ if (funcDefIndex >= MaxFuncs) {
+ return failCurrentOffset("too many functions");
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::Function);
+ if (!global) {
+ return false;
+ }
+ new (&global->u.funcDefIndex_) uint32_t(funcDefIndex);
+ if (!globalMap_.putNew(name, global)) {
+ return false;
+ }
+ if (!funcDefs_.emplaceBack(name, sigIndex, firstUse, funcDefIndex)) {
+ return false;
+ }
+ *func = &funcDefs_.back();
+ return true;
+ }
+ bool declareFuncPtrTable(FuncType&& sig, const ParserName* name,
+ uint32_t firstUse, uint32_t mask,
+ uint32_t* tableIndex) {
+ if (mask > MaxTableLength) {
+ return failCurrentOffset("function pointer table too big");
+ }
+
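+ // asm.js function-pointer tables always have a power-of-two length: 'mask'
+ // is length - 1, so the table is created below with mask + 1 entries and
+ // each call site applies '& mask' to its index expression.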
+ MOZ_ASSERT(moduleEnv_.tables.length() == tables_.length());
+ *tableIndex = moduleEnv_.tables.length();
+
+ uint32_t sigIndex;
+ if (!newSig(std::move(sig), &sigIndex)) {
+ return false;
+ }
+
+ MOZ_ASSERT(sigIndex >= moduleEnv_.asmJSSigToTableIndex.length());
+ if (!moduleEnv_.asmJSSigToTableIndex.resize(sigIndex + 1)) {
+ return false;
+ }
+
+ moduleEnv_.asmJSSigToTableIndex[sigIndex] = moduleEnv_.tables.length();
+ if (!moduleEnv_.tables.emplaceBack(RefType::func(), mask + 1, Nothing(),
+ /*isAsmJS*/ true)) {
+ return false;
+ }
+
+ Global* global = validationLifo_.new_<Global>(Global::Table);
+ if (!global) {
+ return false;
+ }
+
+ new (&global->u.tableIndex_) uint32_t(*tableIndex);
+ if (!globalMap_.putNew(name, global)) {
+ return false;
+ }
+
+ Table* t = validationLifo_.new_<Table>(sigIndex, name, firstUse, mask);
+ return t && tables_.append(t);
+ }
+ bool declareImport(const ParserName* name, FuncType&& sig, unsigned ffiIndex,
+ uint32_t* importIndex) {
+ FuncImportMap::AddPtr p =
+ funcImportMap_.lookupForAdd(NamedSig::Lookup(name, sig));
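+ // Imports are deduplicated by (callee name, signature): calling the same
+ // FFI field with a different signature yields a distinct import index below.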
+ if (p) {
+ *importIndex = p->value();
+ return true;
+ }
+
+ *importIndex = funcImportMap_.count();
+ MOZ_ASSERT(*importIndex == asmJSMetadata_->asmJSImports.length());
+
+ if (*importIndex >= MaxImports) {
+ return failCurrentOffset("too many imports");
+ }
+
+ if (!asmJSMetadata_->asmJSImports.emplaceBack(ffiIndex)) {
+ return false;
+ }
+
+ uint32_t sigIndex;
+ if (!declareSig(std::move(sig), &sigIndex)) {
+ return false;
+ }
+
+ return funcImportMap_.add(p, NamedSig(name, sigIndex, moduleEnv_.types),
+ *importIndex);
+ }
+
+ // Error handling.
+ bool failCurrentOffset(const char* str) {
+ return failOffset(tokenStream().anyCharsAccess().currentToken().pos.begin,
+ str);
+ }
+
+ SharedModule finish() {
+ MOZ_ASSERT(moduleEnv_.funcs.empty());
+ if (!moduleEnv_.funcs.resize(funcImportMap_.count() + funcDefs_.length())) {
+ return nullptr;
+ }
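+ // The function index space is the imports (in insertion order) followed by
+ // the locally defined functions, so a definition's index is offset by
+ // funcImportMap_.count() below.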
+ for (FuncImportMap::Range r = funcImportMap_.all(); !r.empty();
+ r.popFront()) {
+ uint32_t funcIndex = r.front().value();
+ uint32_t funcTypeIndex = r.front().key().sigIndex();
+ MOZ_ASSERT(!moduleEnv_.funcs[funcIndex].type);
+ moduleEnv_.funcs[funcIndex] =
+ FuncDesc(&moduleEnv_.types.funcType(funcTypeIndex),
+ &moduleEnv_.typeIds[funcTypeIndex], funcTypeIndex);
+ }
+ for (const Func& func : funcDefs_) {
+ uint32_t funcIndex = funcImportMap_.count() + func.funcDefIndex();
+ uint32_t funcTypeIndex = func.sigIndex();
+ MOZ_ASSERT(!moduleEnv_.funcs[funcIndex].type);
+ moduleEnv_.funcs[funcIndex] =
+ FuncDesc(&moduleEnv_.types.funcType(funcTypeIndex),
+ &moduleEnv_.typeIds[funcTypeIndex], funcTypeIndex);
+ }
+
+ if (!moduleEnv_.funcImportGlobalDataOffsets.resize(
+ funcImportMap_.count())) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
+ if (!asmJSMetadata_->asmJSFuncNames.resize(funcImportMap_.count())) {
+ return nullptr;
+ }
+ for (const Func& func : funcDefs_) {
+ CacheableChars funcName = ParserAtomToNewUTF8CharsZ(cx_, func.name());
+ if (!funcName ||
+ !asmJSMetadata_->asmJSFuncNames.emplaceBack(std::move(funcName))) {
+ return nullptr;
+ }
+ }
+
+ uint32_t endBeforeCurly =
+ tokenStream().anyCharsAccess().currentToken().pos.end;
+ asmJSMetadata_->srcLength = endBeforeCurly - asmJSMetadata_->srcStart;
+
+ TokenPos pos;
+ MOZ_ALWAYS_TRUE(
+ tokenStream().peekTokenPos(&pos, TokenStreamShared::SlashIsRegExp));
+ uint32_t endAfterCurly = pos.end;
+ asmJSMetadata_->srcLengthWithRightBrace =
+ endAfterCurly - asmJSMetadata_->srcStart;
+
+ ScriptedCaller scriptedCaller;
+ if (parser_.ss->filename()) {
+ scriptedCaller.line = 0; // unused
+ scriptedCaller.filename = DuplicateString(parser_.ss->filename());
+ if (!scriptedCaller.filename) {
+ return nullptr;
+ }
+ }
+
+ SharedCompileArgs args = CompileArgs::build(cx_, std::move(scriptedCaller));
+ if (!args) {
+ return nullptr;
+ }
+
+ uint32_t codeSectionSize = 0;
+ for (const Func& func : funcDefs_) {
+ codeSectionSize += func.bytes().length();
+ }
+
+ moduleEnv_.codeSection.emplace();
+ moduleEnv_.codeSection->start = 0;
+ moduleEnv_.codeSection->size = codeSectionSize;
+
+ // asm.js does not have any wasm bytecode to save; view-source is
+ // provided through the ScriptSource.
+ SharedBytes bytes = cx_->new_<ShareableBytes>();
+ if (!bytes) {
+ return nullptr;
+ }
+
+ ModuleGenerator mg(*args, &moduleEnv_, &compilerEnv_, nullptr, nullptr);
+ if (!mg.init(asmJSMetadata_.get())) {
+ return nullptr;
+ }
+
+ for (Func& func : funcDefs_) {
+ if (!mg.compileFuncDef(funcImportMap_.count() + func.funcDefIndex(),
+ func.line(), func.bytes().begin(),
+ func.bytes().end(),
+ std::move(func.callSiteLineNums()))) {
+ return nullptr;
+ }
+ }
+
+ if (!mg.finishFuncDefs()) {
+ return nullptr;
+ }
+
+ return mg.finishModule(*bytes);
+ }
+};
+
+/*****************************************************************************/
+// Numeric literal utilities
+
+static bool IsNumericNonFloatLiteral(ParseNode* pn) {
+ // Note: '-' is never rolled into the number; numbers are always positive
+ // and negations must be applied manually.
+ return pn->isKind(ParseNodeKind::NumberExpr) ||
+ (pn->isKind(ParseNodeKind::NegExpr) &&
+ UnaryKid(pn)->isKind(ParseNodeKind::NumberExpr));
+}
+
+static bool IsCallToGlobal(ModuleValidatorShared& m, ParseNode* pn,
+ const ModuleValidatorShared::Global** global) {
+ if (!pn->isKind(ParseNodeKind::CallExpr)) {
+ return false;
+ }
+
+ ParseNode* callee = CallCallee(pn);
+ if (!callee->isKind(ParseNodeKind::Name)) {
+ return false;
+ }
+
+ *global = m.lookupGlobal(callee->as<NameNode>().name());
+ return !!*global;
+}
+
+static bool IsCoercionCall(ModuleValidatorShared& m, ParseNode* pn,
+ Type* coerceTo, ParseNode** coercedExpr) {
+ const ModuleValidatorShared::Global* global;
+ if (!IsCallToGlobal(m, pn, &global)) {
+ return false;
+ }
+
+ if (CallArgListLength(pn) != 1) {
+ return false;
+ }
+
+ if (coercedExpr) {
+ *coercedExpr = CallArgList(pn);
+ }
+
+ if (global->isMathFunction() &&
+ global->mathBuiltinFunction() == AsmJSMathBuiltin_fround) {
+ *coerceTo = Type::Float;
+ return true;
+ }
+
+ return false;
+}
+
+static bool IsFloatLiteral(ModuleValidatorShared& m, ParseNode* pn) {
+ ParseNode* coercedExpr;
+ Type coerceTo;
+ if (!IsCoercionCall(m, pn, &coerceTo, &coercedExpr)) {
+ return false;
+ }
+ // Don't fold into || to avoid clang/memcheck bug (bug 1077031).
+ if (!coerceTo.isFloat()) {
+ return false;
+ }
+ return IsNumericNonFloatLiteral(coercedExpr);
+}
+
+static bool IsNumericLiteral(ModuleValidatorShared& m, ParseNode* pn) {
+ return IsNumericNonFloatLiteral(pn) || IsFloatLiteral(m, pn);
+}
+
+// The JS grammar treats -42 as -(42) (i.e., with separate grammar productions
+// for the unary - and the literal 42). However, the asm.js spec recognizes
+// -42 (modulo parens, so -(42) and -((42))) as a single literal, so fold the
+// two potential parse nodes into a single double value.
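+// For example, the source text '-(42)' parses as NegExpr(NumberExpr(42));
+// this helper returns -42.0 and, when requested, points *out at the inner
+// NumberExpr node.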
+static double ExtractNumericNonFloatValue(ParseNode* pn,
+ ParseNode** out = nullptr) {
+ MOZ_ASSERT(IsNumericNonFloatLiteral(pn));
+
+ if (pn->isKind(ParseNodeKind::NegExpr)) {
+ pn = UnaryKid(pn);
+ if (out) {
+ *out = pn;
+ }
+ return -NumberNodeValue(pn);
+ }
+
+ return NumberNodeValue(pn);
+}
+
+static NumLit ExtractNumericLiteral(ModuleValidatorShared& m, ParseNode* pn) {
+ MOZ_ASSERT(IsNumericLiteral(m, pn));
+
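+ // Classification examples: 'fround(1)' -> Float, '0.5' and '-0' -> Double,
+ // '42' -> Fixnum, '0x80000000' -> BigUnsigned, '-1' -> NegativeInt, and
+ // anything outside [INT32_MIN, UINT32_MAX] -> OutOfRangeInt.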
+ if (pn->isKind(ParseNodeKind::CallExpr)) {
+ // Float literals are explicitly coerced and thus the coerced literal may be
+ // any valid (non-float) numeric literal.
+ MOZ_ASSERT(CallArgListLength(pn) == 1);
+ pn = CallArgList(pn);
+ double d = ExtractNumericNonFloatValue(pn);
+ return NumLit(NumLit::Float, DoubleValue(d));
+ }
+
+ double d = ExtractNumericNonFloatValue(pn, &pn);
+
+ // The asm.js spec syntactically distinguishes any literal containing a
+ // decimal point or the literal -0 as having double type.
+ if (NumberNodeHasFrac(pn) || IsNegativeZero(d)) {
+ return NumLit(NumLit::Double, DoubleValue(d));
+ }
+
+ // The syntactic checks above rule out these double values.
+ MOZ_ASSERT(!IsNegativeZero(d));
+ MOZ_ASSERT(!IsNaN(d));
+
+ // Although doubles can only *precisely* represent 53-bit integers, they
+ // can *imprecisely* represent integers much bigger than an int64_t.
+ // Furthermore, d may be inf or -inf. In both cases, casting to an int64_t
+ // is undefined, so test against the integer bounds using doubles.
+ if (d < double(INT32_MIN) || d > double(UINT32_MAX)) {
+ return NumLit(NumLit::OutOfRangeInt, UndefinedValue());
+ }
+
+ // With the above syntactic and range limitations, d is definitely an
+ // integer in the range [INT32_MIN, UINT32_MAX].
+ int64_t i64 = int64_t(d);
+ if (i64 >= 0) {
+ if (i64 <= INT32_MAX) {
+ return NumLit(NumLit::Fixnum, Int32Value(i64));
+ }
+ MOZ_ASSERT(i64 <= UINT32_MAX);
+ return NumLit(NumLit::BigUnsigned, Int32Value(uint32_t(i64)));
+ }
+ MOZ_ASSERT(i64 >= INT32_MIN);
+ return NumLit(NumLit::NegativeInt, Int32Value(i64));
+}
+
+static inline bool IsLiteralInt(const NumLit& lit, uint32_t* u32) {
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::BigUnsigned:
+ case NumLit::NegativeInt:
+ *u32 = lit.toUint32();
+ return true;
+ case NumLit::Double:
+ case NumLit::Float:
+ case NumLit::OutOfRangeInt:
+ return false;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad literal type");
+}
+
+static inline bool IsLiteralInt(ModuleValidatorShared& m, ParseNode* pn,
+ uint32_t* u32) {
+ return IsNumericLiteral(m, pn) &&
+ IsLiteralInt(ExtractNumericLiteral(m, pn), u32);
+}
+
+/*****************************************************************************/
+
+namespace {
+
+typedef Vector<const ParserName*, 4, SystemAllocPolicy> LabelVector;
+
+class MOZ_STACK_CLASS FunctionValidatorShared {
+ public:
+ struct Local {
+ Type type;
+ unsigned slot;
+ Local(Type t, unsigned slot) : type(t), slot(slot) {
+ MOZ_ASSERT(type.isCanonicalValType());
+ }
+ };
+
+ protected:
+ using LocalMap = HashMap<const ParserName*, Local>;
+ using LabelMap = HashMap<const ParserName*, uint32_t>;
+
+ // This is also a ModuleValidator<Unit>& after the appropriate static_cast<>.
+ ModuleValidatorShared& m_;
+
+ ParseNode* fn_;
+ Bytes bytes_;
+ Encoder encoder_;
+ Uint32Vector callSiteLineNums_;
+ LocalMap locals_;
+
+ // Labels
+ LabelMap breakLabels_;
+ LabelMap continueLabels_;
+ Uint32Vector breakableStack_;
+ Uint32Vector continuableStack_;
+ uint32_t blockDepth_;
+
+ bool hasAlreadyReturned_;
+ Maybe<ValType> ret_;
+
+ private:
+ FunctionValidatorShared(ModuleValidatorShared& m, ParseNode* fn,
+ JSContext* cx)
+ : m_(m),
+ fn_(fn),
+ encoder_(bytes_),
+ locals_(cx),
+ breakLabels_(cx),
+ continueLabels_(cx),
+ blockDepth_(0),
+ hasAlreadyReturned_(false) {}
+
+ protected:
+ template <typename Unit>
+ FunctionValidatorShared(ModuleValidator<Unit>& m, ParseNode* fn,
+ JSContext* cx)
+ : FunctionValidatorShared(static_cast<ModuleValidatorShared&>(m), fn,
+ cx) {}
+
+ public:
+ ModuleValidatorShared& m() const { return m_; }
+
+ JSContext* cx() const { return m_.cx(); }
+ ParseNode* fn() const { return fn_; }
+
+ void define(ModuleValidatorShared::Func* func, unsigned line) {
+ MOZ_ASSERT(!blockDepth_);
+ MOZ_ASSERT(breakableStack_.empty());
+ MOZ_ASSERT(continuableStack_.empty());
+ MOZ_ASSERT(breakLabels_.empty());
+ MOZ_ASSERT(continueLabels_.empty());
+ func->define(fn_, line, std::move(bytes_), std::move(callSiteLineNums_));
+ }
+
+ bool fail(ParseNode* pn, const char* str) { return m_.fail(pn, str); }
+
+ bool failf(ParseNode* pn, const char* fmt, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ m_.failfVAOffset(pn->pn_pos.begin, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failName(ParseNode* pn, const char* fmt, const ParserName* name) {
+ return m_.failName(pn, fmt, name);
+ }
+
+ /***************************************************** Local scope setup */
+
+ bool addLocal(ParseNode* pn, const ParserName* name, Type type) {
+ LocalMap::AddPtr p = locals_.lookupForAdd(name);
+ if (p) {
+ return failName(pn, "duplicate local name '%s' not allowed", name);
+ }
+ return locals_.add(p, name, Local(type, locals_.count()));
+ }
+
+ /****************************** For consistency of returns in a function */
+
+ bool hasAlreadyReturned() const { return hasAlreadyReturned_; }
+
+ Maybe<ValType> returnedType() const { return ret_; }
+
+ void setReturnedType(const Maybe<ValType>& ret) {
+ MOZ_ASSERT(!hasAlreadyReturned_);
+ ret_ = ret;
+ hasAlreadyReturned_ = true;
+ }
+
+ /**************************************************************** Labels */
+ private:
+ bool writeBr(uint32_t absolute, Op op = Op::Br) {
+ MOZ_ASSERT(op == Op::Br || op == Op::BrIf);
+ MOZ_ASSERT(absolute < blockDepth_);
+ return encoder().writeOp(op) &&
+ encoder().writeVarU32(blockDepth_ - 1 - absolute);
+ }
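+ // Label bookkeeping records absolute block depths (0 = outermost); the
+ // encoding above converts to wasm's relative depth, e.g. with blockDepth_
+ // == 3 a branch to absolute depth 0 is emitted as relative depth 2.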
+ void removeLabel(const ParserName* label, LabelMap* map) {
+ LabelMap::Ptr p = map->lookup(label);
+ MOZ_ASSERT(p);
+ map->remove(p);
+ }
+
+ public:
+ bool pushBreakableBlock() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ breakableStack_.append(blockDepth_++);
+ }
+ bool popBreakableBlock() {
+ MOZ_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End);
+ }
+
+ bool pushUnbreakableBlock(const LabelVector* labels = nullptr) {
+ if (labels) {
+ for (const ParserName* label : *labels) {
+ if (!breakLabels_.putNew(label, blockDepth_)) {
+ return false;
+ }
+ }
+ }
+ blockDepth_++;
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid));
+ }
+ bool popUnbreakableBlock(const LabelVector* labels = nullptr) {
+ if (labels) {
+ for (const ParserName* label : *labels) {
+ removeLabel(label, &breakLabels_);
+ }
+ }
+ --blockDepth_;
+ return encoder().writeOp(Op::End);
+ }
+
+ bool pushContinuableBlock() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ continuableStack_.append(blockDepth_++);
+ }
+ bool popContinuableBlock() {
+ MOZ_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End);
+ }
+
+ bool pushLoop() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ encoder().writeOp(Op::Loop) &&
+ encoder().writeFixedU8(uint8_t(TypeCode::BlockVoid)) &&
+ breakableStack_.append(blockDepth_++) &&
+ continuableStack_.append(blockDepth_++);
+ }
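+ // Note that pushLoop reserves two depths: the outer Block is the target for
+ // 'break' (branching past the loop) while the inner Loop is the target for
+ // 'continue' (branching back to the loop header).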
+ bool popLoop() {
+ MOZ_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_);
+ MOZ_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End) && encoder().writeOp(Op::End);
+ }
+
+ bool pushIf(size_t* typeAt) {
+ ++blockDepth_;
+ return encoder().writeOp(Op::If) && encoder().writePatchableFixedU7(typeAt);
+ }
+ bool switchToElse() {
+ MOZ_ASSERT(blockDepth_ > 0);
+ return encoder().writeOp(Op::Else);
+ }
+ void setIfType(size_t typeAt, TypeCode type) {
+ encoder().patchFixedU7(typeAt, uint8_t(type));
+ }
+ bool popIf() {
+ MOZ_ASSERT(blockDepth_ > 0);
+ --blockDepth_;
+ return encoder().writeOp(Op::End);
+ }
+ bool popIf(size_t typeAt, TypeCode type) {
+ MOZ_ASSERT(blockDepth_ > 0);
+ --blockDepth_;
+ if (!encoder().writeOp(Op::End)) {
+ return false;
+ }
+
+ setIfType(typeAt, type);
+ return true;
+ }
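+ // The If's block type is emitted as a patchable byte by pushIf and filled
+ // in via setIfType/popIf once the result type of the arms is known.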
+
+ bool writeBreakIf() { return writeBr(breakableStack_.back(), Op::BrIf); }
+ bool writeContinueIf() { return writeBr(continuableStack_.back(), Op::BrIf); }
+ bool writeUnlabeledBreakOrContinue(bool isBreak) {
+ return writeBr(isBreak ? breakableStack_.back() : continuableStack_.back());
+ }
+ bool writeContinue() { return writeBr(continuableStack_.back()); }
+
+ bool addLabels(const LabelVector& labels, uint32_t relativeBreakDepth,
+ uint32_t relativeContinueDepth) {
+ for (const ParserName* label : labels) {
+ if (!breakLabels_.putNew(label, blockDepth_ + relativeBreakDepth)) {
+ return false;
+ }
+ if (!continueLabels_.putNew(label, blockDepth_ + relativeContinueDepth)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ void removeLabels(const LabelVector& labels) {
+ for (const ParserName* label : labels) {
+ removeLabel(label, &breakLabels_);
+ removeLabel(label, &continueLabels_);
+ }
+ }
+ bool writeLabeledBreakOrContinue(const ParserName* label, bool isBreak) {
+ LabelMap& map = isBreak ? breakLabels_ : continueLabels_;
+ if (LabelMap::Ptr p = map.lookup(label)) {
+ return writeBr(p->value());
+ }
+ MOZ_CRASH("nonexistent label");
+ }
+
+ /*************************************************** Read-only interface */
+
+ const Local* lookupLocal(const ParserName* name) const {
+ if (auto p = locals_.lookup(name)) {
+ return &p->value();
+ }
+ return nullptr;
+ }
+
+ const ModuleValidatorShared::Global* lookupGlobal(
+ const ParserName* name) const {
+ if (locals_.has(name)) {
+ return nullptr;
+ }
+ return m_.lookupGlobal(name);
+ }
+
+ size_t numLocals() const { return locals_.count(); }
+
+ /**************************************************** Encoding interface */
+
+ Encoder& encoder() { return encoder_; }
+
+ [[nodiscard]] bool writeInt32Lit(int32_t i32) {
+ return encoder().writeOp(Op::I32Const) && encoder().writeVarS32(i32);
+ }
+ [[nodiscard]] bool writeConstExpr(const NumLit& lit) {
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return writeInt32Lit(lit.toInt32());
+ case NumLit::Float:
+ return encoder().writeOp(Op::F32Const) &&
+ encoder().writeFixedF32(lit.toFloat());
+ case NumLit::Double:
+ return encoder().writeOp(Op::F64Const) &&
+ encoder().writeFixedF64(lit.toDouble());
+ case NumLit::OutOfRangeInt:
+ break;
+ }
+ MOZ_CRASH("unexpected literal type");
+ }
+};
+
+// Encapsulates the building of an asm bytecode function from asm.js function
+// source code, packing the asm.js code into the asm bytecode form that can
+// be decoded and compiled with a FunctionCompiler.
+template <typename Unit>
+class MOZ_STACK_CLASS FunctionValidator : public FunctionValidatorShared {
+ public:
+ FunctionValidator(ModuleValidator<Unit>& m, ParseNode* fn)
+ : FunctionValidatorShared(m, fn, m.cx()) {}
+
+ public:
+ ModuleValidator<Unit>& m() const {
+ return static_cast<ModuleValidator<Unit>&>(FunctionValidatorShared::m());
+ }
+
+ [[nodiscard]] bool writeCall(ParseNode* pn, Op op) {
+ if (!encoder().writeOp(op)) {
+ return false;
+ }
+
+ return appendCallSiteLineNumber(pn);
+ }
+ [[nodiscard]] bool writeCall(ParseNode* pn, MozOp op) {
+ if (!encoder().writeOp(op)) {
+ return false;
+ }
+
+ return appendCallSiteLineNumber(pn);
+ }
+ [[nodiscard]] bool prepareCall(ParseNode* pn) {
+ return appendCallSiteLineNumber(pn);
+ }
+
+ private:
+ [[nodiscard]] bool appendCallSiteLineNumber(ParseNode* node) {
+ const TokenStreamAnyChars& anyChars = m().tokenStream().anyCharsAccess();
+ auto lineToken = anyChars.lineToken(node->pn_pos.begin);
+ uint32_t lineNumber = anyChars.lineNumber(lineToken);
+ if (lineNumber > CallSiteDesc::MAX_LINE_OR_BYTECODE_VALUE) {
+ return fail(node, "line number exceeding implementation limits");
+ }
+ return callSiteLineNums_.append(lineNumber);
+ }
+};
+
+} /* anonymous namespace */
+
+/*****************************************************************************/
+// asm.js type-checking and code-generation algorithm
+
+static bool CheckIdentifier(ModuleValidatorShared& m, ParseNode* usepn,
+ const ParserName* name) {
+ if (name == m.cx()->parserNames().arguments ||
+ name == m.cx()->parserNames().eval) {
+ return m.failName(usepn, "'%s' is not an allowed identifier", name);
+ }
+ return true;
+}
+
+static bool CheckModuleLevelName(ModuleValidatorShared& m, ParseNode* usepn,
+ const ParserName* name) {
+ if (!CheckIdentifier(m, usepn, name)) {
+ return false;
+ }
+
+ if (name == m.moduleFunctionName() || name == m.globalArgumentName() ||
+ name == m.importArgumentName() || name == m.bufferArgumentName() ||
+ m.lookupGlobal(name)) {
+ return m.failName(usepn, "duplicate name '%s' not allowed", name);
+ }
+
+ return true;
+}
+
+static bool CheckFunctionHead(ModuleValidatorShared& m, FunctionNode* funNode) {
+ FunctionBox* funbox = funNode->funbox();
+ MOZ_ASSERT(!funbox->hasExprBody());
+
+ if (funbox->hasRest()) {
+ return m.fail(funNode, "rest args not allowed");
+ }
+ if (funbox->hasDestructuringArgs) {
+ return m.fail(funNode, "destructuring args not allowed");
+ }
+ return true;
+}
+
+static bool CheckArgument(ModuleValidatorShared& m, ParseNode* arg,
+ const ParserName** name) {
+ *name = nullptr;
+
+ if (!arg->isKind(ParseNodeKind::Name)) {
+ return m.fail(arg, "argument is not a plain name");
+ }
+
+ const ParserName* argName = arg->as<NameNode>().name();
+ if (!CheckIdentifier(m, arg, argName)) {
+ return false;
+ }
+
+ *name = argName;
+ return true;
+}
+
+static bool CheckModuleArgument(ModuleValidatorShared& m, ParseNode* arg,
+ const ParserName** name) {
+ if (!CheckArgument(m, arg, name)) {
+ return false;
+ }
+
+ if (!CheckModuleLevelName(m, arg, *name)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool CheckModuleArguments(ModuleValidatorShared& m,
+ FunctionNode* funNode) {
+ unsigned numFormals;
+ ParseNode* arg1 = FunctionFormalParametersList(funNode, &numFormals);
+ ParseNode* arg2 = arg1 ? NextNode(arg1) : nullptr;
+ ParseNode* arg3 = arg2 ? NextNode(arg2) : nullptr;
+
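+ // The up-to-three parameters are conventionally named (stdlib, foreign,
+ // heap); they are recorded below as the global, import, and buffer
+ // argument names respectively.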
+ if (numFormals > 3) {
+ return m.fail(funNode, "asm.js modules takes at most 3 argument");
+ }
+
+ const ParserName* arg1Name = nullptr;
+ if (arg1 && !CheckModuleArgument(m, arg1, &arg1Name)) {
+ return false;
+ }
+ if (!m.initGlobalArgumentName(arg1Name)) {
+ return false;
+ }
+
+ const ParserName* arg2Name = nullptr;
+ if (arg2 && !CheckModuleArgument(m, arg2, &arg2Name)) {
+ return false;
+ }
+ if (!m.initImportArgumentName(arg2Name)) {
+ return false;
+ }
+
+ const ParserName* arg3Name = nullptr;
+ if (arg3 && !CheckModuleArgument(m, arg3, &arg3Name)) {
+ return false;
+ }
+ if (!m.initBufferArgumentName(arg3Name)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool CheckPrecedingStatements(ModuleValidatorShared& m,
+ ParseNode* stmtList) {
+ MOZ_ASSERT(stmtList->isKind(ParseNodeKind::StatementList));
+
+ ParseNode* stmt = ListHead(stmtList);
+ for (unsigned i = 0, n = ListLength(stmtList); i < n;
+ i++, stmt = NextNode(stmt)) {
+ if (!IsIgnoredDirective(m.cx(), stmt)) {
+ return m.fail(stmt, "invalid asm.js statement");
+ }
+ }
+
+ return true;
+}
+
+static bool CheckGlobalVariableInitConstant(ModuleValidatorShared& m,
+ const ParserName* varName,
+ ParseNode* initNode, bool isConst) {
+ NumLit lit = ExtractNumericLiteral(m, initNode);
+ if (!lit.valid()) {
+ return m.fail(initNode,
+ "global initializer is out of representable integer range");
+ }
+
+ Type canonicalType = Type::canonicalize(Type::lit(lit));
+ if (!canonicalType.isGlobalVarType()) {
+ return m.fail(initNode, "global variable type not allowed");
+ }
+
+ return m.addGlobalVarInit(varName, lit, canonicalType, isConst);
+}
+
+static bool CheckTypeAnnotation(ModuleValidatorShared& m,
+ ParseNode* coercionNode, Type* coerceTo,
+ ParseNode** coercedExpr = nullptr) {
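+ // The recognized coercion forms are 'x|0' (int), '+x' (double), and
+ // 'fround(x)' (float), matching the failure message at the end.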
+ switch (coercionNode->getKind()) {
+ case ParseNodeKind::BitOrExpr: {
+ ParseNode* rhs = BitwiseRight(coercionNode);
+ uint32_t i;
+ if (!IsLiteralInt(m, rhs, &i) || i != 0) {
+ return m.fail(rhs, "must use |0 for argument/return coercion");
+ }
+ *coerceTo = Type::Int;
+ if (coercedExpr) {
+ *coercedExpr = BitwiseLeft(coercionNode);
+ }
+ return true;
+ }
+ case ParseNodeKind::PosExpr: {
+ *coerceTo = Type::Double;
+ if (coercedExpr) {
+ *coercedExpr = UnaryKid(coercionNode);
+ }
+ return true;
+ }
+ case ParseNodeKind::CallExpr: {
+ if (IsCoercionCall(m, coercionNode, coerceTo, coercedExpr)) {
+ return true;
+ }
+ break;
+ }
+ default:;
+ }
+
+ return m.fail(coercionNode, "must be of the form +x, x|0 or fround(x)");
+}
+
+static bool CheckGlobalVariableInitImport(ModuleValidatorShared& m,
+ const ParserName* varName,
+ ParseNode* initNode, bool isConst) {
+ Type coerceTo;
+ ParseNode* coercedExpr;
+ if (!CheckTypeAnnotation(m, initNode, &coerceTo, &coercedExpr)) {
+ return false;
+ }
+
+ if (!coercedExpr->isKind(ParseNodeKind::DotExpr)) {
+ return m.failName(coercedExpr, "invalid import expression for global '%s'",
+ varName);
+ }
+
+ if (!coerceTo.isGlobalVarType()) {
+ return m.fail(initNode, "global variable type not allowed");
+ }
+
+ ParseNode* base = DotBase(coercedExpr);
+ const ParserName* field = DotMember(coercedExpr);
+
+ const ParserName* importName = m.importArgumentName();
+ if (!importName) {
+ return m.fail(coercedExpr,
+ "cannot import without an asm.js foreign parameter");
+ }
+ if (!IsUseOfName(base, importName)) {
+ return m.failName(coercedExpr, "base of import expression must be '%s'",
+ importName);
+ }
+
+ return m.addGlobalVarImport(varName, field, coerceTo, isConst);
+}
+
+static bool IsArrayViewCtorName(ModuleValidatorShared& m,
+ const ParserName* name, Scalar::Type* type) {
+ js::frontend::WellKnownParserAtoms& names = m.cx()->parserNames();
+ if (name == names.Int8Array) {
+ *type = Scalar::Int8;
+ } else if (name == names.Uint8Array) {
+ *type = Scalar::Uint8;
+ } else if (name == names.Int16Array) {
+ *type = Scalar::Int16;
+ } else if (name == names.Uint16Array) {
+ *type = Scalar::Uint16;
+ } else if (name == names.Int32Array) {
+ *type = Scalar::Int32;
+ } else if (name == names.Uint32Array) {
+ *type = Scalar::Uint32;
+ } else if (name == names.Float32Array) {
+ *type = Scalar::Float32;
+ } else if (name == names.Float64Array) {
+ *type = Scalar::Float64;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+static bool CheckNewArrayViewArgs(ModuleValidatorShared& m, ParseNode* newExpr,
+ const ParserName* bufferName) {
+ ParseNode* ctorExpr = BinaryLeft(newExpr);
+ ParseNode* ctorArgs = BinaryRight(newExpr);
+ ParseNode* bufArg = ListHead(ctorArgs);
+ if (!bufArg || NextNode(bufArg) != nullptr) {
+ return m.fail(ctorExpr,
+ "array view constructor takes exactly one argument");
+ }
+
+ if (!IsUseOfName(bufArg, bufferName)) {
+ return m.failName(bufArg, "argument to array view constructor must be '%s'",
+ bufferName);
+ }
+
+ return true;
+}
+
+static bool CheckNewArrayView(ModuleValidatorShared& m,
+ const ParserName* varName, ParseNode* newExpr) {
+ const ParserName* globalName = m.globalArgumentName();
+ if (!globalName) {
+ return m.fail(
+ newExpr, "cannot create array view without an asm.js global parameter");
+ }
+
+ const ParserName* bufferName = m.bufferArgumentName();
+ if (!bufferName) {
+ return m.fail(newExpr,
+ "cannot create array view without an asm.js heap parameter");
+ }
+
+ ParseNode* ctorExpr = BinaryLeft(newExpr);
+
+ const ParserName* field;
+ Scalar::Type type;
+ if (ctorExpr->isKind(ParseNodeKind::DotExpr)) {
+ ParseNode* base = DotBase(ctorExpr);
+
+ if (!IsUseOfName(base, globalName)) {
+ return m.failName(base, "expecting '%s.*Array", globalName);
+ }
+
+ field = DotMember(ctorExpr);
+ if (!IsArrayViewCtorName(m, field, &type)) {
+ return m.fail(ctorExpr, "could not match typed array name");
+ }
+ } else {
+ if (!ctorExpr->isKind(ParseNodeKind::Name)) {
+ return m.fail(ctorExpr,
+ "expecting name of imported array view constructor");
+ }
+
+ const ParserName* globalName = ctorExpr->as<NameNode>().name();
+ const ModuleValidatorShared::Global* global = m.lookupGlobal(globalName);
+ if (!global) {
+ return m.failName(ctorExpr, "%s not found in module global scope",
+ globalName);
+ }
+
+ if (global->which() != ModuleValidatorShared::Global::ArrayViewCtor) {
+ return m.failName(ctorExpr,
+ "%s must be an imported array view constructor",
+ globalName);
+ }
+
+ field = nullptr;
+ type = global->viewType();
+ }
+
+ if (!CheckNewArrayViewArgs(m, newExpr, bufferName)) {
+ return false;
+ }
+
+ return m.addArrayView(varName, type, field);
+}
+
+static bool CheckGlobalMathImport(ModuleValidatorShared& m, ParseNode* initNode,
+ const ParserName* varName,
+ const ParserName* field) {
+ // Math builtin, with the form glob.Math.[[builtin]]
+ ModuleValidatorShared::MathBuiltin mathBuiltin;
+ if (!m.lookupStandardLibraryMathName(field, &mathBuiltin)) {
+ return m.failName(initNode, "'%s' is not a standard Math builtin", field);
+ }
+
+ switch (mathBuiltin.kind) {
+ case ModuleValidatorShared::MathBuiltin::Function:
+ return m.addMathBuiltinFunction(varName, mathBuiltin.u.func, field);
+ case ModuleValidatorShared::MathBuiltin::Constant:
+ return m.addMathBuiltinConstant(varName, mathBuiltin.u.cst, field);
+ default:
+ break;
+ }
+ MOZ_CRASH("unexpected or uninitialized math builtin type");
+}
+
+static bool CheckGlobalDotImport(ModuleValidatorShared& m,
+ const ParserName* varName,
+ ParseNode* initNode) {
+ ParseNode* base = DotBase(initNode);
+ const ParserName* field = DotMember(initNode);
+
+ if (base->isKind(ParseNodeKind::DotExpr)) {
+ ParseNode* global = DotBase(base);
+ const ParserName* math = DotMember(base);
+
+ const ParserName* globalName = m.globalArgumentName();
+ if (!globalName) {
+ return m.fail(
+ base, "import statement requires the module have a stdlib parameter");
+ }
+
+ if (!IsUseOfName(global, globalName)) {
+ if (global->isKind(ParseNodeKind::DotExpr)) {
+ return m.failName(base,
+ "imports can have at most two dot accesses "
+ "(e.g. %s.Math.sin)",
+ globalName);
+ }
+ return m.failName(base, "expecting %s.*", globalName);
+ }
+
+ if (math == m.cx()->parserNames().Math) {
+ return CheckGlobalMathImport(m, initNode, varName, field);
+ }
+ return m.failName(base, "expecting %s.Math", globalName);
+ }
+
+ if (!base->isKind(ParseNodeKind::Name)) {
+ return m.fail(base, "expected name of variable or parameter");
+ }
+
+ auto baseName = base->as<NameNode>().name();
+ if (baseName == m.globalArgumentName()) {
+ if (field == m.cx()->parserNames().NaN) {
+ return m.addGlobalConstant(varName, GenericNaN(), field);
+ }
+ if (field == m.cx()->parserNames().Infinity) {
+ return m.addGlobalConstant(varName, PositiveInfinity<double>(), field);
+ }
+
+ Scalar::Type type;
+ if (IsArrayViewCtorName(m, field, &type)) {
+ return m.addArrayViewCtor(varName, type, field);
+ }
+
+ return m.failName(
+ initNode, "'%s' is not a standard constant or typed array name", field);
+ }
+
+ if (baseName != m.importArgumentName()) {
+ return m.fail(base, "expected global or import name");
+ }
+
+ return m.addFFI(varName, field);
+}
+
+static bool CheckModuleGlobal(ModuleValidatorShared& m, ParseNode* decl,
+ bool isConst) {
+ if (!decl->isKind(ParseNodeKind::AssignExpr)) {
+ return m.fail(decl, "module import needs initializer");
+ }
+ AssignmentNode* assignNode = &decl->as<AssignmentNode>();
+
+ ParseNode* var = assignNode->left();
+
+ if (!var->isKind(ParseNodeKind::Name)) {
+ return m.fail(var, "import variable is not a plain name");
+ }
+
+ const ParserName* varName = var->as<NameNode>().name();
+ if (!CheckModuleLevelName(m, var, varName)) {
+ return false;
+ }
+
+ ParseNode* initNode = assignNode->right();
+
+ if (IsNumericLiteral(m, initNode)) {
+ return CheckGlobalVariableInitConstant(m, varName, initNode, isConst);
+ }
+
+ if (initNode->isKind(ParseNodeKind::BitOrExpr) ||
+ initNode->isKind(ParseNodeKind::PosExpr) ||
+ initNode->isKind(ParseNodeKind::CallExpr)) {
+ return CheckGlobalVariableInitImport(m, varName, initNode, isConst);
+ }
+
+ if (initNode->isKind(ParseNodeKind::NewExpr)) {
+ return CheckNewArrayView(m, varName, initNode);
+ }
+
+ if (initNode->isKind(ParseNodeKind::DotExpr)) {
+ return CheckGlobalDotImport(m, varName, initNode);
+ }
+
+ return m.fail(initNode, "unsupported import expression");
+}
+
+template <typename Unit>
+static bool CheckModuleProcessingDirectives(ModuleValidator<Unit>& m) {
+ auto& ts = m.parser().tokenStream;
+ while (true) {
+ bool matched;
+ if (!ts.matchToken(&matched, TokenKind::String,
+ TokenStreamShared::SlashIsRegExp)) {
+ return false;
+ }
+ if (!matched) {
+ return true;
+ }
+
+ if (!IsIgnoredDirectiveName(m.cx(),
+ ts.anyCharsAccess().currentToken().atom())) {
+ return m.failCurrentOffset("unsupported processing directive");
+ }
+
+ TokenKind tt;
+ if (!ts.getToken(&tt)) {
+ return false;
+ }
+ if (tt != TokenKind::Semi) {
+ return m.failCurrentOffset("expected semicolon after string literal");
+ }
+ }
+}
+
+template <typename Unit>
+static bool CheckModuleGlobals(ModuleValidator<Unit>& m) {
+ while (true) {
+ ParseNode* varStmt;
+ if (!ParseVarOrConstStatement(m.parser(), &varStmt)) {
+ return false;
+ }
+ if (!varStmt) {
+ break;
+ }
+ for (ParseNode* var = VarListHead(varStmt); var; var = NextNode(var)) {
+ if (!CheckModuleGlobal(m, var,
+ varStmt->isKind(ParseNodeKind::ConstDecl))) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool ArgFail(FunctionValidatorShared& f, const ParserName* argName,
+ ParseNode* stmt) {
+ return f.failName(stmt,
+ "expecting argument type declaration for '%s' of the "
+ "form 'arg = arg|0' or 'arg = +arg' or 'arg = fround(arg)'",
+ argName);
+}
+
+static bool CheckArgumentType(FunctionValidatorShared& f, ParseNode* stmt,
+ const ParserName* name, Type* type) {
+ if (!stmt || !IsExpressionStatement(stmt)) {
+ return ArgFail(f, name, stmt ? stmt : f.fn());
+ }
+
+ ParseNode* initNode = ExpressionStatementExpr(stmt);
+ if (!initNode->isKind(ParseNodeKind::AssignExpr)) {
+ return ArgFail(f, name, stmt);
+ }
+
+ ParseNode* argNode = BinaryLeft(initNode);
+ ParseNode* coercionNode = BinaryRight(initNode);
+
+ if (!IsUseOfName(argNode, name)) {
+ return ArgFail(f, name, stmt);
+ }
+
+ ParseNode* coercedExpr;
+ if (!CheckTypeAnnotation(f.m(), coercionNode, type, &coercedExpr)) {
+ return false;
+ }
+
+ if (!type->isArgType()) {
+ return f.failName(stmt, "invalid type for argument '%s'", name);
+ }
+
+ if (!IsUseOfName(coercedExpr, name)) {
+ return ArgFail(f, name, stmt);
+ }
+
+ return true;
+}
+
+static bool CheckProcessingDirectives(ModuleValidatorShared& m,
+ ParseNode** stmtIter) {
+ ParseNode* stmt = *stmtIter;
+
+ while (stmt && IsIgnoredDirective(m.cx(), stmt)) {
+ stmt = NextNode(stmt);
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+static bool CheckArguments(FunctionValidatorShared& f, ParseNode** stmtIter,
+ ValTypeVector* argTypes) {
+ ParseNode* stmt = *stmtIter;
+
+ unsigned numFormals;
+ ParseNode* argpn = FunctionFormalParametersList(f.fn(), &numFormals);
+
+ for (unsigned i = 0; i < numFormals;
+ i++, argpn = NextNode(argpn), stmt = NextNode(stmt)) {
+ const ParserName* name = nullptr;
+ if (!CheckArgument(f.m(), argpn, &name)) {
+ return false;
+ }
+
+ Type type;
+ if (!CheckArgumentType(f, stmt, name, &type)) {
+ return false;
+ }
+
+ if (!argTypes->append(type.canonicalToValType())) {
+ return false;
+ }
+
+ if (!f.addLocal(argpn, name, type)) {
+ return false;
+ }
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+static bool IsLiteralOrConst(FunctionValidatorShared& f, ParseNode* pn,
+ NumLit* lit) {
+ if (pn->isKind(ParseNodeKind::Name)) {
+ const ModuleValidatorShared::Global* global =
+ f.lookupGlobal(pn->as<NameNode>().name());
+ if (!global ||
+ global->which() != ModuleValidatorShared::Global::ConstantLiteral) {
+ return false;
+ }
+
+ *lit = global->constLiteralValue();
+ return true;
+ }
+
+ if (!IsNumericLiteral(f.m(), pn)) {
+ return false;
+ }
+
+ *lit = ExtractNumericLiteral(f.m(), pn);
+ return true;
+}
+
+static bool CheckFinalReturn(FunctionValidatorShared& f,
+ ParseNode* lastNonEmptyStmt) {
+ if (!f.encoder().writeOp(Op::End)) {
+ return false;
+ }
+
+ if (!f.hasAlreadyReturned()) {
+ f.setReturnedType(Nothing());
+ return true;
+ }
+
+ if (!lastNonEmptyStmt->isKind(ParseNodeKind::ReturnStmt) &&
+ f.returnedType()) {
+ return f.fail(lastNonEmptyStmt,
+ "void incompatible with previous return type");
+ }
+
+ return true;
+}
+
+static bool CheckVariable(FunctionValidatorShared& f, ParseNode* decl,
+ ValTypeVector* types, Vector<NumLit>* inits) {
+ if (!decl->isKind(ParseNodeKind::AssignExpr)) {
+ return f.failName(
+ decl, "var '%s' needs explicit type declaration via an initial value",
+ decl->as<NameNode>().name());
+ }
+ AssignmentNode* assignNode = &decl->as<AssignmentNode>();
+
+ ParseNode* var = assignNode->left();
+
+ if (!var->isKind(ParseNodeKind::Name)) {
+ return f.fail(var, "local variable is not a plain name");
+ }
+
+ const ParserName* name = var->as<NameNode>().name();
+
+ if (!CheckIdentifier(f.m(), var, name)) {
+ return false;
+ }
+
+ ParseNode* initNode = assignNode->right();
+
+ NumLit lit;
+ if (!IsLiteralOrConst(f, initNode, &lit)) {
+ return f.failName(
+ var, "var '%s' initializer must be literal or const literal", name);
+ }
+
+ if (!lit.valid()) {
+ return f.failName(var, "var '%s' initializer out of range", name);
+ }
+
+ Type type = Type::canonicalize(Type::lit(lit));
+
+ return f.addLocal(var, name, type) &&
+ types->append(type.canonicalToValType()) && inits->append(lit);
+}
+
+static bool CheckVariables(FunctionValidatorShared& f, ParseNode** stmtIter) {
+ ParseNode* stmt = *stmtIter;
+
+ uint32_t firstVar = f.numLocals();
+
+ ValTypeVector types;
+ Vector<NumLit> inits(f.cx());
+
+ for (; stmt && stmt->isKind(ParseNodeKind::VarStmt);
+ stmt = NextNonEmptyStatement(stmt)) {
+ for (ParseNode* var = VarListHead(stmt); var; var = NextNode(var)) {
+ if (!CheckVariable(f, var, &types, &inits)) {
+ return false;
+ }
+ }
+ }
+
+ MOZ_ASSERT(f.encoder().empty());
+
+ if (!EncodeLocalEntries(f.encoder(), types)) {
+ return false;
+ }
+
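+ // Wasm locals start out zero-initialized, so only variables with non-zero
+ // initializers need an explicit constant + SetLocal pair here.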
+ for (uint32_t i = 0; i < inits.length(); i++) {
+ NumLit lit = inits[i];
+ if (lit.isZeroBits()) {
+ continue;
+ }
+ if (!f.writeConstExpr(lit)) {
+ return false;
+ }
+ if (!f.encoder().writeOp(Op::SetLocal)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(firstVar + i)) {
+ return false;
+ }
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckExpr(FunctionValidator<Unit>& f, ParseNode* op, Type* type);
+
+template <typename Unit>
+static bool CheckNumericLiteral(FunctionValidator<Unit>& f, ParseNode* num,
+ Type* type) {
+ NumLit lit = ExtractNumericLiteral(f.m(), num);
+ if (!lit.valid()) {
+ return f.fail(num, "numeric literal out of representable integer range");
+ }
+ *type = Type::lit(lit);
+ return f.writeConstExpr(lit);
+}
+
+static bool CheckVarRef(FunctionValidatorShared& f, ParseNode* varRef,
+ Type* type) {
+ const ParserName* name = varRef->as<NameNode>().name();
+
+ if (const FunctionValidatorShared::Local* local = f.lookupLocal(name)) {
+ if (!f.encoder().writeOp(Op::GetLocal)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(local->slot)) {
+ return false;
+ }
+ *type = local->type;
+ return true;
+ }
+
+ if (const ModuleValidatorShared::Global* global = f.lookupGlobal(name)) {
+ switch (global->which()) {
+ case ModuleValidatorShared::Global::ConstantLiteral:
+ *type = global->varOrConstType();
+ return f.writeConstExpr(global->constLiteralValue());
+ case ModuleValidatorShared::Global::ConstantImport:
+ case ModuleValidatorShared::Global::Variable: {
+ *type = global->varOrConstType();
+ return f.encoder().writeOp(Op::GetGlobal) &&
+ f.encoder().writeVarU32(global->varOrConstIndex());
+ }
+ case ModuleValidatorShared::Global::Function:
+ case ModuleValidatorShared::Global::FFI:
+ case ModuleValidatorShared::Global::MathBuiltinFunction:
+ case ModuleValidatorShared::Global::Table:
+ case ModuleValidatorShared::Global::ArrayView:
+ case ModuleValidatorShared::Global::ArrayViewCtor:
+ break;
+ }
+ return f.failName(varRef,
+ "'%s' may not be accessed by ordinary expressions", name);
+ }
+
+ return f.failName(varRef, "'%s' not found in local or asm.js module scope",
+ name);
+}
+
+static inline bool IsLiteralOrConstInt(FunctionValidatorShared& f,
+ ParseNode* pn, uint32_t* u32) {
+ NumLit lit;
+ if (!IsLiteralOrConst(f, pn, &lit)) {
+ return false;
+ }
+
+ return IsLiteralInt(lit, u32);
+}
+
+static const int32_t NoMask = -1;
+
+template <typename Unit>
+static bool CheckArrayAccess(FunctionValidator<Unit>& f, ParseNode* viewName,
+ ParseNode* indexExpr, Scalar::Type* viewType) {
+ if (!viewName->isKind(ParseNodeKind::Name)) {
+ return f.fail(viewName,
+ "base of array access must be a typed array view name");
+ }
+
+ const ModuleValidatorShared::Global* global =
+ f.lookupGlobal(viewName->as<NameNode>().name());
+ if (!global || global->which() != ModuleValidatorShared::Global::ArrayView) {
+ return f.fail(viewName,
+ "base of array access must be a typed array view name");
+ }
+
+ *viewType = global->viewType();
+
+ uint32_t index;
+ if (IsLiteralOrConstInt(f, indexExpr, &index)) {
+ uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
+ uint64_t width = TypedArrayElemSize(*viewType);
+ if (!f.m().tryConstantAccess(byteOffset, width)) {
+ return f.fail(indexExpr, "constant index out of range");
+ }
+
+ return f.writeInt32Lit(byteOffset);
+ }
+
+ // Mask off the low bits to account for the clearing effect of a right shift
+ // followed by the left shift implicit in the array access. E.g., H32[i>>2]
+ // loses the low two bits.
+ int32_t mask = ~(TypedArrayElemSize(*viewType) - 1);
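+ // For example, an Int32 view has element size 4, so mask == ~3 and the
+ // access H32[i>>2] ends up addressing byte offset (i & ~3); for element
+ // size 1 the mask is ~0 == NoMask and no masking is emitted below.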
+
+ if (indexExpr->isKind(ParseNodeKind::RshExpr)) {
+ ParseNode* shiftAmountNode = BitwiseRight(indexExpr);
+
+ uint32_t shift;
+ if (!IsLiteralInt(f.m(), shiftAmountNode, &shift)) {
+ return f.failf(shiftAmountNode, "shift amount must be constant");
+ }
+
+ unsigned requiredShift = TypedArrayShift(*viewType);
+ if (shift != requiredShift) {
+ return f.failf(shiftAmountNode, "shift amount must be %u", requiredShift);
+ }
+
+ ParseNode* pointerNode = BitwiseLeft(indexExpr);
+
+ Type pointerType;
+ if (!CheckExpr(f, pointerNode, &pointerType)) {
+ return false;
+ }
+
+ if (!pointerType.isIntish()) {
+ return f.failf(pointerNode, "%s is not a subtype of int",
+ pointerType.toChars());
+ }
+ } else {
+ // For legacy scalar access compatibility, accept Int8/Uint8 accesses
+ // with no shift.
+ if (TypedArrayShift(*viewType) != 0) {
+ return f.fail(
+ indexExpr,
+ "index expression isn't shifted; must be an Int8/Uint8 access");
+ }
+
+ MOZ_ASSERT(mask == NoMask);
+
+ ParseNode* pointerNode = indexExpr;
+
+ Type pointerType;
+ if (!CheckExpr(f, pointerNode, &pointerType)) {
+ return false;
+ }
+ if (!pointerType.isInt()) {
+ return f.failf(pointerNode, "%s is not a subtype of int",
+ pointerType.toChars());
+ }
+ }
+
+ // Don't generate the mask op if there is no need for it, which can happen
+ // for a shift of zero.
+ if (mask != NoMask) {
+ return f.writeInt32Lit(mask) && f.encoder().writeOp(Op::I32And);
+ }
+
+ return true;
+}
+
+static bool WriteArrayAccessFlags(FunctionValidatorShared& f,
+ Scalar::Type viewType) {
+ // asm.js only has naturally-aligned accesses.
+ size_t align = TypedArrayElemSize(viewType);
+ MOZ_ASSERT(IsPowerOfTwo(align));
+ if (!f.encoder().writeFixedU8(CeilingLog2(align))) {
+ return false;
+ }
+
+ // asm.js doesn't have constant offsets, so just encode a 0.
+ if (!f.encoder().writeVarU32(0)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckLoadArray(FunctionValidator<Unit>& f, ParseNode* elem,
+ Type* type) {
+ Scalar::Type viewType;
+
+ if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType)) {
+ return false;
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ if (!f.encoder().writeOp(Op::I32Load8S)) return false;
+ break;
+ case Scalar::Uint8:
+ if (!f.encoder().writeOp(Op::I32Load8U)) return false;
+ break;
+ case Scalar::Int16:
+ if (!f.encoder().writeOp(Op::I32Load16S)) return false;
+ break;
+ case Scalar::Uint16:
+ if (!f.encoder().writeOp(Op::I32Load16U)) return false;
+ break;
+ case Scalar::Uint32:
+ case Scalar::Int32:
+ if (!f.encoder().writeOp(Op::I32Load)) return false;
+ break;
+ case Scalar::Float32:
+ if (!f.encoder().writeOp(Op::F32Load)) return false;
+ break;
+ case Scalar::Float64:
+ if (!f.encoder().writeOp(Op::F64Load)) return false;
+ break;
+ default:
+ MOZ_CRASH("unexpected scalar type");
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32:
+ *type = Type::Intish;
+ break;
+ case Scalar::Float32:
+ *type = Type::MaybeFloat;
+ break;
+ case Scalar::Float64:
+ *type = Type::MaybeDouble;
+ break;
+ default:
+ MOZ_CRASH("Unexpected array type");
+ }
+
+ if (!WriteArrayAccessFlags(f, viewType)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckStoreArray(FunctionValidator<Unit>& f, ParseNode* lhs,
+ ParseNode* rhs, Type* type) {
+ Scalar::Type viewType;
+ if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32:
+ if (!rhsType.isIntish()) {
+ return f.failf(lhs, "%s is not a subtype of intish", rhsType.toChars());
+ }
+ break;
+ case Scalar::Float32:
+ if (!rhsType.isMaybeDouble() && !rhsType.isFloatish()) {
+ return f.failf(lhs, "%s is not a subtype of double? or floatish",
+ rhsType.toChars());
+ }
+ break;
+ case Scalar::Float64:
+ if (!rhsType.isMaybeFloat() && !rhsType.isMaybeDouble()) {
+ return f.failf(lhs, "%s is not a subtype of float? or double?",
+ rhsType.toChars());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected view type");
+ }
+
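+ // The Moz* 'tee' store opcodes are used here so the stored value remains
+ // available as the value of the assignment expression (note *type = rhsType
+ // at the end).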
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ if (!f.encoder().writeOp(MozOp::I32TeeStore8)) {
+ return false;
+ }
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ if (!f.encoder().writeOp(MozOp::I32TeeStore16)) {
+ return false;
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (!f.encoder().writeOp(MozOp::I32TeeStore)) {
+ return false;
+ }
+ break;
+ case Scalar::Float32:
+ if (rhsType.isFloatish()) {
+ if (!f.encoder().writeOp(MozOp::F32TeeStore)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(MozOp::F64TeeStoreF32)) {
+ return false;
+ }
+ }
+ break;
+ case Scalar::Float64:
+ if (rhsType.isFloatish()) {
+ if (!f.encoder().writeOp(MozOp::F32TeeStoreF64)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(MozOp::F64TeeStore)) {
+ return false;
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected scalar type");
+ }
+
+ if (!WriteArrayAccessFlags(f, viewType)) {
+ return false;
+ }
+
+ *type = rhsType;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckAssignName(FunctionValidator<Unit>& f, ParseNode* lhs,
+ ParseNode* rhs, Type* type) {
+ const ParserName* name = lhs->as<NameNode>().name();
+
+ if (const FunctionValidatorShared::Local* lhsVar = f.lookupLocal(name)) {
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!f.encoder().writeOp(Op::TeeLocal)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(lhsVar->slot)) {
+ return false;
+ }
+
+ if (!(rhsType <= lhsVar->type)) {
+ return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(),
+ lhsVar->type.toChars());
+ }
+ *type = rhsType;
+ return true;
+ }
+
+ if (const ModuleValidatorShared::Global* global = f.lookupGlobal(name)) {
+ if (global->which() != ModuleValidatorShared::Global::Variable) {
+ return f.failName(lhs, "'%s' is not a mutable variable", name);
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ Type globType = global->varOrConstType();
+ if (!(rhsType <= globType)) {
+ return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(),
+ globType.toChars());
+ }
+ if (!f.encoder().writeOp(MozOp::TeeGlobal)) {
+ return false;
+ }
+ if (!f.encoder().writeVarU32(global->varOrConstIndex())) {
+ return false;
+ }
+
+ *type = rhsType;
+ return true;
+ }
+
+ return f.failName(lhs, "'%s' not found in local or asm.js module scope",
+ name);
+}
+
+template <typename Unit>
+static bool CheckAssign(FunctionValidator<Unit>& f, ParseNode* assign,
+ Type* type) {
+ MOZ_ASSERT(assign->isKind(ParseNodeKind::AssignExpr));
+
+ ParseNode* lhs = BinaryLeft(assign);
+ ParseNode* rhs = BinaryRight(assign);
+
+ if (lhs->getKind() == ParseNodeKind::ElemExpr) {
+ return CheckStoreArray(f, lhs, rhs, type);
+ }
+
+ if (lhs->getKind() == ParseNodeKind::Name) {
+ return CheckAssignName(f, lhs, rhs, type);
+ }
+
+ return f.fail(
+ assign,
+ "left-hand side of assignment must be a variable or array access");
+}
+
+template <typename Unit>
+static bool CheckMathIMul(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 2) {
+ return f.fail(call, "Math.imul must be passed 2 arguments");
+ }
+
+ ParseNode* lhs = CallArgList(call);
+ ParseNode* rhs = NextNode(lhs);
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!lhsType.isIntish()) {
+ return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
+ }
+ if (!rhsType.isIntish()) {
+ return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
+ }
+
+ *type = Type::Signed;
+ return f.encoder().writeOp(Op::I32Mul);
+}
+
+template <typename Unit>
+static bool CheckMathClz32(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 1) {
+ return f.fail(call, "Math.clz32 must be passed 1 argument");
+ }
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (!argType.isIntish()) {
+ return f.failf(arg, "%s is not a subtype of intish", argType.toChars());
+ }
+
+ *type = Type::Fixnum;
+ return f.encoder().writeOp(Op::I32Clz);
+}
+
+template <typename Unit>
+static bool CheckMathAbs(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 1) {
+ return f.fail(call, "Math.abs must be passed 1 argument");
+ }
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (argType.isSigned()) {
+ *type = Type::Unsigned;
+ return f.encoder().writeOp(MozOp::I32Abs);
+ }
+
+ if (argType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Abs);
+ }
+
+ if (argType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Abs);
+ }
+
+ return f.failf(call, "%s is not a subtype of signed, float? or double?",
+ argType.toChars());
+}
+
+template <typename Unit>
+static bool CheckMathSqrt(FunctionValidator<Unit>& f, ParseNode* call,
+ Type* type) {
+ if (CallArgListLength(call) != 1) {
+ return f.fail(call, "Math.sqrt must be passed 1 argument");
+ }
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (argType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Sqrt);
+ }
+
+ if (argType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Sqrt);
+ }
+
+ return f.failf(call, "%s is neither a subtype of double? nor float?",
+ argType.toChars());
+}
+
+template <typename Unit>
+static bool CheckMathMinMax(FunctionValidator<Unit>& f, ParseNode* callNode,
+ bool isMax, Type* type) {
+ if (CallArgListLength(callNode) < 2) {
+ return f.fail(callNode, "Math.min/max must be passed at least 2 arguments");
+ }
+
+ ParseNode* firstArg = CallArgList(callNode);
+ Type firstType;
+ if (!CheckExpr(f, firstArg, &firstType)) {
+ return false;
+ }
+
+ Op op = Op::Limit;
+ MozOp mozOp = MozOp::Limit;
+ if (firstType.isMaybeDouble()) {
+ *type = Type::Double;
+ firstType = Type::MaybeDouble;
+ op = isMax ? Op::F64Max : Op::F64Min;
+ } else if (firstType.isMaybeFloat()) {
+ *type = Type::Float;
+ firstType = Type::MaybeFloat;
+ op = isMax ? Op::F32Max : Op::F32Min;
+ } else if (firstType.isSigned()) {
+ *type = Type::Signed;
+ firstType = Type::Signed;
+ mozOp = isMax ? MozOp::I32Max : MozOp::I32Min;
+ } else {
+ return f.failf(firstArg, "%s is not a subtype of double?, float? or signed",
+ firstType.toChars());
+ }
+
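+ // Each additional argument emits one more min/max operation, folding
+ // left-to-right: Math.min(a, b, c) validates as min(min(a, b), c).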
+ unsigned numArgs = CallArgListLength(callNode);
+ ParseNode* nextArg = NextNode(firstArg);
+ for (unsigned i = 1; i < numArgs; i++, nextArg = NextNode(nextArg)) {
+ Type nextType;
+ if (!CheckExpr(f, nextArg, &nextType)) {
+ return false;
+ }
+ if (!(nextType <= firstType)) {
+ return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(),
+ firstType.toChars());
+ }
+
+ if (op != Op::Limit) {
+ if (!f.encoder().writeOp(op)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(mozOp)) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+using CheckArgType = bool (*)(FunctionValidatorShared& f, ParseNode* argNode,
+ Type type);
+
+template <CheckArgType checkArg, typename Unit>
+static bool CheckCallArgs(FunctionValidator<Unit>& f, ParseNode* callNode,
+ ValTypeVector* args) {
+ ParseNode* argNode = CallArgList(callNode);
+ for (unsigned i = 0; i < CallArgListLength(callNode);
+ i++, argNode = NextNode(argNode)) {
+ Type type;
+ if (!CheckExpr(f, argNode, &type)) {
+ return false;
+ }
+
+ if (!checkArg(f, argNode, type)) {
+ return false;
+ }
+
+ if (!args->append(Type::canonicalize(type).canonicalToValType())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool CheckSignatureAgainstExisting(ModuleValidatorShared& m,
+ ParseNode* usepn, const FuncType& sig,
+ const FuncType& existing) {
+ if (sig != existing) {
+ return m.failf(usepn, "incompatible argument types to function");
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFunctionSignature(ModuleValidator<Unit>& m, ParseNode* usepn,
+ FuncType&& sig, const ParserName* name,
+ ModuleValidatorShared::Func** func) {
+ if (sig.args().length() > MaxParams) {
+ return m.failf(usepn, "too many parameters");
+ }
+
+ ModuleValidatorShared::Func* existing = m.lookupFuncDef(name);
+ if (!existing) {
+ if (!CheckModuleLevelName(m, usepn, name)) {
+ return false;
+ }
+ return m.addFuncDef(name, usepn->pn_pos.begin, std::move(sig), func);
+ }
+
+ const FuncType& existingSig = m.env().types.funcType(existing->sigIndex());
+
+ if (!CheckSignatureAgainstExisting(m, usepn, sig, existingSig)) {
+ return false;
+ }
+
+ *func = existing;
+ return true;
+}
+
+static bool CheckIsArgType(FunctionValidatorShared& f, ParseNode* argNode,
+ Type type) {
+ if (!type.isArgType()) {
+ return f.failf(argNode, "%s is not a subtype of int, float, or double",
+ type.toChars());
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckInternalCall(FunctionValidator<Unit>& f, ParseNode* callNode,
+ const ParserName* calleeName, Type ret,
+ Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsArgType>(f, callNode, &args)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ Maybe<ValType> retType = ret.canonicalToReturnType();
+ if (retType && !results.append(retType.ref())) {
+ return false;
+ }
+
+ FuncType sig(std::move(args), std::move(results));
+
+ ModuleValidatorShared::Func* callee;
+ if (!CheckFunctionSignature(f.m(), callNode, std::move(sig), calleeName,
+ &callee)) {
+ return false;
+ }
+
+ if (!f.writeCall(callNode, MozOp::OldCallDirect)) {
+ return false;
+ }
+
+ if (!f.encoder().writeVarU32(callee->funcDefIndex())) {
+ return false;
+ }
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFuncPtrTableAgainstExisting(ModuleValidator<Unit>& m,
+ ParseNode* usepn,
+ const ParserName* name,
+ FuncType&& sig, unsigned mask,
+ uint32_t* tableIndex) {
+ if (const ModuleValidatorShared::Global* existing = m.lookupGlobal(name)) {
+ if (existing->which() != ModuleValidatorShared::Global::Table) {
+ return m.failName(usepn, "'%s' is not a function-pointer table", name);
+ }
+
+ ModuleValidatorShared::Table& table = m.table(existing->tableIndex());
+ if (mask != table.mask()) {
+ return m.failf(usepn, "mask does not match previous value (%u)",
+ table.mask());
+ }
+
+ if (!CheckSignatureAgainstExisting(
+ m, usepn, sig, m.env().types.funcType(table.sigIndex()))) {
+ return false;
+ }
+
+ *tableIndex = existing->tableIndex();
+ return true;
+ }
+
+ if (!CheckModuleLevelName(m, usepn, name)) {
+ return false;
+ }
+
+ if (!m.declareFuncPtrTable(std::move(sig), name, usepn->pn_pos.begin, mask,
+ tableIndex)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFuncPtrCall(FunctionValidator<Unit>& f, ParseNode* callNode,
+ Type ret, Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ ParseNode* callee = CallCallee(callNode);
+ ParseNode* tableNode = ElemBase(callee);
+ ParseNode* indexExpr = ElemIndex(callee);
+
+ if (!tableNode->isKind(ParseNodeKind::Name)) {
+ return f.fail(tableNode, "expecting name of function-pointer array");
+ }
+
+ const ParserName* name = tableNode->as<NameNode>().name();
+ if (const ModuleValidatorShared::Global* existing = f.lookupGlobal(name)) {
+ if (existing->which() != ModuleValidatorShared::Global::Table) {
+ return f.failName(
+ tableNode, "'%s' is not the name of a function-pointer array", name);
+ }
+ }
+
+ if (!indexExpr->isKind(ParseNodeKind::BitAndExpr)) {
+ return f.fail(indexExpr,
+ "function-pointer table index expression needs & mask");
+ }
+
+ ParseNode* indexNode = BitwiseLeft(indexExpr);
+ ParseNode* maskNode = BitwiseRight(indexExpr);
+
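+  // A mask of 2^n - 1 guarantees that `index & mask` stays within the bounds
+  // of the power-of-two sized function-pointer table.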
+ uint32_t mask;
+ if (!IsLiteralInt(f.m(), maskNode, &mask) || mask == UINT32_MAX ||
+ !IsPowerOfTwo(mask + 1)) {
+ return f.fail(maskNode,
+ "function-pointer table index mask value must be a power of "
+ "two minus 1");
+ }
+
+ Type indexType;
+ if (!CheckExpr(f, indexNode, &indexType)) {
+ return false;
+ }
+
+ if (!indexType.isIntish()) {
+ return f.failf(indexNode, "%s is not a subtype of intish",
+ indexType.toChars());
+ }
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsArgType>(f, callNode, &args)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ Maybe<ValType> retType = ret.canonicalToReturnType();
+ if (retType && !results.append(retType.ref())) {
+ return false;
+ }
+
+ FuncType sig(std::move(args), std::move(results));
+
+ uint32_t tableIndex;
+ if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, std::move(sig),
+ mask, &tableIndex)) {
+ return false;
+ }
+
+ if (!f.writeCall(callNode, MozOp::OldCallIndirect)) {
+ return false;
+ }
+
+ // Call signature
+ if (!f.encoder().writeVarU32(f.m().table(tableIndex).sigIndex())) {
+ return false;
+ }
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+static bool CheckIsExternType(FunctionValidatorShared& f, ParseNode* argNode,
+ Type type) {
+ if (!type.isExtern()) {
+ return f.failf(argNode, "%s is not a subtype of extern", type.toChars());
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFFICall(FunctionValidator<Unit>& f, ParseNode* callNode,
+ unsigned ffiIndex, Type ret, Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ const ParserName* calleeName = CallCallee(callNode)->as<NameNode>().name();
+
+ if (ret.isFloat()) {
+ return f.fail(callNode, "FFI calls can't return float");
+ }
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ Maybe<ValType> retType = ret.canonicalToReturnType();
+ if (retType && !results.append(retType.ref())) {
+ return false;
+ }
+
+ FuncType sig(std::move(args), std::move(results));
+
+ uint32_t importIndex;
+ if (!f.m().declareImport(calleeName, std::move(sig), ffiIndex,
+ &importIndex)) {
+ return false;
+ }
+
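+  // FFI calls compile to calls of ordinary wasm imports; declareImport above
+  // mapped this callee and signature to an import index.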
+ if (!f.writeCall(callNode, Op::Call)) {
+ return false;
+ }
+
+ if (!f.encoder().writeVarU32(importIndex)) {
+ return false;
+ }
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+static bool CheckFloatCoercionArg(FunctionValidatorShared& f,
+ ParseNode* inputNode, Type inputType) {
+ if (inputType.isMaybeDouble()) {
+ return f.encoder().writeOp(Op::F32DemoteF64);
+ }
+ if (inputType.isSigned()) {
+ return f.encoder().writeOp(Op::F32ConvertSI32);
+ }
+ if (inputType.isUnsigned()) {
+ return f.encoder().writeOp(Op::F32ConvertUI32);
+ }
+ if (inputType.isFloatish()) {
+ return true;
+ }
+
+ return f.failf(inputNode,
+ "%s is not a subtype of signed, unsigned, double? or floatish",
+ inputType.toChars());
+}
+
+template <typename Unit>
+static bool CheckCoercedCall(FunctionValidator<Unit>& f, ParseNode* call,
+ Type ret, Type* type);
+
+template <typename Unit>
+static bool CheckCoercionArg(FunctionValidator<Unit>& f, ParseNode* arg,
+ Type expected, Type* type) {
+ MOZ_ASSERT(expected.isCanonicalValType());
+
+ if (arg->isKind(ParseNodeKind::CallExpr)) {
+ return CheckCoercedCall(f, arg, expected, type);
+ }
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType)) {
+ return false;
+ }
+
+ if (expected.isFloat()) {
+ if (!CheckFloatCoercionArg(f, arg, argType)) {
+ return false;
+ }
+ } else {
+ MOZ_CRASH("not call coercions");
+ }
+
+ *type = Type::ret(expected);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckMathFRound(FunctionValidator<Unit>& f, ParseNode* callNode,
+ Type* type) {
+ if (CallArgListLength(callNode) != 1) {
+ return f.fail(callNode, "Math.fround must be passed 1 argument");
+ }
+
+ ParseNode* argNode = CallArgList(callNode);
+ Type argType;
+ if (!CheckCoercionArg(f, argNode, Type::Float, &argType)) {
+ return false;
+ }
+
+ MOZ_ASSERT(argType == Type::Float);
+ *type = Type::Float;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckMathBuiltinCall(FunctionValidator<Unit>& f,
+ ParseNode* callNode,
+ AsmJSMathBuiltinFunction func, Type* type) {
+ unsigned arity = 0;
+ Op f32 = Op::Limit;
+ Op f64 = Op::Limit;
+ MozOp mozf64 = MozOp::Limit;
+ switch (func) {
+ case AsmJSMathBuiltin_imul:
+ return CheckMathIMul(f, callNode, type);
+ case AsmJSMathBuiltin_clz32:
+ return CheckMathClz32(f, callNode, type);
+ case AsmJSMathBuiltin_abs:
+ return CheckMathAbs(f, callNode, type);
+ case AsmJSMathBuiltin_sqrt:
+ return CheckMathSqrt(f, callNode, type);
+ case AsmJSMathBuiltin_fround:
+ return CheckMathFRound(f, callNode, type);
+ case AsmJSMathBuiltin_min:
+ return CheckMathMinMax(f, callNode, /* isMax = */ false, type);
+ case AsmJSMathBuiltin_max:
+ return CheckMathMinMax(f, callNode, /* isMax = */ true, type);
+ case AsmJSMathBuiltin_ceil:
+ arity = 1;
+ f64 = Op::F64Ceil;
+ f32 = Op::F32Ceil;
+ break;
+ case AsmJSMathBuiltin_floor:
+ arity = 1;
+ f64 = Op::F64Floor;
+ f32 = Op::F32Floor;
+ break;
+ case AsmJSMathBuiltin_sin:
+ arity = 1;
+ mozf64 = MozOp::F64Sin;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_cos:
+ arity = 1;
+ mozf64 = MozOp::F64Cos;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_tan:
+ arity = 1;
+ mozf64 = MozOp::F64Tan;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_asin:
+ arity = 1;
+ mozf64 = MozOp::F64Asin;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_acos:
+ arity = 1;
+ mozf64 = MozOp::F64Acos;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_atan:
+ arity = 1;
+ mozf64 = MozOp::F64Atan;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_exp:
+ arity = 1;
+ mozf64 = MozOp::F64Exp;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_log:
+ arity = 1;
+ mozf64 = MozOp::F64Log;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_pow:
+ arity = 2;
+ mozf64 = MozOp::F64Pow;
+ f32 = Op::Unreachable;
+ break;
+ case AsmJSMathBuiltin_atan2:
+ arity = 2;
+ mozf64 = MozOp::F64Atan2;
+ f32 = Op::Unreachable;
+ break;
+ default:
+ MOZ_CRASH("unexpected mathBuiltin function");
+ }
+
+ unsigned actualArity = CallArgListLength(callNode);
+ if (actualArity != arity) {
+ return f.failf(callNode, "call passed %u arguments, expected %u",
+ actualArity, arity);
+ }
+
+ if (!f.prepareCall(callNode)) {
+ return false;
+ }
+
+ Type firstType;
+ ParseNode* argNode = CallArgList(callNode);
+ if (!CheckExpr(f, argNode, &firstType)) {
+ return false;
+ }
+
+ if (!firstType.isMaybeFloat() && !firstType.isMaybeDouble()) {
+ return f.fail(
+ argNode,
+ "arguments to math call should be a subtype of double? or float?");
+ }
+
+ bool opIsDouble = firstType.isMaybeDouble();
+ if (!opIsDouble && f32 == Op::Unreachable) {
+ return f.fail(callNode, "math builtin cannot be used as float");
+ }
+
+ if (arity == 2) {
+ Type secondType;
+ argNode = NextNode(argNode);
+ if (!CheckExpr(f, argNode, &secondType)) {
+ return false;
+ }
+
+ if (firstType.isMaybeDouble() && !secondType.isMaybeDouble()) {
+ return f.fail(
+ argNode,
+ "both arguments to math builtin call should be the same type");
+ }
+ if (firstType.isMaybeFloat() && !secondType.isMaybeFloat()) {
+ return f.fail(
+ argNode,
+ "both arguments to math builtin call should be the same type");
+ }
+ }
+
+ if (opIsDouble) {
+ if (f64 != Op::Limit) {
+ if (!f.encoder().writeOp(f64)) {
+ return false;
+ }
+ } else {
+ if (!f.encoder().writeOp(mozf64)) {
+ return false;
+ }
+ }
+ } else {
+ if (!f.encoder().writeOp(f32)) {
+ return false;
+ }
+ }
+
+ *type = opIsDouble ? Type::Double : Type::Floatish;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckUncoercedCall(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::CallExpr));
+
+ const ModuleValidatorShared::Global* global;
+ if (IsCallToGlobal(f.m(), expr, &global) && global->isMathFunction()) {
+ return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), type);
+ }
+
+ return f.fail(
+ expr,
+ "all function calls must be calls to standard lib math functions,"
+ " ignored (via f(); or comma-expression), coerced to signed (via f()|0),"
+ " coerced to float (via fround(f())), or coerced to double (via +f())");
+}
+
+static bool CoerceResult(FunctionValidatorShared& f, ParseNode* expr,
+ Type expected, Type actual, Type* type) {
+ MOZ_ASSERT(expected.isCanonical());
+
+ // At this point, the bytecode resembles this:
+ // | the thing we wanted to coerce | current position |>
+ switch (expected.which()) {
+ case Type::Void:
+ if (!actual.isVoid()) {
+ if (!f.encoder().writeOp(Op::Drop)) {
+ return false;
+ }
+ }
+ break;
+ case Type::Int:
+ if (!actual.isIntish()) {
+ return f.failf(expr, "%s is not a subtype of intish", actual.toChars());
+ }
+ break;
+ case Type::Float:
+ if (!CheckFloatCoercionArg(f, expr, actual)) {
+ return false;
+ }
+ break;
+ case Type::Double:
+ if (actual.isMaybeDouble()) {
+ // No conversion necessary.
+ } else if (actual.isMaybeFloat()) {
+ if (!f.encoder().writeOp(Op::F64PromoteF32)) {
+ return false;
+ }
+ } else if (actual.isSigned()) {
+ if (!f.encoder().writeOp(Op::F64ConvertSI32)) {
+ return false;
+ }
+ } else if (actual.isUnsigned()) {
+ if (!f.encoder().writeOp(Op::F64ConvertUI32)) {
+ return false;
+ }
+ } else {
+ return f.failf(
+ expr, "%s is not a subtype of double?, float?, signed or unsigned",
+ actual.toChars());
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected uncoerced result type");
+ }
+
+ *type = Type::ret(expected);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckCoercedMathBuiltinCall(FunctionValidator<Unit>& f,
+ ParseNode* callNode,
+ AsmJSMathBuiltinFunction func, Type ret,
+ Type* type) {
+ Type actual;
+ if (!CheckMathBuiltinCall(f, callNode, func, &actual)) {
+ return false;
+ }
+ return CoerceResult(f, callNode, ret, actual, type);
+}
+
+template <typename Unit>
+static bool CheckCoercedCall(FunctionValidator<Unit>& f, ParseNode* call,
+ Type ret, Type* type) {
+ MOZ_ASSERT(ret.isCanonical());
+
+ if (!CheckRecursionLimitDontReport(f.cx())) {
+ return f.m().failOverRecursed();
+ }
+
+ if (IsNumericLiteral(f.m(), call)) {
+ NumLit lit = ExtractNumericLiteral(f.m(), call);
+ if (!f.writeConstExpr(lit)) {
+ return false;
+ }
+ return CoerceResult(f, call, ret, Type::lit(lit), type);
+ }
+
+ ParseNode* callee = CallCallee(call);
+
+ if (callee->isKind(ParseNodeKind::ElemExpr)) {
+ return CheckFuncPtrCall(f, call, ret, type);
+ }
+
+ if (!callee->isKind(ParseNodeKind::Name)) {
+ return f.fail(callee, "unexpected callee expression type");
+ }
+
+ const ParserName* calleeName = callee->as<NameNode>().name();
+
+ if (const ModuleValidatorShared::Global* global =
+ f.lookupGlobal(calleeName)) {
+ switch (global->which()) {
+ case ModuleValidatorShared::Global::FFI:
+ return CheckFFICall(f, call, global->ffiIndex(), ret, type);
+ case ModuleValidatorShared::Global::MathBuiltinFunction:
+ return CheckCoercedMathBuiltinCall(
+ f, call, global->mathBuiltinFunction(), ret, type);
+ case ModuleValidatorShared::Global::ConstantLiteral:
+ case ModuleValidatorShared::Global::ConstantImport:
+ case ModuleValidatorShared::Global::Variable:
+ case ModuleValidatorShared::Global::Table:
+ case ModuleValidatorShared::Global::ArrayView:
+ case ModuleValidatorShared::Global::ArrayViewCtor:
+        return f.failName(callee, "'%s' is not a callable function",
+                          calleeName);
+ case ModuleValidatorShared::Global::Function:
+ break;
+ }
+ }
+
+ return CheckInternalCall(f, call, calleeName, ret, type);
+}
+
+template <typename Unit>
+static bool CheckPos(FunctionValidator<Unit>& f, ParseNode* pos, Type* type) {
+ MOZ_ASSERT(pos->isKind(ParseNodeKind::PosExpr));
+ ParseNode* operand = UnaryKid(pos);
+
+ if (operand->isKind(ParseNodeKind::CallExpr)) {
+ return CheckCoercedCall(f, operand, Type::Double, type);
+ }
+
+ Type actual;
+ if (!CheckExpr(f, operand, &actual)) {
+ return false;
+ }
+
+ return CoerceResult(f, operand, Type::Double, actual, type);
+}
+
+template <typename Unit>
+static bool CheckNot(FunctionValidator<Unit>& f, ParseNode* expr, Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::NotExpr));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (!operandType.isInt()) {
+ return f.failf(operand, "%s is not a subtype of int",
+ operandType.toChars());
+ }
+
+ *type = Type::Int;
+ return f.encoder().writeOp(Op::I32Eqz);
+}
+
+template <typename Unit>
+static bool CheckNeg(FunctionValidator<Unit>& f, ParseNode* expr, Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::NegExpr));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (operandType.isInt()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(MozOp::I32Neg);
+ }
+
+ if (operandType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Neg);
+ }
+
+ if (operandType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Neg);
+ }
+
+ return f.failf(operand, "%s is not a subtype of int, float? or double?",
+ operandType.toChars());
+}
+
+template <typename Unit>
+static bool CheckCoerceToInt(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::BitNotExpr));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (operandType.isMaybeDouble() || operandType.isMaybeFloat()) {
+ *type = Type::Signed;
+ Op opcode =
+ operandType.isMaybeDouble() ? Op::I32TruncSF64 : Op::I32TruncSF32;
+ return f.encoder().writeOp(opcode);
+ }
+
+ if (!operandType.isIntish()) {
+ return f.failf(operand, "%s is not a subtype of double?, float? or intish",
+ operandType.toChars());
+ }
+
+ *type = Type::Signed;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckBitNot(FunctionValidator<Unit>& f, ParseNode* neg,
+ Type* type) {
+ MOZ_ASSERT(neg->isKind(ParseNodeKind::BitNotExpr));
+ ParseNode* operand = UnaryKid(neg);
+
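+  // `~~expr` is the asm.js idiom for truncating a double or float to signed,
+  // so treat the inner ~ as a coercion rather than a bitwise negation.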
+ if (operand->isKind(ParseNodeKind::BitNotExpr)) {
+ return CheckCoerceToInt(f, operand, type);
+ }
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType)) {
+ return false;
+ }
+
+ if (!operandType.isIntish()) {
+ return f.failf(operand, "%s is not a subtype of intish",
+ operandType.toChars());
+ }
+
+ if (!f.encoder().writeOp(MozOp::I32BitNot)) {
+ return false;
+ }
+
+ *type = Type::Signed;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckAsExprStatement(FunctionValidator<Unit>& f,
+ ParseNode* exprStmt);
+
+template <typename Unit>
+static bool CheckComma(FunctionValidator<Unit>& f, ParseNode* comma,
+ Type* type) {
+ MOZ_ASSERT(comma->isKind(ParseNodeKind::CommaExpr));
+ ParseNode* operands = ListHead(comma);
+
+ // The block depth isn't taken into account here, because a comma list can't
+  // contain breaks, continues, or nested control flow structures.
+ if (!f.encoder().writeOp(Op::Block)) {
+ return false;
+ }
+
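+  // The block's result type isn't known until the last operand is checked, so
+  // write a patchable type byte here and patch it below.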
+ size_t typeAt;
+ if (!f.encoder().writePatchableFixedU7(&typeAt)) {
+ return false;
+ }
+
+ ParseNode* pn = operands;
+ for (; NextNode(pn); pn = NextNode(pn)) {
+ if (!CheckAsExprStatement(f, pn)) {
+ return false;
+ }
+ }
+
+ if (!CheckExpr(f, pn, type)) {
+ return false;
+ }
+
+ f.encoder().patchFixedU7(typeAt, uint8_t(type->toWasmBlockSignatureType()));
+
+ return f.encoder().writeOp(Op::End);
+}
+
+template <typename Unit>
+static bool CheckConditional(FunctionValidator<Unit>& f, ParseNode* ternary,
+ Type* type) {
+ MOZ_ASSERT(ternary->isKind(ParseNodeKind::ConditionalExpr));
+
+ ParseNode* cond = TernaryKid1(ternary);
+ ParseNode* thenExpr = TernaryKid2(ternary);
+ ParseNode* elseExpr = TernaryKid3(ternary);
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ size_t typeAt;
+ if (!f.pushIf(&typeAt)) {
+ return false;
+ }
+
+ Type thenType;
+ if (!CheckExpr(f, thenExpr, &thenType)) {
+ return false;
+ }
+
+ if (!f.switchToElse()) {
+ return false;
+ }
+
+ Type elseType;
+ if (!CheckExpr(f, elseExpr, &elseType)) {
+ return false;
+ }
+
+ if (thenType.isInt() && elseType.isInt()) {
+ *type = Type::Int;
+ } else if (thenType.isDouble() && elseType.isDouble()) {
+ *type = Type::Double;
+ } else if (thenType.isFloat() && elseType.isFloat()) {
+ *type = Type::Float;
+ } else {
+ return f.failf(
+ ternary,
+ "then/else branches of conditional must both produce int, float, "
+ "double, current types are %s and %s",
+ thenType.toChars(), elseType.toChars());
+ }
+
+ if (!f.popIf(typeAt, type->toWasmBlockSignatureType())) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool IsValidIntMultiplyConstant(ModuleValidator<Unit>& m,
+ ParseNode* expr) {
+ if (!IsNumericLiteral(m, expr)) {
+ return false;
+ }
+
+ NumLit lit = ExtractNumericLiteral(m, expr);
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ if (Abs(lit.toInt32()) < (uint32_t(1) << 20)) {
+ return true;
+ }
+ return false;
+ case NumLit::BigUnsigned:
+ case NumLit::Double:
+ case NumLit::Float:
+ case NumLit::OutOfRangeInt:
+ return false;
+ }
+
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad literal");
+}
+
+template <typename Unit>
+static bool CheckMultiply(FunctionValidator<Unit>& f, ParseNode* star,
+ Type* type) {
+ MOZ_ASSERT(star->isKind(ParseNodeKind::MulExpr));
+ ParseNode* lhs = MultiplyLeft(star);
+ ParseNode* rhs = MultiplyRight(star);
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (lhsType.isInt() && rhsType.isInt()) {
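+    // Requiring one operand to be a small literal keeps the full product
+    // exactly representable as a double, so I32Mul matches JS semantics.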
+ if (!IsValidIntMultiplyConstant(f.m(), lhs) &&
+ !IsValidIntMultiplyConstant(f.m(), rhs)) {
+ return f.fail(
+ star,
+ "one arg to int multiply must be a small (-2^20, 2^20) int literal");
+ }
+ *type = Type::Intish;
+ return f.encoder().writeOp(Op::I32Mul);
+ }
+
+ if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Mul);
+ }
+
+ if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Mul);
+ }
+
+ return f.fail(
+ star, "multiply operands must be both int, both double? or both float?");
+}
+
+template <typename Unit>
+static bool CheckAddOrSub(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type, unsigned* numAddOrSubOut = nullptr) {
+ if (!CheckRecursionLimitDontReport(f.cx())) {
+ return f.m().failOverRecursed();
+ }
+
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::AddExpr) ||
+ expr->isKind(ParseNodeKind::SubExpr));
+ ParseNode* lhs = AddSubLeft(expr);
+ ParseNode* rhs = AddSubRight(expr);
+
+ Type lhsType, rhsType;
+ unsigned lhsNumAddOrSub, rhsNumAddOrSub;
+
+ if (lhs->isKind(ParseNodeKind::AddExpr) ||
+ lhs->isKind(ParseNodeKind::SubExpr)) {
+ if (!CheckAddOrSub(f, lhs, &lhsType, &lhsNumAddOrSub)) {
+ return false;
+ }
+ if (lhsType == Type::Intish) {
+ lhsType = Type::Int;
+ }
+ } else {
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ lhsNumAddOrSub = 0;
+ }
+
+ if (rhs->isKind(ParseNodeKind::AddExpr) ||
+ rhs->isKind(ParseNodeKind::SubExpr)) {
+ if (!CheckAddOrSub(f, rhs, &rhsType, &rhsNumAddOrSub)) {
+ return false;
+ }
+ if (rhsType == Type::Intish) {
+ rhsType = Type::Int;
+ }
+ } else {
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+ rhsNumAddOrSub = 0;
+ }
+
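+  // An uncoerced chain of int additions produces an intish value; capping the
+  // chain at 2^20 operations keeps the running total exactly representable as
+  // a double, as asm.js requires.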
+ unsigned numAddOrSub = lhsNumAddOrSub + rhsNumAddOrSub + 1;
+ if (numAddOrSub > (1 << 20)) {
+ return f.fail(expr, "too many + or - without intervening coercion");
+ }
+
+ if (lhsType.isInt() && rhsType.isInt()) {
+ if (!f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::AddExpr) ? Op::I32Add : Op::I32Sub)) {
+ return false;
+ }
+ *type = Type::Intish;
+ } else if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ if (!f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::AddExpr) ? Op::F64Add : Op::F64Sub)) {
+ return false;
+ }
+ *type = Type::Double;
+ } else if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ if (!f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::AddExpr) ? Op::F32Add : Op::F32Sub)) {
+ return false;
+ }
+ *type = Type::Floatish;
+ } else {
+ return f.failf(
+ expr,
+ "operands to + or - must both be int, float? or double?, got %s and %s",
+ lhsType.toChars(), rhsType.toChars());
+ }
+
+ if (numAddOrSubOut) {
+ *numAddOrSubOut = numAddOrSub;
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckDivOrMod(FunctionValidator<Unit>& f, ParseNode* expr,
+ Type* type) {
+ MOZ_ASSERT(expr->isKind(ParseNodeKind::DivExpr) ||
+ expr->isKind(ParseNodeKind::ModExpr));
+
+ ParseNode* lhs = DivOrModLeft(expr);
+ ParseNode* rhs = DivOrModRight(expr);
+
+ Type lhsType, rhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ *type = Type::Double;
+ if (expr->isKind(ParseNodeKind::DivExpr)) {
+ return f.encoder().writeOp(Op::F64Div);
+ }
+ return f.encoder().writeOp(MozOp::F64Mod);
+ }
+
+ if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ if (expr->isKind(ParseNodeKind::DivExpr)) {
+ return f.encoder().writeOp(Op::F32Div);
+ } else {
+ return f.fail(expr, "modulo cannot receive float arguments");
+ }
+ }
+
+ if (lhsType.isSigned() && rhsType.isSigned()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::DivExpr) ? Op::I32DivS : Op::I32RemS);
+ }
+
+ if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(
+ expr->isKind(ParseNodeKind::DivExpr) ? Op::I32DivU : Op::I32RemU);
+ }
+
+ return f.failf(
+ expr,
+ "arguments to / or %% must both be double?, float?, signed, or unsigned; "
+ "%s and %s are given",
+ lhsType.toChars(), rhsType.toChars());
+}
+
+template <typename Unit>
+static bool CheckComparison(FunctionValidator<Unit>& f, ParseNode* comp,
+ Type* type) {
+ MOZ_ASSERT(comp->isKind(ParseNodeKind::LtExpr) ||
+ comp->isKind(ParseNodeKind::LeExpr) ||
+ comp->isKind(ParseNodeKind::GtExpr) ||
+ comp->isKind(ParseNodeKind::GeExpr) ||
+ comp->isKind(ParseNodeKind::EqExpr) ||
+ comp->isKind(ParseNodeKind::NeExpr));
+
+ ParseNode* lhs = ComparisonLeft(comp);
+ ParseNode* rhs = ComparisonRight(comp);
+
+ Type lhsType, rhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!(lhsType.isSigned() && rhsType.isSigned()) &&
+ !(lhsType.isUnsigned() && rhsType.isUnsigned()) &&
+ !(lhsType.isDouble() && rhsType.isDouble()) &&
+ !(lhsType.isFloat() && rhsType.isFloat())) {
+ return f.failf(comp,
+ "arguments to a comparison must both be signed, unsigned, "
+ "floats or doubles; "
+ "%s and %s are given",
+ lhsType.toChars(), rhsType.toChars());
+ }
+
+ Op stmt;
+ if (lhsType.isSigned() && rhsType.isSigned()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::I32Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::I32Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::I32LtS;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::I32LeS;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::I32GtS;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::I32GeS;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::I32Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::I32Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::I32LtU;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::I32LeU;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::I32GtU;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::I32GeU;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isDouble()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::F64Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::F64Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::F64Lt;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::F64Le;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::F64Gt;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::F64Ge;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isFloat()) {
+ switch (comp->getKind()) {
+ case ParseNodeKind::EqExpr:
+ stmt = Op::F32Eq;
+ break;
+ case ParseNodeKind::NeExpr:
+ stmt = Op::F32Ne;
+ break;
+ case ParseNodeKind::LtExpr:
+ stmt = Op::F32Lt;
+ break;
+ case ParseNodeKind::LeExpr:
+ stmt = Op::F32Le;
+ break;
+ case ParseNodeKind::GtExpr:
+ stmt = Op::F32Gt;
+ break;
+ case ParseNodeKind::GeExpr:
+ stmt = Op::F32Ge;
+ break;
+ default:
+ MOZ_CRASH("unexpected comparison op");
+ }
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ *type = Type::Int;
+ return f.encoder().writeOp(stmt);
+}
+
+template <typename Unit>
+static bool CheckBitwise(FunctionValidator<Unit>& f, ParseNode* bitwise,
+ Type* type) {
+ ParseNode* lhs = BitwiseLeft(bitwise);
+ ParseNode* rhs = BitwiseRight(bitwise);
+
+ int32_t identityElement;
+ bool onlyOnRight;
+ switch (bitwise->getKind()) {
+ case ParseNodeKind::BitOrExpr:
+ identityElement = 0;
+ onlyOnRight = false;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::BitAndExpr:
+ identityElement = -1;
+ onlyOnRight = false;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::BitXorExpr:
+ identityElement = 0;
+ onlyOnRight = false;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::LshExpr:
+ identityElement = 0;
+ onlyOnRight = true;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::RshExpr:
+ identityElement = 0;
+ onlyOnRight = true;
+ *type = Type::Signed;
+ break;
+ case ParseNodeKind::UrshExpr:
+ identityElement = 0;
+ onlyOnRight = true;
+ *type = Type::Unsigned;
+ break;
+ default:
+ MOZ_CRASH("not a bitwise op");
+ }
+
+ uint32_t i;
+ if (!onlyOnRight && IsLiteralInt(f.m(), lhs, &i) &&
+ i == uint32_t(identityElement)) {
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+ if (!rhsType.isIntish()) {
+ return f.failf(bitwise, "%s is not a subtype of intish",
+ rhsType.toChars());
+ }
+ return true;
+ }
+
+ if (IsLiteralInt(f.m(), rhs, &i) && i == uint32_t(identityElement)) {
+ if (bitwise->isKind(ParseNodeKind::BitOrExpr) &&
+ lhs->isKind(ParseNodeKind::CallExpr)) {
+ return CheckCoercedCall(f, lhs, Type::Int, type);
+ }
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+ if (!lhsType.isIntish()) {
+ return f.failf(bitwise, "%s is not a subtype of intish",
+ lhsType.toChars());
+ }
+ return true;
+ }
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType)) {
+ return false;
+ }
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType)) {
+ return false;
+ }
+
+ if (!lhsType.isIntish()) {
+ return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
+ }
+ if (!rhsType.isIntish()) {
+ return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
+ }
+
+ switch (bitwise->getKind()) {
+ case ParseNodeKind::BitOrExpr:
+ if (!f.encoder().writeOp(Op::I32Or)) return false;
+ break;
+ case ParseNodeKind::BitAndExpr:
+ if (!f.encoder().writeOp(Op::I32And)) return false;
+ break;
+ case ParseNodeKind::BitXorExpr:
+ if (!f.encoder().writeOp(Op::I32Xor)) return false;
+ break;
+ case ParseNodeKind::LshExpr:
+ if (!f.encoder().writeOp(Op::I32Shl)) return false;
+ break;
+ case ParseNodeKind::RshExpr:
+ if (!f.encoder().writeOp(Op::I32ShrS)) return false;
+ break;
+ case ParseNodeKind::UrshExpr:
+ if (!f.encoder().writeOp(Op::I32ShrU)) return false;
+ break;
+ default:
+ MOZ_CRASH("not a bitwise op");
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckExpr(FunctionValidator<Unit>& f, ParseNode* expr, Type* type) {
+ if (!CheckRecursionLimitDontReport(f.cx())) {
+ return f.m().failOverRecursed();
+ }
+
+ if (IsNumericLiteral(f.m(), expr)) {
+ return CheckNumericLiteral(f, expr, type);
+ }
+
+ switch (expr->getKind()) {
+ case ParseNodeKind::Name:
+ return CheckVarRef(f, expr, type);
+ case ParseNodeKind::ElemExpr:
+ return CheckLoadArray(f, expr, type);
+ case ParseNodeKind::AssignExpr:
+ return CheckAssign(f, expr, type);
+ case ParseNodeKind::PosExpr:
+ return CheckPos(f, expr, type);
+ case ParseNodeKind::NotExpr:
+ return CheckNot(f, expr, type);
+ case ParseNodeKind::NegExpr:
+ return CheckNeg(f, expr, type);
+ case ParseNodeKind::BitNotExpr:
+ return CheckBitNot(f, expr, type);
+ case ParseNodeKind::CommaExpr:
+ return CheckComma(f, expr, type);
+ case ParseNodeKind::ConditionalExpr:
+ return CheckConditional(f, expr, type);
+ case ParseNodeKind::MulExpr:
+ return CheckMultiply(f, expr, type);
+ case ParseNodeKind::CallExpr:
+ return CheckUncoercedCall(f, expr, type);
+
+ case ParseNodeKind::AddExpr:
+ case ParseNodeKind::SubExpr:
+ return CheckAddOrSub(f, expr, type);
+
+ case ParseNodeKind::DivExpr:
+ case ParseNodeKind::ModExpr:
+ return CheckDivOrMod(f, expr, type);
+
+ case ParseNodeKind::LtExpr:
+ case ParseNodeKind::LeExpr:
+ case ParseNodeKind::GtExpr:
+ case ParseNodeKind::GeExpr:
+ case ParseNodeKind::EqExpr:
+ case ParseNodeKind::NeExpr:
+ return CheckComparison(f, expr, type);
+
+ case ParseNodeKind::BitOrExpr:
+ case ParseNodeKind::BitAndExpr:
+ case ParseNodeKind::BitXorExpr:
+ case ParseNodeKind::LshExpr:
+ case ParseNodeKind::RshExpr:
+ case ParseNodeKind::UrshExpr:
+ return CheckBitwise(f, expr, type);
+
+ default:;
+ }
+
+ return f.fail(expr, "unsupported expression");
+}
+
+template <typename Unit>
+static bool CheckStatement(FunctionValidator<Unit>& f, ParseNode* stmt);
+
+template <typename Unit>
+static bool CheckAsExprStatement(FunctionValidator<Unit>& f, ParseNode* expr) {
+ if (expr->isKind(ParseNodeKind::CallExpr)) {
+ Type ignored;
+ return CheckCoercedCall(f, expr, Type::Void, &ignored);
+ }
+
+ Type resultType;
+ if (!CheckExpr(f, expr, &resultType)) {
+ return false;
+ }
+
+ if (!resultType.isVoid()) {
+ if (!f.encoder().writeOp(Op::Drop)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckExprStatement(FunctionValidator<Unit>& f,
+ ParseNode* exprStmt) {
+ MOZ_ASSERT(exprStmt->isKind(ParseNodeKind::ExpressionStmt));
+ return CheckAsExprStatement(f, UnaryKid(exprStmt));
+}
+
+template <typename Unit>
+static bool CheckLoopConditionOnEntry(FunctionValidator<Unit>& f,
+ ParseNode* cond) {
+ uint32_t maybeLit;
+ if (IsLiteralInt(f.m(), cond, &maybeLit) && maybeLit) {
+ return true;
+ }
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ if (!f.encoder().writeOp(Op::I32Eqz)) {
+ return false;
+ }
+
+ // brIf (i32.eqz $f) $out
+ if (!f.writeBreakIf()) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckWhile(FunctionValidator<Unit>& f, ParseNode* whileStmt,
+ const LabelVector* labels = nullptr) {
+ MOZ_ASSERT(whileStmt->isKind(ParseNodeKind::WhileStmt));
+ ParseNode* cond = BinaryLeft(whileStmt);
+ ParseNode* body = BinaryRight(whileStmt);
+
+ // A while loop `while(#cond) #body` is equivalent to:
+ // (block $after_loop
+ // (loop $top
+ // (brIf $after_loop (i32.eq 0 #cond))
+ // #body
+ // (br $top)
+ // )
+ // )
+ if (labels && !f.addLabels(*labels, 0, 1)) {
+ return false;
+ }
+
+ if (!f.pushLoop()) {
+ return false;
+ }
+
+ if (!CheckLoopConditionOnEntry(f, cond)) {
+ return false;
+ }
+ if (!CheckStatement(f, body)) {
+ return false;
+ }
+ if (!f.writeContinue()) {
+ return false;
+ }
+
+ if (!f.popLoop()) {
+ return false;
+ }
+ if (labels) {
+ f.removeLabels(*labels);
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFor(FunctionValidator<Unit>& f, ParseNode* forStmt,
+ const LabelVector* labels = nullptr) {
+ MOZ_ASSERT(forStmt->isKind(ParseNodeKind::ForStmt));
+ ParseNode* forHead = BinaryLeft(forStmt);
+ ParseNode* body = BinaryRight(forStmt);
+
+ if (!forHead->isKind(ParseNodeKind::ForHead)) {
+ return f.fail(forHead, "unsupported for-loop statement");
+ }
+
+ ParseNode* maybeInit = TernaryKid1(forHead);
+ ParseNode* maybeCond = TernaryKid2(forHead);
+ ParseNode* maybeInc = TernaryKid3(forHead);
+
+ // A for-loop `for (#init; #cond; #inc) #body` is equivalent to:
+ // (block // depth X
+ // (#init)
+ // (block $after_loop // depth X+1 (block)
+ // (loop $loop_top // depth X+2 (loop)
+  //       (brIf $after_loop (eq 0 #cond))
+ // (block $after_body #body) // depth X+3
+ // #inc
+ // (br $loop_top)
+ // )
+ // )
+ // )
+ // A break in the body should break out to $after_loop, i.e. depth + 1.
+ // A continue in the body should break out to $after_body, i.e. depth + 3.
+ if (labels && !f.addLabels(*labels, 1, 3)) {
+ return false;
+ }
+
+ if (!f.pushUnbreakableBlock()) {
+ return false;
+ }
+
+ if (maybeInit && !CheckAsExprStatement(f, maybeInit)) {
+ return false;
+ }
+
+ {
+ if (!f.pushLoop()) {
+ return false;
+ }
+
+ if (maybeCond && !CheckLoopConditionOnEntry(f, maybeCond)) {
+ return false;
+ }
+
+ {
+ // Continuing in the body should just break out to the increment.
+ if (!f.pushContinuableBlock()) {
+ return false;
+ }
+ if (!CheckStatement(f, body)) {
+ return false;
+ }
+ if (!f.popContinuableBlock()) {
+ return false;
+ }
+ }
+
+ if (maybeInc && !CheckAsExprStatement(f, maybeInc)) {
+ return false;
+ }
+
+ if (!f.writeContinue()) {
+ return false;
+ }
+ if (!f.popLoop()) {
+ return false;
+ }
+ }
+
+ if (!f.popUnbreakableBlock()) {
+ return false;
+ }
+
+ if (labels) {
+ f.removeLabels(*labels);
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckDoWhile(FunctionValidator<Unit>& f, ParseNode* whileStmt,
+ const LabelVector* labels = nullptr) {
+ MOZ_ASSERT(whileStmt->isKind(ParseNodeKind::DoWhileStmt));
+ ParseNode* body = BinaryLeft(whileStmt);
+ ParseNode* cond = BinaryRight(whileStmt);
+
+ // A do-while loop `do { #body } while (#cond)` is equivalent to:
+ // (block $after_loop // depth X
+ // (loop $top // depth X+1
+ // (block #body) // depth X+2
+ // (brIf #cond $top)
+ // )
+ // )
+ // A break should break out of the entire loop, i.e. at depth 0.
+ // A continue should break out to the condition, i.e. at depth 2.
+ if (labels && !f.addLabels(*labels, 0, 2)) {
+ return false;
+ }
+
+ if (!f.pushLoop()) {
+ return false;
+ }
+
+ {
+ // An unlabeled continue in the body should break out to the condition.
+ if (!f.pushContinuableBlock()) {
+ return false;
+ }
+ if (!CheckStatement(f, body)) {
+ return false;
+ }
+ if (!f.popContinuableBlock()) {
+ return false;
+ }
+ }
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ if (!f.writeContinueIf()) {
+ return false;
+ }
+
+ if (!f.popLoop()) {
+ return false;
+ }
+ if (labels) {
+ f.removeLabels(*labels);
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckStatementList(FunctionValidator<Unit>& f, ParseNode*,
+ const LabelVector* = nullptr);
+
+template <typename Unit>
+static bool CheckLabel(FunctionValidator<Unit>& f, ParseNode* labeledStmt) {
+ MOZ_ASSERT(labeledStmt->isKind(ParseNodeKind::LabelStmt));
+
+ LabelVector labels;
+ ParseNode* innermost = labeledStmt;
+ do {
+ if (!labels.append(LabeledStatementLabel(innermost))) {
+ return false;
+ }
+ innermost = LabeledStatementStatement(innermost);
+ } while (innermost->getKind() == ParseNodeKind::LabelStmt);
+
+ switch (innermost->getKind()) {
+ case ParseNodeKind::ForStmt:
+ return CheckFor(f, innermost, &labels);
+ case ParseNodeKind::DoWhileStmt:
+ return CheckDoWhile(f, innermost, &labels);
+ case ParseNodeKind::WhileStmt:
+ return CheckWhile(f, innermost, &labels);
+ case ParseNodeKind::StatementList:
+ return CheckStatementList(f, innermost, &labels);
+ default:
+ break;
+ }
+
+ if (!f.pushUnbreakableBlock(&labels)) {
+ return false;
+ }
+
+ if (!CheckStatement(f, innermost)) {
+ return false;
+ }
+
+ if (!f.popUnbreakableBlock(&labels)) {
+ return false;
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckIf(FunctionValidator<Unit>& f, ParseNode* ifStmt) {
+ uint32_t numIfEnd = 1;
+
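+  // else-if chains are handled iteratively (via the recurse label) rather
+  // than recursively; numIfEnd counts how many If blocks to pop at the end.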
+recurse:
+ MOZ_ASSERT(ifStmt->isKind(ParseNodeKind::IfStmt));
+ ParseNode* cond = TernaryKid1(ifStmt);
+ ParseNode* thenStmt = TernaryKid2(ifStmt);
+ ParseNode* elseStmt = TernaryKid3(ifStmt);
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType)) {
+ return false;
+ }
+ if (!condType.isInt()) {
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+ }
+
+ size_t typeAt;
+ if (!f.pushIf(&typeAt)) {
+ return false;
+ }
+
+ f.setIfType(typeAt, TypeCode::BlockVoid);
+
+ if (!CheckStatement(f, thenStmt)) {
+ return false;
+ }
+
+ if (elseStmt) {
+ if (!f.switchToElse()) {
+ return false;
+ }
+
+ if (elseStmt->isKind(ParseNodeKind::IfStmt)) {
+ ifStmt = elseStmt;
+ if (numIfEnd++ == UINT32_MAX) {
+ return false;
+ }
+ goto recurse;
+ }
+
+ if (!CheckStatement(f, elseStmt)) {
+ return false;
+ }
+ }
+
+ for (uint32_t i = 0; i != numIfEnd; ++i) {
+ if (!f.popIf()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool CheckCaseExpr(FunctionValidatorShared& f, ParseNode* caseExpr,
+ int32_t* value) {
+ if (!IsNumericLiteral(f.m(), caseExpr)) {
+ return f.fail(caseExpr,
+ "switch case expression must be an integer literal");
+ }
+
+ NumLit lit = ExtractNumericLiteral(f.m(), caseExpr);
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ *value = lit.toInt32();
+ break;
+ case NumLit::OutOfRangeInt:
+ case NumLit::BigUnsigned:
+ return f.fail(caseExpr, "switch case expression out of integer range");
+ case NumLit::Double:
+ case NumLit::Float:
+ return f.fail(caseExpr,
+ "switch case expression must be an integer literal");
+ }
+
+ return true;
+}
+
+static bool CheckDefaultAtEnd(FunctionValidatorShared& f, ParseNode* stmt) {
+ for (; stmt; stmt = NextNode(stmt)) {
+ if (IsDefaultCase(stmt) && NextNode(stmt) != nullptr) {
+ return f.fail(stmt, "default label must be at the end");
+ }
+ }
+
+ return true;
+}
+
+static bool CheckSwitchRange(FunctionValidatorShared& f, ParseNode* stmt,
+ int32_t* low, int32_t* high,
+ uint32_t* tableLength) {
+ if (IsDefaultCase(stmt)) {
+ *low = 0;
+ *high = -1;
+ *tableLength = 0;
+ return true;
+ }
+
+ int32_t i = 0;
+ if (!CheckCaseExpr(f, CaseExpr(stmt), &i)) {
+ return false;
+ }
+
+ *low = *high = i;
+
+ ParseNode* initialStmt = stmt;
+ for (stmt = NextNode(stmt); stmt && !IsDefaultCase(stmt);
+ stmt = NextNode(stmt)) {
+ int32_t i = 0;
+ if (!CheckCaseExpr(f, CaseExpr(stmt), &i)) {
+ return false;
+ }
+
+ *low = std::min(*low, i);
+ *high = std::max(*high, i);
+ }
+
+ int64_t i64 = (int64_t(*high) - int64_t(*low)) + 1;
+ if (i64 > MaxBrTableElems) {
+ return f.fail(
+ initialStmt,
+ "all switch statements generate tables; this table would be too big");
+ }
+
+ *tableLength = uint32_t(i64);
+ return true;
+}
+
+template <typename Unit>
+static bool CheckSwitchExpr(FunctionValidator<Unit>& f, ParseNode* switchExpr) {
+ Type exprType;
+ if (!CheckExpr(f, switchExpr, &exprType)) {
+ return false;
+ }
+ if (!exprType.isSigned()) {
+ return f.failf(switchExpr, "%s is not a subtype of signed",
+ exprType.toChars());
+ }
+ return true;
+}
+
+// A switch will be constructed as:
+// - the default block wrapping all the other blocks, to be able to break
+// out of the switch with an unlabeled break statement. It has two statements
+// (an inner block and the default expr). asm.js rules require default to be at
+// the end, so the default block always encloses all the cases blocks.
+// - one block per case between low and high; undefined cases just jump to the
+// default case. Each of these blocks contain two statements: the next case's
+// block and the possibly empty statement list comprising the case body. The
+// last block pushed is the first case so the (relative) branch target therefore
+// matches the sequential order of cases.
+// - one block for the br_table, so that the first break goes to the first
+// case's block.
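+// For example, `switch (x|0) { case 1: A; case 3: B; default: C; }` has low=1,
+// high=3 and tableLength=3; the br_table is built over (x - 1) with targets
+// [case 1, default, case 3], and out-of-range values fall through to default.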
+template <typename Unit>
+static bool CheckSwitch(FunctionValidator<Unit>& f, ParseNode* switchStmt) {
+ MOZ_ASSERT(switchStmt->isKind(ParseNodeKind::SwitchStmt));
+
+ ParseNode* switchExpr = BinaryLeft(switchStmt);
+ ParseNode* switchBody = BinaryRight(switchStmt);
+
+ if (switchBody->is<LexicalScopeNode>()) {
+ LexicalScopeNode* scope = &switchBody->as<LexicalScopeNode>();
+ if (!scope->isEmptyScope()) {
+ return f.fail(scope, "switch body may not contain lexical declarations");
+ }
+ switchBody = scope->scopeBody();
+ }
+
+ ParseNode* stmt = ListHead(switchBody);
+ if (!stmt) {
+ if (!CheckSwitchExpr(f, switchExpr)) {
+ return false;
+ }
+ if (!f.encoder().writeOp(Op::Drop)) {
+ return false;
+ }
+ return true;
+ }
+
+ if (!CheckDefaultAtEnd(f, stmt)) {
+ return false;
+ }
+
+ int32_t low = 0, high = 0;
+ uint32_t tableLength = 0;
+ if (!CheckSwitchRange(f, stmt, &low, &high, &tableLength)) {
+ return false;
+ }
+
+ static const uint32_t CASE_NOT_DEFINED = UINT32_MAX;
+
+ Uint32Vector caseDepths;
+ if (!caseDepths.appendN(CASE_NOT_DEFINED, tableLength)) {
+ return false;
+ }
+
+ uint32_t numCases = 0;
+ for (ParseNode* s = stmt; s && !IsDefaultCase(s); s = NextNode(s)) {
+ int32_t caseValue = ExtractNumericLiteral(f.m(), CaseExpr(s)).toInt32();
+
+ MOZ_ASSERT(caseValue >= low);
+ unsigned i = caseValue - low;
+ if (caseDepths[i] != CASE_NOT_DEFINED) {
+ return f.fail(s, "no duplicate case labels");
+ }
+
+ MOZ_ASSERT(numCases != CASE_NOT_DEFINED);
+ caseDepths[i] = numCases++;
+ }
+
+ // Open the wrapping breakable default block.
+ if (!f.pushBreakableBlock()) {
+ return false;
+ }
+
+ // Open all the case blocks.
+ for (uint32_t i = 0; i < numCases; i++) {
+ if (!f.pushUnbreakableBlock()) {
+ return false;
+ }
+ }
+
+ // Open the br_table block.
+ if (!f.pushUnbreakableBlock()) {
+ return false;
+ }
+
+ // The default block is the last one.
+ uint32_t defaultDepth = numCases;
+
+ // Subtract lowest case value, so that all the cases start from 0.
+ if (low) {
+ if (!CheckSwitchExpr(f, switchExpr)) {
+ return false;
+ }
+ if (!f.writeInt32Lit(low)) {
+ return false;
+ }
+ if (!f.encoder().writeOp(Op::I32Sub)) {
+ return false;
+ }
+ } else {
+ if (!CheckSwitchExpr(f, switchExpr)) {
+ return false;
+ }
+ }
+
+ // Start the br_table block.
+ if (!f.encoder().writeOp(Op::BrTable)) {
+ return false;
+ }
+
+  // Write the number of cases (tableLength - 1 + 1 (default)).
+ if (!f.encoder().writeVarU32(tableLength)) {
+ return false;
+ }
+
+ // Each case value describes the relative depth to the actual block. When
+ // a case is not explicitly defined, it goes to the default.
+ for (size_t i = 0; i < tableLength; i++) {
+ uint32_t target =
+ caseDepths[i] == CASE_NOT_DEFINED ? defaultDepth : caseDepths[i];
+ if (!f.encoder().writeVarU32(target)) {
+ return false;
+ }
+ }
+
+ // Write the default depth.
+ if (!f.encoder().writeVarU32(defaultDepth)) {
+ return false;
+ }
+
+ // Our br_table is done. Close its block, write the cases down in order.
+ if (!f.popUnbreakableBlock()) {
+ return false;
+ }
+
+ for (; stmt && !IsDefaultCase(stmt); stmt = NextNode(stmt)) {
+ if (!CheckStatement(f, CaseBody(stmt))) {
+ return false;
+ }
+ if (!f.popUnbreakableBlock()) {
+ return false;
+ }
+ }
+
+ // Write the default block.
+ if (stmt && IsDefaultCase(stmt)) {
+ if (!CheckStatement(f, CaseBody(stmt))) {
+ return false;
+ }
+ }
+
+ // Close the wrapping block.
+ if (!f.popBreakableBlock()) {
+ return false;
+ }
+ return true;
+}
+
+static bool CheckReturnType(FunctionValidatorShared& f, ParseNode* usepn,
+ Type ret) {
+ Maybe<ValType> type = ret.canonicalToReturnType();
+
+ if (!f.hasAlreadyReturned()) {
+ f.setReturnedType(type);
+ return true;
+ }
+
+ if (f.returnedType() != type) {
+ return f.failf(usepn, "%s incompatible with previous return of type %s",
+ ToString(type).get(), ToString(f.returnedType()).get());
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckReturn(FunctionValidator<Unit>& f, ParseNode* returnStmt) {
+ ParseNode* expr = ReturnExpr(returnStmt);
+
+ if (!expr) {
+ if (!CheckReturnType(f, returnStmt, Type::Void)) {
+ return false;
+ }
+ } else {
+ Type type;
+ if (!CheckExpr(f, expr, &type)) {
+ return false;
+ }
+
+ if (!type.isReturnType()) {
+ return f.failf(expr, "%s is not a valid return type", type.toChars());
+ }
+
+ if (!CheckReturnType(f, expr, Type::canonicalize(type))) {
+ return false;
+ }
+ }
+
+ if (!f.encoder().writeOp(Op::Return)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckStatementList(FunctionValidator<Unit>& f, ParseNode* stmtList,
+ const LabelVector* labels /*= nullptr */) {
+ MOZ_ASSERT(stmtList->isKind(ParseNodeKind::StatementList));
+
+ if (!f.pushUnbreakableBlock(labels)) {
+ return false;
+ }
+
+ for (ParseNode* stmt = ListHead(stmtList); stmt; stmt = NextNode(stmt)) {
+ if (!CheckStatement(f, stmt)) {
+ return false;
+ }
+ }
+
+ if (!f.popUnbreakableBlock(labels)) {
+ return false;
+ }
+ return true;
+}
+
+template <typename Unit>
+static bool CheckLexicalScope(FunctionValidator<Unit>& f, ParseNode* node) {
+ LexicalScopeNode* lexicalScope = &node->as<LexicalScopeNode>();
+ if (!lexicalScope->isEmptyScope()) {
+ return f.fail(lexicalScope, "cannot have 'let' or 'const' declarations");
+ }
+
+ return CheckStatement(f, lexicalScope->scopeBody());
+}
+
+static bool CheckBreakOrContinue(FunctionValidatorShared& f, bool isBreak,
+ ParseNode* stmt) {
+ if (const ParserName* maybeLabel = LoopControlMaybeLabel(stmt)) {
+ return f.writeLabeledBreakOrContinue(maybeLabel, isBreak);
+ }
+ return f.writeUnlabeledBreakOrContinue(isBreak);
+}
+
+template <typename Unit>
+static bool CheckStatement(FunctionValidator<Unit>& f, ParseNode* stmt) {
+ if (!CheckRecursionLimitDontReport(f.cx())) {
+ return f.m().failOverRecursed();
+ }
+
+ switch (stmt->getKind()) {
+ case ParseNodeKind::EmptyStmt:
+ return true;
+ case ParseNodeKind::ExpressionStmt:
+ return CheckExprStatement(f, stmt);
+ case ParseNodeKind::WhileStmt:
+ return CheckWhile(f, stmt);
+ case ParseNodeKind::ForStmt:
+ return CheckFor(f, stmt);
+ case ParseNodeKind::DoWhileStmt:
+ return CheckDoWhile(f, stmt);
+ case ParseNodeKind::LabelStmt:
+ return CheckLabel(f, stmt);
+ case ParseNodeKind::IfStmt:
+ return CheckIf(f, stmt);
+ case ParseNodeKind::SwitchStmt:
+ return CheckSwitch(f, stmt);
+ case ParseNodeKind::ReturnStmt:
+ return CheckReturn(f, stmt);
+ case ParseNodeKind::StatementList:
+ return CheckStatementList(f, stmt);
+ case ParseNodeKind::BreakStmt:
+ return CheckBreakOrContinue(f, true, stmt);
+ case ParseNodeKind::ContinueStmt:
+ return CheckBreakOrContinue(f, false, stmt);
+ case ParseNodeKind::LexicalScope:
+ return CheckLexicalScope(f, stmt);
+ default:;
+ }
+
+ return f.fail(stmt, "unexpected statement kind");
+}
+
+template <typename Unit>
+static bool ParseFunction(ModuleValidator<Unit>& m, FunctionNode** funNodeOut,
+ unsigned* line) {
+ auto& tokenStream = m.tokenStream();
+
+ tokenStream.consumeKnownToken(TokenKind::Function,
+ TokenStreamShared::SlashIsRegExp);
+
+ auto& anyChars = tokenStream.anyCharsAccess();
+ uint32_t toStringStart = anyChars.currentToken().pos.begin;
+ *line = anyChars.lineNumber(anyChars.lineToken(toStringStart));
+
+ TokenKind tk;
+ if (!tokenStream.getToken(&tk, TokenStreamShared::SlashIsRegExp)) {
+ return false;
+ }
+ if (tk == TokenKind::Mul) {
+ return m.failCurrentOffset("unexpected generator function");
+ }
+ if (!TokenKindIsPossibleIdentifier(tk)) {
+ return false; // The regular parser will throw a SyntaxError, no need to
+ // m.fail.
+ }
+
+ const ParserName* name = m.parser().bindingIdentifier(YieldIsName);
+ if (!name) {
+ return false;
+ }
+
+ FunctionNode* funNode = m.parser().handler_.newFunction(
+ FunctionSyntaxKind::Statement, m.parser().pos());
+ if (!funNode) {
+ return false;
+ }
+
+ ParseContext* outerpc = m.parser().pc_;
+ Directives directives(outerpc);
+ FunctionFlags flags(FunctionFlags::INTERPRETED_NORMAL);
+ FunctionBox* funbox = m.parser().newFunctionBox(
+ funNode, name, flags, toStringStart, directives,
+ GeneratorKind::NotGenerator, FunctionAsyncKind::SyncFunction);
+ if (!funbox) {
+ return false;
+ }
+ funbox->initWithEnclosingParseContext(outerpc, flags,
+ FunctionSyntaxKind::Statement);
+
+ Directives newDirectives = directives;
+ SourceParseContext funpc(&m.parser(), funbox, &newDirectives);
+ if (!funpc.init()) {
+ return false;
+ }
+
+ if (!m.parser().functionFormalParametersAndBody(
+ InAllowed, YieldIsName, &funNode, FunctionSyntaxKind::Statement)) {
+ if (anyChars.hadError() || directives == newDirectives) {
+ return false;
+ }
+
+ return m.fail(funNode, "encountered new directive in function");
+ }
+
+ MOZ_ASSERT(!anyChars.hadError());
+ MOZ_ASSERT(directives == newDirectives);
+
+ *funNodeOut = funNode;
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFunction(ModuleValidator<Unit>& m) {
+ // asm.js modules can be quite large when represented as parse trees so pop
+ // the backing LifoAlloc after parsing/compiling each function. Release the
+ // parser's lifo memory after the last use of a parse node.
+ frontend::ParserBase::Mark mark = m.parser().mark();
+ auto releaseMark =
+ mozilla::MakeScopeExit([&m, &mark] { m.parser().release(mark); });
+
+ FunctionNode* funNode = nullptr;
+ unsigned line = 0;
+ if (!ParseFunction(m, &funNode, &line)) {
+ return false;
+ }
+
+ if (!CheckFunctionHead(m, funNode)) {
+ return false;
+ }
+
+ FunctionValidator<Unit> f(m, funNode);
+
+ ParseNode* stmtIter = ListHead(FunctionStatementList(funNode));
+
+ if (!CheckProcessingDirectives(m, &stmtIter)) {
+ return false;
+ }
+
+ ValTypeVector args;
+ if (!CheckArguments(f, &stmtIter, &args)) {
+ return false;
+ }
+
+ if (!CheckVariables(f, &stmtIter)) {
+ return false;
+ }
+
+ ParseNode* lastNonEmptyStmt = nullptr;
+ for (; stmtIter; stmtIter = NextNonEmptyStatement(stmtIter)) {
+ lastNonEmptyStmt = stmtIter;
+ if (!CheckStatement(f, stmtIter)) {
+ return false;
+ }
+ }
+
+ if (!CheckFinalReturn(f, lastNonEmptyStmt)) {
+ return false;
+ }
+
+ ValTypeVector results;
+ if (f.returnedType()) {
+ if (!results.append(f.returnedType().ref())) {
+ return false;
+ }
+ }
+
+ ModuleValidatorShared::Func* func = nullptr;
+ if (!CheckFunctionSignature(m, funNode,
+ FuncType(std::move(args), std::move(results)),
+ FunctionName(funNode), &func)) {
+ return false;
+ }
+
+ if (func->defined()) {
+ return m.failName(funNode, "function '%s' already defined",
+ FunctionName(funNode));
+ }
+
+ f.define(func, line);
+
+ return true;
+}
+
+static bool CheckAllFunctionsDefined(ModuleValidatorShared& m) {
+ for (unsigned i = 0; i < m.numFuncDefs(); i++) {
+ const ModuleValidatorShared::Func& f = m.funcDef(i);
+ if (!f.defined()) {
+ return m.failNameOffset(f.firstUse(), "missing definition of function %s",
+ f.name());
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFunctions(ModuleValidator<Unit>& m) {
+ while (true) {
+ TokenKind tk;
+ if (!PeekToken(m.parser(), &tk)) {
+ return false;
+ }
+
+ if (tk != TokenKind::Function) {
+ break;
+ }
+
+ if (!CheckFunction(m)) {
+ return false;
+ }
+ }
+
+ return CheckAllFunctionsDefined(m);
+}
+
+template <typename Unit>
+static bool CheckFuncPtrTable(ModuleValidator<Unit>& m, ParseNode* decl) {
+ if (!decl->isKind(ParseNodeKind::AssignExpr)) {
+ return m.fail(decl, "function-pointer table must have initializer");
+ }
+ AssignmentNode* assignNode = &decl->as<AssignmentNode>();
+
+ ParseNode* var = assignNode->left();
+
+ if (!var->isKind(ParseNodeKind::Name)) {
+ return m.fail(var, "function-pointer table name is not a plain name");
+ }
+
+ ParseNode* arrayLiteral = assignNode->right();
+
+ if (!arrayLiteral->isKind(ParseNodeKind::ArrayExpr)) {
+ return m.fail(
+ var, "function-pointer table's initializer must be an array literal");
+ }
+
+ unsigned length = ListLength(arrayLiteral);
+
+ if (!IsPowerOfTwo(length)) {
+ return m.failf(arrayLiteral,
+ "function-pointer table length must be a power of 2 (is %u)",
+ length);
+ }
+
+ unsigned mask = length - 1;
+
+ Uint32Vector elemFuncDefIndices;
+ const FuncType* sig = nullptr;
+ for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
+ if (!elem->isKind(ParseNodeKind::Name)) {
+ return m.fail(
+ elem, "function-pointer table's elements must be names of functions");
+ }
+
+ const ParserName* funcName = elem->as<NameNode>().name();
+ const ModuleValidatorShared::Func* func = m.lookupFuncDef(funcName);
+ if (!func) {
+ return m.fail(
+ elem, "function-pointer table's elements must be names of functions");
+ }
+
+ const FuncType& funcSig = m.env().types.funcType(func->sigIndex());
+ if (sig) {
+ if (*sig != funcSig) {
+ return m.fail(elem, "all functions in table must have same signature");
+ }
+ } else {
+ sig = &funcSig;
+ }
+
+ if (!elemFuncDefIndices.append(func->funcDefIndex())) {
+ return false;
+ }
+ }
+
+ FuncType copy;
+ if (!copy.clone(*sig)) {
+ return false;
+ }
+
+ uint32_t tableIndex;
+ if (!CheckFuncPtrTableAgainstExisting(m, var, var->as<NameNode>().name(),
+ std::move(copy), mask, &tableIndex)) {
+ return false;
+ }
+
+ if (!m.defineFuncPtrTable(tableIndex, std::move(elemFuncDefIndices))) {
+ return m.fail(var, "duplicate function-pointer definition");
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckFuncPtrTables(ModuleValidator<Unit>& m) {
+ while (true) {
+ ParseNode* varStmt;
+ if (!ParseVarOrConstStatement(m.parser(), &varStmt)) {
+ return false;
+ }
+ if (!varStmt) {
+ break;
+ }
+ for (ParseNode* var = VarListHead(varStmt); var; var = NextNode(var)) {
+ if (!CheckFuncPtrTable(m, var)) {
+ return false;
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < m.numFuncPtrTables(); i++) {
+ ModuleValidatorShared::Table& table = m.table(i);
+ if (!table.defined()) {
+ return m.failNameOffset(table.firstUse(),
+ "function-pointer table %s wasn't defined",
+ table.name());
+ }
+ }
+
+ return true;
+}
+
+static bool CheckModuleExportFunction(
+ ModuleValidatorShared& m, ParseNode* pn,
+ const ParserName* maybeFieldName = nullptr) {
+ if (!pn->isKind(ParseNodeKind::Name)) {
+ return m.fail(pn, "expected name of exported function");
+ }
+
+ const ParserName* funcName = pn->as<NameNode>().name();
+ const ModuleValidatorShared::Func* func = m.lookupFuncDef(funcName);
+ if (!func) {
+ return m.failName(pn, "function '%s' not found", funcName);
+ }
+
+ return m.addExportField(*func, maybeFieldName);
+}
+
+static bool CheckModuleExportObject(ModuleValidatorShared& m,
+ ParseNode* object) {
+ MOZ_ASSERT(object->isKind(ParseNodeKind::ObjectExpr));
+
+ for (ParseNode* pn = ListHead(object); pn; pn = NextNode(pn)) {
+ if (!IsNormalObjectField(pn)) {
+ return m.fail(pn,
+ "only normal object properties may be used in the export "
+ "object literal");
+ }
+
+ const ParserName* fieldName = ObjectNormalFieldName(pn);
+
+ ParseNode* initNode = ObjectNormalFieldInitializer(pn);
+ if (!initNode->isKind(ParseNodeKind::Name)) {
+ return m.fail(
+ initNode,
+ "initializer of exported object literal must be name of function");
+ }
+
+ if (!CheckModuleExportFunction(m, initNode, fieldName)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckModuleReturn(ModuleValidator<Unit>& m) {
+ TokenKind tk;
+ if (!GetToken(m.parser(), &tk)) {
+ return false;
+ }
+ auto& ts = m.parser().tokenStream;
+ if (tk != TokenKind::Return) {
+ return m.failCurrentOffset(
+ (tk == TokenKind::RightCurly || tk == TokenKind::Eof)
+ ? "expecting return statement"
+ : "invalid asm.js. statement");
+ }
+ ts.anyCharsAccess().ungetToken();
+
+ ParseNode* returnStmt = m.parser().statementListItem(YieldIsName);
+ if (!returnStmt) {
+ return false;
+ }
+
+ ParseNode* returnExpr = ReturnExpr(returnStmt);
+ if (!returnExpr) {
+ return m.fail(returnStmt, "export statement must return something");
+ }
+
+ if (returnExpr->isKind(ParseNodeKind::ObjectExpr)) {
+ if (!CheckModuleExportObject(m, returnExpr)) {
+ return false;
+ }
+ } else {
+ if (!CheckModuleExportFunction(m, returnExpr)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool CheckModuleEnd(ModuleValidator<Unit>& m) {
+ TokenKind tk;
+ if (!GetToken(m.parser(), &tk)) {
+ return false;
+ }
+
+ if (tk != TokenKind::Eof && tk != TokenKind::RightCurly) {
+ return m.failCurrentOffset(
+ "top-level export (return) must be the last statement");
+ }
+
+ m.parser().tokenStream.anyCharsAccess().ungetToken();
+ return true;
+}
+
+template <typename Unit>
+static SharedModule CheckModule(JSContext* cx, ParserAtomsTable& parserAtoms,
+ AsmJSParser<Unit>& parser, ParseNode* stmtList,
+ unsigned* time) {
+ int64_t before = PRMJ_Now();
+
+ FunctionNode* moduleFunctionNode = parser.pc_->functionBox()->functionNode;
+
+ ModuleValidator<Unit> m(cx, parserAtoms, parser, moduleFunctionNode);
+ if (!m.init()) {
+ return nullptr;
+ }
+
+ if (!CheckFunctionHead(m, moduleFunctionNode)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleArguments(m, moduleFunctionNode)) {
+ return nullptr;
+ }
+
+ if (!CheckPrecedingStatements(m, stmtList)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleProcessingDirectives(m)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleGlobals(m)) {
+ return nullptr;
+ }
+
+ if (!m.startFunctionBodies()) {
+ return nullptr;
+ }
+
+ if (!CheckFunctions(m)) {
+ return nullptr;
+ }
+
+ if (!CheckFuncPtrTables(m)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleReturn(m)) {
+ return nullptr;
+ }
+
+ if (!CheckModuleEnd(m)) {
+ return nullptr;
+ }
+
+ SharedModule module = m.finish();
+ if (!module) {
+ return nullptr;
+ }
+
+ *time = (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC;
+ return module;
+}
+
+/*****************************************************************************/
+// Link-time validation
+
+static bool LinkFail(JSContext* cx, const char* str) {
+ WarnNumberASCII(cx, JSMSG_USE_ASM_LINK_FAIL, str);
+ return false;
+}
+
+static bool IsMaybeWrappedScriptedProxy(JSObject* obj) {
+ JSObject* unwrapped = UncheckedUnwrap(obj);
+ return unwrapped && IsScriptedProxy(unwrapped);
+}
+
+static bool GetDataProperty(JSContext* cx, HandleValue objVal, HandleAtom field,
+ MutableHandleValue v) {
+ if (!objVal.isObject()) {
+ return LinkFail(cx, "accessing property of non-object");
+ }
+
+ RootedObject obj(cx, &objVal.toObject());
+ if (IsMaybeWrappedScriptedProxy(obj)) {
+ return LinkFail(cx, "accessing property of a Proxy");
+ }
+
+ Rooted<PropertyDescriptor> desc(cx);
+ RootedId id(cx, AtomToId(field));
+ if (!GetPropertyDescriptor(cx, obj, id, &desc)) {
+ return false;
+ }
+
+ if (!desc.object()) {
+ return LinkFail(cx, "property not present on object");
+ }
+
+ if (!desc.isDataDescriptor()) {
+ return LinkFail(cx, "property is not a data property");
+ }
+
+ v.set(desc.value());
+ return true;
+}
+
+static bool GetDataProperty(JSContext* cx, HandleValue objVal,
+ const char* fieldChars, MutableHandleValue v) {
+ RootedAtom field(cx, AtomizeUTF8Chars(cx, fieldChars, strlen(fieldChars)));
+ if (!field) {
+ return false;
+ }
+
+ return GetDataProperty(cx, objVal, field, v);
+}
+
+static bool GetDataProperty(JSContext* cx, HandleValue objVal,
+ const ImmutablePropertyNamePtr& field,
+ MutableHandleValue v) {
+ // Help the conversion along for all the cx->parserNames().* users.
+ HandlePropertyName fieldHandle = field;
+ return GetDataProperty(cx, objVal, fieldHandle, v);
+}
+
+static bool HasObjectValueOfMethodPure(JSObject* obj, JSContext* cx) {
+ Value v;
+ if (!GetPropertyPure(cx, obj, NameToId(cx->names().valueOf), &v)) {
+ return false;
+ }
+
+ JSFunction* fun;
+ if (!IsFunctionObject(v, &fun)) {
+ return false;
+ }
+
+ return IsSelfHostedFunctionWithName(fun, cx->names().Object_valueOf);
+}
+
+static bool HasPureCoercion(JSContext* cx, HandleValue v) {
+ // Ideally, we'd reject all non-primitives, but Emscripten has a bug that
+ // generates code that passes functions for some imports. To avoid breaking
+ // all the code that contains this bug, we make an exception for functions
+ // that don't have user-defined valueOf or toString, for their coercions
+ // are not observable and coercion via ToNumber/ToInt32 definitely produces
+ // NaN/0. We should remove this special case later once most apps have been
+ // built with newer Emscripten.
+ if (v.toObject().is<JSFunction>() &&
+ HasNoToPrimitiveMethodPure(&v.toObject(), cx) &&
+ HasObjectValueOfMethodPure(&v.toObject(), cx) &&
+ HasNativeMethodPure(&v.toObject(), cx->names().toString, fun_toString,
+ cx)) {
+ return true;
+ }
+ return false;
+}
+
+static bool ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue importVal,
+ Maybe<LitValPOD>* val) {
+ switch (global.varInitKind()) {
+ case AsmJSGlobal::InitConstant:
+ val->emplace(global.varInitVal());
+ return true;
+
+ case AsmJSGlobal::InitImport: {
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, importVal, global.field(), &v)) {
+ return false;
+ }
+
+ if (!v.isPrimitive() && !HasPureCoercion(cx, v)) {
+ return LinkFail(cx, "Imported values must be primitives");
+ }
+
+ switch (global.varInitImportType().kind()) {
+ case ValType::I32: {
+ int32_t i32;
+ if (!ToInt32(cx, v, &i32)) {
+ return false;
+ }
+ val->emplace(uint32_t(i32));
+ return true;
+ }
+ case ValType::I64:
+ MOZ_CRASH("int64");
+ case ValType::V128:
+ MOZ_CRASH("v128");
+ case ValType::F32: {
+ float f;
+ if (!RoundFloat32(cx, v, &f)) {
+ return false;
+ }
+ val->emplace(f);
+ return true;
+ }
+ case ValType::F64: {
+ double d;
+ if (!ToNumber(cx, v, &d)) {
+ return false;
+ }
+ val->emplace(d);
+ return true;
+ }
+ case ValType::Ref: {
+ MOZ_CRASH("not available in asm.js");
+ }
+ }
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+static bool ValidateFFI(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue importVal,
+ MutableHandle<FunctionVector> ffis) {
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, importVal, global.field(), &v)) {
+ return false;
+ }
+
+ if (!IsFunctionObject(v)) {
+ return LinkFail(cx, "FFI imports must be functions");
+ }
+
+ ffis[global.ffiIndex()].set(&v.toObject().as<JSFunction>());
+ return true;
+}
+
+static bool ValidateArrayView(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue globalVal) {
+ if (!global.field()) {
+ return true;
+ }
+
+ if (Scalar::isBigIntType(global.viewType())) {
+ return LinkFail(cx, "bad typed array constructor");
+ }
+
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, global.field(), &v)) {
+ return false;
+ }
+
+ bool tac = IsTypedArrayConstructor(v, global.viewType());
+ if (!tac) {
+ return LinkFail(cx, "bad typed array constructor");
+ }
+
+ return true;
+}
+
+static bool ValidateMathBuiltinFunction(JSContext* cx,
+ const AsmJSGlobal& global,
+ HandleValue globalVal) {
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, cx->names().Math, &v)) {
+ return false;
+ }
+
+ if (!GetDataProperty(cx, v, global.field(), &v)) {
+ return false;
+ }
+
+ Native native = nullptr;
+ switch (global.mathBuiltinFunction()) {
+ case AsmJSMathBuiltin_sin:
+ native = math_sin;
+ break;
+ case AsmJSMathBuiltin_cos:
+ native = math_cos;
+ break;
+ case AsmJSMathBuiltin_tan:
+ native = math_tan;
+ break;
+ case AsmJSMathBuiltin_asin:
+ native = math_asin;
+ break;
+ case AsmJSMathBuiltin_acos:
+ native = math_acos;
+ break;
+ case AsmJSMathBuiltin_atan:
+ native = math_atan;
+ break;
+ case AsmJSMathBuiltin_ceil:
+ native = math_ceil;
+ break;
+ case AsmJSMathBuiltin_floor:
+ native = math_floor;
+ break;
+ case AsmJSMathBuiltin_exp:
+ native = math_exp;
+ break;
+ case AsmJSMathBuiltin_log:
+ native = math_log;
+ break;
+ case AsmJSMathBuiltin_pow:
+ native = math_pow;
+ break;
+ case AsmJSMathBuiltin_sqrt:
+ native = math_sqrt;
+ break;
+ case AsmJSMathBuiltin_min:
+ native = math_min;
+ break;
+ case AsmJSMathBuiltin_max:
+ native = math_max;
+ break;
+ case AsmJSMathBuiltin_abs:
+ native = math_abs;
+ break;
+ case AsmJSMathBuiltin_atan2:
+ native = math_atan2;
+ break;
+ case AsmJSMathBuiltin_imul:
+ native = math_imul;
+ break;
+ case AsmJSMathBuiltin_clz32:
+ native = math_clz32;
+ break;
+ case AsmJSMathBuiltin_fround:
+ native = math_fround;
+ break;
+ }
+
+ if (!IsNativeFunction(v, native)) {
+ return LinkFail(cx, "bad Math.* builtin function");
+ }
+
+ return true;
+}
+
+static bool ValidateConstant(JSContext* cx, const AsmJSGlobal& global,
+ HandleValue globalVal) {
+ RootedValue v(cx, globalVal);
+
+ if (global.constantKind() == AsmJSGlobal::MathConstant) {
+ if (!GetDataProperty(cx, v, cx->names().Math, &v)) {
+ return false;
+ }
+ }
+
+ if (!GetDataProperty(cx, v, global.field(), &v)) {
+ return false;
+ }
+
+ if (!v.isNumber()) {
+ return LinkFail(cx, "math / global constant value needs to be a number");
+ }
+
+  // NaN != NaN, so compare NaN-ness explicitly instead of using ==.
+ if (IsNaN(global.constantValue())) {
+ if (!IsNaN(v.toNumber())) {
+ return LinkFail(cx, "global constant value needs to be NaN");
+ }
+ } else {
+ if (v.toNumber() != global.constantValue()) {
+ return LinkFail(cx, "global constant value mismatch");
+ }
+ }
+
+ return true;
+}
+
+static bool CheckBuffer(JSContext* cx, const AsmJSMetadata& metadata,
+ HandleValue bufferVal,
+ MutableHandle<ArrayBufferObjectMaybeShared*> buffer) {
+ if (metadata.memoryUsage == MemoryUsage::Shared) {
+ if (!IsSharedArrayBuffer(bufferVal)) {
+ return LinkFail(
+ cx, "shared views can only be constructed onto SharedArrayBuffer");
+ }
+ } else {
+ if (!IsArrayBuffer(bufferVal)) {
+ return LinkFail(
+ cx, "unshared views can only be constructed onto ArrayBuffer");
+ }
+ }
+
+ buffer.set(&AsAnyArrayBuffer(bufferVal));
+
+ // Do not assume the buffer's length fits within the wasm heap limit, so do
+ // not call ByteLength32().
+ size_t memoryLength = buffer->byteLength().get();
+
+ if (!IsValidAsmJSHeapLength(memoryLength)) {
+ UniqueChars msg(
+ JS_smprintf("ArrayBuffer byteLength 0x%" PRIx64
+ " is not a valid heap length. The next "
+ "valid length is 0x%" PRIx64,
+ uint64_t(memoryLength),
+ RoundUpToNextValidAsmJSHeapLength(memoryLength)));
+ if (!msg) {
+ return false;
+ }
+ return LinkFail(cx, msg.get());
+ }
+
+ // This check is sufficient without considering the size of the loaded datum
+ // because heap loads and stores start on an aligned boundary and the heap
+ // byteLength has larger alignment.
+ MOZ_ASSERT((metadata.minMemoryLength - 1) <= INT32_MAX);
+ if (memoryLength < metadata.minMemoryLength) {
+ UniqueChars msg(JS_smprintf("ArrayBuffer byteLength of 0x%" PRIx64
+ " is less than 0x%" PRIx64 " (the "
+ "size implied "
+ "by const heap accesses).",
+ uint64_t(memoryLength),
+ metadata.minMemoryLength));
+ if (!msg) {
+ return false;
+ }
+ return LinkFail(cx, msg.get());
+ }
+
+ if (buffer->is<ArrayBufferObject>()) {
+ Rooted<ArrayBufferObject*> arrayBuffer(cx,
+ &buffer->as<ArrayBufferObject>());
+ if (!arrayBuffer->prepareForAsmJS()) {
+ return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
+ }
+ } else {
+ return LinkFail(cx, "Unable to prepare SharedArrayBuffer for asm.js use");
+ }
+
+ MOZ_ASSERT(buffer->isPreparedForAsmJS());
+ return true;
+}
+
+static bool GetImports(JSContext* cx, const AsmJSMetadata& metadata,
+ HandleValue globalVal, HandleValue importVal,
+ ImportValues* imports) {
+ Rooted<FunctionVector> ffis(cx, FunctionVector(cx));
+ if (!ffis.resize(metadata.numFFIs)) {
+ return false;
+ }
+
+ for (const AsmJSGlobal& global : metadata.asmJSGlobals) {
+ switch (global.which()) {
+ case AsmJSGlobal::Variable: {
+ Maybe<LitValPOD> litVal;
+ if (!ValidateGlobalVariable(cx, global, importVal, &litVal)) {
+ return false;
+ }
+ if (!imports->globalValues.append(Val(litVal->asLitVal()))) {
+ return false;
+ }
+ break;
+ }
+ case AsmJSGlobal::FFI:
+ if (!ValidateFFI(cx, global, importVal, &ffis)) {
+ return false;
+ }
+ break;
+ case AsmJSGlobal::ArrayView:
+ case AsmJSGlobal::ArrayViewCtor:
+ if (!ValidateArrayView(cx, global, globalVal)) {
+ return false;
+ }
+ break;
+ case AsmJSGlobal::MathBuiltinFunction:
+ if (!ValidateMathBuiltinFunction(cx, global, globalVal)) {
+ return false;
+ }
+ break;
+ case AsmJSGlobal::Constant:
+ if (!ValidateConstant(cx, global, globalVal)) {
+ return false;
+ }
+ break;
+ }
+ }
+
+ for (const AsmJSImport& import : metadata.asmJSImports) {
+ if (!imports->funcs.append(ffis[import.ffiIndex()])) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool TryInstantiate(JSContext* cx, CallArgs args, const Module& module,
+ const AsmJSMetadata& metadata,
+ MutableHandleWasmInstanceObject instanceObj,
+ MutableHandleObject exportObj) {
+ HandleValue globalVal = args.get(0);
+ HandleValue importVal = args.get(1);
+ HandleValue bufferVal = args.get(2);
+
+ // Re-check HasPlatformSupport(cx) since this varies per-thread and
+ // 'module' may have been produced on a parser thread.
+ if (!HasPlatformSupport(cx)) {
+ return LinkFail(cx, "no platform support");
+ }
+
+ Rooted<ImportValues> imports(cx);
+
+ if (module.metadata().usesMemory()) {
+ RootedArrayBufferObjectMaybeShared buffer(cx);
+ if (!CheckBuffer(cx, metadata, bufferVal, &buffer)) {
+ return false;
+ }
+
+ imports.get().memory = WasmMemoryObject::create(cx, buffer, nullptr);
+ if (!imports.get().memory) {
+ return false;
+ }
+ }
+
+ if (!GetImports(cx, metadata, globalVal, importVal, imports.address())) {
+ return false;
+ }
+
+ if (!module.instantiate(cx, imports.get(), nullptr, instanceObj)) {
+ return false;
+ }
+
+ exportObj.set(&instanceObj->exportsObj());
+ return true;
+}
+
+static bool HandleInstantiationFailure(JSContext* cx, CallArgs args,
+ const AsmJSMetadata& metadata) {
+ using js::frontend::FunctionSyntaxKind;
+
+ RootedAtom name(cx, args.callee().as<JSFunction>().explicitName());
+
+ if (cx->isExceptionPending()) {
+ return false;
+ }
+
+ ScriptSource* source = metadata.scriptSource.get();
+
+ // Source discarding is allowed to affect JS semantics because it is never
+ // enabled for normal JS content.
+ bool haveSource;
+ if (!ScriptSource::loadSource(cx, source, &haveSource)) {
+ return false;
+ }
+ if (!haveSource) {
+ JS_ReportErrorASCII(cx,
+ "asm.js link failure with source discarding enabled");
+ return false;
+ }
+
+ uint32_t begin = metadata.toStringStart;
+ uint32_t end = metadata.srcEndAfterCurly();
+ Rooted<JSLinearString*> src(cx, source->substringDontDeflate(cx, begin, end));
+ if (!src) {
+ return false;
+ }
+
+ JS::CompileOptions options(cx);
+ options.setMutedErrors(source->mutedErrors())
+ .setFile(source->filename())
+ .setNoScriptRval(false);
+ options.asmJSOption = AsmJSOption::Disabled;
+
+ // The exported function inherits an implicit strict context if the module
+ // also inherited it somehow.
+ if (metadata.strict) {
+ options.setForceStrictMode();
+ }
+
+ AutoStableStringChars stableChars(cx);
+ if (!stableChars.initTwoByte(cx, src)) {
+ return false;
+ }
+
+ SourceText<char16_t> srcBuf;
+
+ const char16_t* chars = stableChars.twoByteRange().begin().get();
+ SourceOwnership ownership = stableChars.maybeGiveOwnershipToCaller()
+ ? SourceOwnership::TakeOwnership
+ : SourceOwnership::Borrowed;
+ if (!srcBuf.init(cx, chars, end - begin, ownership)) {
+ return false;
+ }
+
+ FunctionSyntaxKind syntaxKind = FunctionSyntaxKind::Statement;
+
+ RootedFunction fun(cx, frontend::CompileStandaloneFunction(
+ cx, options, srcBuf, Nothing(), syntaxKind));
+ if (!fun) {
+ return false;
+ }
+
+ fun->initEnvironment(&cx->global()->lexicalEnvironment());
+
+ // Call the function we just recompiled.
+ args.setCallee(ObjectValue(*fun));
+ return InternalCallOrConstruct(
+ cx, args, args.isConstructing() ? CONSTRUCT : NO_CONSTRUCT);
+}
+
+static const Module& AsmJSModuleFunctionToModule(JSFunction* fun) {
+ MOZ_ASSERT(IsAsmJSModule(fun));
+ const Value& v = fun->getExtendedSlot(FunctionExtended::ASMJS_MODULE_SLOT);
+ return v.toObject().as<WasmModuleObject>().module();
+}
+
+// Implements the semantics of an asm.js module function that has been
+// successfully validated.
+bool js::InstantiateAsmJS(JSContext* cx, unsigned argc, JS::Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ JSFunction* callee = &args.callee().as<JSFunction>();
+ const Module& module = AsmJSModuleFunctionToModule(callee);
+ const AsmJSMetadata& metadata = module.metadata().asAsmJS();
+
+ RootedWasmInstanceObject instanceObj(cx);
+ RootedObject exportObj(cx);
+ if (!TryInstantiate(cx, args, module, metadata, &instanceObj, &exportObj)) {
+ // Link-time validation checks failed, so reparse the entire asm.js
+ // module from scratch to get normal interpreted bytecode which we can
+ // simply Invoke. Very slow.
+ return HandleInstantiationFailure(cx, args, metadata);
+ }
+
+ args.rval().set(ObjectValue(*exportObj));
+ return true;
+}
+
+/*****************************************************************************/
+// Top-level js::CompileAsmJS
+
+static bool NoExceptionPending(JSContext* cx) {
+ return cx->isHelperThreadContext() || !cx->isExceptionPending();
+}
+
+static bool SuccessfulValidation(frontend::ParserBase& parser,
+ unsigned compilationTime) {
+ unsigned errNum = js::SupportDifferentialTesting()
+ ? JSMSG_USE_ASM_TYPE_OK_NO_TIME
+ : JSMSG_USE_ASM_TYPE_OK;
+
+ char timeChars[20];
+ SprintfLiteral(timeChars, "%u", compilationTime);
+
+ return parser.warningNoOffset(errNum, timeChars);
+}
+
+static bool TypeFailureWarning(frontend::ParserBase& parser, const char* str) {
+ if (parser.options().throwOnAsmJSValidationFailureOption) {
+ parser.errorNoOffset(JSMSG_USE_ASM_TYPE_FAIL, str ? str : "");
+ return false;
+ }
+
+ // Per the asm.js standard convention, whether failure sets a pending
+ // exception determines whether to attempt non-asm.js reparsing, so ignore
+ // the return value below.
+ Unused << parser.warningNoOffset(JSMSG_USE_ASM_TYPE_FAIL, str ? str : "");
+ return false;
+}
+
+// asm.js requires Ion to be available on the current hardware/OS and to be
+// enabled for wasm, since asm.js compilation goes via wasm.
+static bool IsAsmJSCompilerAvailable(JSContext* cx) {
+ return HasPlatformSupport(cx) && IonAvailable(cx);
+}
+
+static bool EstablishPreconditions(JSContext* cx,
+ frontend::ParserBase& parser) {
+ if (!IsAsmJSCompilerAvailable(cx)) {
+ return TypeFailureWarning(parser, "Disabled by lack of compiler support");
+ }
+
+ switch (parser.options().asmJSOption) {
+ case AsmJSOption::Disabled:
+ return TypeFailureWarning(parser, "Disabled by 'asmjs' runtime option");
+ case AsmJSOption::DisabledByDebugger:
+ return TypeFailureWarning(parser, "Disabled by debugger");
+ case AsmJSOption::Enabled:
+ break;
+ }
+
+ if (parser.pc_->isGenerator()) {
+ return TypeFailureWarning(parser, "Disabled by generator context");
+ }
+
+ if (parser.pc_->isAsync()) {
+ return TypeFailureWarning(parser, "Disabled by async context");
+ }
+
+ if (parser.pc_->isArrowFunction()) {
+ return TypeFailureWarning(parser, "Disabled by arrow function context");
+ }
+
+ // Class constructors are also methods
+ if (parser.pc_->isMethod() || parser.pc_->isGetterOrSetter()) {
+ return TypeFailureWarning(
+ parser, "Disabled by class constructor or method context");
+ }
+
+ return true;
+}
+
+template <typename Unit>
+static bool DoCompileAsmJS(JSContext* cx, ParserAtomsTable& parserAtoms,
+ AsmJSParser<Unit>& parser, ParseNode* stmtList,
+ bool* validated) {
+ *validated = false;
+
+ // Various conditions disable asm.js optimizations.
+ if (!EstablishPreconditions(cx, parser)) {
+ return NoExceptionPending(cx);
+ }
+
+ // "Checking" parses, validates and compiles, producing a fully compiled
+ // WasmModuleObject as result.
+ unsigned time;
+ SharedModule module = CheckModule(cx, parserAtoms, parser, stmtList, &time);
+ if (!module) {
+ return NoExceptionPending(cx);
+ }
+
+ // Finished! Save the ref-counted module on the FunctionBox. When JSFunctions
+ // are eventually allocated we will create an asm.js constructor for it.
+ FunctionBox* funbox = parser.pc_->functionBox();
+ MOZ_ASSERT(funbox->isInterpreted());
+ if (!funbox->setAsmJSModule(module)) {
+ return NoExceptionPending(cx);
+ }
+
+ // Success! Write to the console with a "warning" message indicating
+ // total compilation time.
+ *validated = true;
+ SuccessfulValidation(parser, time);
+ return NoExceptionPending(cx);
+}
+
+bool js::CompileAsmJS(JSContext* cx, ParserAtomsTable& parserAtoms,
+ AsmJSParser<char16_t>& parser, ParseNode* stmtList,
+ bool* validated) {
+ return DoCompileAsmJS(cx, parserAtoms, parser, stmtList, validated);
+}
+
+bool js::CompileAsmJS(JSContext* cx, ParserAtomsTable& parserAtoms,
+ AsmJSParser<Utf8Unit>& parser, ParseNode* stmtList,
+ bool* validated) {
+ return DoCompileAsmJS(cx, parserAtoms, parser, stmtList, validated);
+}
+
+/*****************************************************************************/
+// asm.js testing functions
+
+bool js::IsAsmJSModuleNative(Native native) {
+ return native == InstantiateAsmJS;
+}
+
+bool js::IsAsmJSModule(JSFunction* fun) {
+ return fun->maybeNative() == InstantiateAsmJS;
+}
+
+bool js::IsAsmJSFunction(JSFunction* fun) {
+ return fun->kind() == FunctionFlags::AsmJS;
+}
+
+bool js::IsAsmJSStrictModeModuleOrFunction(JSFunction* fun) {
+ if (IsAsmJSModule(fun)) {
+ return AsmJSModuleFunctionToModule(fun).metadata().asAsmJS().strict;
+ }
+
+ if (IsAsmJSFunction(fun)) {
+ return ExportedFunctionToInstance(fun).metadata().asAsmJS().strict;
+ }
+
+ return false;
+}
+
+bool js::IsAsmJSCompilationAvailable(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ bool available = cx->options().asmJS() && IsAsmJSCompilerAvailable(cx);
+
+ args.rval().set(BooleanValue(available));
+ return true;
+}
+
+static JSFunction* MaybeWrappedNativeFunction(const Value& v) {
+ if (!v.isObject()) {
+ return nullptr;
+ }
+
+ return v.toObject().maybeUnwrapIf<JSFunction>();
+}
+
+bool js::IsAsmJSModule(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ bool rval = false;
+ if (JSFunction* fun = MaybeWrappedNativeFunction(args.get(0))) {
+ rval = IsAsmJSModule(fun);
+ }
+
+ args.rval().set(BooleanValue(rval));
+ return true;
+}
+
+bool js::IsAsmJSFunction(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ bool rval = false;
+ if (JSFunction* fun = MaybeWrappedNativeFunction(args.get(0))) {
+ rval = IsAsmJSFunction(fun);
+ }
+
+ args.rval().set(BooleanValue(rval));
+ return true;
+}
+
+/*****************************************************************************/
+// asm.js toString/toSource support
+
+JSString* js::AsmJSModuleToString(JSContext* cx, HandleFunction fun,
+ bool isToSource) {
+ MOZ_ASSERT(IsAsmJSModule(fun));
+
+ const AsmJSMetadata& metadata =
+ AsmJSModuleFunctionToModule(fun).metadata().asAsmJS();
+ uint32_t begin = metadata.toStringStart;
+ uint32_t end = metadata.srcEndAfterCurly();
+ ScriptSource* source = metadata.scriptSource.get();
+
+ JSStringBuilder out(cx);
+
+ if (isToSource && fun->isLambda() && !out.append("(")) {
+ return nullptr;
+ }
+
+ bool haveSource;
+ if (!ScriptSource::loadSource(cx, source, &haveSource)) {
+ return nullptr;
+ }
+
+ if (!haveSource) {
+ if (!out.append("function ")) {
+ return nullptr;
+ }
+ if (fun->explicitName() && !out.append(fun->explicitName())) {
+ return nullptr;
+ }
+ if (!out.append("() {\n [native code]\n}")) {
+ return nullptr;
+ }
+ } else {
+ Rooted<JSLinearString*> src(cx, source->substring(cx, begin, end));
+ if (!src) {
+ return nullptr;
+ }
+
+ if (!out.append(src)) {
+ return nullptr;
+ }
+ }
+
+ if (isToSource && fun->isLambda() && !out.append(")")) {
+ return nullptr;
+ }
+
+ return out.finishString();
+}
+
+JSString* js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun) {
+ MOZ_ASSERT(IsAsmJSFunction(fun));
+
+ const AsmJSMetadata& metadata =
+ ExportedFunctionToInstance(fun).metadata().asAsmJS();
+ const AsmJSExport& f =
+ metadata.lookupAsmJSExport(ExportedFunctionToFuncIndex(fun));
+
+ uint32_t begin = metadata.srcStart + f.startOffsetInModule();
+ uint32_t end = metadata.srcStart + f.endOffsetInModule();
+
+ ScriptSource* source = metadata.scriptSource.get();
+ JSStringBuilder out(cx);
+
+ if (!out.append("function ")) {
+ return nullptr;
+ }
+
+ bool haveSource;
+ if (!ScriptSource::loadSource(cx, source, &haveSource)) {
+ return nullptr;
+ }
+
+ if (!haveSource) {
+ // asm.js functions can't be anonymous
+ MOZ_ASSERT(fun->explicitName());
+ if (!out.append(fun->explicitName())) {
+ return nullptr;
+ }
+ if (!out.append("() {\n [native code]\n}")) {
+ return nullptr;
+ }
+ } else {
+ Rooted<JSLinearString*> src(cx, source->substring(cx, begin, end));
+ if (!src) {
+ return nullptr;
+ }
+ if (!out.append(src)) {
+ return nullptr;
+ }
+ }
+
+ return out.finishString();
+}
+
+bool js::IsValidAsmJSHeapLength(size_t length) {
+ if (length < MinHeapLength) {
+ return false;
+ }
+
+ // The heap length is limited by what wasm can handle.
+ if (length > MaxMemory32Bytes) {
+ return false;
+ }
+
+ return wasm::IsValidARMImmediate(length);
+}
diff --git a/js/src/wasm/AsmJS.h b/js/src/wasm/AsmJS.h
new file mode 100644
index 0000000000..0372393bba
--- /dev/null
+++ b/js/src/wasm/AsmJS.h
@@ -0,0 +1,112 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_AsmJS_h
+#define wasm_AsmJS_h
+
+#include "mozilla/Utf8.h" // mozilla::Utf8Unit
+
+#include <stdint.h> // uint32_t
+
+#include "jstypes.h" // JS_PUBLIC_API
+#include "js/CallArgs.h" // JSNative
+
+struct JS_PUBLIC_API JSContext;
+class JS_PUBLIC_API JSFunction;
+
+namespace JS {
+
+class JS_PUBLIC_API Value;
+
+template <typename T>
+class Handle;
+
+} // namespace JS
+
+namespace js {
+
+namespace frontend {
+
+class ParserAtomsTable;
+class ParseContext;
+class ParseNode;
+
+template <class ParseHandler, typename CharT>
+class Parser;
+class FullParseHandler;
+
+} // namespace frontend
+
+template <typename Unit>
+using AsmJSParser = frontend::Parser<frontend::FullParseHandler, Unit>;
+
+// This function takes over parsing of a function starting with "use asm". The
+// return value indicates whether an error was reported which the caller should
+// propagate. If no error was reported, the function may still fail to validate
+// as asm.js. In this case, the parser.tokenStream has been advanced an
+// indeterminate amount and the entire function should be reparsed from the
+// beginning.
+
+[[nodiscard]] extern bool CompileAsmJS(JSContext* cx,
+ frontend::ParserAtomsTable& parserAtoms,
+ AsmJSParser<mozilla::Utf8Unit>& parser,
+ frontend::ParseNode* stmtList,
+ bool* validated);
+
+[[nodiscard]] extern bool CompileAsmJS(JSContext* cx,
+ frontend::ParserAtomsTable& parserAtoms,
+ AsmJSParser<char16_t>& parser,
+ frontend::ParseNode* stmtList,
+ bool* validated);
+
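+// A minimal caller-side sketch of the contract documented above (illustrative
+// only; the real frontend call sites differ in detail):
+//
+//   bool validated;
+//   if (!CompileAsmJS(cx, parserAtoms, parser, stmtList, &validated)) {
+//     return false;  // an error was reported; propagate it
+//   }
+//   if (!validated) {
+//     // Not asm.js after all: reparse the function from the beginning.
+//   }
+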
+// asm.js module/export queries:
+
+extern bool IsAsmJSModuleNative(JSNative native);
+
+extern bool IsAsmJSModule(JSFunction* fun);
+
+extern bool IsAsmJSFunction(JSFunction* fun);
+
+extern bool IsAsmJSStrictModeModuleOrFunction(JSFunction* fun);
+
+extern bool InstantiateAsmJS(JSContext* cx, unsigned argc, JS::Value* vp);
+
+// asm.js testing natives:
+
+extern bool IsAsmJSCompilationAvailable(JSContext* cx, unsigned argc,
+ JS::Value* vp);
+
+extern bool IsAsmJSModule(JSContext* cx, unsigned argc, JS::Value* vp);
+
+extern bool IsAsmJSFunction(JSContext* cx, unsigned argc, JS::Value* vp);
+
+// asm.js toString/toSource support:
+
+extern JSString* AsmJSFunctionToString(JSContext* cx,
+ JS::Handle<JSFunction*> fun);
+
+extern JSString* AsmJSModuleToString(JSContext* cx, JS::Handle<JSFunction*> fun,
+ bool isToSource);
+
+// asm.js heap:
+
+extern bool IsValidAsmJSHeapLength(size_t length);
+
+} // namespace js
+
+#endif // wasm_AsmJS_h
diff --git a/js/src/wasm/TypedObject-inl.h b/js/src/wasm/TypedObject-inl.h
new file mode 100644
index 0000000000..4a015e7f65
--- /dev/null
+++ b/js/src/wasm/TypedObject-inl.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef wasm_TypedObject_inl_h
+#define wasm_TypedObject_inl_h
+
+#include "wasm/TypedObject.h"
+
+#include "gc/ObjectKind-inl.h"
+
+/* static */
+js::gc::AllocKind js::InlineTypedObject::allocKindForTypeDescriptor(
+ TypeDescr* descr) {
+ size_t nbytes = descr->size();
+ MOZ_ASSERT(nbytes <= MaxInlineBytes);
+
+ return gc::GetGCObjectKindForBytes(nbytes + sizeof(TypedObject));
+}
+
+#endif // wasm_TypedObject_inl_h
diff --git a/js/src/wasm/TypedObject.cpp b/js/src/wasm/TypedObject.cpp
new file mode 100644
index 0000000000..6d9d564aca
--- /dev/null
+++ b/js/src/wasm/TypedObject.cpp
@@ -0,0 +1,755 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "wasm/TypedObject-inl.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Casting.h"
+#include "mozilla/CheckedInt.h"
+
+#include <algorithm>
+
+#include "gc/Marking.h"
+#include "js/CharacterEncoding.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/PropertySpec.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "js/Vector.h"
+#include "util/StringBuffer.h"
+#include "vm/GlobalObject.h"
+#include "vm/JSFunction.h"
+#include "vm/JSObject.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/Realm.h"
+#include "vm/SelfHosting.h"
+#include "vm/StringType.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/Uint8Clamped.h"
+
+#include "wasm/WasmTypes.h" // WasmValueBox
+#include "gc/Marking-inl.h"
+#include "gc/Nursery-inl.h"
+#include "gc/StoreBuffer-inl.h"
+#include "vm/JSAtom-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+#include "vm/Shape-inl.h"
+
+using mozilla::AssertedCast;
+using mozilla::CheckedInt32;
+using mozilla::IsPowerOfTwo;
+using mozilla::PodCopy;
+using mozilla::PointerRangeSize;
+
+using namespace js;
+using namespace wasm;
+
+/***************************************************************************
+ * Typed Prototypes
+ *
+ * Every type descriptor has an associated prototype. Instances of
+ * that type descriptor use this as their prototype. Per the spec,
+ * typed object prototypes cannot be mutated.
+ */
+
+const JSClass js::TypedProto::class_ = {"TypedProto"};
+
+TypedProto* TypedProto::create(JSContext* cx) {
+ Handle<GlobalObject*> global = cx->global();
+ RootedObject objProto(cx,
+ GlobalObject::getOrCreateObjectPrototype(cx, global));
+ if (!objProto) {
+ return nullptr;
+ }
+
+ return NewTenuredObjectWithGivenProto<TypedProto>(cx, objProto);
+}
+
+static const JSClassOps TypeDescrClassOps = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ TypeDescr::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass js::TypeDescr::class_ = {
+ "TypeDescr",
+ JSCLASS_HAS_RESERVED_SLOTS(TypeDescr::SlotCount) |
+ JSCLASS_BACKGROUND_FINALIZE,
+ &TypeDescrClassOps};
+
+static bool CreateTraceList(JSContext* cx, HandleTypeDescr descr);
+
+TypeDescr* TypeDescr::createFromHandle(JSContext* cx, TypeHandle handle) {
+ const TypeDef& type = handle.get(cx->wasm().typeContext.get());
+ MOZ_ASSERT(type.isStructType());
+ const StructType& structType = type.structType();
+
+ Rooted<TypeDescr*> descr(
+ cx, NewTenuredObjectWithGivenProto<TypeDescr>(cx, nullptr));
+ if (!descr) {
+ return nullptr;
+ }
+
+ Rooted<TypedProto*> proto(cx, TypedProto::create(cx));
+ if (!proto) {
+ return nullptr;
+ }
+
+ descr->initReservedSlot(TypeDescr::Handle, Int32Value(handle.index()));
+ descr->initReservedSlot(TypeDescr::Size, Int32Value(structType.size_));
+ descr->initReservedSlot(TypeDescr::Proto, ObjectValue(*proto));
+ descr->initReservedSlot(TypeDescr::TraceList, UndefinedValue());
+
+ if (!CreateTraceList(cx, descr)) {
+ return nullptr;
+ }
+
+ if (!cx->zone()->addTypeDescrObject(cx, descr)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return descr;
+}
+
+/******************************************************************************
+ * Typed objects
+ */
+
+uint32_t TypedObject::offset() const {
+ if (is<InlineTypedObject>()) {
+ return 0;
+ }
+ return PointerRangeSize(typedMemBase(), typedMem());
+}
+
+uint8_t* TypedObject::typedMem() const {
+ if (is<InlineTypedObject>()) {
+ return as<InlineTypedObject>().inlineTypedMem();
+ }
+ return as<OutlineTypedObject>().outOfLineTypedMem();
+}
+
+uint8_t* TypedObject::typedMemBase() const {
+ MOZ_ASSERT(is<OutlineTypedObject>());
+
+ JSObject& owner = as<OutlineTypedObject>().owner();
+ if (owner.is<ArrayBufferObject>()) {
+ return owner.as<ArrayBufferObject>().dataPointer();
+ }
+ return owner.as<InlineTypedObject>().inlineTypedMem();
+}
+
+/******************************************************************************
+ * Outline typed objects
+ */
+
+void OutlineTypedObject::setOwnerAndData(JSObject* owner, uint8_t* data) {
+ // Typed objects cannot move from one owner to another, so don't worry
+ // about pre barriers during this initialization.
+ owner_ = owner;
+ data_ = data;
+
+ if (owner) {
+ if (!IsInsideNursery(this) && IsInsideNursery(owner)) {
+ // Trigger a post barrier when attaching an object outside the nursery to
+ // one that is inside it.
+ owner->storeBuffer()->putWholeCell(this);
+ } else if (IsInsideNursery(this) && !IsInsideNursery(owner)) {
+ // ...and also when attaching an object inside the nursery to one that is
+ // outside it, for a subtle reason -- the outline object now points to
+ // the memory owned by 'owner', and can modify object/string references
+ // stored in that memory, potentially storing nursery pointers in it. If
+ // the outline object is in the nursery, then the post barrier will do
+ // nothing; you will be writing a nursery pointer "into" a nursery
+ // object. But that will result in the tenured owner's data containing a
+ // nursery pointer, and thus we need a store buffer edge. Since we can't
+ // catch the actual write, register the owner preemptively now.
+ storeBuffer()->putWholeCell(owner);
+ }
+ }
+}
+
+/*static*/
+OutlineTypedObject* OutlineTypedObject::createUnattached(JSContext* cx,
+ HandleTypeDescr descr,
+ gc::InitialHeap heap) {
+ AutoSetNewObjectMetadata metadata(cx);
+
+ RootedObjectGroup group(cx, ObjectGroup::defaultNewGroup(
+ cx, &OutlineTypedObject::class_,
+ TaggedProto(&descr->typedProto()), descr));
+ if (!group) {
+ return nullptr;
+ }
+
+ NewObjectKind newKind =
+ (heap == gc::TenuredHeap) ? TenuredObject : GenericObject;
+ OutlineTypedObject* obj = NewObjectWithGroup<OutlineTypedObject>(
+ cx, group, gc::AllocKind::OBJECT0, newKind);
+ if (!obj) {
+ return nullptr;
+ }
+
+ obj->setOwnerAndData(nullptr, nullptr);
+ return obj;
+}
+
+void OutlineTypedObject::attach(ArrayBufferObject& buffer) {
+ MOZ_ASSERT(size() <= wasm::ByteLength32(buffer));
+ MOZ_ASSERT(buffer.hasTypedObjectViews());
+ MOZ_ASSERT(!buffer.isDetached());
+
+ setOwnerAndData(&buffer, buffer.dataPointer());
+}
+
+/*static*/
+OutlineTypedObject* OutlineTypedObject::createZeroed(JSContext* cx,
+ HandleTypeDescr descr,
+ gc::InitialHeap heap) {
+ // Create unattached wrapper object.
+ Rooted<OutlineTypedObject*> obj(
+ cx, OutlineTypedObject::createUnattached(cx, descr, heap));
+ if (!obj) {
+ return nullptr;
+ }
+
+ // Allocate and initialize the memory for this instance.
+ size_t totalSize = descr->size();
+ Rooted<ArrayBufferObject*> buffer(cx);
+ buffer = ArrayBufferObject::createForTypedObject(cx, BufferSize(totalSize));
+ if (!buffer) {
+ return nullptr;
+ }
+ descr->initInstance(cx, buffer->dataPointer());
+ obj->attach(*buffer);
+ return obj;
+}
+
+/*static*/
+TypedObject* TypedObject::createZeroed(JSContext* cx, HandleTypeDescr descr,
+ gc::InitialHeap heap) {
+ // If possible, create an object with inline data.
+ if (InlineTypedObject::canAccommodateType(descr)) {
+ AutoSetNewObjectMetadata metadata(cx);
+
+ InlineTypedObject* obj = InlineTypedObject::create(cx, descr, heap);
+ if (!obj) {
+ return nullptr;
+ }
+ JS::AutoCheckCannotGC nogc(cx);
+ descr->initInstance(cx, obj->inlineTypedMem(nogc));
+ return obj;
+ }
+
+ return OutlineTypedObject::createZeroed(cx, descr, heap);
+}
+
+/* static */
+void OutlineTypedObject::obj_trace(JSTracer* trc, JSObject* object) {
+ OutlineTypedObject& typedObj = object->as<OutlineTypedObject>();
+
+ TraceEdge(trc, typedObj.shapePtr(), "OutlineTypedObject_shape");
+
+ if (!typedObj.owner_) {
+ MOZ_ASSERT(!typedObj.data_);
+ return;
+ }
+ MOZ_ASSERT(typedObj.data_);
+
+ TypeDescr& descr = typedObj.typeDescr();
+
+ // Mark the owner, watching in case it is moved by the tracer.
+ JSObject* oldOwner = typedObj.owner_;
+ TraceManuallyBarrieredEdge(trc, &typedObj.owner_, "typed object owner");
+ JSObject* owner = typedObj.owner_;
+
+ uint8_t* oldData = typedObj.outOfLineTypedMem();
+ uint8_t* newData = oldData;
+
+ // Update the data pointer if the owner moved and the owner's data is
+ // inline with it.
+ if (owner != oldOwner &&
+ (IsInlineTypedObjectClass(gc::MaybeForwardedObjectClass(owner)) ||
+ gc::MaybeForwardedObjectAs<ArrayBufferObject>(owner).hasInlineData())) {
+ newData += reinterpret_cast<uint8_t*>(owner) -
+ reinterpret_cast<uint8_t*>(oldOwner);
+ typedObj.setData(newData);
+
+ if (trc->isTenuringTracer()) {
+ Nursery& nursery = trc->runtime()->gc.nursery();
+ nursery.maybeSetForwardingPointer(trc, oldData, newData,
+ /* direct = */ false);
+ }
+ }
+
+ if (descr.hasTraceList()) {
+ gc::VisitTraceList(trc, object, descr.traceList(), newData);
+ return;
+ }
+
+ descr.traceInstance(trc, newData);
+}
+
+const TypeDef& TypeDescr::getType(JSContext* cx) const {
+ TypeHandle handle(uint32_t(getReservedSlot(Slot::Handle).toInt32()));
+ return handle.get(cx->wasm().typeContext.get());
+}
+
+bool TypeDescr::lookupProperty(JSContext* cx, jsid id, uint32_t* offset,
+ ValType* type) {
+ const auto& typeDef = getType(cx);
+ MOZ_RELEASE_ASSERT(typeDef.isStructType());
+ const auto& structType = typeDef.structType();
+ uint32_t index;
+ if (!IdIsIndex(id, &index)) {
+ return false;
+ }
+ if (index >= structType.fields_.length()) {
+ return false;
+ }
+ const StructField& field = structType.fields_[index];
+ *offset = field.offset;
+ *type = field.type;
+  return true;
+}
+
+uint32_t TypeDescr::propertyCount(JSContext* cx) {
+ const auto& typeDef = getType(cx);
+ MOZ_RELEASE_ASSERT(typeDef.isStructType());
+ return typeDef.structType().fields_.length();
+}
+
+/* static */
+bool TypedObject::obj_lookupProperty(JSContext* cx, HandleObject obj,
+ HandleId id, MutableHandleObject objp,
+ MutableHandle<PropertyResult> propp) {
+ if (obj->as<TypedObject>().typeDescr().hasProperty(cx, id)) {
+ propp.setNonNativeProperty();
+ objp.set(obj);
+ return true;
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ objp.set(nullptr);
+ propp.setNotFound();
+ return true;
+ }
+
+ return LookupProperty(cx, proto, id, objp, propp);
+}
+
+bool TypedObject::obj_defineProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_OBJECT_NOT_EXTENSIBLE, "TypedObject");
+ return false;
+}
+
+bool TypedObject::obj_hasProperty(JSContext* cx, HandleObject obj, HandleId id,
+ bool* foundp) {
+ Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+ if (typedObj->typeDescr().hasProperty(cx, id)) {
+ *foundp = true;
+ return true;
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ *foundp = false;
+ return true;
+ }
+
+ return HasProperty(cx, proto, id, foundp);
+}
+
+bool TypedObject::obj_getProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp) {
+ Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+
+ uint32_t offset;
+ ValType type;
+ if (typedObj->typeDescr().lookupProperty(cx, id, &offset, &type)) {
+ return typedObj->loadValue(cx, offset, type, vp);
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ vp.setUndefined();
+ return true;
+ }
+
+ return GetProperty(cx, proto, receiver, id, vp);
+}
+
+bool TypedObject::obj_setProperty(JSContext* cx, HandleObject obj, HandleId id,
+ HandleValue v, HandleValue receiver,
+ ObjectOpResult& result) {
+ Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+
+ if (typedObj->typeDescr().hasProperty(cx, id)) {
+ if (!receiver.isObject() || obj != &receiver.toObject()) {
+ return SetPropertyByDefining(cx, id, v, receiver, result);
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_TYPEDOBJECT_SETTING_IMMUTABLE);
+ return false;
+ }
+
+ return SetPropertyOnProto(cx, obj, id, v, receiver, result);
+}
+
+bool TypedObject::obj_getOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<PropertyDescriptor> desc) {
+ Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+
+ uint32_t offset;
+ ValType type;
+ if (typedObj->typeDescr().lookupProperty(cx, id, &offset, &type)) {
+ if (!typedObj->loadValue(cx, offset, type, desc.value())) {
+ return false;
+ }
+ desc.setAttributes(JSPROP_ENUMERATE | JSPROP_PERMANENT);
+ desc.object().set(obj);
+ return true;
+ }
+
+ desc.object().set(nullptr);
+ return true;
+}
+
+bool TypedObject::obj_deleteProperty(JSContext* cx, HandleObject obj,
+ HandleId id, ObjectOpResult& result) {
+ Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+ if (typedObj->typeDescr().hasProperty(cx, id)) {
+ return Throw(cx, id, JSMSG_CANT_DELETE);
+ }
+
+ RootedObject proto(cx, obj->staticPrototype());
+ if (!proto) {
+ return result.succeed();
+ }
+
+ return DeleteProperty(cx, proto, id, result);
+}
+
+bool TypedObject::obj_newEnumerate(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector properties,
+ bool enumerableOnly) {
+ MOZ_ASSERT(obj->is<TypedObject>());
+ Rooted<TypedObject*> typedObj(cx, &obj->as<TypedObject>());
+
+ size_t propertyCount = typedObj->typeDescr().propertyCount(cx);
+ if (!properties.reserve(propertyCount)) {
+ return false;
+ }
+
+ RootedId id(cx);
+ for (size_t index = 0; index < propertyCount; index++) {
+ id = INT_TO_JSID(index);
+ properties.infallibleAppend(id);
+ }
+
+ return true;
+}
+
+bool TypedObject::loadValue(JSContext* cx, size_t offset, ValType type,
+ MutableHandleValue vp) {
+  // Temporary hack: (ref T) is not exposable to JS yet, but some tests want
+  // to access such fields, so we erase (ref T) to eqref when loading. This is
+  // safe because (ref T) <: eqref, and we are not in the writing case, where
+  // we would need to perform a type check.
+ if (type.isTypeIndex()) {
+ type = RefType::fromTypeCode(TypeCode::EqRef, true);
+ }
+ if (!type.isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+ return ToJSValue(cx, typedMem() + offset, type, vp);
+}
+
+/******************************************************************************
+ * Inline typed objects
+ */
+
+/* static */
+InlineTypedObject* InlineTypedObject::create(JSContext* cx,
+ HandleTypeDescr descr,
+ gc::InitialHeap heap) {
+ gc::AllocKind allocKind = allocKindForTypeDescriptor(descr);
+
+ RootedObjectGroup group(cx, ObjectGroup::defaultNewGroup(
+ cx, &InlineTypedObject::class_,
+ TaggedProto(&descr->typedProto()), descr));
+ if (!group) {
+ return nullptr;
+ }
+
+ NewObjectKind newKind =
+ (heap == gc::TenuredHeap) ? TenuredObject : GenericObject;
+ return NewObjectWithGroup<InlineTypedObject>(cx, group, allocKind, newKind);
+}
+
+/* static */
+void InlineTypedObject::obj_trace(JSTracer* trc, JSObject* object) {
+ InlineTypedObject& typedObj = object->as<InlineTypedObject>();
+
+ TraceEdge(trc, typedObj.shapePtr(), "InlineTypedObject_shape");
+
+ TypeDescr& descr = typedObj.typeDescr();
+ if (descr.hasTraceList()) {
+ gc::VisitTraceList(trc, object, typedObj.typeDescr().traceList(),
+ typedObj.inlineTypedMem());
+ return;
+ }
+
+ descr.traceInstance(trc, typedObj.inlineTypedMem());
+}
+
+/* static */
+size_t InlineTypedObject::obj_moved(JSObject* dst, JSObject* src) { return 0; }
+
+/******************************************************************************
+ * Typed object classes
+ */
+
+const ObjectOps TypedObject::objectOps_ = {
+ TypedObject::obj_lookupProperty, // lookupProperty
+ TypedObject::obj_defineProperty, // defineProperty
+ TypedObject::obj_hasProperty, // hasProperty
+ TypedObject::obj_getProperty, // getProperty
+ TypedObject::obj_setProperty, // setProperty
+ TypedObject::obj_getOwnPropertyDescriptor, // getOwnPropertyDescriptor
+ TypedObject::obj_deleteProperty, // deleteProperty
+ nullptr, // getElements
+ nullptr, // funToString
+};
+
+#define DEFINE_TYPEDOBJ_CLASS(Name, Trace, Moved) \
+ static const JSClassOps Name##ClassOps = { \
+ nullptr, /* addProperty */ \
+ nullptr, /* delProperty */ \
+ nullptr, /* enumerate */ \
+ TypedObject::obj_newEnumerate, \
+ nullptr, /* resolve */ \
+ nullptr, /* mayResolve */ \
+ nullptr, /* finalize */ \
+ nullptr, /* call */ \
+ nullptr, /* hasInstance */ \
+ nullptr, /* construct */ \
+ Trace, \
+ }; \
+ static const ClassExtension Name##ClassExt = { \
+ Moved /* objectMovedOp */ \
+ }; \
+ const JSClass Name::class_ = { \
+ #Name, JSClass::NON_NATIVE | JSCLASS_DELAY_METADATA_BUILDER, \
+ &Name##ClassOps, JS_NULL_CLASS_SPEC, \
+ &Name##ClassExt, &TypedObject::objectOps_}
+
+DEFINE_TYPEDOBJ_CLASS(OutlineTypedObject, OutlineTypedObject::obj_trace,
+ nullptr);
+DEFINE_TYPEDOBJ_CLASS(InlineTypedObject, InlineTypedObject::obj_trace,
+ InlineTypedObject::obj_moved);
+
+/* static */ JS::Result<TypedObject*, JS::OOM> TypedObject::create(
+ JSContext* cx, js::gc::AllocKind kind, js::gc::InitialHeap heap,
+ js::HandleShape shape, js::HandleObjectGroup group) {
+ debugCheckNewObject(group, shape, kind, heap);
+
+ const JSClass* clasp = group->clasp();
+ MOZ_ASSERT(::IsTypedObjectClass(clasp));
+
+ JSObject* obj =
+ js::AllocateObject(cx, kind, /* nDynamicSlots = */ 0, heap, clasp);
+ if (!obj) {
+ return cx->alreadyReportedOOM();
+ }
+
+ TypedObject* tobj = static_cast<TypedObject*>(obj);
+ tobj->initGroup(group);
+ tobj->initShape(shape);
+
+ MOZ_ASSERT(clasp->shouldDelayMetadataBuilder());
+ cx->realm()->setObjectPendingMetadata(cx, tobj);
+
+ js::gc::gcprobes::CreateObject(tobj);
+
+ return tobj;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Walking memory
+
+template <typename V>
+static void VisitReferences(JSContext* cx, TypeDescr& descr, uint8_t* base,
+ V& visitor, size_t offset) {
+ const auto& typeDef = descr.getType(cx);
+
+ if (typeDef.isStructType()) {
+ const auto& structType = typeDef.structType();
+ for (const StructField& field : structType.fields_) {
+ if (field.type.isReference()) {
+ uint32_t fieldOffset = offset + field.offset;
+ visitor.visitReference(base, fieldOffset);
+ }
+ }
+ return;
+ }
+
+ MOZ_ASSERT_UNREACHABLE();
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Initializing instances
+
+namespace {
+
+class MemoryInitVisitor {
+ public:
+ void visitReference(uint8_t* base, size_t offset);
+};
+
+} // namespace
+
+void MemoryInitVisitor::visitReference(uint8_t* base, size_t offset) {
+ js::GCPtrObject* objectPtr =
+ reinterpret_cast<js::GCPtrObject*>(base + offset);
+ objectPtr->init(nullptr);
+}
+
+void TypeDescr::initInstance(JSContext* cx, uint8_t* mem) {
+ MemoryInitVisitor visitor;
+
+ // Initialize the instance
+ memset(mem, 0, size());
+ VisitReferences(cx, *this, mem, visitor, 0);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Tracing instances
+
+namespace {
+
+class MemoryTracingVisitor {
+ JSTracer* trace_;
+
+ public:
+ explicit MemoryTracingVisitor(JSTracer* trace) : trace_(trace) {}
+
+ void visitReference(uint8_t* base, size_t offset);
+};
+
+} // namespace
+
+void MemoryTracingVisitor::visitReference(uint8_t* base, size_t offset) {
+ GCPtrObject* objectPtr = reinterpret_cast<js::GCPtrObject*>(base + offset);
+ TraceNullableEdge(trace_, objectPtr, "reference-obj");
+}
+
+void TypeDescr::traceInstance(JSTracer* trace, uint8_t* mem) {
+ JSContext* cx = trace->runtime()->mainContextFromOwnThread();
+ MemoryTracingVisitor visitor(trace);
+
+ VisitReferences(cx, *this, mem, visitor, 0);
+}
+
+namespace {
+
+struct TraceListVisitor {
+ using OffsetVector = Vector<uint32_t, 0, SystemAllocPolicy>;
+ // TODO/AnyRef-boxing: Once a WasmAnyRef is no longer just a JSObject*
+ // we must revisit this structure.
+ OffsetVector objectOffsets;
+
+ void visitReference(uint8_t* base, size_t offset);
+
+ bool fillList(Vector<uint32_t>& entries);
+};
+
+} // namespace
+
+void TraceListVisitor::visitReference(uint8_t* base, size_t offset) {
+ MOZ_ASSERT(!base);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+
+ MOZ_ASSERT(offset <= UINT32_MAX);
+ if (!objectOffsets.append(offset)) {
+ oomUnsafe.crash("TraceListVisitor::visitReference");
+ }
+}
+
+bool TraceListVisitor::fillList(Vector<uint32_t>& entries) {
+ return entries.append(0) /* stringOffsets.length() */ &&
+ entries.append(objectOffsets.length()) &&
+ entries.append(0) /* valueOffsets.length() */ &&
+ entries.appendAll(objectOffsets);
+}
+
+static bool CreateTraceList(JSContext* cx, HandleTypeDescr descr) {
+ // Trace lists are only used for inline typed objects. We don't use them
+ // for larger objects, both to limit the size of the trace lists and
+ // because tracing outline typed objects is considerably more complicated
+ // than inline ones.
+ if (!InlineTypedObject::canAccommodateType(descr)) {
+ return true;
+ }
+
+ TraceListVisitor visitor;
+ VisitReferences(cx, *descr, nullptr, visitor, 0);
+
+ Vector<uint32_t> entries(cx);
+ if (!visitor.fillList(entries)) {
+ return false;
+ }
+
+ // Trace lists aren't necessary for descriptors with no references.
+ MOZ_ASSERT(entries.length() >= 3);
+ if (entries.length() == 3) {
+ MOZ_ASSERT(entries[0] == 0 && entries[1] == 0 && entries[2] == 0);
+ return true;
+ }
+
+ uint32_t* list = cx->pod_malloc<uint32_t>(entries.length());
+ if (!list) {
+ return false;
+ }
+
+ PodCopy(list, entries.begin(), entries.length());
+
+ size_t size = entries.length() * sizeof(uint32_t);
+ InitReservedSlot(descr, TypeDescr::TraceList, list, size,
+ MemoryUse::TypeDescrTraceList);
+ return true;
+}
+
+/* static */
+void TypeDescr::finalize(JSFreeOp* fop, JSObject* obj) {
+ TypeDescr& descr = obj->as<TypeDescr>();
+ if (descr.hasTraceList()) {
+ auto list = const_cast<uint32_t*>(descr.traceList());
+ size_t size = (3 + list[0] + list[1] + list[2]) * sizeof(uint32_t);
+ fop->free_(obj, list, size, MemoryUse::TypeDescrTraceList);
+ }
+}
diff --git a/js/src/wasm/TypedObject.h b/js/src/wasm/TypedObject.h
new file mode 100644
index 0000000000..5fd202f6d6
--- /dev/null
+++ b/js/src/wasm/TypedObject.h
@@ -0,0 +1,290 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef wasm_TypedObject_h
+#define wasm_TypedObject_h
+
+#include "mozilla/CheckedInt.h"
+
+#include "gc/Allocator.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/JSObject.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+
+/* The prototype for typed objects. */
+class TypedProto : public NativeObject {
+ public:
+ static const JSClass class_;
+ static TypedProto* create(JSContext* cx);
+};
+
+class TypeDescr : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ enum Slot {
+ Handle = 0, // Type handle index
+ Size = 1, // Size of type in bytes
+ Proto = 2, // Prototype for instances, if any
+ TraceList = 3, // List of references for use in tracing
+ // Maximum number of slots
+ SlotCount = 4,
+ };
+
+ static TypeDescr* createFromHandle(JSContext* cx, wasm::TypeHandle handle);
+
+ TypedProto& typedProto() const {
+ return getReservedSlot(Slot::Proto).toObject().as<TypedProto>();
+ }
+
+ size_t size() const { return getReservedSlot(Slot::Size).toInt32(); }
+
+ const wasm::TypeDef& getType(JSContext* cx) const;
+
+ [[nodiscard]] bool lookupProperty(JSContext* cx, jsid id, uint32_t* offset,
+ wasm::ValType* type);
+ [[nodiscard]] bool hasProperty(JSContext* cx, jsid id) {
+ uint32_t offset;
+ wasm::ValType type;
+ return lookupProperty(cx, id, &offset, &type);
+ }
+ uint32_t propertyCount(JSContext* cx);
+
+ // Type descriptors may contain a list of their references for use during
+ // scanning. Typed object trace hooks can use this to call an optimized
+ // marking path that doesn't need to dispatch on the tracer kind for each
+ // edge. This list is only specified when (a) the descriptor is short enough
+ // that it can fit in an InlineTypedObject, and (b) the descriptor contains at
+ // least one reference. Otherwise its value is undefined.
+ //
+ // The list is three consecutive arrays of uint32_t offsets, preceded by a
+ // header consisting of the length of each array. The arrays store offsets of
+ // string, object/anyref, and value references in the descriptor, in that
+ // order.
+ // TODO/AnyRef-boxing: once anyref has a more complicated structure, we must
+ // revisit this.
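+  //
+  // For example (an illustrative case, not taken from a particular type): a
+  // descriptor with object references at byte offsets 0 and 8 and no string
+  // or value references gets the list [0, 2, 0, 0, 8], i.e. the three array
+  // lengths followed by the object offsets.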
+ [[nodiscard]] bool hasTraceList() const {
+ return !getFixedSlot(Slot::TraceList).isUndefined();
+ }
+ const uint32_t* traceList() const {
+ MOZ_ASSERT(hasTraceList());
+ return reinterpret_cast<uint32_t*>(
+ getFixedSlot(Slot::TraceList).toPrivate());
+ }
+
+ void initInstance(JSContext* cx, uint8_t* mem);
+ void traceInstance(JSTracer* trace, uint8_t* mem);
+
+ static void finalize(JSFreeOp* fop, JSObject* obj);
+};
+
+using HandleTypeDescr = Handle<TypeDescr*>;
+
+/* Base type for typed objects. */
+class TypedObject : public JSObject {
+ protected:
+ static const ObjectOps objectOps_;
+
+ [[nodiscard]] static bool obj_lookupProperty(
+ JSContext* cx, HandleObject obj, HandleId id, MutableHandleObject objp,
+ MutableHandle<PropertyResult> propp);
+
+ [[nodiscard]] static bool obj_defineProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result);
+
+ [[nodiscard]] static bool obj_hasProperty(JSContext* cx, HandleObject obj,
+ HandleId id, bool* foundp);
+
+ [[nodiscard]] static bool obj_getProperty(JSContext* cx, HandleObject obj,
+ HandleValue receiver, HandleId id,
+ MutableHandleValue vp);
+
+ [[nodiscard]] static bool obj_setProperty(JSContext* cx, HandleObject obj,
+ HandleId id, HandleValue v,
+ HandleValue receiver,
+ ObjectOpResult& result);
+
+ [[nodiscard]] static bool obj_getOwnPropertyDescriptor(
+ JSContext* cx, HandleObject obj, HandleId id,
+ MutableHandle<PropertyDescriptor> desc);
+
+ [[nodiscard]] static bool obj_deleteProperty(JSContext* cx, HandleObject obj,
+ HandleId id,
+ ObjectOpResult& result);
+
+ bool loadValue(JSContext* cx, size_t offset, wasm::ValType type,
+ MutableHandleValue vp);
+
+ uint8_t* typedMem() const;
+ uint8_t* typedMemBase() const;
+
+ public:
+ [[nodiscard]] static bool obj_newEnumerate(JSContext* cx, HandleObject obj,
+ MutableHandleIdVector properties,
+ bool enumerableOnly);
+
+ TypedProto& typedProto() const {
+ // Typed objects' prototypes can't be modified.
+ return staticPrototype()->as<TypedProto>();
+ }
+ TypeDescr& typeDescr() const { return group()->typeDescr(); }
+
+ static JS::Result<TypedObject*, JS::OOM> create(JSContext* cx,
+ js::gc::AllocKind kind,
+ js::gc::InitialHeap heap,
+ js::HandleShape shape,
+ js::HandleObjectGroup group);
+
+ uint32_t offset() const;
+ uint32_t size() const { return typeDescr().size(); }
+ uint8_t* typedMem(const JS::AutoRequireNoGC&) const { return typedMem(); }
+ uint8_t* typedMem(size_t offset, const JS::AutoRequireNoGC& nogc) const {
+ // It seems a bit surprising that one might request an offset
+ // == size(), but it can happen when taking the "address of" a
+ // 0-sized value. (In other words, we maintain the invariant
+ // that `offset + size <= size()` -- this is always checked in
+ // the caller's side.)
+ MOZ_ASSERT(offset <= (size_t)size());
+ return typedMem(nogc) + offset;
+ }
+
+ // Creates a new typed object whose memory is freshly allocated and
+ // initialized with zeroes (or, in the case of references, an appropriate
+ // default value).
+ static TypedObject* createZeroed(JSContext* cx, HandleTypeDescr typeObj,
+ gc::InitialHeap heap = gc::DefaultHeap);
+
+ Shape** addressOfShapeFromGC() { return shape_.unbarrieredAddress(); }
+};
+
+using HandleTypedObject = Handle<TypedObject*>;
+
+class OutlineTypedObject : public TypedObject {
+ // The object which owns the data this object points to. Because this
+ // pointer is managed in tandem with |data|, this is not a GCPtr and
+ // barriers are managed directly.
+ JSObject* owner_;
+
+ // Data pointer to some offset in the owner's contents.
+ uint8_t* data_;
+
+ void setOwnerAndData(JSObject* owner, uint8_t* data);
+
+ void setData(uint8_t* data) { data_ = data; }
+
+ public:
+ // JIT accessors.
+ static size_t offsetOfData() { return offsetof(OutlineTypedObject, data_); }
+ static size_t offsetOfOwner() { return offsetof(OutlineTypedObject, owner_); }
+
+ static const JSClass class_;
+
+ JSObject& owner() const {
+ MOZ_ASSERT(owner_);
+ return *owner_;
+ }
+
+ uint8_t* outOfLineTypedMem() const { return data_; }
+
+ private:
+ // Creates an unattached typed object or handle (depending on the
+ // type parameter T). Note that it is only legal for unattached
+ // handles to escape to the end user; for non-handles, the caller
+ // should always invoke one of the `attach()` methods below.
+ //
+ // Arguments:
+ // - type: type object for resulting object
+ static OutlineTypedObject* createUnattached(
+ JSContext* cx, HandleTypeDescr type,
+ gc::InitialHeap heap = gc::DefaultHeap);
+
+ public:
+ static OutlineTypedObject* createZeroed(JSContext* cx, HandleTypeDescr descr,
+ gc::InitialHeap heap);
+
+ private:
+ // This method should only be used when `buffer` is the owner of the memory.
+ void attach(ArrayBufferObject& buffer);
+
+ public:
+ static void obj_trace(JSTracer* trace, JSObject* object);
+};
+
+// Class for a typed object whose data is allocated inline.
+class InlineTypedObject : public TypedObject {
+ friend class TypedObject;
+
+ // Start of the inline data, which immediately follows the shape and type.
+ uint8_t data_[1];
+
+ public:
+ static const JSClass class_;
+
+ static const size_t MaxInlineBytes =
+ JSObject::MAX_BYTE_SIZE - sizeof(TypedObject);
+
+ protected:
+ uint8_t* inlineTypedMem() const { return (uint8_t*)&data_; }
+
+ public:
+ static inline gc::AllocKind allocKindForTypeDescriptor(TypeDescr* descr);
+
+ static bool canAccommodateSize(size_t size) { return size <= MaxInlineBytes; }
+
+ static bool canAccommodateType(TypeDescr* type) {
+ return type->size() <= MaxInlineBytes;
+ }
+
+ uint8_t* inlineTypedMem(const JS::AutoRequireNoGC&) const {
+ return inlineTypedMem();
+ }
+
+ static void obj_trace(JSTracer* trace, JSObject* object);
+ static size_t obj_moved(JSObject* dst, JSObject* src);
+
+ static size_t offsetOfDataStart() {
+ return offsetof(InlineTypedObject, data_);
+ }
+
+ static InlineTypedObject* create(JSContext* cx, HandleTypeDescr descr,
+ gc::InitialHeap heap = gc::DefaultHeap);
+};
+
+inline bool IsTypedObjectClass(const JSClass* class_) {
+ return class_ == &OutlineTypedObject::class_ ||
+ class_ == &InlineTypedObject::class_;
+}
+
+inline bool IsOutlineTypedObjectClass(const JSClass* class_) {
+ return class_ == &OutlineTypedObject::class_;
+}
+
+inline bool IsInlineTypedObjectClass(const JSClass* class_) {
+ return class_ == &InlineTypedObject::class_;
+}
+
+} // namespace js
+
+template <>
+inline bool JSObject::is<js::TypedObject>() const {
+ return js::IsTypedObjectClass(getClass());
+}
+
+template <>
+inline bool JSObject::is<js::OutlineTypedObject>() const {
+ return js::IsOutlineTypedObjectClass(getClass());
+}
+
+template <>
+inline bool JSObject::is<js::InlineTypedObject>() const {
+ return js::IsInlineTypedObjectClass(getClass());
+}
+
+#endif /* wasm_TypedObject_h */
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
new file mode 100644
index 0000000000..a22a07b944
--- /dev/null
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -0,0 +1,15908 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * [SMDOC] WebAssembly baseline compiler (RabaldrMonkey)
+ *
+ * General assumptions for 32-bit vs 64-bit code:
+ *
+ * - A 32-bit register can be extended in-place to a 64-bit register on 64-bit
+ * systems.
+ *
+ * - Code that knows that Register64 has a '.reg' member on 64-bit systems and
+ * '.high' and '.low' members on 32-bit systems, or knows the implications
+ * thereof, is #ifdef JS_PUNBOX64. All other code is #if(n)?def JS_64BIT.
+ *
+ *
+ * Coding standards:
+ *
+ * - In "small" code generating functions (eg emitMultiplyF64, emitQuotientI32,
+ * and surrounding functions; most functions fall into this class) where the
+ * meaning is obvious:
+ *
+ * - if there is a single source + destination register, it is called 'r'
+ * - if there is one source and a different destination, they are called 'rs'
+ * and 'rd'
+ * - if there is one source + destination register and another source register
+ * they are called 'r' and 'rs'
+ * - if there are two source registers and a destination register they are
+ * called 'rs0', 'rs1', and 'rd'.
+ *
+ * - Generic temp registers are named /temp[0-9]?/ not /tmp[0-9]?/.
+ *
+ * - Registers can be named non-generically for their function ('rp' for the
+ * 'pointer' register and 'rv' for the 'value' register are typical) and those
+ * names may or may not have an 'r' prefix.
+ *
+ * - "Larger" code generating functions make their own rules.
+ *
+ *
+ * General status notes:
+ *
+ * "FIXME" indicates a known or suspected bug. Always has a bug#.
+ *
+ * "TODO" indicates an opportunity for a general improvement, with an additional
+ * tag to indicate the area of improvement. Usually has a bug#.
+ *
+ * There are lots of machine dependencies here but they are pretty well isolated
+ * to a segment of the compiler. Many dependencies will eventually be factored
+ * into the MacroAssembler layer and shared with other code generators.
+ *
+ *
+ * High-value compiler performance improvements:
+ *
+ * - (Bug 1316802) The specific-register allocator (the needI32(r), needI64(r)
+ * etc methods) can avoid syncing the value stack if the specific register is
+ * in use but there is a free register to shuffle the specific register into.
+ * (This will also improve the generated code.) The sync happens often enough
+ * here to show up in profiles, because it is triggered by integer multiply
+ * and divide.
+ *
+ *
+ * High-value code generation improvements:
+ *
+ * - (Bug 1316804) brTable pessimizes by always dispatching to code that pops
+ * the stack and then jumps to the code for the target case. If no cleanup is
+ * needed we could just branch conditionally to the target; if the same amount
+ * of cleanup is needed for all cases then the cleanup can be done before the
+ * dispatch. Both are highly likely.
+ *
+ * - (Bug 1316806) Register management around calls: At the moment we sync the
+ * value stack unconditionally (this is simple) but there are probably many
+ * common cases where we could instead save/restore live caller-saves
+ * registers and perform parallel assignment into argument registers. This
+ * may be important if we keep some locals in registers.
+ *
+ * - (Bug 1316808) Allocate some locals to registers on machines where there are
+ * enough registers. This is probably hard to do well in a one-pass compiler
+ * but it might be that just keeping register arguments and the first few
+ * locals in registers is a viable strategy; another (more general) strategy
+ * is caching locals in registers in straight-line code. Such caching could
+ * also track constant values in registers, if that is deemed valuable. A
+ * combination of techniques may be desirable: parameters and the first few
+ * locals could be cached on entry to the function but not statically assigned
+ * to registers throughout.
+ *
+ * (On a large corpus of code it should be possible to compute, for every
+ * signature comprising the types of parameters and locals, and using a static
+ * weight for loops, a list in priority order of which parameters and locals
+ * that should be assigned to registers. Or something like that. Wasm makes
+ * this simple. Static assignments are desirable because they are not flushed
+ * to memory by the pre-block sync() call.)
+ */
+
+#include "wasm/WasmBaselineCompile.h"
+
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "jit/AtomicOp.h"
+#include "jit/IonTypes.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Label.h"
+#include "jit/MIR.h"
+#include "jit/RegisterAllocator.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#if defined(JS_CODEGEN_ARM)
+# include "jit/arm/Assembler-arm.h"
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+# include "jit/x86-shared/Architecture-x86-shared.h"
+# include "jit/x86-shared/Assembler-x86-shared.h"
+#endif
+#if defined(JS_CODEGEN_MIPS32)
+# include "jit/mips-shared/Assembler-mips-shared.h"
+# include "jit/mips32/Assembler-mips32.h"
+#endif
+#if defined(JS_CODEGEN_MIPS64)
+# include "jit/mips-shared/Assembler-mips-shared.h"
+# include "jit/mips64/Assembler-mips64.h"
+#endif
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "util/Memory.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+
+namespace js {
+namespace wasm {
+
+using namespace js::jit;
+
+using HandleNaNSpecially = bool;
+using InvertBranch = bool;
+using IsKnownNotZero = bool;
+using IsUnsigned = bool;
+using NeedsBoundsCheck = bool;
+using WantResult = bool;
+using ZeroOnOverflow = bool;
+
+class BaseStackFrame;
+
+// Two flags, useABI and interModule, control how calls are made.
+//
+// UseABI::Wasm implies that the Tls/Heap/Global registers are nonvolatile,
+// except when InterModule::True is also set, when they are volatile.
+//
+// UseABI::Builtin implies that the Tls/Heap/Global registers are volatile.
+// In this case, we require InterModule::False. The calling convention
+// is otherwise like UseABI::Wasm.
+//
+// UseABI::System implies that the Tls/Heap/Global registers are volatile.
+// Additionally, the parameter passing mechanism may be slightly different from
+// the UseABI::Wasm convention.
+//
+// When the Tls/Heap/Global registers are not volatile, the baseline compiler
+// will restore the Tls register from its save slot before the call, since the
+// baseline compiler uses the Tls register for other things.
+//
+// When those registers are volatile, the baseline compiler will reload them
+// after the call (it will restore the Tls register from the save slot and load
+// the other two from the Tls data).
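+//
+// As a sketch of typical combinations (not an exhaustive list, and subject to
+// the rules above):
+//
+// - an intra-module wasm-to-wasm call uses UseABI::Wasm + InterModule::False,
+//   so the Tls/Heap/Global registers are preserved across the call;
+// - a call that may cross module boundaries (eg through an import) uses
+//   UseABI::Wasm + InterModule::True, so those registers are reloaded after
+//   the call;
+// - a call to a builtin thunk uses UseABI::Builtin + InterModule::False;
+// - a call out to a C++ helper using the native ABI uses UseABI::System +
+//   InterModule::False.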
+
+enum class UseABI { Wasm, Builtin, System };
+enum class InterModule { False = false, True = true };
+enum class RhsDestOp { True = true };
+
+#if defined(JS_CODEGEN_NONE)
+# define RABALDR_SCRATCH_I32
+# define RABALDR_SCRATCH_F32
+# define RABALDR_SCRATCH_F64
+
+static constexpr Register RabaldrScratchI32 = Register::Invalid();
+static constexpr FloatRegister RabaldrScratchF32 = InvalidFloatReg;
+static constexpr FloatRegister RabaldrScratchF64 = InvalidFloatReg;
+#endif
+
+#ifdef JS_CODEGEN_ARM64
+# define RABALDR_CHUNKY_STACK
+# define RABALDR_SIDEALLOC_V128
+# define RABALDR_SCRATCH_I32
+# define RABALDR_SCRATCH_F32
+# define RABALDR_SCRATCH_F64
+# define RABALDR_SCRATCH_V128
+# define RABALDR_SCRATCH_F32_ALIASES_F64
+
+static constexpr Register RabaldrScratchI32{Registers::x15};
+
+// Note, the float scratch regs cannot be registers that are used for parameter
+// passing in any ABI we use. Argregs tend to be low-numbered; register 30
+// should be safe.
+
+static constexpr FloatRegister RabaldrScratchF32{FloatRegisters::s30,
+ FloatRegisters::Single};
+static constexpr FloatRegister RabaldrScratchF64{FloatRegisters::d30,
+ FloatRegisters::Double};
+# ifdef ENABLE_WASM_SIMD
+static constexpr FloatRegister RabaldrScratchV128{FloatRegisters::d30,
+ FloatRegisters::Simd128};
+# endif
+
+static_assert(RabaldrScratchF32 != ScratchFloat32Reg, "Too busy");
+static_assert(RabaldrScratchF64 != ScratchDoubleReg, "Too busy");
+# ifdef ENABLE_WASM_SIMD
+static_assert(RabaldrScratchV128 != ScratchSimd128Reg, "Too busy");
+# endif
+#endif
+
+#ifdef JS_CODEGEN_X86
+// The selection of EBX here steps gingerly around: the need for EDX
+// to be allocatable for multiply/divide; ECX to be allocatable for
+// shift/rotate; EAX (= ReturnReg) to be allocatable as the result
+// register; EBX not being one of the WasmTableCall registers; and
+// needing a temp register for load/store that has a single-byte
+// persona.
+//
+// The compiler assumes that RabaldrScratchI32 has a single-byte
+// persona. Code for 8-byte atomic operations assumes that
+// RabaldrScratchI32 is in fact ebx.
+
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = ebx;
+
+# define RABALDR_INT_DIV_I64_CALLOUT
+#endif
+
+#ifdef JS_CODEGEN_ARM
+// We use our own scratch register, because the macro assembler uses
+// the regular scratch register(s) pretty liberally. We could
+// work around that in several cases but the mess does not seem
+// worth it yet. CallTempReg2 seems safe.
+
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = CallTempReg2;
+
+# define RABALDR_INT_DIV_I64_CALLOUT
+# define RABALDR_I64_TO_FLOAT_CALLOUT
+# define RABALDR_FLOAT_TO_I64_CALLOUT
+#endif
+
+#ifdef JS_CODEGEN_MIPS32
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = CallTempReg2;
+
+# define RABALDR_INT_DIV_I64_CALLOUT
+# define RABALDR_I64_TO_FLOAT_CALLOUT
+# define RABALDR_FLOAT_TO_I64_CALLOUT
+#endif
+
+#ifdef JS_CODEGEN_MIPS64
+# define RABALDR_SCRATCH_I32
+static constexpr Register RabaldrScratchI32 = CallTempReg2;
+#endif
+
+#ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+# if !defined(RABALDR_SCRATCH_F32) || !defined(RABALDR_SCRATCH_F64)
+# error "Bad configuration"
+# endif
+#endif
+
+template <MIRType t>
+struct RegTypeOf {
+#ifdef ENABLE_WASM_SIMD
+ static_assert(t == MIRType::Float32 || t == MIRType::Double ||
+ t == MIRType::Simd128,
+ "Float mask type");
+#else
+ static_assert(t == MIRType::Float32 || t == MIRType::Double,
+ "Float mask type");
+#endif
+};
+
+template <>
+struct RegTypeOf<MIRType::Float32> {
+ static constexpr RegTypeName value = RegTypeName::Float32;
+};
+template <>
+struct RegTypeOf<MIRType::Double> {
+ static constexpr RegTypeName value = RegTypeName::Float64;
+};
+#ifdef ENABLE_WASM_SIMD
+template <>
+struct RegTypeOf<MIRType::Simd128> {
+ static constexpr RegTypeName value = RegTypeName::Vector128;
+};
+#endif
+
+// The strongly typed register wrappers are especially useful to distinguish
+// float registers from double registers, but they also clearly distinguish
+// 32-bit registers from 64-bit register pairs on 32-bit systems.
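+//
+// For example (an illustrative sketch, not additional compiler code):
+//
+//   RegI32 r(ReturnReg);           // a 32-bit integer register
+//   RegF64 d(ReturnDoubleReg);     // a double register; the ctor asserts isDouble()
+//   RegI32 none;                   // default-constructed, so none.isInvalid()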
+
+struct RegI32 : public Register {
+ RegI32() : Register(Register::Invalid()) {}
+ explicit RegI32(Register reg) : Register(reg) {
+ MOZ_ASSERT(reg != Invalid());
+ }
+ bool isInvalid() const { return *this == Invalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegI32 Invalid() { return RegI32(); }
+};
+
+struct RegI64 : public Register64 {
+ RegI64() : Register64(Register64::Invalid()) {}
+ explicit RegI64(Register64 reg) : Register64(reg) {
+ MOZ_ASSERT(reg != Invalid());
+ }
+ bool isInvalid() const { return *this == Invalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegI64 Invalid() { return RegI64(); }
+};
+
+struct RegPtr : public Register {
+ RegPtr() : Register(Register::Invalid()) {}
+ explicit RegPtr(Register reg) : Register(reg) {
+ MOZ_ASSERT(reg != Invalid());
+ }
+ bool isInvalid() const { return *this == Invalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegPtr Invalid() { return RegPtr(); }
+};
+
+struct RegF32 : public FloatRegister {
+ RegF32() : FloatRegister() {}
+ explicit RegF32(FloatRegister reg) : FloatRegister(reg) {
+ MOZ_ASSERT(isSingle());
+ }
+ bool isValid() const { return !isInvalid(); }
+ static RegF32 Invalid() { return RegF32(); }
+};
+
+struct RegF64 : public FloatRegister {
+ RegF64() : FloatRegister() {}
+ explicit RegF64(FloatRegister reg) : FloatRegister(reg) {
+ MOZ_ASSERT(isDouble());
+ }
+ bool isValid() const { return !isInvalid(); }
+ static RegF64 Invalid() { return RegF64(); }
+};
+
+#ifdef ENABLE_WASM_SIMD
+# ifdef RABALDR_SIDEALLOC_V128
+class RegV128 {
+ // fpr_ is either invalid or a double that aliases the simd register, see
+ // comments below at BaseRegAlloc.
+ FloatRegister fpr_;
+
+ public:
+ RegV128() : fpr_(FloatRegister()) {}
+ explicit RegV128(FloatRegister reg)
+ : fpr_(FloatRegister(reg.encoding(), FloatRegisters::Double)) {
+ MOZ_ASSERT(reg.isSimd128());
+ }
+ static RegV128 fromDouble(FloatRegister reg) {
+ MOZ_ASSERT(reg.isDouble());
+ return RegV128(FloatRegister(reg.encoding(), FloatRegisters::Simd128));
+ }
+ FloatRegister asDouble() const { return fpr_; }
+ bool isInvalid() const { return fpr_.isInvalid(); }
+ bool isValid() const { return !isInvalid(); }
+ static RegV128 Invalid() { return RegV128(); }
+
+ operator FloatRegister() const {
+ return FloatRegister(fpr_.encoding(), FloatRegisters::Simd128);
+ }
+
+ bool operator==(const RegV128& that) const {
+ return asDouble() == that.asDouble();
+ }
+
+ bool operator!=(const RegV128& that) const {
+ return asDouble() != that.asDouble();
+ }
+};
+# else
+struct RegV128 : public FloatRegister {
+ RegV128() : FloatRegister() {}
+ explicit RegV128(FloatRegister reg) : FloatRegister(reg) {
+ MOZ_ASSERT(isSimd128());
+ }
+ bool isValid() const { return !isInvalid(); }
+ static RegV128 Invalid() { return RegV128(); }
+};
+# endif
+#endif
+
+struct AnyReg {
+ union {
+ RegI32 i32_;
+ RegI64 i64_;
+ RegPtr ref_;
+ RegF32 f32_;
+ RegF64 f64_;
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128_;
+#endif
+ };
+
+ enum {
+ I32,
+ I64,
+ REF,
+ F32,
+ F64,
+#ifdef ENABLE_WASM_SIMD
+ V128
+#endif
+ } tag;
+
+ explicit AnyReg(RegI32 r) {
+ tag = I32;
+ i32_ = r;
+ }
+ explicit AnyReg(RegI64 r) {
+ tag = I64;
+ i64_ = r;
+ }
+ explicit AnyReg(RegF32 r) {
+ tag = F32;
+ f32_ = r;
+ }
+ explicit AnyReg(RegF64 r) {
+ tag = F64;
+ f64_ = r;
+ }
+#ifdef ENABLE_WASM_SIMD
+ explicit AnyReg(RegV128 r) {
+ tag = V128;
+ v128_ = r;
+ }
+#endif
+ explicit AnyReg(RegPtr r) {
+ tag = REF;
+ ref_ = r;
+ }
+
+ RegI32 i32() const {
+ MOZ_ASSERT(tag == I32);
+ return i32_;
+ }
+ RegI64 i64() const {
+ MOZ_ASSERT(tag == I64);
+ return i64_;
+ }
+ RegF32 f32() const {
+ MOZ_ASSERT(tag == F32);
+ return f32_;
+ }
+ RegF64 f64() const {
+ MOZ_ASSERT(tag == F64);
+ return f64_;
+ }
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128() const {
+ MOZ_ASSERT(tag == V128);
+ return v128_;
+ }
+#endif
+ RegPtr ref() const {
+ MOZ_ASSERT(tag == REF);
+ return ref_;
+ }
+
+ AnyRegister any() const {
+ switch (tag) {
+ case F32:
+ return AnyRegister(f32_);
+ case F64:
+ return AnyRegister(f64_);
+#ifdef ENABLE_WASM_SIMD
+ case V128:
+ return AnyRegister(v128_);
+#endif
+ case I32:
+ return AnyRegister(i32_);
+ case I64:
+#ifdef JS_PUNBOX64
+ return AnyRegister(i64_.reg);
+#else
+ // The compiler is written so that this is never needed: any() is
+ // called on arbitrary registers for asm.js but asm.js does not have
+ // 64-bit ints. For wasm, any() is called on arbitrary registers
+ // only on 64-bit platforms.
+ MOZ_CRASH("AnyReg::any() on 32-bit platform");
+#endif
+ case REF:
+ MOZ_CRASH("AnyReg::any() not implemented for ref types");
+ default:
+ MOZ_CRASH();
+ }
+ // Work around GCC 5 analysis/warning bug.
+ MOZ_CRASH("AnyReg::any(): impossible case");
+ }
+};
+
+// Platform-specific registers.
+//
+// All platforms must define struct SpecificRegs. All 32-bit platforms must
+// have an abiReturnRegI64 member in that struct.
+
+#if defined(JS_CODEGEN_X64)
+struct SpecificRegs {
+ RegI32 eax, ecx, edx, edi, esi;
+ RegI64 rax, rcx, rdx;
+
+ SpecificRegs()
+ : eax(RegI32(js::jit::eax)),
+ ecx(RegI32(js::jit::ecx)),
+ edx(RegI32(js::jit::edx)),
+ edi(RegI32(js::jit::edi)),
+ esi(RegI32(js::jit::esi)),
+ rax(RegI64(Register64(js::jit::rax))),
+ rcx(RegI64(Register64(js::jit::rcx))),
+ rdx(RegI64(Register64(js::jit::rdx))) {}
+};
+#elif defined(JS_CODEGEN_X86)
+struct SpecificRegs {
+ RegI32 eax, ecx, edx, edi, esi;
+ RegI64 ecx_ebx, edx_eax, abiReturnRegI64;
+
+ SpecificRegs()
+ : eax(RegI32(js::jit::eax)),
+ ecx(RegI32(js::jit::ecx)),
+ edx(RegI32(js::jit::edx)),
+ edi(RegI32(js::jit::edi)),
+ esi(RegI32(js::jit::esi)),
+ ecx_ebx(RegI64(Register64(js::jit::ecx, js::jit::ebx))),
+ edx_eax(RegI64(Register64(js::jit::edx, js::jit::eax))),
+ abiReturnRegI64(edx_eax) {}
+};
+#elif defined(JS_CODEGEN_ARM)
+struct SpecificRegs {
+ RegI64 abiReturnRegI64;
+
+ SpecificRegs() : abiReturnRegI64(ReturnReg64) {}
+};
+#elif defined(JS_CODEGEN_ARM64)
+struct SpecificRegs {};
+#elif defined(JS_CODEGEN_MIPS32)
+struct SpecificRegs {
+ RegI64 abiReturnRegI64;
+
+ SpecificRegs() : abiReturnRegI64(ReturnReg64) {}
+};
+#elif defined(JS_CODEGEN_MIPS64)
+struct SpecificRegs {};
+#else
+struct SpecificRegs {
+# ifndef JS_64BIT
+ RegI64 abiReturnRegI64;
+# endif
+
+ SpecificRegs() { MOZ_CRASH("BaseCompiler porting interface: SpecificRegs"); }
+};
+#endif
+
+class BaseCompilerInterface {
+ public:
+ // Spill all spillable registers.
+ //
+ // TODO / OPTIMIZE (Bug 1316802): It's possible to do better here by
+ // spilling only enough registers to satisfy current needs.
+ virtual void sync() = 0;
+ virtual void saveTempPtr(RegPtr r) = 0;
+ virtual void restoreTempPtr(RegPtr r) = 0;
+};
+
+// Register allocator.
+
+class BaseRegAlloc {
+ // Notes on float register allocation.
+ //
+ // The general rule in SpiderMonkey is that float registers can alias double
+ // registers, but there are predicates to handle exceptions to that rule:
+ // hasUnaliasedDouble() and hasMultiAlias(). The way aliasing actually
+ // works is platform dependent and exposed through the aliased(n, &r)
+ // predicate, etc.
+ //
+ // - hasUnaliasedDouble(): on ARM VFPv3-D32 there are double registers that
+ // cannot be treated as float.
+ // - hasMultiAlias(): on ARM and MIPS a double register aliases two float
+ // registers.
+ //
+ // On some platforms (x86, x64, ARM64) but not all (ARM)
+ // ScratchFloat32Register is the same as ScratchDoubleRegister.
+ //
+ // It's a basic invariant of the AllocatableRegisterSet that it deals
+ // properly with aliasing of registers: if s0 or s1 are allocated then d0 is
+ // not allocatable; if s0 and s1 are freed individually then d0 becomes
+ // allocatable.
+ //
+ // On platforms with RABALDR_SIDEALLOC_V128, the register set does not
+ // represent SIMD registers. Instead, we allocate and free these registers as
+ // doubles and change the kind to Simd128 while the register is exposed to
+ // masm. (This is the case on ARM64 for now, and is a consequence of needing
+ // more than 64 bits for FloatRegisters::SetType to represent SIMD registers.
+  // See the lengthy comment in Architecture-arm64.h.)
+
+ BaseCompilerInterface* bc;
+ AllocatableGeneralRegisterSet availGPR;
+ AllocatableFloatRegisterSet availFPU;
+#ifdef DEBUG
+ // The registers available after removing ScratchReg, HeapReg, etc.
+ AllocatableGeneralRegisterSet allGPR;
+ AllocatableFloatRegisterSet allFPU;
+ uint32_t scratchTaken;
+#endif
+#ifdef JS_CODEGEN_X86
+ AllocatableGeneralRegisterSet singleByteRegs;
+#endif
+
+ bool hasGPR() { return !availGPR.empty(); }
+
+ bool hasGPR64() {
+#ifdef JS_PUNBOX64
+ return !availGPR.empty();
+#else
+ if (availGPR.empty()) {
+ return false;
+ }
+ Register r = allocGPR();
+ bool available = !availGPR.empty();
+ freeGPR(r);
+ return available;
+#endif
+ }
+
+ template <MIRType t>
+ bool hasFPU() {
+#ifdef RABALDR_SIDEALLOC_V128
+ // Workaround for GCC problem, bug 1677690
+ if constexpr (t == MIRType::Simd128) {
+ MOZ_CRASH("Should not happen");
+ } else
+#endif
+ {
+ return availFPU.hasAny<RegTypeOf<t>::value>();
+ }
+ }
+
+ bool isAvailableGPR(Register r) { return availGPR.has(r); }
+
+ bool isAvailableFPU(FloatRegister r) {
+#ifdef RABALDR_SIDEALLOC_V128
+ MOZ_ASSERT(!r.isSimd128());
+#endif
+ return availFPU.has(r);
+ }
+
+ void allocGPR(Register r) {
+ MOZ_ASSERT(isAvailableGPR(r));
+ availGPR.take(r);
+ }
+
+ Register allocGPR() {
+ MOZ_ASSERT(hasGPR());
+ return availGPR.takeAny();
+ }
+
+ void allocInt64(Register64 r) {
+#ifdef JS_PUNBOX64
+ allocGPR(r.reg);
+#else
+ allocGPR(r.low);
+ allocGPR(r.high);
+#endif
+ }
+
+ Register64 allocInt64() {
+ MOZ_ASSERT(hasGPR64());
+#ifdef JS_PUNBOX64
+ return Register64(availGPR.takeAny());
+#else
+ Register high = availGPR.takeAny();
+ Register low = availGPR.takeAny();
+ return Register64(high, low);
+#endif
+ }
+
+#ifdef JS_CODEGEN_ARM
+ // r12 is normally the ScratchRegister and r13 is always the stack pointer,
+ // so the highest possible pair has r10 as the even-numbered register.
+
+ static constexpr uint32_t PAIR_LIMIT = 10;
+
+ bool hasGPRPair() {
+ for (uint32_t i = 0; i <= PAIR_LIMIT; i += 2) {
+ if (isAvailableGPR(Register::FromCode(i)) &&
+ isAvailableGPR(Register::FromCode(i + 1))) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void allocGPRPair(Register* low, Register* high) {
+ MOZ_ASSERT(hasGPRPair());
+ for (uint32_t i = 0; i <= PAIR_LIMIT; i += 2) {
+ if (isAvailableGPR(Register::FromCode(i)) &&
+ isAvailableGPR(Register::FromCode(i + 1))) {
+ *low = Register::FromCode(i);
+ *high = Register::FromCode(i + 1);
+ allocGPR(*low);
+ allocGPR(*high);
+ return;
+ }
+ }
+ MOZ_CRASH("No pair");
+ }
+#endif
+
+ void allocFPU(FloatRegister r) {
+#ifdef RABALDR_SIDEALLOC_V128
+ MOZ_ASSERT(!r.isSimd128());
+#endif
+ MOZ_ASSERT(isAvailableFPU(r));
+ availFPU.take(r);
+ }
+
+ template <MIRType t>
+ FloatRegister allocFPU() {
+#ifdef RABALDR_SIDEALLOC_V128
+ // Workaround for GCC problem, bug 1677690
+ if constexpr (t == MIRType::Simd128) {
+ MOZ_CRASH("Should not happen");
+ } else
+#endif
+ {
+ return availFPU.takeAny<RegTypeOf<t>::value>();
+ }
+ }
+
+ void freeGPR(Register r) { availGPR.add(r); }
+
+ void freeInt64(Register64 r) {
+#ifdef JS_PUNBOX64
+ freeGPR(r.reg);
+#else
+ freeGPR(r.low);
+ freeGPR(r.high);
+#endif
+ }
+
+ void freeFPU(FloatRegister r) {
+#ifdef RABALDR_SIDEALLOC_V128
+ MOZ_ASSERT(!r.isSimd128());
+#endif
+ availFPU.add(r);
+ }
+
+ public:
+ explicit BaseRegAlloc()
+ : bc(nullptr),
+ availGPR(GeneralRegisterSet::All()),
+ availFPU(FloatRegisterSet::All())
+#ifdef DEBUG
+ ,
+ scratchTaken(0)
+#endif
+#ifdef JS_CODEGEN_X86
+ ,
+ singleByteRegs(GeneralRegisterSet(Registers::SingleByteRegs))
+#endif
+ {
+ RegisterAllocator::takeWasmRegisters(availGPR);
+
+ // Allocate any private scratch registers.
+#if defined(RABALDR_SCRATCH_I32)
+ if (RabaldrScratchI32 != RegI32::Invalid()) {
+ availGPR.take(RabaldrScratchI32);
+ }
+#endif
+
+#ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+ static_assert(RabaldrScratchF32 != InvalidFloatReg, "Float reg definition");
+ static_assert(RabaldrScratchF64 != InvalidFloatReg, "Float reg definition");
+#endif
+
+#if defined(RABALDR_SCRATCH_F32) && !defined(RABALDR_SCRATCH_F32_ALIASES_F64)
+ if (RabaldrScratchF32 != RegF32::Invalid()) {
+ availFPU.take(RabaldrScratchF32);
+ }
+#endif
+
+#if defined(RABALDR_SCRATCH_F64)
+# ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+ MOZ_ASSERT(availFPU.has(RabaldrScratchF32));
+# endif
+ if (RabaldrScratchF64 != RegF64::Invalid()) {
+ availFPU.take(RabaldrScratchF64);
+ }
+# ifdef RABALDR_SCRATCH_F32_ALIASES_F64
+ MOZ_ASSERT(!availFPU.has(RabaldrScratchF32));
+# endif
+#endif
+
+#ifdef DEBUG
+ allGPR = availGPR;
+ allFPU = availFPU;
+#endif
+ }
+
+ void init(BaseCompilerInterface* bc) { this->bc = bc; }
+
+ enum class ScratchKind { I32 = 1, F32 = 2, F64 = 4, V128 = 8 };
+
+#ifdef DEBUG
+ bool isScratchRegisterTaken(ScratchKind s) const {
+ return (scratchTaken & uint32_t(s)) != 0;
+ }
+
+ void setScratchRegisterTaken(ScratchKind s, bool state) {
+ if (state) {
+ scratchTaken |= uint32_t(s);
+ } else {
+ scratchTaken &= ~uint32_t(s);
+ }
+ }
+#endif
+
+#ifdef JS_CODEGEN_X86
+ bool isSingleByteI32(Register r) { return singleByteRegs.has(r); }
+#endif
+
+ bool isAvailableI32(RegI32 r) { return isAvailableGPR(r); }
+
+ bool isAvailableI64(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return isAvailableGPR(r.reg);
+#else
+ return isAvailableGPR(r.low) && isAvailableGPR(r.high);
+#endif
+ }
+
+ bool isAvailablePtr(RegPtr r) { return isAvailableGPR(r); }
+
+ bool isAvailableF32(RegF32 r) { return isAvailableFPU(r); }
+
+ bool isAvailableF64(RegF64 r) { return isAvailableFPU(r); }
+
+#ifdef ENABLE_WASM_SIMD
+# ifdef RABALDR_SIDEALLOC_V128
+ bool isAvailableV128(RegV128 r) { return isAvailableFPU(r.asDouble()); }
+# else
+ bool isAvailableV128(RegV128 r) { return isAvailableFPU(r); }
+# endif
+#endif
+
+ // TODO / OPTIMIZE (Bug 1316802): Do not sync everything on allocation
+ // failure, only as much as we need.
+
+ [[nodiscard]] RegI32 needI32() {
+ if (!hasGPR()) {
+ bc->sync();
+ }
+ return RegI32(allocGPR());
+ }
+
+ void needI32(RegI32 specific) {
+ if (!isAvailableI32(specific)) {
+ bc->sync();
+ }
+ allocGPR(specific);
+ }
+
+ [[nodiscard]] RegI64 needI64() {
+ if (!hasGPR64()) {
+ bc->sync();
+ }
+ return RegI64(allocInt64());
+ }
+
+ void needI64(RegI64 specific) {
+ if (!isAvailableI64(specific)) {
+ bc->sync();
+ }
+ allocInt64(specific);
+ }
+
+ [[nodiscard]] RegPtr needPtr() {
+ if (!hasGPR()) {
+ bc->sync();
+ }
+ return RegPtr(allocGPR());
+ }
+
+ void needPtr(RegPtr specific) {
+ if (!isAvailablePtr(specific)) {
+ bc->sync();
+ }
+ allocGPR(specific);
+ }
+
+ // Use when you need a register for a short time but explicitly want to avoid
+ // a full sync().
+ [[nodiscard]] RegPtr needTempPtr(RegPtr fallback, bool* saved) {
+ if (hasGPR()) {
+ *saved = false;
+ return RegPtr(allocGPR());
+ }
+ *saved = true;
+ bc->saveTempPtr(fallback);
+ MOZ_ASSERT(isAvailablePtr(fallback));
+ allocGPR(fallback);
+ return RegPtr(fallback);
+ }
+
+ [[nodiscard]] RegF32 needF32() {
+ if (!hasFPU<MIRType::Float32>()) {
+ bc->sync();
+ }
+ return RegF32(allocFPU<MIRType::Float32>());
+ }
+
+ void needF32(RegF32 specific) {
+ if (!isAvailableF32(specific)) {
+ bc->sync();
+ }
+ allocFPU(specific);
+ }
+
+ [[nodiscard]] RegF64 needF64() {
+ if (!hasFPU<MIRType::Double>()) {
+ bc->sync();
+ }
+ return RegF64(allocFPU<MIRType::Double>());
+ }
+
+ void needF64(RegF64 specific) {
+ if (!isAvailableF64(specific)) {
+ bc->sync();
+ }
+ allocFPU(specific);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] RegV128 needV128() {
+# ifdef RABALDR_SIDEALLOC_V128
+ if (!hasFPU<MIRType::Double>()) {
+ bc->sync();
+ }
+ return RegV128::fromDouble(allocFPU<MIRType::Double>());
+# else
+ if (!hasFPU<MIRType::Simd128>()) {
+ bc->sync();
+ }
+ return RegV128(allocFPU<MIRType::Simd128>());
+# endif
+ }
+
+ void needV128(RegV128 specific) {
+# ifdef RABALDR_SIDEALLOC_V128
+ if (!isAvailableV128(specific)) {
+ bc->sync();
+ }
+ allocFPU(specific.asDouble());
+# else
+ if (!isAvailableV128(specific)) {
+ bc->sync();
+ }
+ allocFPU(specific);
+# endif
+ }
+#endif
+
+ void freeI32(RegI32 r) { freeGPR(r); }
+
+ void freeI64(RegI64 r) { freeInt64(r); }
+
+ void freePtr(RegPtr r) { freeGPR(r); }
+
+ void freeF64(RegF64 r) { freeFPU(r); }
+
+ void freeF32(RegF32 r) { freeFPU(r); }
+
+#ifdef ENABLE_WASM_SIMD
+ void freeV128(RegV128 r) {
+# ifdef RABALDR_SIDEALLOC_V128
+ freeFPU(r.asDouble());
+# else
+ freeFPU(r);
+# endif
+ }
+#endif
+
+ void freeTempPtr(RegPtr r, bool saved) {
+ freePtr(r);
+ if (saved) {
+ bc->restoreTempPtr(r);
+ MOZ_ASSERT(!isAvailablePtr(r));
+ }
+ }
+
+#ifdef JS_CODEGEN_ARM
+ [[nodiscard]] RegI64 needI64Pair() {
+ if (!hasGPRPair()) {
+ bc->sync();
+ }
+ Register low, high;
+ allocGPRPair(&low, &high);
+ return RegI64(Register64(high, low));
+ }
+#endif
+
+#ifdef DEBUG
+ friend class LeakCheck;
+
+ class MOZ_RAII LeakCheck {
+ private:
+ const BaseRegAlloc& ra;
+ AllocatableGeneralRegisterSet knownGPR_;
+ AllocatableFloatRegisterSet knownFPU_;
+
+ public:
+ explicit LeakCheck(const BaseRegAlloc& ra) : ra(ra) {
+ knownGPR_ = ra.availGPR;
+ knownFPU_ = ra.availFPU;
+ }
+
+ ~LeakCheck() {
+ MOZ_ASSERT(knownGPR_.bits() == ra.allGPR.bits());
+ MOZ_ASSERT(knownFPU_.bits() == ra.allFPU.bits());
+ }
+
+ void addKnownI32(RegI32 r) { knownGPR_.add(r); }
+
+ void addKnownI64(RegI64 r) {
+# ifdef JS_PUNBOX64
+ knownGPR_.add(r.reg);
+# else
+ knownGPR_.add(r.high);
+ knownGPR_.add(r.low);
+# endif
+ }
+
+ void addKnownF32(RegF32 r) { knownFPU_.add(r); }
+
+ void addKnownF64(RegF64 r) { knownFPU_.add(r); }
+
+# ifdef ENABLE_WASM_SIMD
+ void addKnownV128(RegV128 r) {
+# ifdef RABALDR_SIDEALLOC_V128
+ knownFPU_.add(r.asDouble());
+# else
+ knownFPU_.add(r);
+# endif
+ }
+# endif
+
+ void addKnownRef(RegPtr r) { knownGPR_.add(r); }
+ };
+#endif
+};
+
+// Scratch register abstractions.
+//
+// We define our own scratch registers when the platform doesn't provide what we
+// need. A notable use case is that we will need a private scratch register
+// when the platform masm uses its scratch register very frequently (eg, ARM).
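+//
+// The scratch classes below are RAII-style. A sketch of typical use, assuming
+// `ra` is the BaseRegAlloc (on platforms that define the private scratch
+// register; on the others, the MacroAssembler is passed instead):
+//
+//   {
+//     ScratchI32 scratch(ra);          // acquire the scratch register
+//     masm.move32(Imm32(0), scratch);
+//   }                                  // the destructor releases it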
+
+class BaseScratchRegister {
+#ifdef DEBUG
+ BaseRegAlloc& ra;
+ BaseRegAlloc::ScratchKind kind_;
+
+ public:
+ explicit BaseScratchRegister(BaseRegAlloc& ra, BaseRegAlloc::ScratchKind kind)
+ : ra(ra), kind_(kind) {
+ MOZ_ASSERT(!ra.isScratchRegisterTaken(kind_));
+ ra.setScratchRegisterTaken(kind_, true);
+ }
+ ~BaseScratchRegister() {
+ MOZ_ASSERT(ra.isScratchRegisterTaken(kind_));
+ ra.setScratchRegisterTaken(kind_, false);
+ }
+#else
+ public:
+ explicit BaseScratchRegister(BaseRegAlloc& ra,
+ BaseRegAlloc::ScratchKind kind) {}
+#endif
+};
+
+#ifdef ENABLE_WASM_SIMD
+# ifdef RABALDR_SCRATCH_V128
+class ScratchV128 : public BaseScratchRegister {
+ public:
+ explicit ScratchV128(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::V128) {}
+ operator RegV128() const { return RegV128(RabaldrScratchV128); }
+};
+# else
+class ScratchV128 : public ScratchSimd128Scope {
+ public:
+ explicit ScratchV128(MacroAssembler& m) : ScratchSimd128Scope(m) {}
+ operator RegV128() const { return RegV128(FloatRegister(*this)); }
+};
+# endif
+#endif
+
+#ifdef RABALDR_SCRATCH_F64
+class ScratchF64 : public BaseScratchRegister {
+ public:
+ explicit ScratchF64(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::F64) {}
+ operator RegF64() const { return RegF64(RabaldrScratchF64); }
+};
+#else
+class ScratchF64 : public ScratchDoubleScope {
+ public:
+ explicit ScratchF64(MacroAssembler& m) : ScratchDoubleScope(m) {}
+ operator RegF64() const { return RegF64(FloatRegister(*this)); }
+};
+#endif
+
+#ifdef RABALDR_SCRATCH_F32
+class ScratchF32 : public BaseScratchRegister {
+ public:
+ explicit ScratchF32(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::F32) {}
+ operator RegF32() const { return RegF32(RabaldrScratchF32); }
+};
+#else
+class ScratchF32 : public ScratchFloat32Scope {
+ public:
+ explicit ScratchF32(MacroAssembler& m) : ScratchFloat32Scope(m) {}
+ operator RegF32() const { return RegF32(FloatRegister(*this)); }
+};
+#endif
+
+#ifdef RABALDR_SCRATCH_I32
+template <class RegType>
+class ScratchGPR : public BaseScratchRegister {
+ public:
+ explicit ScratchGPR(BaseRegAlloc& ra)
+ : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::I32) {}
+ operator RegType() const { return RegType(RabaldrScratchI32); }
+};
+#else
+template <class RegType>
+class ScratchGPR : public ScratchRegisterScope {
+ public:
+ explicit ScratchGPR(MacroAssembler& m) : ScratchRegisterScope(m) {}
+ operator RegType() const { return RegType(Register(*this)); }
+};
+#endif
+
+using ScratchI32 = ScratchGPR<RegI32>;
+using ScratchPtr = ScratchGPR<RegPtr>;
+
+#if defined(JS_CODEGEN_X86)
+// ScratchEBX is a mnemonic device: for some atomic ops we really need EBX;
+// no other register will do. We would normally have to allocate that
+// register using ScratchI32, since on x86 the scratch register is EBX.
+// But the whole point of ScratchI32 is to hide that relationship. By using
+// the ScratchEBX alias, we document that at that point we require the
+// scratch register to be EBX.
+using ScratchEBX = ScratchI32;
+
+// ScratchI8 is a mnemonic device: For some ops we need a register with a
+// byte subregister.
+using ScratchI8 = ScratchI32;
+#endif
+
+// The stack frame.
+//
+// The stack frame has four parts ("below" means at lower addresses):
+//
+// - the Frame element;
+// - the Local area, including the DebugFrame element and possibly a spilled
+// pointer to stack results, if any; allocated below the header with various
+// forms of alignment;
+// - the Dynamic area, comprising the temporary storage the compiler uses for
+// register spilling, allocated below the Local area;
+// - the Arguments area, comprising memory allocated for outgoing calls,
+// allocated below the Dynamic area.
+//
+// +==============================+
+// | Incoming stack arg |
+// | ... |
+// ------------- +==============================+
+// | Frame (fixed size) |
+// ------------- +==============================+ <-------------------- FP
+// ^ | DebugFrame (optional) | ^ ^ ^^
+// localSize | Register arg local | | | ||
+// | | ... | | | framePushed
+// | | Register stack result ptr?| | | ||
+// | | Non-arg local | | | ||
+// | | ... | | | ||
+// | | (padding) | | | ||
+// | | Tls pointer | | | ||
+// | +------------------------------+ | | ||
+// v | (padding) | | v ||
+// ------------- +==============================+ currentStackHeight ||
+// ^ | Dynamic (variable size) | | ||
+// dynamicSize | ... | | ||
+// v | ... | v ||
+// ------------- | (free space, sometimes) | --------- v|
+// +==============================+ <----- SP not-during calls
+// | Arguments (sometimes) | |
+// | ... | v
+// +==============================+ <----- SP during calls
+//
+// The Frame is addressed off the stack pointer. masm.framePushed() is always
+// correct, and masm.getStackPointer() + masm.framePushed() always addresses the
+// Frame, with the DebugFrame optionally below it.
+//
+// The Local area (including the DebugFrame and, if needed, the spilled value of
+// the stack results area pointer) is laid out by BaseLocalIter and is allocated
+// and deallocated by standard prologue and epilogue functions that manipulate
+// the stack pointer, but it is accessed via BaseStackFrame.
+//
+// The Dynamic area is maintained by and accessed via BaseStackFrame. On some
+// systems (such as ARM64), the Dynamic memory may be allocated in chunks
+// because the SP needs a specific alignment, and in this case there will
+// normally be some free space directly above the SP. The stack height does not
+// include the free space; it reflects only the logically used space.
+//
+// The Dynamic area is where space for stack results is allocated when calling
+// functions that return results on the stack. If a function has stack results,
+// a pointer to the low address of the stack result area is passed as an
+// additional argument, according to the usual ABI. See
+// ABIResultIter::HasStackResults.
+//
+// The Arguments area is allocated and deallocated via BaseStackFrame (see
+// comments later) but is accessed directly off the stack pointer.
+
+// BaseLocalIter iterates over a vector of types of locals and provides offsets
+// from the Frame address for those locals, and associated data.
+//
+// The implementation of BaseLocalIter is the property of the BaseStackFrame.
+// But it is also exposed for use by eg the debugger.
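+//
+// A sketch of a typical traversal (assuming the usual accessors done(),
+// index(), mirType() and frameOffset() declared in WasmBaselineCompile.h):
+//
+//   for (BaseLocalIter i(locals, args, debugEnabled); !i.done(); i++) {
+//     // i.frameOffset() is the offset from Frame of local i.index(), and
+//     // i.mirType() is its MIRType.
+//   }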
+
+BaseLocalIter::BaseLocalIter(const ValTypeVector& locals,
+ const ArgTypeVector& args, bool debugEnabled)
+ : locals_(locals),
+ args_(args),
+ argsIter_(args_),
+ index_(0),
+ nextFrameSize_(debugEnabled ? DebugFrame::offsetOfFrame() : 0),
+ frameOffset_(INT32_MAX),
+ stackResultPointerOffset_(INT32_MAX),
+ mirType_(MIRType::Undefined),
+ done_(false) {
+ MOZ_ASSERT(args.lengthWithoutStackResults() <= locals.length());
+ settle();
+}
+
+int32_t BaseLocalIter::pushLocal(size_t nbytes) {
+ MOZ_ASSERT(nbytes % 4 == 0 && nbytes <= 16);
+ nextFrameSize_ = AlignBytes(frameSize_, nbytes) + nbytes;
+ return nextFrameSize_; // Locals grow down so capture base address.
+}
+
+void BaseLocalIter::settle() {
+ MOZ_ASSERT(!done_);
+ frameSize_ = nextFrameSize_;
+
+ if (!argsIter_.done()) {
+ mirType_ = argsIter_.mirType();
+ MIRType concreteType = mirType_;
+ switch (mirType_) {
+ case MIRType::StackResults:
+ // The pointer to stack results is handled like any other argument:
+ // either addressed in place if it is passed on the stack, or we spill
+ // it in the frame if it's in a register.
+ MOZ_ASSERT(args_.isSyntheticStackResultPointerArg(index_));
+ concreteType = MIRType::Pointer;
+ [[fallthrough]];
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Double:
+ case MIRType::Float32:
+ case MIRType::RefOrNull:
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+#endif
+ if (argsIter_->argInRegister()) {
+ frameOffset_ = pushLocal(MIRTypeToSize(concreteType));
+ } else {
+ frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
+ }
+ break;
+ default:
+ MOZ_CRASH("Argument type");
+ }
+ if (mirType_ == MIRType::StackResults) {
+ stackResultPointerOffset_ = frameOffset();
+ // Advance past the synthetic stack result pointer argument and fall
+ // through to the next case.
+ argsIter_++;
+ frameSize_ = nextFrameSize_;
+ MOZ_ASSERT(argsIter_.done());
+ } else {
+ return;
+ }
+ }
+
+ if (index_ < locals_.length()) {
+ switch (locals_[index_].kind()) {
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+#endif
+ case ValType::Ref:
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the
+ // debugger must be made aware that AnyRef != Pointer.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ mirType_ = ToMIRType(locals_[index_]);
+ frameOffset_ = pushLocal(MIRTypeToSize(mirType_));
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Unexpected local type");
+ }
+ return;
+ }
+
+ done_ = true;
+}
+
+void BaseLocalIter::operator++(int) {
+ MOZ_ASSERT(!done_);
+ index_++;
+ if (!argsIter_.done()) {
+ argsIter_++;
+ }
+ settle();
+}
+
+// Abstraction of the height of the stack frame, to avoid type confusion.
+
+class StackHeight {
+ friend class BaseStackFrameAllocator;
+
+ uint32_t height;
+
+ public:
+ explicit StackHeight(uint32_t h) : height(h) {}
+ static StackHeight Invalid() { return StackHeight(UINT32_MAX); }
+ bool isValid() const { return height != UINT32_MAX; }
+ bool operator==(StackHeight rhs) const {
+ MOZ_ASSERT(isValid() && rhs.isValid());
+ return height == rhs.height;
+ }
+ bool operator!=(StackHeight rhs) const { return !(*this == rhs); }
+};
+
+// Abstraction for where multi-value results go on the machine stack.
+
+class StackResultsLoc {
+ uint32_t bytes_;
+ size_t count_;
+ Maybe<uint32_t> height_;
+
+ public:
+  StackResultsLoc() : bytes_(0), count_(0) {}
+ StackResultsLoc(uint32_t bytes, size_t count, uint32_t height)
+ : bytes_(bytes), count_(count), height_(Some(height)) {
+ MOZ_ASSERT(bytes != 0);
+ MOZ_ASSERT(count != 0);
+ MOZ_ASSERT(height != 0);
+ }
+
+ uint32_t bytes() const { return bytes_; }
+ uint32_t count() const { return count_; }
+ uint32_t height() const { return height_.value(); }
+
+ bool hasStackResults() const { return bytes() != 0; }
+ StackResults stackResults() const {
+ return hasStackResults() ? StackResults::HasStackResults
+ : StackResults::NoStackResults;
+ }
+};
+
+// Abstraction of the baseline compiler's stack frame (except for the Frame /
+// DebugFrame parts). See comments above for more. Remember, "below" on the
+// stack means at lower addresses.
+//
+// The abstraction is split into two parts: BaseStackFrameAllocator is
+// responsible for allocating and deallocating space on the stack and for
+// performing computations that are affected by how the allocation is performed;
+// BaseStackFrame then provides a pleasant interface for stack frame management.
+
+class BaseStackFrameAllocator {
+ MacroAssembler& masm;
+
+#ifdef RABALDR_CHUNKY_STACK
+ // On platforms that require the stack pointer to be aligned on a boundary
+ // greater than the typical stack item (eg, ARM64 requires 16-byte alignment
+ // but items are 8 bytes), allocate stack memory in chunks, and use a
+ // separate stack height variable to track the effective stack pointer
+ // within the allocated area. Effectively, there's a variable amount of
+ // free space directly above the stack pointer. See diagram above.
+
+ // The following must be true in order for the stack height to be
+ // predictable at control flow joins:
+ //
+ // - The Local area is always aligned according to WasmStackAlignment, ie,
+ // masm.framePushed() % WasmStackAlignment is zero after allocating
+ // locals.
+ //
+ // - ChunkSize is always a multiple of WasmStackAlignment.
+ //
+ // - Pushing and popping are always in units of ChunkSize (hence preserving
+ // alignment).
+ //
+ // - The free space on the stack (masm.framePushed() - currentStackHeight_)
+ // is a predictable (nonnegative) amount.
+
+ // As an optimization, we pre-allocate some space on the stack, the size of
+ // this allocation is InitialChunk and it must be a multiple of ChunkSize.
+ // It is allocated as part of the function prologue and deallocated as part
+ // of the epilogue, along with the locals.
+ //
+ // If ChunkSize is too large then we risk overflowing the stack on simple
+ // recursions with few live values where stack overflow should not be a
+ // risk; if it is too small we spend too much time adjusting the stack
+ // pointer.
+ //
+ // Good values for ChunkSize are the subject of future empirical analysis;
+ // eight words is just an educated guess.
+
+ static constexpr uint32_t ChunkSize = 8 * sizeof(void*);
+ static constexpr uint32_t InitialChunk = ChunkSize;
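+
+  // Worked example (a sketch assuming a 64-bit platform, so ChunkSize == 64
+  // bytes): with localSize_ == 96, fixedAllocSize() == 96 + 64 == 160. A
+  // logical stack height of 200 then maps to an allocated frame of
+  // 160 + AlignBytes(200 - 160, 64) == 224 bytes; see framePushedForHeight()
+  // below.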
+
+ // The current logical height of the frame is
+ // currentStackHeight_ = localSize_ + dynamicSize
+ // where dynamicSize is not accounted for explicitly and localSize_ also
+ // includes size for the DebugFrame.
+ //
+ // The allocated size of the frame, provided by masm.framePushed(), is usually
+ // larger than currentStackHeight_, notably at the beginning of execution when
+ // we've allocated InitialChunk extra space.
+
+ uint32_t currentStackHeight_;
+#endif
+
+ // Size of the Local area in bytes (stable after BaseCompiler::init() has
+ // called BaseStackFrame::setupLocals(), which in turn calls
+ // BaseStackFrameAllocator::setLocalSize()), always rounded to the proper
+ // stack alignment. The Local area is then allocated in beginFunction(),
+ // following the allocation of the Header. See onFixedStackAllocated()
+ // below.
+
+ uint32_t localSize_;
+
+ protected:
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Initialization
+
+ explicit BaseStackFrameAllocator(MacroAssembler& masm)
+ : masm(masm),
+#ifdef RABALDR_CHUNKY_STACK
+ currentStackHeight_(0),
+#endif
+ localSize_(UINT32_MAX) {
+ }
+
+ protected:
+ //////////////////////////////////////////////////////////////////////
+ //
+ // The Local area - the static part of the frame.
+
+ // Record the size of the Local area, once it is known.
+
+ void setLocalSize(uint32_t localSize) {
+ MOZ_ASSERT(localSize == AlignBytes(localSize, sizeof(void*)),
+ "localSize_ should be aligned to at least a pointer");
+ MOZ_ASSERT(localSize_ == UINT32_MAX);
+ localSize_ = localSize;
+ }
+
+ // Record the current stack height, after it has become stable in
+ // beginFunction(). See also BaseStackFrame::onFixedStackAllocated().
+
+ void onFixedStackAllocated() {
+ MOZ_ASSERT(localSize_ != UINT32_MAX);
+#ifdef RABALDR_CHUNKY_STACK
+ currentStackHeight_ = localSize_;
+#endif
+ }
+
+ public:
+ // The fixed amount of memory, in bytes, allocated on the stack below the
+ // Header for purposes such as locals and other fixed values. Includes all
+ // necessary alignment, and on ARM64 also the initial chunk for the working
+ // stack memory.
+
+ uint32_t fixedAllocSize() const {
+ MOZ_ASSERT(localSize_ != UINT32_MAX);
+#ifdef RABALDR_CHUNKY_STACK
+ return localSize_ + InitialChunk;
+#else
+ return localSize_;
+#endif
+ }
+
+#ifdef RABALDR_CHUNKY_STACK
+ // The allocated frame size is frequently larger than the logical stack
+ // height; we round up to a chunk boundary, and special case the initial
+ // chunk.
+ uint32_t framePushedForHeight(uint32_t logicalHeight) {
+ if (logicalHeight <= fixedAllocSize()) {
+ return fixedAllocSize();
+ }
+ return fixedAllocSize() +
+ AlignBytes(logicalHeight - fixedAllocSize(), ChunkSize);
+ }
+#endif
+
+ protected:
+ //////////////////////////////////////////////////////////////////////
+ //
+ // The Dynamic area - the dynamic part of the frame, for spilling and saving
+ // intermediate values.
+
+ // Offset off of sp_ for the slot at stack area location `offset`.
+
+ int32_t stackOffset(int32_t offset) {
+ MOZ_ASSERT(offset > 0);
+ return masm.framePushed() - offset;
+ }
+
+ uint32_t computeHeightWithStackResults(StackHeight stackBase,
+ uint32_t stackResultBytes) {
+ MOZ_ASSERT(stackResultBytes);
+ MOZ_ASSERT(currentStackHeight() >= stackBase.height);
+ return stackBase.height + stackResultBytes;
+ }
+
+#ifdef RABALDR_CHUNKY_STACK
+ void pushChunkyBytes(uint32_t bytes) {
+ checkChunkyInvariants();
+ uint32_t freeSpace = masm.framePushed() - currentStackHeight_;
+ if (freeSpace < bytes) {
+ uint32_t bytesToReserve = AlignBytes(bytes - freeSpace, ChunkSize);
+ MOZ_ASSERT(bytesToReserve + freeSpace >= bytes);
+ masm.reserveStack(bytesToReserve);
+ }
+ currentStackHeight_ += bytes;
+ checkChunkyInvariants();
+ }
+
+ void popChunkyBytes(uint32_t bytes) {
+ checkChunkyInvariants();
+ currentStackHeight_ -= bytes;
+ // Sometimes, popChunkyBytes() is used to pop a larger area, as when we drop
+ // values consumed by a call, and we may need to drop several chunks. But
+ // never drop the initial chunk. Crucially, the amount we drop is always an
+ // integral number of chunks.
+ uint32_t freeSpace = masm.framePushed() - currentStackHeight_;
+ if (freeSpace >= ChunkSize) {
+ uint32_t targetAllocSize = framePushedForHeight(currentStackHeight_);
+ uint32_t amountToFree = masm.framePushed() - targetAllocSize;
+ MOZ_ASSERT(amountToFree % ChunkSize == 0);
+ if (amountToFree) {
+ masm.freeStack(amountToFree);
+ }
+ }
+ checkChunkyInvariants();
+ }
+#endif
+
+ uint32_t currentStackHeight() const {
+#ifdef RABALDR_CHUNKY_STACK
+ return currentStackHeight_;
+#else
+ return masm.framePushed();
+#endif
+ }
+
+ private:
+#ifdef RABALDR_CHUNKY_STACK
+ void checkChunkyInvariants() {
+ MOZ_ASSERT(masm.framePushed() >= fixedAllocSize());
+ MOZ_ASSERT(masm.framePushed() >= currentStackHeight_);
+ MOZ_ASSERT(masm.framePushed() == fixedAllocSize() ||
+ masm.framePushed() - currentStackHeight_ < ChunkSize);
+ MOZ_ASSERT((masm.framePushed() - localSize_) % ChunkSize == 0);
+ }
+#endif
+
+ // For a given stack height, return the appropriate size of the allocated
+ // frame.
+
+ uint32_t framePushedForHeight(StackHeight stackHeight) {
+#ifdef RABALDR_CHUNKY_STACK
+ // A more complicated adjustment is needed.
+ return framePushedForHeight(stackHeight.height);
+#else
+ // The allocated frame size equals the stack height.
+ return stackHeight.height;
+#endif
+ }
+
+ public:
+ // The current height of the stack area, not necessarily zero-based, in a
+ // type-safe way.
+
+ StackHeight stackHeight() const { return StackHeight(currentStackHeight()); }
+
+ // Set the frame height to a previously recorded value.
+
+ void setStackHeight(StackHeight amount) {
+#ifdef RABALDR_CHUNKY_STACK
+ currentStackHeight_ = amount.height;
+ masm.setFramePushed(framePushedForHeight(amount));
+ checkChunkyInvariants();
+#else
+ masm.setFramePushed(amount.height);
+#endif
+ }
+
+ // The current height of the dynamic part of the stack area (ie, the backing
+ // store for the evaluation stack), zero-based.
+
+ uint32_t dynamicHeight() const { return currentStackHeight() - localSize_; }
+
+ // Before branching to an outer control label, pop the execution stack to
+ // the level expected by that region, but do not update masm.framePushed()
+ // as that will happen as compilation leaves the block.
+ //
+ // Note these operate directly on the stack pointer register.
+
+ void popStackBeforeBranch(StackHeight destStackHeight,
+ uint32_t stackResultBytes) {
+ uint32_t framePushedHere = masm.framePushed();
+ StackHeight heightThere =
+ StackHeight(destStackHeight.height + stackResultBytes);
+ uint32_t framePushedThere = framePushedForHeight(heightThere);
+ if (framePushedHere > framePushedThere) {
+ masm.addToStackPtr(Imm32(framePushedHere - framePushedThere));
+ }
+ }
+
+ void popStackBeforeBranch(StackHeight destStackHeight, ResultType type) {
+ popStackBeforeBranch(destStackHeight,
+ ABIResultIter::MeasureStackBytes(type));
+ }
+
+ // Given that there are |stackParamSize| bytes on the dynamic stack
+ // corresponding to the stack results, return the stack height once these
+ // parameters are popped.
+
+ StackHeight stackResultsBase(uint32_t stackParamSize) {
+ return StackHeight(currentStackHeight() - stackParamSize);
+ }
+
+ // For most of WebAssembly, adjacent instructions have fallthrough control
+ // flow between them, which allows us to simply thread the current stack
+ // height through the compiler. There are two exceptions to this rule: when
+ // leaving a block via dead code, and when entering the "else" arm of an "if".
+ // In these cases, the stack height is the block entry height, plus any stack
+ // values (results in the block exit case, parameters in the else entry case).
+
+ void resetStackHeight(StackHeight destStackHeight, ResultType type) {
+ uint32_t height = destStackHeight.height;
+ height += ABIResultIter::MeasureStackBytes(type);
+ setStackHeight(StackHeight(height));
+ }
+
+ // Return offset of stack result.
+
+ uint32_t locateStackResult(const ABIResult& result, StackHeight stackBase,
+ uint32_t stackResultBytes) {
+ MOZ_ASSERT(result.onStack());
+ MOZ_ASSERT(result.stackOffset() + result.size() <= stackResultBytes);
+ uint32_t end = computeHeightWithStackResults(stackBase, stackResultBytes);
+ return end - result.stackOffset();
+ }
+
+ public:
+ //////////////////////////////////////////////////////////////////////
+ //
+ // The Argument area - for outgoing calls.
+ //
+ // We abstract these operations as an optimization: we can merge the freeing
+ // of the argument area and dropping values off the stack after a call. But
+ // they always amount to manipulating the real stack pointer by some amount.
+ //
+ // Note that we do not update currentStackHeight_ for this; the frame does
+ // not know about outgoing arguments. But we do update framePushed(), so we
+ // can still index into the frame below the outgoing arguments area.
+
+ // This is always equivalent to a masm.reserveStack() call.
+
+ void allocArgArea(size_t argSize) {
+ if (argSize) {
+ masm.reserveStack(argSize);
+ }
+ }
+
+ // This frees the argument area allocated by allocArgArea(), and `argSize`
+ // must be equal to the `argSize` argument to allocArgArea(). In addition
+ // we drop some values from the frame, corresponding to the values that were
+ // consumed by the call.
+
+ void freeArgAreaAndPopBytes(size_t argSize, size_t dropSize) {
+#ifdef RABALDR_CHUNKY_STACK
+ // Freeing the outgoing arguments and freeing the consumed values have
+ // different semantics here, which is why the operation is split.
+ if (argSize) {
+ masm.freeStack(argSize);
+ }
+ popChunkyBytes(dropSize);
+#else
+ if (argSize + dropSize) {
+ masm.freeStack(argSize + dropSize);
+ }
+#endif
+ }
+};
+
+class BaseStackFrame final : public BaseStackFrameAllocator {
+ MacroAssembler& masm;
+
+ // The largest observed value of masm.framePushed(), i.e., the size of the
+ // stack frame. Read this for its true value only when code generation is
+ // finished.
+ uint32_t maxFramePushed_;
+
+ // Patch point where we check for stack overflow.
+ CodeOffset stackAddOffset_;
+
+ // Low byte offset of pointer to stack results, if any.
+ Maybe<int32_t> stackResultsPtrOffset_;
+
+ // The offset of TLS pointer.
+ uint32_t tlsPointerOffset_;
+
+ // Low byte offset of local area for true locals (not parameters).
+ uint32_t varLow_;
+
+ // High byte offset + 1 of local area for true locals.
+ uint32_t varHigh_;
+
+ // The stack pointer, cached for brevity.
+ RegisterOrSP sp_;
+
+ public:
+ explicit BaseStackFrame(MacroAssembler& masm)
+ : BaseStackFrameAllocator(masm),
+ masm(masm),
+ maxFramePushed_(0),
+ stackAddOffset_(0),
+ tlsPointerOffset_(UINT32_MAX),
+ varLow_(UINT32_MAX),
+ varHigh_(UINT32_MAX),
+ sp_(masm.getStackPointer()) {}
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Stack management and overflow checking
+
+ // This must be called once beginFunction has allocated space for the Header
+ // (the Frame and DebugFrame) and the Local area, and will record the current
+ // frame size for internal use by the stack abstractions.
+
+ void onFixedStackAllocated() {
+ maxFramePushed_ = masm.framePushed();
+ BaseStackFrameAllocator::onFixedStackAllocated();
+ }
+
+ // We won't know until after we've generated code how big the frame will be
+ // (we may need arbitrary spill slots and outgoing param slots) so emit a
+ // patchable subtraction from the stack pointer, patched in endFunction().
+ //
+ // Note the platform scratch register may be used by branchPtr(), so
+ // generally tmp must be something else.
+
+ void checkStack(Register tmp, BytecodeOffset trapOffset) {
+ stackAddOffset_ = masm.sub32FromStackPtrWithPatch(tmp);
+ Label ok;
+ masm.branchPtr(Assembler::Below,
+ Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)),
+ tmp, &ok);
+ masm.wasmTrap(Trap::StackOverflow, trapOffset);
+ masm.bind(&ok);
+ }
+
+ void patchCheckStack() {
+ masm.patchSub32FromStackPtr(stackAddOffset_,
+ Imm32(int32_t(maxFramePushed_)));
+ }
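+
+ // In effect, checkStack() computes tmp = SP - maxFramePushed_ (the amount
+ // subtracted is filled in later by patchCheckStack()) and traps with
+ // Trap::StackOverflow unless tmp is strictly above TlsData::stackLimit.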
+
+ // Very large frames are implausible, probably an attack.
+
+ bool checkStackHeight() {
+ // 512KiB should be enough, considering how Rabaldr uses the stack and
+ // what the standard limits are:
+ //
+ // - 1,000 parameters
+ // - 50,000 locals
+ // - 10,000 values on the eval stack (not an official limit)
+ //
+ // At sizeof(int64) bytes per slot this works out to about 480KiB.
+ return maxFramePushed_ <= 512 * 1024;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Local area
+
+ struct Local {
+ // Type of the value.
+ const MIRType type;
+
+ // Byte offset from Frame "into" the locals, i.e. positive for true locals
+ // and negative for incoming args that read directly from the arg area.
+ // It assumes the stack is growing down and that locals are on the stack
+ // at lower addresses than Frame, and is the offset from Frame of the
+ // lowest-addressed byte of the local.
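+ //
+ // For example, offs == 16 denotes a true local addressed (via
+ // addressOfLocal(), below) at sp_ + framePushed() - 16, while offs == -8
+ // denotes an incoming stack argument addressed at FramePointer + 8.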
+ const int32_t offs;
+
+ Local(MIRType type, int32_t offs) : type(type), offs(offs) {}
+
+ bool isStackArgument() const { return offs < 0; }
+ };
+
+ // Profiling shows that the number of parameters and locals frequently
+ // touches or exceeds 8. So 16 seems like a reasonable starting point.
+ using LocalVector = Vector<Local, 16, SystemAllocPolicy>;
+
+ // Initialize `localInfo` based on the types of `locals` and `args`.
+ MOZ_MUST_USE bool setupLocals(const ValTypeVector& locals,
+ const ArgTypeVector& args, bool debugEnabled,
+ LocalVector* localInfo) {
+ if (!localInfo->reserve(locals.length())) {
+ return false;
+ }
+
+ DebugOnly<uint32_t> index = 0;
+ BaseLocalIter i(locals, args, debugEnabled);
+ for (; !i.done() && i.index() < args.lengthWithoutStackResults(); i++) {
+ MOZ_ASSERT(i.isArg());
+ MOZ_ASSERT(i.index() == index);
+ localInfo->infallibleEmplaceBack(i.mirType(), i.frameOffset());
+ index++;
+ }
+
+ varLow_ = i.frameSize();
+ for (; !i.done(); i++) {
+ MOZ_ASSERT(!i.isArg());
+ MOZ_ASSERT(i.index() == index);
+ localInfo->infallibleEmplaceBack(i.mirType(), i.frameOffset());
+ index++;
+ }
+ varHigh_ = i.frameSize();
+
+ // Reserve an additional stack slot for the TLS pointer.
+ const uint32_t pointerAlignedVarHigh = AlignBytes(varHigh_, sizeof(void*));
+ const uint32_t localSize = pointerAlignedVarHigh + sizeof(void*);
+ tlsPointerOffset_ = localSize;
+
+ setLocalSize(AlignBytes(localSize, WasmStackAlignment));
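+ // (For example, with varHigh_ == 20 on a 64-bit target: pointerAlignedVarHigh
+ // is 24, localSize and tlsPointerOffset_ are both 32, and the value passed
+ // to setLocalSize() just above is that 32 rounded up to WasmStackAlignment.)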
+
+ if (args.hasSyntheticStackResultPointerArg()) {
+ stackResultsPtrOffset_ = Some(i.stackResultPointerOffset());
+ }
+
+ return true;
+ }
+
+ void zeroLocals(BaseRegAlloc* ra);
+
+ Address addressOfLocal(const Local& local, uint32_t additionalOffset = 0) {
+ if (local.isStackArgument()) {
+ return Address(FramePointer,
+ stackArgumentOffsetFromFp(local) + additionalOffset);
+ }
+ return Address(sp_, localOffsetFromSp(local) + additionalOffset);
+ }
+
+ void loadLocalI32(const Local& src, RegI32 dest) {
+ masm.load32(addressOfLocal(src), dest);
+ }
+
+#ifndef JS_PUNBOX64
+ void loadLocalI64Low(const Local& src, RegI32 dest) {
+ masm.load32(addressOfLocal(src, INT64LOW_OFFSET), dest);
+ }
+
+ void loadLocalI64High(const Local& src, RegI32 dest) {
+ masm.load32(addressOfLocal(src, INT64HIGH_OFFSET), dest);
+ }
+#endif
+
+ void loadLocalI64(const Local& src, RegI64 dest) {
+ masm.load64(addressOfLocal(src), dest);
+ }
+
+ void loadLocalPtr(const Local& src, RegPtr dest) {
+ masm.loadPtr(addressOfLocal(src), dest);
+ }
+
+ void loadLocalF64(const Local& src, RegF64 dest) {
+ masm.loadDouble(addressOfLocal(src), dest);
+ }
+
+ void loadLocalF32(const Local& src, RegF32 dest) {
+ masm.loadFloat32(addressOfLocal(src), dest);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void loadLocalV128(const Local& src, RegV128 dest) {
+ masm.loadUnalignedSimd128(addressOfLocal(src), dest);
+ }
+#endif
+
+ void storeLocalI32(RegI32 src, const Local& dest) {
+ masm.store32(src, addressOfLocal(dest));
+ }
+
+ void storeLocalI64(RegI64 src, const Local& dest) {
+ masm.store64(src, addressOfLocal(dest));
+ }
+
+ void storeLocalPtr(Register src, const Local& dest) {
+ masm.storePtr(src, addressOfLocal(dest));
+ }
+
+ void storeLocalF64(RegF64 src, const Local& dest) {
+ masm.storeDouble(src, addressOfLocal(dest));
+ }
+
+ void storeLocalF32(RegF32 src, const Local& dest) {
+ masm.storeFloat32(src, addressOfLocal(dest));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void storeLocalV128(RegV128 src, const Local& dest) {
+ masm.storeUnalignedSimd128(src, addressOfLocal(dest));
+ }
+#endif
+
+ // Offset off of sp_ for `local`.
+ int32_t localOffsetFromSp(const Local& local) {
+ MOZ_ASSERT(!local.isStackArgument());
+ return localOffset(local.offs);
+ }
+
+ // Offset off of frame pointer for `stack argument`.
+ int32_t stackArgumentOffsetFromFp(const Local& local) {
+ MOZ_ASSERT(local.isStackArgument());
+ return -local.offs;
+ }
+
+ // The incoming stack result area pointer is for stack results of the function
+ // being compiled.
+ void loadIncomingStackResultAreaPtr(RegPtr reg) {
+ const int32_t offset = stackResultsPtrOffset_.value();
+ Address src = offset < 0 ? Address(FramePointer, -offset)
+ : Address(sp_, stackOffset(offset));
+ masm.loadPtr(src, reg);
+ }
+
+ void storeIncomingStackResultAreaPtr(RegPtr reg) {
+ // If we get here, that means the pointer to the stack results area was
+ // passed in as a register, and therefore it will be spilled below the
+ // frame, so the offset is a positive height.
+ MOZ_ASSERT(stackResultsPtrOffset_.value() > 0);
+ masm.storePtr(reg,
+ Address(sp_, stackOffset(stackResultsPtrOffset_.value())));
+ }
+
+ void loadTlsPtr(Register dst) {
+ masm.loadPtr(Address(sp_, stackOffset(tlsPointerOffset_)), dst);
+ }
+
+ void storeTlsPtr(Register tls) {
+ masm.storePtr(tls, Address(sp_, stackOffset(tlsPointerOffset_)));
+ }
+
+ int32_t getTlsPtrOffset() { return stackOffset(tlsPointerOffset_); }
+
+ // An outgoing stack result area pointer is for stack results of callees of
+ // the function being compiled.
+ void computeOutgoingStackResultAreaPtr(const StackResultsLoc& results,
+ RegPtr dest) {
+ MOZ_ASSERT(results.height() <= masm.framePushed());
+ uint32_t offsetFromSP = masm.framePushed() - results.height();
+ masm.moveStackPtrTo(dest);
+ if (offsetFromSP) {
+ masm.addPtr(Imm32(offsetFromSP), dest);
+ }
+ }
+
+ private:
+ // Offset off of sp_ for a local with offset `offset` from Frame.
+ int32_t localOffset(int32_t offset) { return masm.framePushed() - offset; }
+
+ public:
+ ///////////////////////////////////////////////////////////////////////////
+ //
+ // Dynamic area
+
+ static constexpr size_t StackSizeOfPtr = ABIResult::StackSizeOfPtr;
+ static constexpr size_t StackSizeOfInt64 = ABIResult::StackSizeOfInt64;
+ static constexpr size_t StackSizeOfFloat = ABIResult::StackSizeOfFloat;
+ static constexpr size_t StackSizeOfDouble = ABIResult::StackSizeOfDouble;
+#ifdef ENABLE_WASM_SIMD
+ static constexpr size_t StackSizeOfV128 = ABIResult::StackSizeOfV128;
+#endif
+
+ uint32_t pushPtr(Register r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfPtr);
+ masm.storePtr(r, Address(sp_, stackOffset(currentStackHeight())));
+#else
+ masm.Push(r);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfPtr == currentStackHeight());
+ return currentStackHeight();
+ }
+
+ uint32_t pushFloat32(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfFloat);
+ masm.storeFloat32(r, Address(sp_, stackOffset(currentStackHeight())));
+#else
+ masm.Push(r);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfFloat == currentStackHeight());
+ return currentStackHeight();
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ uint32_t pushV128(RegV128 r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+# ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfV128);
+# else
+ masm.adjustStack(-(int)StackSizeOfV128);
+# endif
+ masm.storeUnalignedSimd128(r,
+ Address(sp_, stackOffset(currentStackHeight())));
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfV128 == currentStackHeight());
+ return currentStackHeight();
+ }
+#endif
+
+ uint32_t pushDouble(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(StackSizeOfDouble);
+ masm.storeDouble(r, Address(sp_, stackOffset(currentStackHeight())));
+#else
+ masm.Push(r);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ MOZ_ASSERT(stackBefore + StackSizeOfDouble == currentStackHeight());
+ return currentStackHeight();
+ }
+
+ void popPtr(Register r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ masm.loadPtr(Address(sp_, stackOffset(currentStackHeight())), r);
+ popChunkyBytes(StackSizeOfPtr);
+#else
+ masm.Pop(r);
+#endif
+ MOZ_ASSERT(stackBefore - StackSizeOfPtr == currentStackHeight());
+ }
+
+ void popFloat32(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ masm.loadFloat32(Address(sp_, stackOffset(currentStackHeight())), r);
+ popChunkyBytes(StackSizeOfFloat);
+#else
+ masm.Pop(r);
+#endif
+ MOZ_ASSERT(stackBefore - StackSizeOfFloat == currentStackHeight());
+ }
+
+ void popDouble(FloatRegister r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ masm.loadDouble(Address(sp_, stackOffset(currentStackHeight())), r);
+ popChunkyBytes(StackSizeOfDouble);
+#else
+ masm.Pop(r);
+#endif
+ MOZ_ASSERT(stackBefore - StackSizeOfDouble == currentStackHeight());
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void popV128(RegV128 r) {
+ DebugOnly<uint32_t> stackBefore = currentStackHeight();
+ masm.loadUnalignedSimd128(Address(sp_, stackOffset(currentStackHeight())),
+ r);
+# ifdef RABALDR_CHUNKY_STACK
+ popChunkyBytes(StackSizeOfV128);
+# else
+ masm.adjustStack((int)StackSizeOfV128);
+# endif
+ MOZ_ASSERT(stackBefore - StackSizeOfV128 == currentStackHeight());
+ }
+#endif
+
+ void popBytes(size_t bytes) {
+ if (bytes > 0) {
+#ifdef RABALDR_CHUNKY_STACK
+ popChunkyBytes(bytes);
+#else
+ masm.freeStack(bytes);
+#endif
+ }
+ }
+
+ void loadStackI32(int32_t offset, RegI32 dest) {
+ masm.load32(Address(sp_, stackOffset(offset)), dest);
+ }
+
+ void loadStackI64(int32_t offset, RegI64 dest) {
+ masm.load64(Address(sp_, stackOffset(offset)), dest);
+ }
+
+#ifndef JS_PUNBOX64
+ void loadStackI64Low(int32_t offset, RegI32 dest) {
+ masm.load32(Address(sp_, stackOffset(offset - INT64LOW_OFFSET)), dest);
+ }
+
+ void loadStackI64High(int32_t offset, RegI32 dest) {
+ masm.load32(Address(sp_, stackOffset(offset - INT64HIGH_OFFSET)), dest);
+ }
+#endif
+
+ // Disambiguation: this loads a "Ptr" value from the stack; it does not load
+ // the "StackPtr".
+
+ void loadStackPtr(int32_t offset, RegPtr dest) {
+ masm.loadPtr(Address(sp_, stackOffset(offset)), dest);
+ }
+
+ void loadStackF64(int32_t offset, RegF64 dest) {
+ masm.loadDouble(Address(sp_, stackOffset(offset)), dest);
+ }
+
+ void loadStackF32(int32_t offset, RegF32 dest) {
+ masm.loadFloat32(Address(sp_, stackOffset(offset)), dest);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void loadStackV128(int32_t offset, RegV128 dest) {
+ masm.loadUnalignedSimd128(Address(sp_, stackOffset(offset)), dest);
+ }
+#endif
+
+ uint32_t prepareStackResultArea(StackHeight stackBase,
+ uint32_t stackResultBytes) {
+ uint32_t end = computeHeightWithStackResults(stackBase, stackResultBytes);
+ if (currentStackHeight() < end) {
+ uint32_t bytes = end - currentStackHeight();
+#ifdef RABALDR_CHUNKY_STACK
+ pushChunkyBytes(bytes);
+#else
+ masm.reserveStack(bytes);
+#endif
+ maxFramePushed_ = std::max(maxFramePushed_, masm.framePushed());
+ }
+ return end;
+ }
+
+ void finishStackResultArea(StackHeight stackBase, uint32_t stackResultBytes) {
+ uint32_t end = computeHeightWithStackResults(stackBase, stackResultBytes);
+ MOZ_ASSERT(currentStackHeight() >= end);
+ popBytes(currentStackHeight() - end);
+ }
+
+ // |srcHeight| and |destHeight| are stack heights *including* |bytes|.
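+ // The copy is done in pointer-sized chunks through |temp|; for example, on a
+ // 64-bit target a 12-byte shuffle moves one 8-byte word and then the
+ // remaining 4 bytes.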
+ void shuffleStackResultsTowardFP(uint32_t srcHeight, uint32_t destHeight,
+ uint32_t bytes, Register temp) {
+ MOZ_ASSERT(destHeight < srcHeight);
+ MOZ_ASSERT(bytes % sizeof(uint32_t) == 0);
+ uint32_t destOffset = stackOffset(destHeight) + bytes;
+ uint32_t srcOffset = stackOffset(srcHeight) + bytes;
+ while (bytes >= sizeof(intptr_t)) {
+ destOffset -= sizeof(intptr_t);
+ srcOffset -= sizeof(intptr_t);
+ bytes -= sizeof(intptr_t);
+ masm.loadPtr(Address(sp_, srcOffset), temp);
+ masm.storePtr(temp, Address(sp_, destOffset));
+ }
+ if (bytes) {
+ MOZ_ASSERT(bytes == sizeof(uint32_t));
+ destOffset -= sizeof(uint32_t);
+ srcOffset -= sizeof(uint32_t);
+ masm.load32(Address(sp_, srcOffset), temp);
+ masm.store32(temp, Address(sp_, destOffset));
+ }
+ }
+
+ // Unlike the overload that operates on raw heights, |srcHeight| and
+ // |destHeight| are stack heights *not including* |bytes|.
+ void shuffleStackResultsTowardFP(StackHeight srcHeight,
+ StackHeight destHeight, uint32_t bytes,
+ Register temp) {
+ MOZ_ASSERT(srcHeight.isValid());
+ MOZ_ASSERT(destHeight.isValid());
+ uint32_t src = computeHeightWithStackResults(srcHeight, bytes);
+ uint32_t dest = computeHeightWithStackResults(destHeight, bytes);
+ MOZ_ASSERT(src <= currentStackHeight());
+ MOZ_ASSERT(dest <= currentStackHeight());
+ shuffleStackResultsTowardFP(src, dest, bytes, temp);
+ }
+
+ // |srcHeight| and |destHeight| are stack heights *including* |bytes|.
+ void shuffleStackResultsTowardSP(uint32_t srcHeight, uint32_t destHeight,
+ uint32_t bytes, Register temp) {
+ MOZ_ASSERT(destHeight > srcHeight);
+ MOZ_ASSERT(bytes % sizeof(uint32_t) == 0);
+ uint32_t destOffset = stackOffset(destHeight);
+ uint32_t srcOffset = stackOffset(srcHeight);
+ while (bytes >= sizeof(intptr_t)) {
+ masm.loadPtr(Address(sp_, srcOffset), temp);
+ masm.storePtr(temp, Address(sp_, destOffset));
+ destOffset += sizeof(intptr_t);
+ srcOffset += sizeof(intptr_t);
+ bytes -= sizeof(intptr_t);
+ }
+ if (bytes) {
+ MOZ_ASSERT(bytes == sizeof(uint32_t));
+ masm.load32(Address(sp_, srcOffset), temp);
+ masm.store32(temp, Address(sp_, destOffset));
+ }
+ }
+
+ // Copy results from the top of the current stack frame to an area of memory,
+ // and pop the stack accordingly. `dest` is the address of the low byte of
+ // that memory.
+ void popStackResultsToMemory(Register dest, uint32_t bytes, Register temp) {
+ MOZ_ASSERT(bytes <= currentStackHeight());
+ MOZ_ASSERT(bytes % sizeof(uint32_t) == 0);
+ uint32_t bytesToPop = bytes;
+ uint32_t srcOffset = stackOffset(currentStackHeight());
+ uint32_t destOffset = 0;
+ while (bytes >= sizeof(intptr_t)) {
+ masm.loadPtr(Address(sp_, srcOffset), temp);
+ masm.storePtr(temp, Address(dest, destOffset));
+ destOffset += sizeof(intptr_t);
+ srcOffset += sizeof(intptr_t);
+ bytes -= sizeof(intptr_t);
+ }
+ if (bytes) {
+ MOZ_ASSERT(bytes == sizeof(uint32_t));
+ masm.load32(Address(sp_, srcOffset), temp);
+ masm.store32(temp, Address(dest, destOffset));
+ }
+ popBytes(bytesToPop);
+ }
+
+ private:
+ void store32BitsToStack(int32_t imm, uint32_t destHeight, Register temp) {
+ masm.move32(Imm32(imm), temp);
+ masm.store32(temp, Address(sp_, stackOffset(destHeight)));
+ }
+
+ void store64BitsToStack(int64_t imm, uint32_t destHeight, Register temp) {
+#ifdef JS_PUNBOX64
+ masm.move64(Imm64(imm), Register64(temp));
+ masm.store64(Register64(temp), Address(sp_, stackOffset(destHeight)));
+#else
+ union {
+ int64_t i64;
+ int32_t i32[2];
+ } bits = {.i64 = imm};
+ static_assert(sizeof(bits) == 8);
+ store32BitsToStack(bits.i32[0], destHeight, temp);
+ store32BitsToStack(bits.i32[1], destHeight - sizeof(int32_t), temp);
+#endif
+ }
+
+ public:
+ void storeImmediatePtrToStack(intptr_t imm, uint32_t destHeight,
+ Register temp) {
+#ifdef JS_PUNBOX64
+ static_assert(StackSizeOfPtr == 8);
+ store64BitsToStack(imm, destHeight, temp);
+#else
+ static_assert(StackSizeOfPtr == 4);
+ store32BitsToStack(int32_t(imm), destHeight, temp);
+#endif
+ }
+
+ void storeImmediateI64ToStack(int64_t imm, uint32_t destHeight,
+ Register temp) {
+ store64BitsToStack(imm, destHeight, temp);
+ }
+
+ void storeImmediateF32ToStack(float imm, uint32_t destHeight, Register temp) {
+ union {
+ int32_t i32;
+ float f32;
+ } bits = {.f32 = imm};
+ static_assert(sizeof(bits) == 4);
+ // Do not store 4 bytes if StackSizeOfFloat == 8. It's probably OK to do
+ // so, but it costs little to store something predictable.
+ if (StackSizeOfFloat == 4) {
+ store32BitsToStack(bits.i32, destHeight, temp);
+ } else {
+ store64BitsToStack(uint32_t(bits.i32), destHeight, temp);
+ }
+ }
+
+ void storeImmediateF64ToStack(double imm, uint32_t destHeight,
+ Register temp) {
+ union {
+ int64_t i64;
+ double f64;
+ } bits = {.f64 = imm};
+ static_assert(sizeof(bits) == 8);
+ store64BitsToStack(bits.i64, destHeight, temp);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void storeImmediateV128ToStack(V128 imm, uint32_t destHeight, Register temp) {
+ union {
+ int32_t i32[4];
+ uint8_t bytes[16];
+ } bits;
+ static_assert(sizeof(bits) == 16);
+ memcpy(bits.bytes, imm.bytes, 16);
+ for (unsigned i = 0; i < 4; i++) {
+ store32BitsToStack(bits.i32[i], destHeight - i * sizeof(int32_t), temp);
+ }
+ }
+#endif
+};
+
+void BaseStackFrame::zeroLocals(BaseRegAlloc* ra) {
+ MOZ_ASSERT(varLow_ != UINT32_MAX);
+
+ if (varLow_ == varHigh_) {
+ return;
+ }
+
+ static const uint32_t wordSize = sizeof(void*);
+
+ // The adjustments to 'low' by the size of the item being stored compensate
+ // for the fact that locals offsets are the offsets from Frame to the bytes
+ // directly "above" the locals in the locals area. See comment at Local.
+
+ // On 64-bit systems we may have 32-bit alignment for the local area as it
+ // may be preceded by parameters and prologue/debug data.
+
+ uint32_t low = varLow_;
+ if (low % wordSize) {
+ masm.store32(Imm32(0), Address(sp_, localOffset(low + 4)));
+ low += 4;
+ }
+ MOZ_ASSERT(low % wordSize == 0);
+
+ const uint32_t high = AlignBytes(varHigh_, wordSize);
+
+ // An UNROLL_LIMIT of 16 is chosen so that we only need an 8-bit signed
+ // immediate to represent the offset in the store instructions in the loop
+ // on x64.
+
+ const uint32_t UNROLL_LIMIT = 16;
+ const uint32_t initWords = (high - low) / wordSize;
+ const uint32_t tailWords = initWords % UNROLL_LIMIT;
+ const uint32_t loopHigh = high - (tailWords * wordSize);
+
+ // With only one word to initialize, just store an immediate zero.
+
+ if (initWords == 1) {
+ masm.storePtr(ImmWord(0), Address(sp_, localOffset(low + wordSize)));
+ return;
+ }
+
+ // For other cases, it's best to have a zero in a register.
+ //
+ // One can do more here with SIMD registers (store 16 bytes at a time) or
+ // with instructions like STRD on ARM (store 8 bytes at a time), but that's
+ // for another day.
+
+ RegI32 zero = ra->needI32();
+ masm.mov(ImmWord(0), zero);
+
+ // For the general case we want to have a loop body of UNROLL_LIMIT stores
+ // and then a tail of less than UNROLL_LIMIT stores. When initWords is less
+ // than 2*UNROLL_LIMIT the loop trip count is at most 1 and there is no
+ // benefit to having the pointer calculations and the compare-and-branch.
+ // So we completely unroll when we have initWords < 2 * UNROLL_LIMIT. (In
+ // this case we'll end up using 32-bit offsets on x64 for up to half of the
+ // stores, though.)
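+ //
+ // For example, with low == 0 and high == 320 on a 64-bit target:
+ // initWords == 40, tailWords == 8, and loopHigh == 256, so the loop body
+ // runs twice (initializing 32 words) and the tail initializes the
+ // remaining 8 words.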
+
+ // Fully-unrolled case.
+
+ if (initWords < 2 * UNROLL_LIMIT) {
+ for (uint32_t i = low; i < high; i += wordSize) {
+ masm.storePtr(zero, Address(sp_, localOffset(i + wordSize)));
+ }
+ ra->freeI32(zero);
+ return;
+ }
+
+ // Unrolled loop with a tail. Stores will use negative offsets. That's OK
+ // for x86 and ARM, at least.
+
+ // Compute pointer to the highest-addressed slot on the frame.
+ RegI32 p = ra->needI32();
+ masm.computeEffectiveAddress(Address(sp_, localOffset(low + wordSize)), p);
+
+ // Compute pointer to the lowest-addressed slot on the frame that will be
+ // initialized by the loop body.
+ RegI32 lim = ra->needI32();
+ masm.computeEffectiveAddress(Address(sp_, localOffset(loopHigh + wordSize)),
+ lim);
+
+ // The loop body. Eventually we'll have p == lim and exit the loop.
+ Label again;
+ masm.bind(&again);
+ for (uint32_t i = 0; i < UNROLL_LIMIT; ++i) {
+ masm.storePtr(zero, Address(p, -(wordSize * i)));
+ }
+ masm.subPtr(Imm32(UNROLL_LIMIT * wordSize), p);
+ masm.branchPtr(Assembler::LessThan, lim, p, &again);
+
+ // The tail.
+ for (uint32_t i = 0; i < tailWords; ++i) {
+ masm.storePtr(zero, Address(p, -(wordSize * i)));
+ }
+
+ ra->freeI32(p);
+ ra->freeI32(lim);
+ ra->freeI32(zero);
+}
+
+// Value stack: stack elements
+
+struct Stk {
+ private:
+ Stk() : kind_(Unknown), i64val_(0) {}
+
+ public:
+ enum Kind {
+ // The Mem opcodes are all clustered at the beginning to
+ // allow for a quick test within sync().
+ MemI32, // 32-bit integer stack value ("offs")
+ MemI64, // 64-bit integer stack value ("offs")
+ MemF32, // 32-bit floating stack value ("offs")
+ MemF64, // 64-bit floating stack value ("offs")
+#ifdef ENABLE_WASM_SIMD
+ MemV128, // 128-bit vector stack value ("offs")
+#endif
+ MemRef, // reftype (pointer wide) stack value ("offs")
+
+ // The Local opcodes follow the Mem opcodes for a similar
+ // quick test within hasLocal().
+ LocalI32, // Local int32 var ("slot")
+ LocalI64, // Local int64 var ("slot")
+ LocalF32, // Local float32 var ("slot")
+ LocalF64, // Local double var ("slot")
+#ifdef ENABLE_WASM_SIMD
+ LocalV128, // Local v128 var ("slot")
+#endif
+ LocalRef, // Local reftype (pointer wide) var ("slot")
+
+ RegisterI32, // 32-bit integer register ("i32reg")
+ RegisterI64, // 64-bit integer register ("i64reg")
+ RegisterF32, // 32-bit floating register ("f32reg")
+ RegisterF64, // 64-bit floating register ("f64reg")
+#ifdef ENABLE_WASM_SIMD
+ RegisterV128, // 128-bit vector register ("v128reg")
+#endif
+ RegisterRef, // reftype (pointer wide) register ("refReg")
+
+ ConstI32, // 32-bit integer constant ("i32val")
+ ConstI64, // 64-bit integer constant ("i64val")
+ ConstF32, // 32-bit floating constant ("f32val")
+ ConstF64, // 64-bit floating constant ("f64val")
+#ifdef ENABLE_WASM_SIMD
+ ConstV128, // 128-bit vector constant ("v128val")
+#endif
+ ConstRef, // reftype (pointer wide) constant ("refval")
+
+ Unknown,
+ };
+
+ Kind kind_;
+
+ static const Kind MemLast = MemRef;
+ static const Kind LocalLast = LocalRef;
+
+ union {
+ RegI32 i32reg_;
+ RegI64 i64reg_;
+ RegPtr refReg_;
+ RegF32 f32reg_;
+ RegF64 f64reg_;
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128reg_;
+#endif
+ int32_t i32val_;
+ int64_t i64val_;
+ intptr_t refval_;
+ float f32val_;
+ double f64val_;
+#ifdef ENABLE_WASM_SIMD
+ V128 v128val_;
+#endif
+ uint32_t slot_;
+ uint32_t offs_;
+ };
+
+ explicit Stk(RegI32 r) : kind_(RegisterI32), i32reg_(r) {}
+ explicit Stk(RegI64 r) : kind_(RegisterI64), i64reg_(r) {}
+ explicit Stk(RegPtr r) : kind_(RegisterRef), refReg_(r) {}
+ explicit Stk(RegF32 r) : kind_(RegisterF32), f32reg_(r) {}
+ explicit Stk(RegF64 r) : kind_(RegisterF64), f64reg_(r) {}
+#ifdef ENABLE_WASM_SIMD
+ explicit Stk(RegV128 r) : kind_(RegisterV128), v128reg_(r) {}
+#endif
+ explicit Stk(int32_t v) : kind_(ConstI32), i32val_(v) {}
+ explicit Stk(int64_t v) : kind_(ConstI64), i64val_(v) {}
+ explicit Stk(float v) : kind_(ConstF32), f32val_(v) {}
+ explicit Stk(double v) : kind_(ConstF64), f64val_(v) {}
+#ifdef ENABLE_WASM_SIMD
+ explicit Stk(V128 v) : kind_(ConstV128), v128val_(v) {}
+#endif
+ explicit Stk(Kind k, uint32_t v) : kind_(k), slot_(v) {
+ MOZ_ASSERT(k > MemLast && k <= LocalLast);
+ }
+ static Stk StkRef(intptr_t v) {
+ Stk s;
+ s.kind_ = ConstRef;
+ s.refval_ = v;
+ return s;
+ }
+ static Stk StackResult(ValType type, uint32_t offs) {
+ Kind k;
+ switch (type.kind()) {
+ case ValType::I32:
+ k = Stk::MemI32;
+ break;
+ case ValType::I64:
+ k = Stk::MemI64;
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ k = Stk::MemV128;
+ break;
+#else
+ MOZ_CRASH("No SIMD");
+#endif
+ case ValType::F32:
+ k = Stk::MemF32;
+ break;
+ case ValType::F64:
+ k = Stk::MemF64;
+ break;
+ case ValType::Ref:
+ k = Stk::MemRef;
+ break;
+ }
+ Stk s;
+ s.setOffs(k, offs);
+ return s;
+ }
+
+ void setOffs(Kind k, uint32_t v) {
+ MOZ_ASSERT(k <= MemLast);
+ kind_ = k;
+ offs_ = v;
+ }
+
+ Kind kind() const { return kind_; }
+ bool isMem() const { return kind_ <= MemLast; }
+
+ RegI32 i32reg() const {
+ MOZ_ASSERT(kind_ == RegisterI32);
+ return i32reg_;
+ }
+ RegI64 i64reg() const {
+ MOZ_ASSERT(kind_ == RegisterI64);
+ return i64reg_;
+ }
+ RegPtr refReg() const {
+ MOZ_ASSERT(kind_ == RegisterRef);
+ return refReg_;
+ }
+ RegF32 f32reg() const {
+ MOZ_ASSERT(kind_ == RegisterF32);
+ return f32reg_;
+ }
+ RegF64 f64reg() const {
+ MOZ_ASSERT(kind_ == RegisterF64);
+ return f64reg_;
+ }
+#ifdef ENABLE_WASM_SIMD
+ RegV128 v128reg() const {
+ MOZ_ASSERT(kind_ == RegisterV128);
+ return v128reg_;
+ }
+#endif
+ int32_t i32val() const {
+ MOZ_ASSERT(kind_ == ConstI32);
+ return i32val_;
+ }
+ int64_t i64val() const {
+ MOZ_ASSERT(kind_ == ConstI64);
+ return i64val_;
+ }
+ intptr_t refval() const {
+ MOZ_ASSERT(kind_ == ConstRef);
+ return refval_;
+ }
+
+ // For these two, use an out-param instead of simply returning, to
+ // use the normal stack and not the x87 FP stack (which has an effect on
+ // NaNs with the signaling bit set).
+
+ void f32val(float* out) const {
+ MOZ_ASSERT(kind_ == ConstF32);
+ *out = f32val_;
+ }
+ void f64val(double* out) const {
+ MOZ_ASSERT(kind_ == ConstF64);
+ *out = f64val_;
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ // For SIMD, do the same as for floats since we're using float registers to
+ // hold vectors; this is just conservative.
+ void v128val(V128* out) const {
+ MOZ_ASSERT(kind_ == ConstV128);
+ *out = v128val_;
+ }
+#endif
+
+ uint32_t slot() const {
+ MOZ_ASSERT(kind_ > MemLast && kind_ <= LocalLast);
+ return slot_;
+ }
+ uint32_t offs() const {
+ MOZ_ASSERT(isMem());
+ return offs_;
+ }
+};
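+
+// For example, an i32 constant on the value stack is just Stk(int32_t(7)):
+// no register or stack memory is used until the value is consumed. A spilled
+// i32, by contrast, is a MemI32 whose offs() records the framePushed() level
+// at which it was flushed to the stack.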
+
+typedef Vector<Stk, 0, SystemAllocPolicy> StkVector;
+
+// MachineStackTracker, used for stack-slot pointerness tracking.
+
+class MachineStackTracker {
+ // Simulates the machine's stack, with one bool per word. Index zero in
+ // this vector corresponds to the highest address in the machine stack. The
+ // last entry corresponds to what SP currently points at. This all assumes
+ // a grow-down stack.
+ //
+ // numPtrs_ contains the number of "true" values in vec_, and is therefore
+ // redundant. But it serves as a constant-time way to detect the common
+ // case where vec_ holds no "true" values.
+ size_t numPtrs_;
+ Vector<bool, 64, SystemAllocPolicy> vec_;
+
+ public:
+ MachineStackTracker() : numPtrs_(0) {}
+
+ ~MachineStackTracker() {
+#ifdef DEBUG
+ size_t n = 0;
+ for (bool b : vec_) {
+ n += (b ? 1 : 0);
+ }
+ MOZ_ASSERT(n == numPtrs_);
+#endif
+ }
+
+ // Clone this MachineStackTracker, writing the result at |dst|.
+ [[nodiscard]] bool cloneTo(MachineStackTracker* dst) {
+ MOZ_ASSERT(dst->vec_.empty());
+ if (!dst->vec_.appendAll(vec_)) {
+ return false;
+ }
+ dst->numPtrs_ = numPtrs_;
+ return true;
+ }
+
+ // Notionally push |n| non-pointers on the stack.
+ [[nodiscard]] bool pushNonGCPointers(size_t n) {
+ return vec_.appendN(false, n);
+ }
+
+ // Mark the stack slot |offsetFromSP| up from the bottom as holding a
+ // pointer.
+ void setGCPointer(size_t offsetFromSP) {
+ // offsetFromSP == 0 denotes the most recently pushed item, == 1 the
+ // second most recently pushed item, etc.
+ MOZ_ASSERT(offsetFromSP < vec_.length());
+
+ size_t offsetFromTop = vec_.length() - 1 - offsetFromSP;
+ numPtrs_ = numPtrs_ + 1 - (vec_[offsetFromTop] ? 1 : 0);
+ vec_[offsetFromTop] = true;
+ }
+
+ // Query the pointerness of the slot |offsetFromSP| up from the bottom.
+ bool isGCPointer(size_t offsetFromSP) {
+ MOZ_ASSERT(offsetFromSP < vec_.length());
+
+ size_t offsetFromTop = vec_.length() - 1 - offsetFromSP;
+ return vec_[offsetFromTop];
+ }
+
+ // Return the number of words tracked by this MachineStackTracker.
+ size_t length() { return vec_.length(); }
+
+ // Return the number of pointer-typed words tracked by this
+ // MachineStackTracker.
+ size_t numPtrs() {
+ MOZ_ASSERT(numPtrs_ <= length());
+ return numPtrs_;
+ }
+
+ // Discard all contents, but (per mozilla::Vector::clear semantics) don't
+ // free or reallocate any dynamic storage associated with |vec_|.
+ void clear() {
+ vec_.clear();
+ numPtrs_ = 0;
+ }
+};
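+
+// For example, after pushNonGCPointers(4) the tracker models four non-pointer
+// words; setGCPointer(1) then marks the second-most-recently pushed word, so
+// numPtrs() == 1, isGCPointer(1) is true, and isGCPointer(0) is still false.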
+
+// StackMapGenerator, which carries all state needed to create stack maps.
+
+enum class HasDebugFrame { No, Yes };
+
+struct StackMapGenerator {
+ private:
+ // --- These are constant for the life of the function's compilation ---
+
+ // For generating stack maps, we'll need to know the offsets of registers
+ // as saved by the trap exit stub.
+ const MachineState& trapExitLayout_;
+ const size_t trapExitLayoutNumWords_;
+
+ // Completed stackmaps are added here
+ StackMaps* stackMaps_;
+
+ // So as to be able to get current offset when creating stack maps
+ const MacroAssembler& masm_;
+
+ public:
+ // --- These are constant once we've completed beginFunction() ---
+
+ // The number of words of arguments passed to this function in memory.
+ size_t numStackArgWords;
+
+ MachineStackTracker machineStackTracker; // tracks machine stack pointerness
+
+ // This holds masm.framePushed at entry to the function's body. It is a
+ // Maybe because createStackMap needs to know whether or not we're still
+ // in the prologue. It makes a Nothing-to-Some transition just once per
+ // function.
+ Maybe<uint32_t> framePushedAtEntryToBody;
+
+ // --- These can change at any point ---
+
+ // This holds masm.framePushed as it would be for a function call
+ // instruction, but excluding the stack area used to pass arguments in
+ // memory. That is, for an upcoming function call, this will hold
+ //
+ // masm.framePushed() at the call instruction -
+ // StackArgAreaSizeUnaligned(argumentTypes)
+ //
+ // This value denotes the lowest-addressed stack word covered by the current
+ // function's stackmap. Words below this point form the highest-addressed
+ // area of the callee's stackmap. Note that all alignment padding above the
+ // arguments-in-memory themselves belongs to the caller's stack map, which
+ // is why this is defined in terms of StackArgAreaSizeUnaligned() rather than
+ // StackArgAreaSizeAligned().
+ //
+ // When not inside a function call setup/teardown sequence, it is Nothing.
+ // It can make Nothing-to/from-Some transitions arbitrarily as we progress
+ // through the function body.
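+ //
+ // For example, if masm.framePushed() at the call instruction is 96 and the
+ // arguments passed in memory occupy 24 bytes (unaligned size), then this
+ // holds Some(72).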
+ Maybe<uint32_t> framePushedExcludingOutboundCallArgs;
+
+ // The number of memory-resident, ref-typed entries on the containing
+ // BaseCompiler::stk_.
+ size_t memRefsOnStk;
+
+ // This is a copy of machineStackTracker that is used only within individual
+ // calls to createStackMap. It is here only to avoid possible heap allocation
+ // costs resulting from making it local to createStackMap().
+ MachineStackTracker augmentedMst;
+
+ StackMapGenerator(StackMaps* stackMaps, const MachineState& trapExitLayout,
+ const size_t trapExitLayoutNumWords,
+ const MacroAssembler& masm)
+ : trapExitLayout_(trapExitLayout),
+ trapExitLayoutNumWords_(trapExitLayoutNumWords),
+ stackMaps_(stackMaps),
+ masm_(masm),
+ numStackArgWords(0),
+ memRefsOnStk(0) {}
+
+ // At the beginning of a function, we may have live roots in registers (as
+ // arguments) at the point where we perform a stack overflow check. This
+ // method generates the "extra" stackmap entries to describe that, in the
+ // case that the check fails and we wind up calling into the wasm exit
+ // stub, as generated by GenerateTrapExit().
+ //
+ // The resulting map must correspond precisely with the stack layout
+ // created for the integer registers as saved by (code generated by)
+ // GenerateTrapExit(). To do that we use trapExitLayout_ and
+ // trapExitLayoutNumWords_, which together comprise a description of the
+ // layout and are created by GenerateTrapExitMachineState().
+ [[nodiscard]] bool generateStackmapEntriesForTrapExit(
+ const ArgTypeVector& args, ExitStubMapVector* extras) {
+ return GenerateStackmapEntriesForTrapExit(args, trapExitLayout_,
+ trapExitLayoutNumWords_, extras);
+ }
+
+ // Creates a stackmap associated with the instruction denoted by
+ // |assemblerOffset|, incorporating pointers from the current operand
+ // stack |stk|, incorporating possible extra pointers in |extra| at the
+ // lower addressed end, and possibly with the associated frame having a
+ // ref-typed DebugFrame as indicated by |refDebugFrame|.
+ [[nodiscard]] bool createStackMap(const char* who,
+ const ExitStubMapVector& extras,
+ uint32_t assemblerOffset,
+ HasDebugFrame debugFrame,
+ const StkVector& stk) {
+ size_t countedPointers = machineStackTracker.numPtrs() + memRefsOnStk;
+#ifndef DEBUG
+ // An important optimization. If there are obviously no pointers, as
+ // we expect in the majority of cases, exit quickly.
+ if (countedPointers == 0 && debugFrame == HasDebugFrame::No) {
+ // We can skip creating the map if there are no |true| elements in
+ // |extras|.
+ bool extrasHasRef = false;
+ for (bool b : extras) {
+ if (b) {
+ extrasHasRef = true;
+ break;
+ }
+ }
+ if (!extrasHasRef) {
+ return true;
+ }
+ }
+#else
+ // In the debug case, create the stack map regardless, and cross-check
+ // the pointer-counting below. We expect the final map to have
+ // |countedPointers| in total. This doesn't include those in the
+ // DebugFrame, but they do not appear in the map's bitmap. Note that
+ // |countedPointers| is debug-only from this point onwards.
+ for (bool b : extras) {
+ countedPointers += (b ? 1 : 0);
+ }
+#endif
+
+ // Start with the frame-setup map, and add operand-stack information to
+ // that. augmentedMst holds live data only within individual calls to
+ // createStackMap.
+ augmentedMst.clear();
+ if (!machineStackTracker.cloneTo(&augmentedMst)) {
+ return false;
+ }
+
+ // At this point, augmentedMst only contains entries covering the
+ // incoming argument area (if any) and for the area allocated by this
+ // function's prologue. We now need to calculate how far the machine's
+ // stack pointer is below where it was at the start of the body. But we
+ // must take care not to include any words pushed as arguments to an
+ // upcoming function call, since those words "belong" to the stackmap of
+ // the callee, not to the stackmap of this function. Note however that
+ // any alignment padding pushed prior to pushing the args *does* belong to
+ // this function.
+ //
+ // That padding is taken into account at the point where
+ // framePushedExcludingOutboundCallArgs is set, viz, in startCallArgs(),
+ // and comprises two components:
+ //
+ // * call->frameAlignAdjustment
+ // * the padding applied to the stack arg area itself. That is:
+ // StackArgAreaSize(argTys) - StackArgAreaSizeUnpadded(argTys)
+ Maybe<uint32_t> framePushedExcludingArgs;
+ if (framePushedAtEntryToBody.isNothing()) {
+ // Still in the prologue. framePushedExcludingArgs remains Nothing.
+ MOZ_ASSERT(framePushedExcludingOutboundCallArgs.isNothing());
+ } else {
+ // In the body.
+ MOZ_ASSERT(masm_.framePushed() >= framePushedAtEntryToBody.value());
+ if (framePushedExcludingOutboundCallArgs.isSome()) {
+ // In the body, and we've potentially pushed some args onto the stack.
+ // We must ignore them when sizing the stackmap.
+ MOZ_ASSERT(masm_.framePushed() >=
+ framePushedExcludingOutboundCallArgs.value());
+ MOZ_ASSERT(framePushedExcludingOutboundCallArgs.value() >=
+ framePushedAtEntryToBody.value());
+ framePushedExcludingArgs =
+ Some(framePushedExcludingOutboundCallArgs.value());
+ } else {
+ // In the body, but not with call args on the stack. The stackmap
+ // must be sized so as to extend all the way "down" to
+ // masm_.framePushed().
+ framePushedExcludingArgs = Some(masm_.framePushed());
+ }
+ }
+
+ if (framePushedExcludingArgs.isSome()) {
+ uint32_t bodyPushedBytes =
+ framePushedExcludingArgs.value() - framePushedAtEntryToBody.value();
+ MOZ_ASSERT(0 == bodyPushedBytes % sizeof(void*));
+ if (!augmentedMst.pushNonGCPointers(bodyPushedBytes / sizeof(void*))) {
+ return false;
+ }
+ }
+
+ // Scan the operand stack, marking pointers in the just-added new
+ // section.
+ MOZ_ASSERT_IF(framePushedAtEntryToBody.isNothing(), stk.empty());
+ MOZ_ASSERT_IF(framePushedExcludingArgs.isNothing(), stk.empty());
+
+ for (const Stk& v : stk) {
+#ifndef DEBUG
+ // We don't track roots in registers, per rationale below, so if this
+ // doesn't hold, something is seriously wrong, and we're likely to get a
+ // GC-related crash.
+ MOZ_RELEASE_ASSERT(v.kind() != Stk::RegisterRef);
+ if (v.kind() != Stk::MemRef) {
+ continue;
+ }
+#else
+ // Take the opportunity to check everything we reasonably can about
+ // operand stack elements.
+ switch (v.kind()) {
+ case Stk::MemI32:
+ case Stk::MemI64:
+ case Stk::MemF32:
+ case Stk::MemF64:
+ case Stk::ConstI32:
+ case Stk::ConstI64:
+ case Stk::ConstF32:
+ case Stk::ConstF64:
+# ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ case Stk::ConstV128:
+# endif
+ // All of these have uninteresting type.
+ continue;
+ case Stk::LocalI32:
+ case Stk::LocalI64:
+ case Stk::LocalF32:
+ case Stk::LocalF64:
+# ifdef ENABLE_WASM_SIMD
+ case Stk::LocalV128:
+# endif
+ // These also have uninteresting type. Check that they live in the
+ // section of stack set up by beginFunction(). The unguarded use of
+ // |value()| here is safe due to the assertion above this loop.
+ MOZ_ASSERT(v.offs() <= framePushedAtEntryToBody.value());
+ continue;
+ case Stk::RegisterI32:
+ case Stk::RegisterI64:
+ case Stk::RegisterF32:
+ case Stk::RegisterF64:
+# ifdef ENABLE_WASM_SIMD
+ case Stk::RegisterV128:
+# endif
+ // These also have uninteresting type, but more to the point: all
+ // registers holding live values should have been flushed to the
+ // machine stack immediately prior to the instruction to which this
+ // stackmap pertains. So these can't happen.
+ MOZ_CRASH("createStackMap: operand stack has Register-non-Ref");
+ case Stk::MemRef:
+ // This is the only case we care about. We'll handle it after the
+ // switch.
+ break;
+ case Stk::LocalRef:
+ // We need the stackmap to mention this pointer, but it should
+ // already be in the machineStackTracker section created by
+ // beginFunction().
+ MOZ_ASSERT(v.offs() <= framePushedAtEntryToBody.value());
+ continue;
+ case Stk::ConstRef:
+ // This can currently only be a null pointer.
+ MOZ_ASSERT(v.refval() == 0);
+ continue;
+ case Stk::RegisterRef:
+ // This can't happen, per rationale above.
+ MOZ_CRASH("createStackMap: operand stack contains RegisterRef");
+ default:
+ MOZ_CRASH("createStackMap: unknown operand stack element");
+ }
+#endif
+ // v.offs() holds masm.framePushed() at the point immediately after it
+ // was pushed on the stack. Since it's still on the stack,
+ // masm.framePushed() can't be less.
+ MOZ_ASSERT(v.offs() <= framePushedExcludingArgs.value());
+ uint32_t offsFromMapLowest = framePushedExcludingArgs.value() - v.offs();
+ MOZ_ASSERT(0 == offsFromMapLowest % sizeof(void*));
+ augmentedMst.setGCPointer(offsFromMapLowest / sizeof(void*));
+ }
+
+ // Create the final StackMap. The initial map is zeroed out, so there's
+ // no need to write zero bits in it.
+ const uint32_t extraWords = extras.length();
+ const uint32_t augmentedMstWords = augmentedMst.length();
+ const uint32_t numMappedWords = extraWords + augmentedMstWords;
+ StackMap* stackMap = StackMap::create(numMappedWords);
+ if (!stackMap) {
+ return false;
+ }
+
+ {
+ // First the exit stub extra words, if any.
+ uint32_t i = 0;
+ for (bool b : extras) {
+ if (b) {
+ stackMap->setBit(i);
+ }
+ i++;
+ }
+ }
+ // Followed by the "main" part of the map.
+ for (uint32_t i = 0; i < augmentedMstWords; i++) {
+ if (augmentedMst.isGCPointer(i)) {
+ stackMap->setBit(extraWords + i);
+ }
+ }
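+
+ // At this point the bitmap is indexed from the lower-addressed end: bits
+ // [0, extraWords) describe the trap-exit extra words, and bits
+ // [extraWords, numMappedWords) come from augmentedMst and cover the rest of
+ // the frame, including any spilled operand-stack refs.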
+
+ stackMap->setExitStubWords(extraWords);
+
+ // Record in the map how far down from the highest address the Frame* is.
+ // Take the opportunity to check that we haven't marked any part of the
+ // Frame itself as a pointer.
+ stackMap->setFrameOffsetFromTop(numStackArgWords +
+ sizeof(Frame) / sizeof(void*));
+#ifdef DEBUG
+ for (uint32_t i = 0; i < sizeof(Frame) / sizeof(void*); i++) {
+ MOZ_ASSERT(stackMap->getBit(stackMap->numMappedWords -
+ stackMap->frameOffsetFromTop + i) == 0);
+ }
+#endif
+
+ // Note the presence of a ref-typed DebugFrame, if any.
+ if (debugFrame == HasDebugFrame::Yes) {
+ stackMap->setHasDebugFrame();
+ }
+
+ // Add the completed map to the running collection thereof.
+ if (!stackMaps_->add((uint8_t*)(uintptr_t)assemblerOffset, stackMap)) {
+ stackMap->destroy();
+ return false;
+ }
+
+#ifdef DEBUG
+ {
+ // Crosscheck the map pointer counting.
+ uint32_t nw = stackMap->numMappedWords;
+ uint32_t np = 0;
+ for (uint32_t i = 0; i < nw; i++) {
+ np += stackMap->getBit(i);
+ }
+ MOZ_ASSERT(size_t(np) == countedPointers);
+ }
+#endif
+
+ return true;
+ }
+};
+
+// The baseline compiler proper.
+
+class BaseCompiler final : public BaseCompilerInterface {
+ using Local = BaseStackFrame::Local;
+ using LabelVector = Vector<NonAssertingLabel, 8, SystemAllocPolicy>;
+
+ // Bit set used for simple bounds check elimination. Capping this at 64
+ // locals makes sense; even 32 locals would probably be OK in practice.
+ //
+ // For more information about BCE, see the block comment above
+ // popMemoryAccess(), below.
+
+ using BCESet = uint64_t;
+
+ // Control node, representing labels and stack heights at join points.
+
+ struct Control {
+ NonAssertingLabel label; // The "exit" label
+ NonAssertingLabel otherLabel; // Used for the "else" branch of if-then-else
+ StackHeight stackHeight; // From BaseStackFrame
+ uint32_t stackSize; // Value stack height
+ BCESet bceSafeOnEntry; // Bounds check info flowing into the item
+ BCESet bceSafeOnExit; // Bounds check info flowing out of the item
+ bool deadOnArrival; // deadCode_ was set on entry to the region
+ bool deadThenBranch; // deadCode_ was set on exit from "then"
+
+ Control()
+ : stackHeight(StackHeight::Invalid()),
+ stackSize(UINT32_MAX),
+ bceSafeOnEntry(0),
+ bceSafeOnExit(~BCESet(0)),
+ deadOnArrival(false),
+ deadThenBranch(false) {}
+ };
+
+ class NothingVector {
+ Nothing unused_;
+
+ public:
+ bool resize(size_t length) { return true; }
+ Nothing& operator[](size_t) { return unused_; }
+ Nothing& back() { return unused_; }
+ };
+
+ struct BaseCompilePolicy {
+ // The baseline compiler tracks values on a stack of its own -- it
+ // needs to scan that stack for spilling -- and thus has no need
+ // for the values maintained by the iterator.
+ using Value = Nothing;
+ using ValueVector = NothingVector;
+
+ // The baseline compiler uses the iterator's control stack, attaching
+ // its own control information.
+ using ControlItem = Control;
+ };
+
+ using BaseOpIter = OpIter<BaseCompilePolicy>;
+
+ // The baseline compiler will use OOL code more sparingly than
+ // Baldr since our code is not high performance and frills like
+ // code density and branch prediction friendliness will be less
+ // important.
+
+ class OutOfLineCode : public TempObject {
+ private:
+ NonAssertingLabel entry_;
+ NonAssertingLabel rejoin_;
+ StackHeight stackHeight_;
+
+ public:
+ OutOfLineCode() : stackHeight_(StackHeight::Invalid()) {}
+
+ Label* entry() { return &entry_; }
+ Label* rejoin() { return &rejoin_; }
+
+ void setStackHeight(StackHeight stackHeight) {
+ MOZ_ASSERT(!stackHeight_.isValid());
+ stackHeight_ = stackHeight;
+ }
+
+ void bind(BaseStackFrame* fr, MacroAssembler* masm) {
+ MOZ_ASSERT(stackHeight_.isValid());
+ masm->bind(&entry_);
+ fr->setStackHeight(stackHeight_);
+ }
+
+ // The generate() method must be careful about register use
+ // because it will be invoked when there is a register
+ // assignment in the BaseCompiler that does not correspond
+ // to the available registers when the generated OOL code is
+ // executed. The register allocator *must not* be called.
+ //
+ // The best strategy is for the creator of the OOL object to
+ // allocate all temps that the OOL code will need.
+ //
+ // Input, output, and temp registers are embedded in the OOL
+ // object and are known to the code generator.
+ //
+ // Scratch registers are available to use in OOL code.
+ //
+ // All other registers must be explicitly saved and restored
+ // by the OOL code before being used.
+
+ virtual void generate(MacroAssembler* masm) = 0;
+ };
+
+ enum class LatentOp { None, Compare, Eqz };
+
+ struct AccessCheck {
+ AccessCheck()
+ : omitBoundsCheck(false),
+ omitAlignmentCheck(false),
+ onlyPointerAlignment(false) {}
+
+ // If `omitAlignmentCheck` is true then we need check neither the
+ // pointer nor the offset. Otherwise, if `onlyPointerAlignment` is true
+ // then we need check only the pointer. Otherwise, check the sum of
+ // pointer and offset.
+
+ bool omitBoundsCheck;
+ bool omitAlignmentCheck;
+ bool onlyPointerAlignment;
+ };
+
+ const ModuleEnvironment& moduleEnv_;
+ const CompilerEnvironment& compilerEnv_;
+ BaseOpIter iter_;
+ const FuncCompileInput& func_;
+ size_t lastReadCallSite_;
+ TempAllocator::Fallible alloc_;
+ const ValTypeVector& locals_; // Types of parameters and locals
+ bool deadCode_; // Flag indicating we should decode & discard the opcode
+ BCESet
+ bceSafe_; // Locals that have been bounds checked and not updated since
+ ValTypeVector SigD_;
+ ValTypeVector SigF_;
+ NonAssertingLabel returnLabel_;
+
+ LatentOp latentOp_; // Latent operation for branch (seen next)
+ ValType latentType_;  // Operand type, if latentOp_ is not None
+ Assembler::Condition
+ latentIntCmp_; // Comparison operator, if latentOp_ == Compare, int types
+ Assembler::DoubleCondition
+ latentDoubleCmp_; // Comparison operator, if latentOp_ == Compare, float
+ // types
+
+ FuncOffsets offsets_;
+ MacroAssembler& masm; // No '_' suffix - too tedious...
+ BaseRegAlloc ra; // Ditto
+ BaseStackFrame fr;
+
+ StackMapGenerator stackMapGenerator_;
+
+ BaseStackFrame::LocalVector localInfo_;
+ Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
+
+ // On specific platforms we sometimes need to use specific registers.
+
+ SpecificRegs specific_;
+
+ // There are more members scattered throughout.
+
+ public:
+ BaseCompiler(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ const FuncCompileInput& input, const ValTypeVector& locals,
+ const MachineState& trapExitLayout,
+ size_t trapExitLayoutNumWords, Decoder& decoder,
+ StkVector& stkSource, TempAllocator* alloc, MacroAssembler* masm,
+ StackMaps* stackMaps);
+ ~BaseCompiler();
+
+ [[nodiscard]] bool init();
+
+ FuncOffsets finish();
+
+ [[nodiscard]] bool emitFunction();
+ void emitInitStackLocals();
+
+ const FuncType& funcType() const {
+ return *moduleEnv_.funcs[func_.index].type;
+ }
+
+ const TypeIdDesc& funcTypeId() const {
+ return *moduleEnv_.funcs[func_.index].typeId;
+ }
+
+ // Used by some of the ScratchRegister implementations.
+ operator MacroAssembler&() const { return masm; }
+ operator BaseRegAlloc&() { return ra; }
+
+ bool usesSharedMemory() const { return moduleEnv_.usesSharedMemory(); }
+
+ private:
+ ////////////////////////////////////////////////////////////
+ //
+ // Out of line code management.
+
+ [[nodiscard]] OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool) {
+ if (!ool || !outOfLine_.append(ool)) {
+ return nullptr;
+ }
+ ool->setStackHeight(fr.stackHeight());
+ return ool;
+ }
+
+ [[nodiscard]] bool generateOutOfLineCode() {
+ for (uint32_t i = 0; i < outOfLine_.length(); i++) {
+ OutOfLineCode* ool = outOfLine_[i];
+ ool->bind(&fr, &masm);
+ ool->generate(&masm);
+ }
+
+ return !masm.oom();
+ }
+
+ // Utility.
+
+ const Local& localFromSlot(uint32_t slot, MIRType type) {
+ MOZ_ASSERT(localInfo_[slot].type == type);
+ return localInfo_[slot];
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // High-level register management.
+
+ bool isAvailableI32(RegI32 r) { return ra.isAvailableI32(r); }
+ bool isAvailableI64(RegI64 r) { return ra.isAvailableI64(r); }
+ bool isAvailableRef(RegPtr r) { return ra.isAvailablePtr(r); }
+ bool isAvailableF32(RegF32 r) { return ra.isAvailableF32(r); }
+ bool isAvailableF64(RegF64 r) { return ra.isAvailableF64(r); }
+#ifdef ENABLE_WASM_SIMD
+ bool isAvailableV128(RegV128 r) { return ra.isAvailableV128(r); }
+#endif
+
+ [[nodiscard]] RegI32 needI32() { return ra.needI32(); }
+ [[nodiscard]] RegI64 needI64() { return ra.needI64(); }
+ [[nodiscard]] RegPtr needRef() { return ra.needPtr(); }
+ [[nodiscard]] RegF32 needF32() { return ra.needF32(); }
+ [[nodiscard]] RegF64 needF64() { return ra.needF64(); }
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] RegV128 needV128() { return ra.needV128(); }
+#endif
+
+ void needI32(RegI32 specific) { ra.needI32(specific); }
+ void needI64(RegI64 specific) { ra.needI64(specific); }
+ void needRef(RegPtr specific) { ra.needPtr(specific); }
+ void needF32(RegF32 specific) { ra.needF32(specific); }
+ void needF64(RegF64 specific) { ra.needF64(specific); }
+#ifdef ENABLE_WASM_SIMD
+ void needV128(RegV128 specific) { ra.needV128(specific); }
+#endif
+
+#if defined(JS_CODEGEN_ARM)
+ [[nodiscard]] RegI64 needI64Pair() { return ra.needI64Pair(); }
+#endif
+
+ void freeI32(RegI32 r) { ra.freeI32(r); }
+ void freeI64(RegI64 r) { ra.freeI64(r); }
+ void freeRef(RegPtr r) { ra.freePtr(r); }
+ void freeF32(RegF32 r) { ra.freeF32(r); }
+ void freeF64(RegF64 r) { ra.freeF64(r); }
+#ifdef ENABLE_WASM_SIMD
+ void freeV128(RegV128 r) { ra.freeV128(r); }
+#endif
+
+ void freeI64Except(RegI64 r, RegI32 except) {
+#ifdef JS_PUNBOX64
+ MOZ_ASSERT(r.reg == except);
+#else
+ MOZ_ASSERT(r.high == except || r.low == except);
+ freeI64(r);
+ needI32(except);
+#endif
+ }
+
+ void maybeFreeI32(RegI32 r) {
+ if (r.isValid()) {
+ freeI32(r);
+ }
+ }
+
+ void maybeFreeI64(RegI64 r) {
+ if (r.isValid()) {
+ freeI64(r);
+ }
+ }
+
+ void maybeFreeF64(RegF64 r) {
+ if (r.isValid()) {
+ freeF64(r);
+ }
+ }
+
+ void needI32NoSync(RegI32 r) {
+ MOZ_ASSERT(isAvailableI32(r));
+ needI32(r);
+ }
+
+ // TODO / OPTIMIZE: need2xI32() can be optimized along with needI32()
+ // to avoid sync(). (Bug 1316802)
+
+ void need2xI32(RegI32 r0, RegI32 r1) {
+ needI32(r0);
+ needI32(r1);
+ }
+
+ void need2xI64(RegI64 r0, RegI64 r1) {
+ needI64(r0);
+ needI64(r1);
+ }
+
+ RegI32 fromI64(RegI64 r) { return RegI32(lowPart(r)); }
+
+#ifdef JS_PUNBOX64
+ RegI64 fromI32(RegI32 r) { return RegI64(Register64(r)); }
+#endif
+
+ RegI64 widenI32(RegI32 r) {
+ MOZ_ASSERT(!isAvailableI32(r));
+#ifdef JS_PUNBOX64
+ return fromI32(r);
+#else
+ RegI32 high = needI32();
+ return RegI64(Register64(high, r));
+#endif
+ }
+
+ RegI32 narrowI64(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return RegI32(r.reg);
+#else
+ freeI32(RegI32(r.high));
+ return RegI32(r.low);
+#endif
+ }
+
+ RegI32 narrowPtr(RegPtr r) { return RegI32(r); }
+
+ RegI32 lowPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return RegI32(r.reg);
+#else
+ return RegI32(r.low);
+#endif
+ }
+
+ RegI32 maybeHighPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return RegI32::Invalid();
+#else
+ return RegI32(r.high);
+#endif
+ }
+
+ void maybeClearHighPart(RegI64 r) {
+#if !defined(JS_PUNBOX64)
+ moveImm32(0, RegI32(r.high));
+#endif
+ }
+
+ void moveI32(RegI32 src, RegI32 dest) {
+ if (src != dest) {
+ masm.move32(src, dest);
+ }
+ }
+
+ void moveI64(RegI64 src, RegI64 dest) {
+ if (src != dest) {
+ masm.move64(src, dest);
+ }
+ }
+
+ void moveRef(RegPtr src, RegPtr dest) {
+ if (src != dest) {
+ masm.movePtr(src, dest);
+ }
+ }
+
+ void moveF64(RegF64 src, RegF64 dest) {
+ if (src != dest) {
+ masm.moveDouble(src, dest);
+ }
+ }
+
+ void moveF32(RegF32 src, RegF32 dest) {
+ if (src != dest) {
+ masm.moveFloat32(src, dest);
+ }
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void moveV128(RegV128 src, RegV128 dest) {
+ if (src != dest) {
+ masm.moveSimd128(src, dest);
+ }
+ }
+#endif
+
+ ////////////////////////////////////////////////////////////////////////////
+ //
+ // Block parameters and results.
+ //
+ // Blocks may have multiple parameters and multiple results. Blocks can also
+ // be the target of branches: the entry for loops, and the exit for
+ // non-loops.
+ //
+ // Passing multiple values to a non-branch target (i.e., the entry of a
+ // "block") falls out naturally: any items on the value stack can flow
+ // directly from one block to another.
+ //
+ // However, for branch targets, we need to allocate well-known locations for
+ // the branch values. The approach taken in the baseline compiler is to
+ // allocate registers to the top N values (currently N=1), and then stack
+ // locations for the rest.
+ //
+
+ enum class RegKind { All, OnlyGPRs };
+
+ inline void needResultRegisters(ResultType type, RegKind which) {
+ if (type.empty()) {
+ return;
+ }
+
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ // Register results are visited first; when we see a stack result we're
+ // done.
+ if (!result.inRegister()) {
+ return;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ needI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ needI64(RegI64(result.gpr64()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ if (which == RegKind::All) {
+ needV128(RegV128(result.fpr()));
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F32:
+ if (which == RegKind::All) {
+ needF32(RegF32(result.fpr()));
+ }
+ break;
+ case ValType::F64:
+ if (which == RegKind::All) {
+ needF64(RegF64(result.fpr()));
+ }
+ break;
+ case ValType::Ref:
+ needRef(RegPtr(result.gpr()));
+ break;
+ }
+ }
+ }
+
+#ifdef JS_CODEGEN_X64
+ inline void maskResultRegisters(ResultType type) {
+ MOZ_ASSERT(JitOptions.spectreIndexMasking);
+
+ if (type.empty()) {
+ return;
+ }
+
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ if (result.inRegister() && result.type().kind() == ValType::I32) {
+ masm.movl(result.gpr(), result.gpr());
+ }
+ }
+ }
+#endif
+
+ inline void freeResultRegisters(ResultType type, RegKind which) {
+ if (type.empty()) {
+ return;
+ }
+
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ // Register results are visited first; when we see a stack result we're
+ // done.
+ if (!result.inRegister()) {
+ return;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ freeI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ freeI64(RegI64(result.gpr64()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ if (which == RegKind::All) {
+ freeV128(RegV128(result.fpr()));
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F32:
+ if (which == RegKind::All) {
+ freeF32(RegF32(result.fpr()));
+ }
+ break;
+ case ValType::F64:
+ if (which == RegKind::All) {
+ freeF64(RegF64(result.fpr()));
+ }
+ break;
+ case ValType::Ref:
+ freeRef(RegPtr(result.gpr()));
+ break;
+ }
+ }
+ }
+
+ void needIntegerResultRegisters(ResultType type) {
+ needResultRegisters(type, RegKind::OnlyGPRs);
+ }
+ void freeIntegerResultRegisters(ResultType type) {
+ freeResultRegisters(type, RegKind::OnlyGPRs);
+ }
+
+ void needResultRegisters(ResultType type) {
+ needResultRegisters(type, RegKind::All);
+ }
+ void freeResultRegisters(ResultType type) {
+ freeResultRegisters(type, RegKind::All);
+ }
+
+ void assertResultRegistersAvailable(ResultType type) {
+#ifdef DEBUG
+ for (ABIResultIter iter(type); !iter.done(); iter.next()) {
+ ABIResult result = iter.cur();
+ if (!result.inRegister()) {
+ return;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ MOZ_ASSERT(isAvailableI32(RegI32(result.gpr())));
+ break;
+ case ValType::I64:
+ MOZ_ASSERT(isAvailableI64(RegI64(result.gpr64())));
+ break;
+ case ValType::V128:
+# ifdef ENABLE_WASM_SIMD
+ MOZ_ASSERT(isAvailableV128(RegV128(result.fpr())));
+ break;
+# else
+ MOZ_CRASH("No SIMD support");
+# endif
+ case ValType::F32:
+ MOZ_ASSERT(isAvailableF32(RegF32(result.fpr())));
+ break;
+ case ValType::F64:
+ MOZ_ASSERT(isAvailableF64(RegF64(result.fpr())));
+ break;
+ case ValType::Ref:
+ MOZ_ASSERT(isAvailableRef(RegPtr(result.gpr())));
+ break;
+ }
+ }
+#endif
+ }
+
+ void captureResultRegisters(ResultType type) {
+ assertResultRegistersAvailable(type);
+ needResultRegisters(type);
+ }
+
+ void captureCallResultRegisters(ResultType type) {
+ captureResultRegisters(type);
+#ifdef JS_CODEGEN_X64
+ if (JitOptions.spectreIndexMasking) {
+ maskResultRegisters(type);
+ }
+#endif
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Value stack and spilling.
+ //
+ // The value stack facilitates some on-the-fly register allocation
+ // and immediate-constant use. It tracks constants, latent
+ // references to locals, register contents, and values on the CPU
+ // stack.
+ //
+ // The stack can be flushed to memory using sync(). This is handy
+ // to avoid problems with control flow and messy register usage
+ // patterns.
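+ //
+ // As a small illustrative example: when compiling
+ // (i32.add (i32.const 1) (local.get 0)), the two operands can sit on the
+ // value stack as a constant and a latent local reference; no machine code
+ // is emitted for them until the add forces both into registers.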
+
+ // This is the value stack actually used during compilation. It is a
+ // StkVector rather than a StkVector& since constantly dereferencing a
+ // StkVector& adds about 0.5% or more to the compiler's dynamic instruction
+ // count.
+ StkVector stk_;
+
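+ // Headroom reserved on the value stack so that the pushes performed while
+ // emitting a single opcode can be made infallible (see, for example, the
+ // reserve() call in pushResults()).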
+ static constexpr size_t MaxPushesPerOpcode = 10;
+
+ // BaselineCompileFunctions() "lends" us the StkVector to use in this
+ // BaseCompiler object, and it is installed in |stk_| in our constructor.
+ // This avoids having to malloc/free the vector's contents at each
+ // creation/destruction of a BaseCompiler object. It does, however, mean
+ // that we need to hold on to a reference to BaselineCompileFunctions()'s
+ // vector, so that we can swap (give) its contents back when this
+ // BaseCompiler object is destroyed. This significantly reduces the heap
+ // turnover of the baseline compiler. See bug 1532592.
+ StkVector& stkSource_;
+
+#ifdef DEBUG
+ size_t countMemRefsOnStk() {
+ size_t nRefs = 0;
+ for (Stk& v : stk_) {
+ if (v.kind() == Stk::MemRef) {
+ nRefs++;
+ }
+ }
+ return nRefs;
+ }
+#endif
+
+ template <typename T>
+ void push(T item) {
+ // None of the single-arg Stk constructors create a Stk::MemRef, so
+ // there's no need to increment stackMapGenerator_.memRefsOnStk here.
+ stk_.infallibleEmplaceBack(Stk(item));
+ }
+
+ void pushConstRef(intptr_t v) { stk_.infallibleEmplaceBack(Stk::StkRef(v)); }
+
+ void loadConstI32(const Stk& src, RegI32 dest) {
+ moveImm32(src.i32val(), dest);
+ }
+
+ void loadMemI32(const Stk& src, RegI32 dest) {
+ fr.loadStackI32(src.offs(), dest);
+ }
+
+ void loadLocalI32(const Stk& src, RegI32 dest) {
+ fr.loadLocalI32(localFromSlot(src.slot(), MIRType::Int32), dest);
+ }
+
+ void loadRegisterI32(const Stk& src, RegI32 dest) {
+ moveI32(src.i32reg(), dest);
+ }
+
+ void loadConstI64(const Stk& src, RegI64 dest) {
+ moveImm64(src.i64val(), dest);
+ }
+
+ void loadMemI64(const Stk& src, RegI64 dest) {
+ fr.loadStackI64(src.offs(), dest);
+ }
+
+ void loadLocalI64(const Stk& src, RegI64 dest) {
+ fr.loadLocalI64(localFromSlot(src.slot(), MIRType::Int64), dest);
+ }
+
+ void loadRegisterI64(const Stk& src, RegI64 dest) {
+ moveI64(src.i64reg(), dest);
+ }
+
+ void loadConstRef(const Stk& src, RegPtr dest) {
+ moveImmRef(src.refval(), dest);
+ }
+
+ void loadMemRef(const Stk& src, RegPtr dest) {
+ fr.loadStackPtr(src.offs(), dest);
+ }
+
+ void loadLocalRef(const Stk& src, RegPtr dest) {
+ fr.loadLocalPtr(localFromSlot(src.slot(), MIRType::RefOrNull), dest);
+ }
+
+ void loadRegisterRef(const Stk& src, RegPtr dest) {
+ moveRef(src.refReg(), dest);
+ }
+
+ void loadConstF64(const Stk& src, RegF64 dest) {
+ double d;
+ src.f64val(&d);
+ masm.loadConstantDouble(d, dest);
+ }
+
+ void loadMemF64(const Stk& src, RegF64 dest) {
+ fr.loadStackF64(src.offs(), dest);
+ }
+
+ void loadLocalF64(const Stk& src, RegF64 dest) {
+ fr.loadLocalF64(localFromSlot(src.slot(), MIRType::Double), dest);
+ }
+
+ void loadRegisterF64(const Stk& src, RegF64 dest) {
+ moveF64(src.f64reg(), dest);
+ }
+
+ void loadConstF32(const Stk& src, RegF32 dest) {
+ float f;
+ src.f32val(&f);
+ masm.loadConstantFloat32(f, dest);
+ }
+
+ void loadMemF32(const Stk& src, RegF32 dest) {
+ fr.loadStackF32(src.offs(), dest);
+ }
+
+ void loadLocalF32(const Stk& src, RegF32 dest) {
+ fr.loadLocalF32(localFromSlot(src.slot(), MIRType::Float32), dest);
+ }
+
+ void loadRegisterF32(const Stk& src, RegF32 dest) {
+ moveF32(src.f32reg(), dest);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void loadConstV128(const Stk& src, RegV128 dest) {
+ V128 f;
+ src.v128val(&f);
+ masm.loadConstantSimd128(SimdConstant::CreateX16((int8_t*)f.bytes), dest);
+ }
+
+ void loadMemV128(const Stk& src, RegV128 dest) {
+ fr.loadStackV128(src.offs(), dest);
+ }
+
+ void loadLocalV128(const Stk& src, RegV128 dest) {
+ fr.loadLocalV128(localFromSlot(src.slot(), MIRType::Simd128), dest);
+ }
+
+ void loadRegisterV128(const Stk& src, RegV128 dest) {
+ moveV128(src.v128reg(), dest);
+ }
+#endif
+
+ void loadI32(const Stk& src, RegI32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI32:
+ loadConstI32(src, dest);
+ break;
+ case Stk::MemI32:
+ loadMemI32(src, dest);
+ break;
+ case Stk::LocalI32:
+ loadLocalI32(src, dest);
+ break;
+ case Stk::RegisterI32:
+ loadRegisterI32(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I32 on stack");
+ }
+ }
+
+ void loadI64(const Stk& src, RegI64 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ loadConstI64(src, dest);
+ break;
+ case Stk::MemI64:
+ loadMemI64(src, dest);
+ break;
+ case Stk::LocalI64:
+ loadLocalI64(src, dest);
+ break;
+ case Stk::RegisterI64:
+ loadRegisterI64(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I64 on stack");
+ }
+ }
+
+#if !defined(JS_PUNBOX64)
+ void loadI64Low(const Stk& src, RegI32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ moveImm32(int32_t(src.i64val()), dest);
+ break;
+ case Stk::MemI64:
+ fr.loadStackI64Low(src.offs(), dest);
+ break;
+ case Stk::LocalI64:
+ fr.loadLocalI64Low(localFromSlot(src.slot(), MIRType::Int64), dest);
+ break;
+ case Stk::RegisterI64:
+ moveI32(RegI32(src.i64reg().low), dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I64 on stack");
+ }
+ }
+
+ void loadI64High(const Stk& src, RegI32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ moveImm32(int32_t(src.i64val() >> 32), dest);
+ break;
+ case Stk::MemI64:
+ fr.loadStackI64High(src.offs(), dest);
+ break;
+ case Stk::LocalI64:
+ fr.loadLocalI64High(localFromSlot(src.slot(), MIRType::Int64), dest);
+ break;
+ case Stk::RegisterI64:
+ moveI32(RegI32(src.i64reg().high), dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected I64 on stack");
+ }
+ }
+#endif
+
+ void loadF64(const Stk& src, RegF64 dest) {
+ switch (src.kind()) {
+ case Stk::ConstF64:
+ loadConstF64(src, dest);
+ break;
+ case Stk::MemF64:
+ loadMemF64(src, dest);
+ break;
+ case Stk::LocalF64:
+ loadLocalF64(src, dest);
+ break;
+ case Stk::RegisterF64:
+ loadRegisterF64(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected F64 on stack");
+ }
+ }
+
+ void loadF32(const Stk& src, RegF32 dest) {
+ switch (src.kind()) {
+ case Stk::ConstF32:
+ loadConstF32(src, dest);
+ break;
+ case Stk::MemF32:
+ loadMemF32(src, dest);
+ break;
+ case Stk::LocalF32:
+ loadLocalF32(src, dest);
+ break;
+ case Stk::RegisterF32:
+ loadRegisterF32(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected F32 on stack");
+ }
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void loadV128(const Stk& src, RegV128 dest) {
+ switch (src.kind()) {
+ case Stk::ConstV128:
+ loadConstV128(src, dest);
+ break;
+ case Stk::MemV128:
+ loadMemV128(src, dest);
+ break;
+ case Stk::LocalV128:
+ loadLocalV128(src, dest);
+ break;
+ case Stk::RegisterV128:
+ loadRegisterV128(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected V128 on stack");
+ }
+ }
+#endif
+
+ void loadRef(const Stk& src, RegPtr dest) {
+ switch (src.kind()) {
+ case Stk::ConstRef:
+ loadConstRef(src, dest);
+ break;
+ case Stk::MemRef:
+ loadMemRef(src, dest);
+ break;
+ case Stk::LocalRef:
+ loadLocalRef(src, dest);
+ break;
+ case Stk::RegisterRef:
+ loadRegisterRef(src, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected ref on stack");
+ }
+ }
+
+ // Flush all local and register value stack elements to memory.
+ //
+ // TODO / OPTIMIZE: As this is fairly expensive and causes worse
+ // code to be emitted subsequently, it is useful to avoid calling
+ // it. (Bug 1316802)
+ //
+ // Some optimization has been done already. Remaining
+ // opportunities:
+ //
+ // - It would be interesting to see if we can specialize it
+ // before calls with particularly simple signatures, or where
+ // we can do parallel assignment of register arguments, or
+ // similar. See notes in emitCall().
+ //
+ // - Operations that need specific registers: multiply, quotient,
+ // remainder, will tend to sync because the registers we need
+ // will tend to be allocated. We may be able to avoid that by
+ // prioritizing registers differently (takeLast instead of
+ // takeFirst) but we may also be able to allocate an unused
+ // register on demand to free up one we need, thus avoiding the
+ // sync. That type of fix would go into needI32().
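+ //
+ // After sync() returns, every entry on the value stack is either a
+ // constant or a Mem* entry referring to the CPU stack; no register or
+ // latent-local entries remain.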
+
+ void sync() final {
+ size_t start = 0;
+ size_t lim = stk_.length();
+
+ for (size_t i = lim; i > 0; i--) {
+ // Memory opcodes come first in the enum, so a single check against
+ // MemLast is enough.
+ if (stk_[i - 1].kind() <= Stk::MemLast) {
+ start = i;
+ break;
+ }
+ }
+
+ for (size_t i = start; i < lim; i++) {
+ Stk& v = stk_[i];
+ switch (v.kind()) {
+ case Stk::LocalI32: {
+ ScratchI32 scratch(*this);
+ loadLocalI32(v, scratch);
+ uint32_t offs = fr.pushPtr(scratch);
+ v.setOffs(Stk::MemI32, offs);
+ break;
+ }
+ case Stk::RegisterI32: {
+ uint32_t offs = fr.pushPtr(v.i32reg());
+ freeI32(v.i32reg());
+ v.setOffs(Stk::MemI32, offs);
+ break;
+ }
+ case Stk::LocalI64: {
+ ScratchI32 scratch(*this);
+#ifdef JS_PUNBOX64
+ loadI64(v, fromI32(scratch));
+ uint32_t offs = fr.pushPtr(scratch);
+#else
+ fr.loadLocalI64High(localFromSlot(v.slot(), MIRType::Int64), scratch);
+ fr.pushPtr(scratch);
+ fr.loadLocalI64Low(localFromSlot(v.slot(), MIRType::Int64), scratch);
+ uint32_t offs = fr.pushPtr(scratch);
+#endif
+ v.setOffs(Stk::MemI64, offs);
+ break;
+ }
+ case Stk::RegisterI64: {
+#ifdef JS_PUNBOX64
+ uint32_t offs = fr.pushPtr(v.i64reg().reg);
+ freeI64(v.i64reg());
+#else
+ fr.pushPtr(v.i64reg().high);
+ uint32_t offs = fr.pushPtr(v.i64reg().low);
+ freeI64(v.i64reg());
+#endif
+ v.setOffs(Stk::MemI64, offs);
+ break;
+ }
+ case Stk::LocalF64: {
+ ScratchF64 scratch(*this);
+ loadF64(v, scratch);
+ uint32_t offs = fr.pushDouble(scratch);
+ v.setOffs(Stk::MemF64, offs);
+ break;
+ }
+ case Stk::RegisterF64: {
+ uint32_t offs = fr.pushDouble(v.f64reg());
+ freeF64(v.f64reg());
+ v.setOffs(Stk::MemF64, offs);
+ break;
+ }
+ case Stk::LocalF32: {
+ ScratchF32 scratch(*this);
+ loadF32(v, scratch);
+ uint32_t offs = fr.pushFloat32(scratch);
+ v.setOffs(Stk::MemF32, offs);
+ break;
+ }
+ case Stk::RegisterF32: {
+ uint32_t offs = fr.pushFloat32(v.f32reg());
+ freeF32(v.f32reg());
+ v.setOffs(Stk::MemF32, offs);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case Stk::LocalV128: {
+ ScratchV128 scratch(*this);
+ loadV128(v, scratch);
+ uint32_t offs = fr.pushV128(scratch);
+ v.setOffs(Stk::MemV128, offs);
+ break;
+ }
+ case Stk::RegisterV128: {
+ uint32_t offs = fr.pushV128(v.v128reg());
+ freeV128(v.v128reg());
+ v.setOffs(Stk::MemV128, offs);
+ break;
+ }
+#endif
+ case Stk::LocalRef: {
+ ScratchPtr scratch(*this);
+ loadLocalRef(v, scratch);
+ uint32_t offs = fr.pushPtr(scratch);
+ v.setOffs(Stk::MemRef, offs);
+ stackMapGenerator_.memRefsOnStk++;
+ break;
+ }
+ case Stk::RegisterRef: {
+ uint32_t offs = fr.pushPtr(v.refReg());
+ freeRef(v.refReg());
+ v.setOffs(Stk::MemRef, offs);
+ stackMapGenerator_.memRefsOnStk++;
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+ }
+
+ void saveTempPtr(RegPtr r) final {
+ MOZ_ASSERT(!ra.isAvailablePtr(r));
+ fr.pushPtr(r);
+ ra.freePtr(r);
+ MOZ_ASSERT(ra.isAvailablePtr(r));
+ }
+
+ void restoreTempPtr(RegPtr r) final {
+ MOZ_ASSERT(ra.isAvailablePtr(r));
+ ra.needPtr(r);
+ fr.popPtr(r);
+ MOZ_ASSERT(!ra.isAvailablePtr(r));
+ }
+
+ // Various methods for creating a stack map. Stack maps are indexed by the
+ // lowest address of the instruction immediately *after* the instruction of
+ // interest. In practice that means either: the return point of a call, the
+ // instruction immediately after a trap instruction (the "resume"
+ // instruction), or the instruction immediately following a no-op (when
+ // debugging is enabled).
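+ //
+ // For example, for a call the map is keyed by the return point: the
+ // offset handed to createStackMap is taken immediately after the call
+ // instruction has been emitted.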
+
+ // Create a vanilla stack map.
+ [[nodiscard]] bool createStackMap(const char* who) {
+ const ExitStubMapVector noExtras;
+ return createStackMap(who, noExtras, masm.currentOffset());
+ }
+
+ // Create a stack map as above, but for a custom assembler offset.
+ [[nodiscard]] bool createStackMap(const char* who,
+ CodeOffset assemblerOffset) {
+ const ExitStubMapVector noExtras;
+ return createStackMap(who, noExtras, assemblerOffset.offset());
+ }
+
+ // The most general stack map construction.
+ [[nodiscard]] bool createStackMap(const char* who,
+ const ExitStubMapVector& extras,
+ uint32_t assemblerOffset) {
+ auto debugFrame =
+ compilerEnv_.debugEnabled() ? HasDebugFrame::Yes : HasDebugFrame::No;
+ return stackMapGenerator_.createStackMap(who, extras, assemblerOffset,
+ debugFrame, stk_);
+ }
+
+ // This is an optimization used to avoid calling sync() for setLocal(): if
+ // the local has no latent, unresolved reference on the value stack, then
+ // we can skip the sync.
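+ //
+ // For example (sketch): after "local.get 0; local.get 0" the value stack
+ // holds two latent references to slot 0; a following "local.set 0" must
+ // sync them to memory first, or the setLocal would change the value they
+ // are supposed to denote. If slot 0 is not referenced on the stack, the
+ // sync can be skipped.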
+
+ bool hasLocal(uint32_t slot) {
+ for (size_t i = stk_.length(); i > 0; i--) {
+ // Memory opcodes come first in the enum, so a single check against
+ // MemLast is enough.
+ Stk::Kind kind = stk_[i - 1].kind();
+ if (kind <= Stk::MemLast) {
+ return false;
+ }
+
+ // Local opcodes follow the memory opcodes in the enum, so a single check
+ // against LocalLast is sufficient.
+ if (kind <= Stk::LocalLast && stk_[i - 1].slot() == slot) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void syncLocal(uint32_t slot) {
+ if (hasLocal(slot)) {
+ sync(); // TODO / OPTIMIZE: Improve this? (Bug 1316817)
+ }
+ }
+
+ // Push the register r onto the stack.
+
+ void pushI32(RegI32 r) {
+ MOZ_ASSERT(!isAvailableI32(r));
+ push(Stk(r));
+ }
+
+ void pushI64(RegI64 r) {
+ MOZ_ASSERT(!isAvailableI64(r));
+ push(Stk(r));
+ }
+
+ void pushRef(RegPtr r) {
+ MOZ_ASSERT(!isAvailableRef(r));
+ push(Stk(r));
+ }
+
+ void pushF64(RegF64 r) {
+ MOZ_ASSERT(!isAvailableF64(r));
+ push(Stk(r));
+ }
+
+ void pushF32(RegF32 r) {
+ MOZ_ASSERT(!isAvailableF32(r));
+ push(Stk(r));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void pushV128(RegV128 r) {
+ MOZ_ASSERT(!isAvailableV128(r));
+ push(Stk(r));
+ }
+#endif
+
+ // Push the value onto the stack.
+
+ void pushI32(int32_t v) { push(Stk(v)); }
+
+ void pushI64(int64_t v) { push(Stk(v)); }
+
+ void pushRef(intptr_t v) { pushConstRef(v); }
+
+ void pushF64(double v) { push(Stk(v)); }
+
+ void pushF32(float v) { push(Stk(v)); }
+
+#ifdef ENABLE_WASM_SIMD
+ void pushV128(V128 v) { push(Stk(v)); }
+#endif
+
+ // Push the local slot onto the stack. The slot will not be read
+ // here; it will be read when it is consumed, or when a side
+ // effect to the slot forces its value to be saved.
+
+ void pushLocalI32(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalI32, slot));
+ }
+
+ void pushLocalI64(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalI64, slot));
+ }
+
+ void pushLocalRef(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalRef, slot));
+ }
+
+ void pushLocalF64(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalF64, slot));
+ }
+
+ void pushLocalF32(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalF32, slot));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void pushLocalV128(uint32_t slot) {
+ stk_.infallibleEmplaceBack(Stk(Stk::LocalV128, slot));
+ }
+#endif
+
+ // Call only from other popI32() variants.
+ // v must be the stack top. May pop the CPU stack.
+
+ void popI32(const Stk& v, RegI32 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstI32:
+ loadConstI32(v, dest);
+ break;
+ case Stk::LocalI32:
+ loadLocalI32(v, dest);
+ break;
+ case Stk::MemI32:
+ fr.popPtr(dest);
+ break;
+ case Stk::RegisterI32:
+ loadRegisterI32(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected int on stack");
+ }
+ }
+
+ [[nodiscard]] RegI32 popI32() {
+ Stk& v = stk_.back();
+ RegI32 r;
+ if (v.kind() == Stk::RegisterI32) {
+ r = v.i32reg();
+ } else {
+ popI32(v, (r = needI32()));
+ }
+ stk_.popBack();
+ return r;
+ }
+
+ RegI32 popI32(RegI32 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterI32 && v.i32reg() == specific)) {
+ needI32(specific);
+ popI32(v, specific);
+ if (v.kind() == Stk::RegisterI32) {
+ freeI32(v.i32reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ // Call only from other popV128() variants.
+ // v must be the stack top. May pop the CPU stack.
+
+ void popV128(const Stk& v, RegV128 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstV128:
+ loadConstV128(v, dest);
+ break;
+ case Stk::LocalV128:
+ loadLocalV128(v, dest);
+ break;
+ case Stk::MemV128:
+ fr.popV128(dest);
+ break;
+ case Stk::RegisterV128:
+ loadRegisterV128(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected int on stack");
+ }
+ }
+
+ [[nodiscard]] RegV128 popV128() {
+ Stk& v = stk_.back();
+ RegV128 r;
+ if (v.kind() == Stk::RegisterV128) {
+ r = v.v128reg();
+ } else {
+ popV128(v, (r = needV128()));
+ }
+ stk_.popBack();
+ return r;
+ }
+
+ RegV128 popV128(RegV128 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterV128 && v.v128reg() == specific)) {
+ needV128(specific);
+ popV128(v, specific);
+ if (v.kind() == Stk::RegisterV128) {
+ freeV128(v.v128reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+#endif
+
+ // Call only from other popI64() variants.
+ // v must be the stack top. May pop the CPU stack.
+
+ void popI64(const Stk& v, RegI64 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstI64:
+ loadConstI64(v, dest);
+ break;
+ case Stk::LocalI64:
+ loadLocalI64(v, dest);
+ break;
+ case Stk::MemI64:
+#ifdef JS_PUNBOX64
+ fr.popPtr(dest.reg);
+#else
+ fr.popPtr(dest.low);
+ fr.popPtr(dest.high);
+#endif
+ break;
+ case Stk::RegisterI64:
+ loadRegisterI64(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected long on stack");
+ }
+ }
+
+ [[nodiscard]] RegI64 popI64() {
+ Stk& v = stk_.back();
+ RegI64 r;
+ if (v.kind() == Stk::RegisterI64) {
+ r = v.i64reg();
+ } else {
+ popI64(v, (r = needI64()));
+ }
+ stk_.popBack();
+ return r;
+ }
+
+ // Note that on 32-bit systems the stack top can overlap one half of
+ // "specific". We could optimize that case, but for simplicity, if the
+ // register does not match exactly, we just force the stack top to memory
+ // and then read it back in.
+
+ RegI64 popI64(RegI64 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterI64 && v.i64reg() == specific)) {
+ needI64(specific);
+ popI64(v, specific);
+ if (v.kind() == Stk::RegisterI64) {
+ freeI64(v.i64reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+ // Call only from other popRef() variants.
+ // v must be the stack top. May pop the CPU stack.
+
+ void popRef(const Stk& v, RegPtr dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstRef:
+ loadConstRef(v, dest);
+ break;
+ case Stk::LocalRef:
+ loadLocalRef(v, dest);
+ break;
+ case Stk::MemRef:
+ fr.popPtr(dest);
+ break;
+ case Stk::RegisterRef:
+ loadRegisterRef(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected ref on stack");
+ }
+ }
+
+ RegPtr popRef(RegPtr specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterRef && v.refReg() == specific)) {
+ needRef(specific);
+ popRef(v, specific);
+ if (v.kind() == Stk::RegisterRef) {
+ freeRef(v.refReg());
+ }
+ }
+
+ stk_.popBack();
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk--;
+ }
+ return specific;
+ }
+
+ [[nodiscard]] RegPtr popRef() {
+ Stk& v = stk_.back();
+ RegPtr r;
+ if (v.kind() == Stk::RegisterRef) {
+ r = v.refReg();
+ } else {
+ popRef(v, (r = needRef()));
+ }
+ stk_.popBack();
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk--;
+ }
+ return r;
+ }
+
+ // Call only from other popF64() variants.
+ // v must be the stack top. May pop the CPU stack.
+
+ void popF64(const Stk& v, RegF64 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstF64:
+ loadConstF64(v, dest);
+ break;
+ case Stk::LocalF64:
+ loadLocalF64(v, dest);
+ break;
+ case Stk::MemF64:
+ fr.popDouble(dest);
+ break;
+ case Stk::RegisterF64:
+ loadRegisterF64(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected double on stack");
+ }
+ }
+
+ [[nodiscard]] RegF64 popF64() {
+ Stk& v = stk_.back();
+ RegF64 r;
+ if (v.kind() == Stk::RegisterF64) {
+ r = v.f64reg();
+ } else {
+ popF64(v, (r = needF64()));
+ }
+ stk_.popBack();
+ return r;
+ }
+
+ RegF64 popF64(RegF64 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterF64 && v.f64reg() == specific)) {
+ needF64(specific);
+ popF64(v, specific);
+ if (v.kind() == Stk::RegisterF64) {
+ freeF64(v.f64reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+ // Call only from other popF32() variants.
+ // v must be the stack top. May pop the CPU stack.
+
+ void popF32(const Stk& v, RegF32 dest) {
+ MOZ_ASSERT(&v == &stk_.back());
+ switch (v.kind()) {
+ case Stk::ConstF32:
+ loadConstF32(v, dest);
+ break;
+ case Stk::LocalF32:
+ loadLocalF32(v, dest);
+ break;
+ case Stk::MemF32:
+ fr.popFloat32(dest);
+ break;
+ case Stk::RegisterF32:
+ loadRegisterF32(v, dest);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected float on stack");
+ }
+ }
+
+ [[nodiscard]] RegF32 popF32() {
+ Stk& v = stk_.back();
+ RegF32 r;
+ if (v.kind() == Stk::RegisterF32) {
+ r = v.f32reg();
+ } else {
+ popF32(v, (r = needF32()));
+ }
+ stk_.popBack();
+ return r;
+ }
+
+ RegF32 popF32(RegF32 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterF32 && v.f32reg() == specific)) {
+ needF32(specific);
+ popF32(v, specific);
+ if (v.kind() == Stk::RegisterF32) {
+ freeF32(v.f32reg());
+ }
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+ [[nodiscard]] bool popConstI32(int32_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c = v.i32val();
+ stk_.popBack();
+ return true;
+ }
+
+ [[nodiscard]] bool popConstI64(int64_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI64) {
+ return false;
+ }
+ *c = v.i64val();
+ stk_.popBack();
+ return true;
+ }
+
+ [[nodiscard]] bool peekConstI32(int32_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c = v.i32val();
+ return true;
+ }
+
+ [[nodiscard]] bool peekConstI64(int64_t* c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI64) {
+ return false;
+ }
+ *c = v.i64val();
+ return true;
+ }
+
+ [[nodiscard]] bool peek2xI32(int32_t* c0, int32_t* c1) {
+ MOZ_ASSERT(stk_.length() >= 2);
+ const Stk& v0 = *(stk_.end() - 1);
+ const Stk& v1 = *(stk_.end() - 2);
+ if (v0.kind() != Stk::ConstI32 || v1.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c0 = v0.i32val();
+ *c1 = v1.i32val();
+ return true;
+ }
+
+ [[nodiscard]] bool popConstPositivePowerOfTwoI32(int32_t* c,
+ uint_fast8_t* power,
+ int32_t cutoff) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI32) {
+ return false;
+ }
+ *c = v.i32val();
+ if (*c <= cutoff || !IsPowerOfTwo(static_cast<uint32_t>(*c))) {
+ return false;
+ }
+ *power = FloorLog2(*c);
+ stk_.popBack();
+ return true;
+ }
+
+ [[nodiscard]] bool popConstPositivePowerOfTwoI64(int64_t* c,
+ uint_fast8_t* power,
+ int64_t cutoff) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI64) {
+ return false;
+ }
+ *c = v.i64val();
+ if (*c <= cutoff || !IsPowerOfTwo(static_cast<uint64_t>(*c))) {
+ return false;
+ }
+ *power = FloorLog2(*c);
+ stk_.popBack();
+ return true;
+ }
+
+ [[nodiscard]] bool peekLocalI32(uint32_t* local) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::LocalI32) {
+ return false;
+ }
+ *local = v.slot();
+ return true;
+ }
+
+ // TODO / OPTIMIZE (Bug 1316818): At the moment we use the Wasm
+ // inter-procedure ABI for block returns, which allocates ReturnReg as the
+ // single block result register. It is possible other choices would lead to
+ // better register allocation, as ReturnReg is often first in the register set
+ // and will be heavily wanted by the register allocator that uses takeFirst().
+ //
+ // Obvious options:
+ // - pick a register at the back of the register set
+ // - pick a random register per block (different blocks have
+ // different join regs)
+
+ void popRegisterResults(ABIResultIter& iter) {
+ // Pop register results. Note that in the single-value case, popping to a
+ // register may cause a sync(); for multi-value we sync'd already.
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (!result.inRegister()) {
+ // TODO / OPTIMIZE: We sync here to avoid solving the general parallel
+ // move problem in popStackResults. However we could avoid syncing the
+ // values that are going to registers anyway, if they are already in
+ // registers.
+ sync();
+ break;
+ }
+ switch (result.type().kind()) {
+ case ValType::I32:
+ popI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ popI64(RegI64(result.gpr64()));
+ break;
+ case ValType::F32:
+ popF32(RegF32(result.fpr()));
+ break;
+ case ValType::F64:
+ popF64(RegF64(result.fpr()));
+ break;
+ case ValType::Ref:
+ popRef(RegPtr(result.gpr()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ popV128(RegV128(result.fpr()));
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ }
+ }
+
+ void popStackResults(ABIResultIter& iter, StackHeight stackBase) {
+ MOZ_ASSERT(!iter.done());
+
+ // The iterator should be advanced beyond register results, and register
+ // results should be popped already from the value stack.
+ uint32_t alreadyPopped = iter.index();
+
+ // At this point, only stack arguments are remaining. Iterate through them
+ // to measure how much stack space they will take up.
+ for (; !iter.done(); iter.next()) {
+ MOZ_ASSERT(iter.cur().onStack());
+ }
+
+ // Calculate the space needed to store stack results, in bytes.
+ uint32_t stackResultBytes = iter.stackBytesConsumedSoFar();
+ MOZ_ASSERT(stackResultBytes);
+
+ // Compute the stack height including the stack results. Note that it's
+ // possible that this call expands the stack, for example if some of the
+ // results are supplied by constants and so are not already on the machine
+ // stack.
+ uint32_t endHeight = fr.prepareStackResultArea(stackBase, stackResultBytes);
+
+ // Find a free GPR to use when shuffling stack values. If none is
+ // available, push ReturnReg and restore it after we're done.
+ bool saved = false;
+ RegPtr temp = ra.needTempPtr(RegPtr(ReturnReg), &saved);
+
+ // The sequence of Stk values is in the same order on the machine stack as
+ // the result locations, but there is a complication: constant values are
+ // not actually pushed on the machine stack. (At this point registers and
+ // locals have been spilled already.) So, moving the Stk values into place
+ // isn't simply a shuffle-down or shuffle-up operation. There is a part of
+ // the Stk sequence that shuffles toward the FP, a part that's already in
+ // place, and a part that shuffles toward the SP. After shuffling, we have
+ // to materialize the constants.
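+ //
+ // Illustrative sketch: if one of the results is a constant, the mem
+ // entries sit contiguously on the machine stack with no gap reserved for
+ // it, so some of them must shuffle toward the FP, others toward the SP,
+ // and the constant is finally stored as an immediate into its slot.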
+
+ // Shuffle mem values toward the frame pointer, copying deepest values
+ // first. Stop when we run out of results, get to a register result, or
+ // find a Stk value that is closer to the FP than the result.
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ if (!result.onStack()) {
+ break;
+ }
+ MOZ_ASSERT(result.stackOffset() < stackResultBytes);
+ uint32_t destHeight = endHeight - result.stackOffset();
+ uint32_t stkBase = stk_.length() - (iter.count() - alreadyPopped);
+ Stk& v = stk_[stkBase + iter.index()];
+ if (v.isMem()) {
+ uint32_t srcHeight = v.offs();
+ if (srcHeight <= destHeight) {
+ break;
+ }
+ fr.shuffleStackResultsTowardFP(srcHeight, destHeight, result.size(),
+ temp);
+ }
+ }
+
+ // Reset iterator and skip register results.
+ for (iter.reset(); !iter.done(); iter.next()) {
+ if (iter.cur().onStack()) {
+ break;
+ }
+ }
+
+ // Revisit top stack values, shuffling mem values toward the stack pointer,
+ // copying shallowest values first.
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ MOZ_ASSERT(result.onStack());
+ MOZ_ASSERT(result.stackOffset() < stackResultBytes);
+ uint32_t destHeight = endHeight - result.stackOffset();
+ Stk& v = stk_[stk_.length() - (iter.index() - alreadyPopped) - 1];
+ if (v.isMem()) {
+ uint32_t srcHeight = v.offs();
+ if (srcHeight >= destHeight) {
+ break;
+ }
+ fr.shuffleStackResultsTowardSP(srcHeight, destHeight, result.size(),
+ temp);
+ }
+ }
+
+ // Reset iterator and skip register results, which are already popped off
+ // the value stack.
+ for (iter.reset(); !iter.done(); iter.next()) {
+ if (iter.cur().onStack()) {
+ break;
+ }
+ }
+
+ // Materialize constants and pop the remaining items from the value stack.
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ uint32_t resultHeight = endHeight - result.stackOffset();
+ Stk& v = stk_.back();
+ switch (v.kind()) {
+ case Stk::ConstI32:
+ fr.storeImmediatePtrToStack(uint32_t(v.i32val_), resultHeight, temp);
+ break;
+ case Stk::ConstF32:
+ fr.storeImmediateF32ToStack(v.f32val_, resultHeight, temp);
+ break;
+ case Stk::ConstI64:
+ fr.storeImmediateI64ToStack(v.i64val_, resultHeight, temp);
+ break;
+ case Stk::ConstF64:
+ fr.storeImmediateF64ToStack(v.f64val_, resultHeight, temp);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case Stk::ConstV128:
+ fr.storeImmediateV128ToStack(v.v128val_, resultHeight, temp);
+ break;
+#endif
+ case Stk::ConstRef:
+ fr.storeImmediatePtrToStack(v.refval_, resultHeight, temp);
+ break;
+ case Stk::MemRef:
+ // Update bookkeeping as we pop the Stk entry.
+ stackMapGenerator_.memRefsOnStk--;
+ break;
+ default:
+ MOZ_ASSERT(v.isMem());
+ break;
+ }
+ stk_.popBack();
+ }
+
+ ra.freeTempPtr(temp, saved);
+
+ // This will pop the stack if needed.
+ fr.finishStackResultArea(stackBase, stackResultBytes);
+ }
+
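+ // Fallthrough: the continuation is the code that textually follows, so
+ // the stack pointer is already (or will be left) at the correct height.
+ // Jump: we are branching to a label, so the stack may first need to be
+ // popped down to the target's height.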
+ enum class ContinuationKind { Fallthrough, Jump };
+
+ void popBlockResults(ResultType type, StackHeight stackBase,
+ ContinuationKind kind) {
+ if (!type.empty()) {
+ ABIResultIter iter(type);
+ popRegisterResults(iter);
+ if (!iter.done()) {
+ popStackResults(iter, stackBase);
+ // Because popStackResults might clobber the stack, it leaves the stack
+ // pointer already in the right place for the continuation, whether the
+ // continuation is a jump or fallthrough.
+ return;
+ }
+ }
+ // We get here if there are no stack results. For a fallthrough, the stack
+ // is already at the right height. For a jump, we may need to pop the stack
+ // pointer if the continuation's stack height is lower than the current
+ // stack height.
+ if (kind == ContinuationKind::Jump) {
+ fr.popStackBeforeBranch(stackBase, type);
+ }
+ }
+
+ Stk captureStackResult(const ABIResult& result, StackHeight resultsBase,
+ uint32_t stackResultBytes) {
+ MOZ_ASSERT(result.onStack());
+ uint32_t offs = fr.locateStackResult(result, resultsBase, stackResultBytes);
+ return Stk::StackResult(result.type(), offs);
+ }
+
+ [[nodiscard]] bool pushResults(ResultType type, StackHeight resultsBase) {
+ if (type.empty()) {
+ return true;
+ }
+
+ if (type.length() > 1) {
+ if (!stk_.reserve(stk_.length() + type.length() + MaxPushesPerOpcode)) {
+ return false;
+ }
+ }
+
+ // We need to push the results in reverse order, so first iterate through
+ // all results to determine the locations of stack result types.
+ ABIResultIter iter(type);
+ while (!iter.done()) {
+ iter.next();
+ }
+ uint32_t stackResultBytes = iter.stackBytesConsumedSoFar();
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ if (!result.onStack()) {
+ break;
+ }
+ Stk v = captureStackResult(result, resultsBase, stackResultBytes);
+ push(v);
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk++;
+ }
+ }
+
+ for (; !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ MOZ_ASSERT(result.inRegister());
+ switch (result.type().kind()) {
+ case ValType::I32:
+ pushI32(RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ pushI64(RegI64(result.gpr64()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ pushV128(RegV128(result.fpr()));
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F32:
+ pushF32(RegF32(result.fpr()));
+ break;
+ case ValType::F64:
+ pushF64(RegF64(result.fpr()));
+ break;
+ case ValType::Ref:
+ pushRef(RegPtr(result.gpr()));
+ break;
+ }
+ }
+
+ return true;
+ }
+
+ [[nodiscard]] bool pushBlockResults(ResultType type) {
+ return pushResults(type, controlItem().stackHeight);
+ }
+
+ // A combination of popBlockResults + pushBlockResults, used when entering a
+ // block with a control-flow join (loops) or split (if) to shuffle the
+ // fallthrough block parameters into the locations expected by the
+ // continuation.
+ [[nodiscard]] bool topBlockParams(ResultType type) {
+ // This function should only be called when entering a block with a
+ // control-flow join at the entry, where there are no live temporaries in
+ // the current block.
+ StackHeight base = controlItem().stackHeight;
+ MOZ_ASSERT(fr.stackResultsBase(stackConsumed(type.length())) == base);
+ popBlockResults(type, base, ContinuationKind::Fallthrough);
+ return pushBlockResults(type);
+ }
+
+ // A combination of popBlockResults + pushBlockResults, used before branches
+ // where we don't know the target (br_if / br_table). If and when the branch
+ // is taken, the stack results will be shuffled down into place. For br_if
+ // that has fallthrough, the parameters for the untaken branch flow through to
+ // the continuation.
+ [[nodiscard]] bool topBranchParams(ResultType type, StackHeight* height) {
+ if (type.empty()) {
+ *height = fr.stackHeight();
+ return true;
+ }
+ // There may be temporary values that need spilling; delay computation of
+ // the stack results base until after the popRegisterResults(), which spills
+ // if needed.
+ ABIResultIter iter(type);
+ popRegisterResults(iter);
+ StackHeight base = fr.stackResultsBase(stackConsumed(iter.remaining()));
+ if (!iter.done()) {
+ popStackResults(iter, base);
+ }
+ if (!pushResults(type, base)) {
+ return false;
+ }
+ *height = base;
+ return true;
+ }
+
+ // Conditional branches with fallthrough are preceded by a topBranchParams, so
+ // we know that there are no stack results that need to be materialized. In
+ // that case, we can just shuffle the whole block down before popping the
+ // stack.
+ void shuffleStackResultsBeforeBranch(StackHeight srcHeight,
+ StackHeight destHeight,
+ ResultType type) {
+ uint32_t stackResultBytes = 0;
+
+ if (ABIResultIter::HasStackResults(type)) {
+ MOZ_ASSERT(stk_.length() >= type.length());
+ ABIResultIter iter(type);
+ for (; !iter.done(); iter.next()) {
+#ifdef DEBUG
+ const ABIResult& result = iter.cur();
+ const Stk& v = stk_[stk_.length() - iter.index() - 1];
+ MOZ_ASSERT(v.isMem() == result.onStack());
+#endif
+ }
+
+ stackResultBytes = iter.stackBytesConsumedSoFar();
+ MOZ_ASSERT(stackResultBytes > 0);
+
+ if (srcHeight != destHeight) {
+ // Find a free GPR to use when shuffling stack values. If none
+ // is available, push ReturnReg and restore it after we're done.
+ bool saved = false;
+ RegPtr temp = ra.needTempPtr(RegPtr(ReturnReg), &saved);
+ fr.shuffleStackResultsTowardFP(srcHeight, destHeight, stackResultBytes,
+ temp);
+ ra.freeTempPtr(temp, saved);
+ }
+ }
+
+ fr.popStackBeforeBranch(destHeight, stackResultBytes);
+ }
+
+ // Return the amount of execution stack consumed by the top numval
+ // values on the value stack.
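+ // Only Mem* entries occupy execution stack; constants, latent locals, and
+ // register entries contribute nothing to the total.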
+
+ size_t stackConsumed(size_t numval) {
+ size_t size = 0;
+ MOZ_ASSERT(numval <= stk_.length());
+ for (uint32_t i = stk_.length() - 1; numval > 0; numval--, i--) {
+ Stk& v = stk_[i];
+ switch (v.kind()) {
+ case Stk::MemRef:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI32:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI64:
+ size += BaseStackFrame::StackSizeOfInt64;
+ break;
+ case Stk::MemF64:
+ size += BaseStackFrame::StackSizeOfDouble;
+ break;
+ case Stk::MemF32:
+ size += BaseStackFrame::StackSizeOfFloat;
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ size += BaseStackFrame::StackSizeOfV128;
+ break;
+#endif
+ default:
+ break;
+ }
+ }
+ return size;
+ }
+
+ void popValueStackTo(uint32_t stackSize) {
+ for (uint32_t i = stk_.length(); i > stackSize; i--) {
+ Stk& v = stk_[i - 1];
+ switch (v.kind()) {
+ case Stk::RegisterI32:
+ freeI32(v.i32reg());
+ break;
+ case Stk::RegisterI64:
+ freeI64(v.i64reg());
+ break;
+ case Stk::RegisterF64:
+ freeF64(v.f64reg());
+ break;
+ case Stk::RegisterF32:
+ freeF32(v.f32reg());
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case Stk::RegisterV128:
+ freeV128(v.v128reg());
+ break;
+#endif
+ case Stk::RegisterRef:
+ freeRef(v.refReg());
+ break;
+ case Stk::MemRef:
+ stackMapGenerator_.memRefsOnStk--;
+ break;
+ default:
+ break;
+ }
+ }
+ stk_.shrinkTo(stackSize);
+ }
+
+ void popValueStackBy(uint32_t items) {
+ popValueStackTo(stk_.length() - items);
+ }
+
+ void dropValue() {
+ if (peek(0).isMem()) {
+ fr.popBytes(stackConsumed(1));
+ }
+ popValueStackBy(1);
+ }
+
+ // Peek at the stack, for calls.
+
+ Stk& peek(uint32_t relativeDepth) {
+ return stk_[stk_.length() - 1 - relativeDepth];
+ }
+
+#ifdef DEBUG
+ // Check that we're not leaking registers by comparing the
+ // state of the stack + available registers with the set of
+ // all available registers.
+
+ // Call this between opcodes.
+ void performRegisterLeakCheck() {
+ BaseRegAlloc::LeakCheck check(ra);
+ for (size_t i = 0; i < stk_.length(); i++) {
+ Stk& item = stk_[i];
+ switch (item.kind_) {
+ case Stk::RegisterI32:
+ check.addKnownI32(item.i32reg());
+ break;
+ case Stk::RegisterI64:
+ check.addKnownI64(item.i64reg());
+ break;
+ case Stk::RegisterF32:
+ check.addKnownF32(item.f32reg());
+ break;
+ case Stk::RegisterF64:
+ check.addKnownF64(item.f64reg());
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case Stk::RegisterV128:
+ check.addKnownV128(item.v128reg());
+ break;
+# endif
+ case Stk::RegisterRef:
+ check.addKnownRef(item.refReg());
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ void assertStackInvariants() const {
+ if (deadCode_) {
+ // Nonlocal control flow can pass values in stack locations in a way that
+ // isn't accounted for by the value stack. In dead code, which occurs
+ // after unconditional non-local control flow, there is no invariant to
+ // assert.
+ return;
+ }
+ size_t size = 0;
+ for (const Stk& v : stk_) {
+ switch (v.kind()) {
+ case Stk::MemRef:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI32:
+ size += BaseStackFrame::StackSizeOfPtr;
+ break;
+ case Stk::MemI64:
+ size += BaseStackFrame::StackSizeOfInt64;
+ break;
+ case Stk::MemF64:
+ size += BaseStackFrame::StackSizeOfDouble;
+ break;
+ case Stk::MemF32:
+ size += BaseStackFrame::StackSizeOfFloat;
+ break;
+# ifdef ENABLE_WASM_SIMD
+ case Stk::MemV128:
+ size += BaseStackFrame::StackSizeOfV128;
+ break;
+# endif
+ default:
+ MOZ_ASSERT(!v.isMem());
+ break;
+ }
+ }
+ MOZ_ASSERT(size == fr.dynamicHeight());
+ }
+
+#endif
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Control stack
+
+ void initControl(Control& item, ResultType params) {
+ // Make sure the constructor was run properly
+ MOZ_ASSERT(!item.stackHeight.isValid() && item.stackSize == UINT32_MAX);
+
+ uint32_t paramCount = deadCode_ ? 0 : params.length();
+ uint32_t stackParamSize = stackConsumed(paramCount);
+ item.stackHeight = fr.stackResultsBase(stackParamSize);
+ item.stackSize = stk_.length() - paramCount;
+ item.deadOnArrival = deadCode_;
+ item.bceSafeOnEntry = bceSafe_;
+ }
+
+ Control& controlItem() { return iter_.controlItem(); }
+
+ Control& controlItem(uint32_t relativeDepth) {
+ return iter_.controlItem(relativeDepth);
+ }
+
+ Control& controlOutermost() { return iter_.controlOutermost(); }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Labels
+
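+ // A breakable point is a patchable nop (preceded by a Tls reload) that
+ // can later be patched into a call to the debug trap handler; the
+ // CallSiteDesc records where and why.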
+ void insertBreakablePoint(CallSiteDesc::Kind kind) {
+ fr.loadTlsPtr(WasmTlsReg);
+ masm.nopPatchableToCall(CallSiteDesc(iter_.lastOpcodeOffset(), kind));
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Function prologue and epilogue.
+
+ [[nodiscard]] bool beginFunction() {
+ JitSpew(JitSpew_Codegen, "# ========================================");
+ JitSpew(JitSpew_Codegen, "# Emitting wasm baseline code");
+ JitSpew(JitSpew_Codegen,
+ "# beginFunction: start of function prologue for index %d",
+ (int)func_.index);
+
+ // Make a start on the stack map for this function. Inspect the args so
+ // as to determine which of them are both in-memory and pointer-typed, and
+ // add entries to machineStackTracker as appropriate.
+
+ ArgTypeVector args(funcType());
+ size_t inboundStackArgBytes = StackArgAreaSizeUnaligned(args);
+ MOZ_ASSERT(inboundStackArgBytes % sizeof(void*) == 0);
+ stackMapGenerator_.numStackArgWords = inboundStackArgBytes / sizeof(void*);
+
+ MOZ_ASSERT(stackMapGenerator_.machineStackTracker.length() == 0);
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ stackMapGenerator_.numStackArgWords)) {
+ return false;
+ }
+
+ // Identify GC-managed pointers passed on the stack.
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ ABIArg argLoc = *i;
+ if (argLoc.kind() == ABIArg::Stack &&
+ args[i.index()] == MIRType::RefOrNull) {
+ uint32_t offset = argLoc.offsetFromArgBase();
+ MOZ_ASSERT(offset < inboundStackArgBytes);
+ MOZ_ASSERT(offset % sizeof(void*) == 0);
+ stackMapGenerator_.machineStackTracker.setGCPointer(offset /
+ sizeof(void*));
+ }
+ }
+
+ GenerateFunctionPrologue(masm, *moduleEnv_.funcs[func_.index].typeId,
+ compilerEnv_.mode() == CompileMode::Tier1
+ ? Some(func_.index)
+ : Nothing(),
+ &offsets_);
+
+ // GenerateFunctionPrologue pushes exactly one wasm::Frame's worth of
+ // data, and none of the pushed values are GC pointers. Hence:
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ sizeof(Frame) / sizeof(void*))) {
+ return false;
+ }
+
+ // Initialize DebugFrame fields before the stack overflow trap so that
+ // we have the invariant that all observable Frames in a debugEnabled
+ // Module have valid DebugFrames.
+ if (compilerEnv_.debugEnabled()) {
+#ifdef JS_CODEGEN_ARM64
+ static_assert(DebugFrame::offsetOfFrame() % WasmStackAlignment == 0,
+ "aligned");
+#endif
+ masm.reserveStack(DebugFrame::offsetOfFrame());
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ DebugFrame::offsetOfFrame() / sizeof(void*))) {
+ return false;
+ }
+
+ masm.store32(
+ Imm32(func_.index),
+ Address(masm.getStackPointer(), DebugFrame::offsetOfFuncIndex()));
+ masm.store32(Imm32(0), Address(masm.getStackPointer(),
+ DebugFrame::offsetOfFlags()));
+
+ // No need to initialize cachedReturnJSValue_ or any ref-typed spilled
+ // register results, as they are traced if and only if a corresponding
+ // flag (hasCachedReturnJSValue or hasSpilledRefRegisterResult) is set.
+ }
+
+ // Generate a stack-overflow check and its associated stack map.
+
+ fr.checkStack(ABINonArgReg0, BytecodeOffset(func_.lineOrBytecode));
+
+ ExitStubMapVector extras;
+ if (!stackMapGenerator_.generateStackmapEntriesForTrapExit(args, &extras)) {
+ return false;
+ }
+ if (!createStackMap("stack check", extras, masm.currentOffset())) {
+ return false;
+ }
+
+ size_t reservedBytes = fr.fixedAllocSize() - masm.framePushed();
+ MOZ_ASSERT(0 == (reservedBytes % sizeof(void*)));
+
+ masm.reserveStack(reservedBytes);
+ fr.onFixedStackAllocated();
+ if (!stackMapGenerator_.machineStackTracker.pushNonGCPointers(
+ reservedBytes / sizeof(void*))) {
+ return false;
+ }
+
+ // Locals are stack allocated. Mark ref-typed ones in the stackmap
+ // accordingly.
+ for (const Local& l : localInfo_) {
+ // Locals that are stack arguments were already added to the stack map
+ // before pushing the frame.
+ if (l.type == MIRType::RefOrNull && !l.isStackArgument()) {
+ uint32_t offs = fr.localOffsetFromSp(l);
+ MOZ_ASSERT(0 == (offs % sizeof(void*)));
+ stackMapGenerator_.machineStackTracker.setGCPointer(offs /
+ sizeof(void*));
+ }
+ }
+
+ // Copy arguments from registers to stack.
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ if (args.isSyntheticStackResultPointerArg(i.index())) {
+ // If there are stack results and the pointer to stack results
+ // was passed in a register, store it to the stack.
+ if (i->argInRegister()) {
+ fr.storeIncomingStackResultAreaPtr(RegPtr(i->gpr()));
+ }
+ // If we're in a debug frame, copy the stack result pointer arg
+ // to a well-known place.
+ if (compilerEnv_.debugEnabled()) {
+ Register target = ABINonArgReturnReg0;
+ fr.loadIncomingStackResultAreaPtr(RegPtr(target));
+ size_t debugFrameOffset =
+ masm.framePushed() - DebugFrame::offsetOfFrame();
+ size_t debugStackResultsPointerOffset =
+ debugFrameOffset + DebugFrame::offsetOfStackResultsPointer();
+ masm.storePtr(target, Address(masm.getStackPointer(),
+ debugStackResultsPointerOffset));
+ }
+ continue;
+ }
+ if (!i->argInRegister()) {
+ continue;
+ }
+ Local& l = localInfo_[args.naturalIndex(i.index())];
+ switch (i.mirType()) {
+ case MIRType::Int32:
+ fr.storeLocalI32(RegI32(i->gpr()), l);
+ break;
+ case MIRType::Int64:
+ fr.storeLocalI64(RegI64(i->gpr64()), l);
+ break;
+ case MIRType::RefOrNull: {
+ DebugOnly<uint32_t> offs = fr.localOffsetFromSp(l);
+ MOZ_ASSERT(0 == (offs % sizeof(void*)));
+ fr.storeLocalPtr(RegPtr(i->gpr()), l);
+ // We should have just visited this local in the preceding loop.
+ MOZ_ASSERT(stackMapGenerator_.machineStackTracker.isGCPointer(
+ offs / sizeof(void*)));
+ break;
+ }
+ case MIRType::Double:
+ fr.storeLocalF64(RegF64(i->fpu()), l);
+ break;
+ case MIRType::Float32:
+ fr.storeLocalF32(RegF32(i->fpu()), l);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ fr.storeLocalV128(RegV128(i->fpu()), l);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Function argument type");
+ }
+ }
+
+ fr.zeroLocals(&ra);
+ fr.storeTlsPtr(WasmTlsReg);
+
+ if (compilerEnv_.debugEnabled()) {
+ insertBreakablePoint(CallSiteDesc::EnterFrame);
+ if (!createStackMap("debug: breakable point")) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen,
+ "# beginFunction: enter body with masm.framePushed = %u",
+ masm.framePushed());
+ MOZ_ASSERT(stackMapGenerator_.framePushedAtEntryToBody.isNothing());
+ stackMapGenerator_.framePushedAtEntryToBody.emplace(masm.framePushed());
+
+ return true;
+ }
+
+ void popStackReturnValues(const ResultType& resultType) {
+ uint32_t bytes = ABIResultIter::MeasureStackBytes(resultType);
+ if (bytes == 0) {
+ return;
+ }
+ Register target = ABINonArgReturnReg0;
+ Register temp = ABINonArgReturnReg1;
+ fr.loadIncomingStackResultAreaPtr(RegPtr(target));
+ fr.popStackResultsToMemory(target, bytes, temp);
+ }
+
+ void saveRegisterReturnValues(const ResultType& resultType) {
+ MOZ_ASSERT(compilerEnv_.debugEnabled());
+ size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+ size_t registerResultIdx = 0;
+ for (ABIResultIter i(resultType); !i.done(); i.next()) {
+ const ABIResult result = i.cur();
+ if (!result.inRegister()) {
+#ifdef DEBUG
+ for (i.next(); !i.done(); i.next()) {
+ MOZ_ASSERT(!i.cur().inRegister());
+ }
+#endif
+ break;
+ }
+
+ size_t resultOffset =
+ DebugFrame::offsetOfRegisterResult(registerResultIdx);
+ Address dest(masm.getStackPointer(), debugFrameOffset + resultOffset);
+ switch (result.type().kind()) {
+ case ValType::I32:
+ masm.store32(RegI32(result.gpr()), dest);
+ break;
+ case ValType::I64:
+ masm.store64(RegI64(result.gpr64()), dest);
+ break;
+ case ValType::F64:
+ masm.storeDouble(RegF64(result.fpr()), dest);
+ break;
+ case ValType::F32:
+ masm.storeFloat32(RegF32(result.fpr()), dest);
+ break;
+ case ValType::Ref: {
+ uint32_t flag =
+ DebugFrame::hasSpilledRegisterRefResultBitMask(registerResultIdx);
+ // Tell Instance::traceFrame that we have a pointer to trace.
+ masm.or32(Imm32(flag),
+ Address(masm.getStackPointer(),
+ debugFrameOffset + DebugFrame::offsetOfFlags()));
+ masm.storePtr(RegPtr(result.gpr()), dest);
+ break;
+ }
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ masm.storeUnalignedSimd128(RegV128(result.fpr()), dest);
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ registerResultIdx++;
+ }
+ }
+
+ void restoreRegisterReturnValues(const ResultType& resultType) {
+ MOZ_ASSERT(compilerEnv_.debugEnabled());
+ size_t debugFrameOffset = masm.framePushed() - DebugFrame::offsetOfFrame();
+ size_t registerResultIdx = 0;
+ for (ABIResultIter i(resultType); !i.done(); i.next()) {
+ const ABIResult result = i.cur();
+ if (!result.inRegister()) {
+#ifdef DEBUG
+ for (i.next(); !i.done(); i.next()) {
+ MOZ_ASSERT(!i.cur().inRegister());
+ }
+#endif
+ break;
+ }
+ size_t resultOffset =
+ DebugFrame::offsetOfRegisterResult(registerResultIdx++);
+ Address src(masm.getStackPointer(), debugFrameOffset + resultOffset);
+ switch (result.type().kind()) {
+ case ValType::I32:
+ masm.load32(src, RegI32(result.gpr()));
+ break;
+ case ValType::I64:
+ masm.load64(src, RegI64(result.gpr64()));
+ break;
+ case ValType::F64:
+ masm.loadDouble(src, RegF64(result.fpr()));
+ break;
+ case ValType::F32:
+ masm.loadFloat32(src, RegF32(result.fpr()));
+ break;
+ case ValType::Ref:
+ masm.loadPtr(src, RegPtr(result.gpr()));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ masm.loadUnalignedSimd128(src, RegV128(result.fpr()));
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ }
+ }
+
+ [[nodiscard]] bool endFunction() {
+ JitSpew(JitSpew_Codegen, "# endFunction: start of function epilogue");
+
+ // The body never falls through to this point; it always branches to
+ // returnLabel_. A breakpoint here catches any accidental fallthrough.
+ masm.breakpoint();
+
+ // Patch the add in the prologue so that it checks against the correct
+ // frame size. Flush the constant pool in case it needs to be patched.
+ masm.flush();
+
+ // Precondition for patching.
+ if (masm.oom()) {
+ return false;
+ }
+
+ fr.patchCheckStack();
+
+ masm.bind(&returnLabel_);
+
+ ResultType resultType(ResultType::Vector(funcType().results()));
+
+ popStackReturnValues(resultType);
+
+ if (compilerEnv_.debugEnabled()) {
+ // Store the register return values into the DebugFrame and reload them
+ // afterwards, so that they survive the debug trap (which may clobber
+ // registers) and can be inspected and/or modified by it.
+ saveRegisterReturnValues(resultType);
+ insertBreakablePoint(CallSiteDesc::Breakpoint);
+ if (!createStackMap("debug: breakpoint")) {
+ return false;
+ }
+ insertBreakablePoint(CallSiteDesc::LeaveFrame);
+ if (!createStackMap("debug: leave frame")) {
+ return false;
+ }
+ restoreRegisterReturnValues(resultType);
+ }
+
+ // To satisfy the Tls extent invariant we need to reload WasmTlsReg
+ // because the baseline compiler can clobber it.
+ fr.loadTlsPtr(WasmTlsReg);
+ GenerateFunctionEpilogue(masm, fr.fixedAllocSize(), &offsets_);
+
+#if defined(JS_ION_PERF)
+ // FIXME - profiling code missing. No bug for this.
+
+ // Note the end of the inline code and start of the OOL code.
+ // gen->perfSpewer().noteEndInlineCode(masm);
+#endif
+
+ JitSpew(JitSpew_Codegen, "# endFunction: end of function epilogue");
+ JitSpew(JitSpew_Codegen, "# endFunction: start of OOL code");
+ if (!generateOutOfLineCode()) {
+ return false;
+ }
+
+ offsets_.end = masm.currentOffset();
+
+ if (!fr.checkStackHeight()) {
+ return false;
+ }
+
+ JitSpew(JitSpew_Codegen, "# endFunction: end of OOL code for index %d",
+ (int)func_.index);
+ return !masm.oom();
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Calls.
+
+ struct FunctionCall {
+ explicit FunctionCall(uint32_t lineOrBytecode)
+ : lineOrBytecode(lineOrBytecode),
+ isInterModule(false),
+ usesSystemAbi(false),
+#ifdef JS_CODEGEN_ARM
+ hardFP(true),
+#endif
+ frameAlignAdjustment(0),
+ stackArgAreaSize(0) {
+ }
+
+ uint32_t lineOrBytecode;
+ WasmABIArgGenerator abi;
+ bool isInterModule;
+ bool usesSystemAbi;
+#ifdef JS_CODEGEN_ARM
+ bool hardFP;
+#endif
+ size_t frameAlignAdjustment;
+ size_t stackArgAreaSize;
+ };
+
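+ // Rough calling protocol (as used by the emitters elsewhere in this file):
+ // beginCall(), then startCallArgs() with the unaligned outgoing-argument
+ // area size, then passArg() once per argument, then the call instruction
+ // itself, and finally endCall(), which frees the argument area and, for
+ // inter-module or system-ABI calls, restores Tls and the pinned registers.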
+ void beginCall(FunctionCall& call, UseABI useABI, InterModule interModule) {
+ MOZ_ASSERT_IF(useABI == UseABI::Builtin, interModule == InterModule::False);
+
+ call.isInterModule = interModule == InterModule::True;
+ call.usesSystemAbi = useABI == UseABI::System;
+
+ if (call.usesSystemAbi) {
+ // Call-outs need to use the appropriate system ABI.
+#if defined(JS_CODEGEN_ARM)
+ call.hardFP = UseHardFpABI();
+ call.abi.setUseHardFp(call.hardFP);
+#elif defined(JS_CODEGEN_MIPS32)
+ call.abi.enforceO32ABI();
+#endif
+ } else {
+#if defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(call.hardFP,
+ "All private ABIs pass FP arguments in registers");
+#endif
+ }
+
+ // Use masm.framePushed() because the value we want here does not depend
+ // on the height of the frame's stack area, but on the actual size of the
+ // allocated frame.
+ call.frameAlignAdjustment = ComputeByteAlignment(
+ masm.framePushed() + sizeof(Frame), JitStackAlignment);
+ }
+
+ void endCall(FunctionCall& call, size_t stackSpace) {
+ size_t adjustment = call.stackArgAreaSize + call.frameAlignAdjustment;
+ fr.freeArgAreaAndPopBytes(adjustment, stackSpace);
+
+ MOZ_ASSERT(
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.isSome());
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.reset();
+
+ if (call.isInterModule) {
+ fr.loadTlsPtr(WasmTlsReg);
+ masm.loadWasmPinnedRegsFromTls();
+ masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
+ } else if (call.usesSystemAbi) {
+ // On x86 there are no pinned registers, so don't waste time
+ // reloading the Tls.
+#ifndef JS_CODEGEN_X86
+ fr.loadTlsPtr(WasmTlsReg);
+ masm.loadWasmPinnedRegsFromTls();
+#endif
+ }
+ }
+
+ void startCallArgs(size_t stackArgAreaSizeUnaligned, FunctionCall* call) {
+ size_t stackArgAreaSizeAligned =
+ AlignStackArgAreaSize(stackArgAreaSizeUnaligned);
+ MOZ_ASSERT(stackArgAreaSizeUnaligned <= stackArgAreaSizeAligned);
+
+ // Record the masm.framePushed() value at this point, before we push args
+ // for the call, but including the alignment space placed above the args.
+ // This defines the lower limit of the stackmap that will be created for
+ // this call.
+ MOZ_ASSERT(
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.isNothing());
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.emplace(
+ // However much we've pushed so far
+ masm.framePushed() +
+ // Extra space we'll push to get the frame aligned
+ call->frameAlignAdjustment +
+ // Extra space we'll push to get the outbound arg area 16-aligned
+ (stackArgAreaSizeAligned - stackArgAreaSizeUnaligned));
+
+ call->stackArgAreaSize = stackArgAreaSizeAligned;
+
+ size_t adjustment = call->stackArgAreaSize + call->frameAlignAdjustment;
+ fr.allocArgArea(adjustment);
+ }
+
+ const ABIArg reservePointerArgument(FunctionCall* call) {
+ return call->abi.next(MIRType::Pointer);
+ }
+
+ // TODO / OPTIMIZE (Bug 1316821): Note passArg is used only in one place.
+ // (Or it was, until Luke wandered through, but that can be fixed again.)
+ // I'm not saying we should manually inline it, but we could hoist the
+ // dispatch into the caller and have type-specific implementations of
+ // passArg: passArgI32(), etc. Then those might be inlined, at least in PGO
+ // builds.
+ //
+ // The bulk of the work here (60%) is in the next() call, though.
+ //
+ // Notably, since next() is so expensive, StackArgAreaSizeUnaligned()
+ // becomes expensive too.
+ //
+ // Somehow there could be a trick here where the sequence of argument types
+ // (read from the input stream) leads to a cached entry for
+ // StackArgAreaSizeUnaligned() and for how to pass arguments...
+ //
+  // But at least we could reduce the cost of StackArgAreaSizeUnaligned() by
+  // first reading the argument types into a (reusable) vector; then we would
+  // have the outgoing size at low cost, and could pass args based on the info
+  // we read.
+
+ void passArg(ValType type, const Stk& arg, FunctionCall* call) {
+ switch (type.kind()) {
+ case ValType::I32: {
+ ABIArg argLoc = call->abi.next(MIRType::Int32);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchI32 scratch(*this);
+ loadI32(arg, scratch);
+ masm.store32(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ } else {
+ loadI32(arg, RegI32(argLoc.gpr()));
+ }
+ break;
+ }
+ case ValType::I64: {
+ ABIArg argLoc = call->abi.next(MIRType::Int64);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchI32 scratch(*this);
+#ifdef JS_PUNBOX64
+ loadI64(arg, fromI32(scratch));
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+#else
+ loadI64Low(arg, scratch);
+ masm.store32(scratch, LowWord(Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase())));
+ loadI64High(arg, scratch);
+ masm.store32(scratch, HighWord(Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase())));
+#endif
+ } else {
+ loadI64(arg, RegI64(argLoc.gpr64()));
+ }
+ break;
+ }
+ case ValType::V128: {
+#ifdef ENABLE_WASM_SIMD
+ ABIArg argLoc = call->abi.next(MIRType::Simd128);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchV128 scratch(*this);
+ loadV128(arg, scratch);
+ masm.storeUnalignedSimd128(
+ (RegV128)scratch,
+ Address(masm.getStackPointer(), argLoc.offsetFromArgBase()));
+ break;
+ }
+ case ABIArg::GPR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+ case ABIArg::FPU: {
+ loadV128(arg, RegV128(argLoc.fpu()));
+ break;
+ }
+# if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+# endif
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ case ValType::F64: {
+ ABIArg argLoc = call->abi.next(MIRType::Double);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchF64 scratch(*this);
+ loadF64(arg, scratch);
+ masm.storeDouble(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ break;
+ }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+# if defined(JS_CODEGEN_ARM)
+ ScratchF64 scratch(*this);
+ loadF64(arg, scratch);
+ masm.ma_vxfer(scratch, argLoc.evenGpr(), argLoc.oddGpr());
+ break;
+# elif defined(JS_CODEGEN_MIPS32)
+ ScratchF64 scratch(*this);
+ loadF64(arg, scratch);
+ MOZ_ASSERT(MOZ_LITTLE_ENDIAN());
+ masm.moveFromDoubleLo(scratch, argLoc.evenGpr());
+ masm.moveFromDoubleHi(scratch, argLoc.oddGpr());
+ break;
+# else
+ MOZ_CRASH("BaseCompiler platform hook: passArg F64 pair");
+# endif
+ }
+#endif
+ case ABIArg::FPU: {
+ loadF64(arg, RegF64(argLoc.fpu()));
+ break;
+ }
+ case ABIArg::GPR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ break;
+ }
+ case ValType::F32: {
+ ABIArg argLoc = call->abi.next(MIRType::Float32);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchF32 scratch(*this);
+ loadF32(arg, scratch);
+ masm.storeFloat32(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ break;
+ }
+ case ABIArg::GPR: {
+ ScratchF32 scratch(*this);
+ loadF32(arg, scratch);
+ masm.moveFloat32ToGPR(scratch, argLoc.gpr());
+ break;
+ }
+ case ABIArg::FPU: {
+ loadF32(arg, RegF32(argLoc.fpu()));
+ break;
+ }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+#endif
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ break;
+ }
+ case ValType::Ref: {
+ ABIArg argLoc = call->abi.next(MIRType::RefOrNull);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchPtr scratch(*this);
+ loadRef(arg, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ } else {
+ loadRef(arg, RegPtr(argLoc.gpr()));
+ }
+ break;
+ }
+ }
+ }
+
+ CodeOffset callDefinition(uint32_t funcIndex, const FunctionCall& call) {
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
+ return masm.call(desc, funcIndex);
+ }
+
+ CodeOffset callSymbolic(SymbolicAddress callee, const FunctionCall& call) {
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
+ return masm.call(desc, callee);
+ }
+
+ // Precondition: sync()
+
+ CodeOffset callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
+ const Stk& indexVal, const FunctionCall& call) {
+ const TypeIdDesc& funcTypeId = moduleEnv_.typeIds[funcTypeIndex];
+ MOZ_ASSERT(funcTypeId.kind() != TypeIdDescKind::None);
+
+ const TableDesc& table = moduleEnv_.tables[tableIndex];
+
+ loadI32(indexVal, RegI32(WasmTableCallIndexReg));
+
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Dynamic);
+ CalleeDesc callee = CalleeDesc::wasmTable(table, funcTypeId);
+ return masm.wasmCallIndirect(desc, callee, NeedsBoundsCheck(true));
+ }
+
+ // Precondition: sync()
+
+ CodeOffset callImport(unsigned globalDataOffset, const FunctionCall& call) {
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Dynamic);
+ CalleeDesc callee = CalleeDesc::import(globalDataOffset);
+ return masm.wasmCallImport(desc, callee);
+ }
+
+ CodeOffset builtinCall(SymbolicAddress builtin, const FunctionCall& call) {
+ return callSymbolic(builtin, call);
+ }
+
+ CodeOffset builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
+ const ABIArg& instanceArg,
+ const FunctionCall& call) {
+ // Builtin method calls assume the TLS register has been set.
+ fr.loadTlsPtr(WasmTlsReg);
+
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
+ return masm.wasmCallBuiltinInstanceMethod(
+ desc, instanceArg, builtin.identity, builtin.failureMode);
+ }
+
+ MOZ_MUST_USE bool pushCallResults(const FunctionCall& call, ResultType type,
+ const StackResultsLoc& loc) {
+#if defined(JS_CODEGEN_ARM)
+    // pushResults currently bypasses the special-case code in
+    // captureReturnedFxx() that converts GPR results to FPR results for
+    // systemABI+softFP.  If we ever start using that combination for calls we
+    // will need more code here.  This assert is stronger than we need (we only
+    // care about results in return registers), but that's OK.
+ MOZ_ASSERT(!call.usesSystemAbi || call.hardFP);
+#endif
+ return pushResults(type, fr.stackResultsBase(loc.bytes()));
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Sundry low-level code generators.
+
+  // The compiler depends on moveImm32() clearing the high bits of a 64-bit
+  // register on 64-bit systems, except on MIPS64, where the high bits are
+  // sign-extended from the lower bits.
+
+ void moveImm32(int32_t v, RegI32 dest) { masm.move32(Imm32(v), dest); }
+
+ void moveImm64(int64_t v, RegI64 dest) { masm.move64(Imm64(v), dest); }
+
+ void moveImmRef(intptr_t v, RegPtr dest) { masm.movePtr(ImmWord(v), dest); }
+
+ void moveImmF32(float f, RegF32 dest) { masm.loadConstantFloat32(f, dest); }
+
+ void moveImmF64(double d, RegF64 dest) { masm.loadConstantDouble(d, dest); }
+
+ [[nodiscard]] bool addInterruptCheck() {
+ ScratchI32 tmp(*this);
+ fr.loadTlsPtr(tmp);
+ masm.wasmInterruptCheck(tmp, bytecodeOffset());
+ return createStackMap("addInterruptCheck");
+ }
+
+ void jumpTable(const LabelVector& labels, Label* theTable) {
+ // Flush constant pools to ensure that the table is never interrupted by
+ // constant pool entries.
+ masm.flush();
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+    // Prevent nop sequences from appearing in the jump table.
+ AutoForbidNops afn(&masm);
+#endif
+ masm.bind(theTable);
+
+ for (uint32_t i = 0; i < labels.length(); i++) {
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(labels[i].offset());
+ masm.addCodeLabel(cl);
+ }
+ }
+
+ void tableSwitch(Label* theTable, RegI32 switchValue, Label* dispatchCode) {
+ masm.bind(dispatchCode);
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ ScratchI32 scratch(*this);
+ CodeLabel tableCl;
+
+ masm.mov(&tableCl, scratch);
+
+ tableCl.target()->bind(theTable->offset());
+ masm.addCodeLabel(tableCl);
+
+ masm.jmp(Operand(scratch, switchValue, ScalePointer));
+#elif defined(JS_CODEGEN_ARM)
+    // Flush constant pools: the offset must reflect the distance from the MOV
+    // to the start of the table; as the address of the MOV is given by the
+    // label, nothing may come between the bind() and the ma_mov().
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 5);
+
+ ScratchI32 scratch(*this);
+
+ // Compute the offset from the ma_mov instruction to the jump table.
+ Label here;
+ masm.bind(&here);
+ uint32_t offset = here.offset() - theTable->offset();
+
+ // Read PC+8
+ masm.ma_mov(pc, scratch);
+
+ // ARM scratch register is required by ma_sub.
+ ScratchRegisterScope arm_scratch(*this);
+
+ // Compute the absolute table base pointer into `scratch`, offset by 8
+ // to account for the fact that ma_mov read PC+8.
+ masm.ma_sub(Imm32(offset + 8), scratch, arm_scratch);
+
+ // Jump indirect via table element.
+ masm.ma_ldr(DTRAddr(scratch, DtrRegImmShift(switchValue, LSL, 2)), pc,
+ Offset, Assembler::Always);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ ScratchI32 scratch(*this);
+ CodeLabel tableCl;
+
+ masm.ma_li(scratch, &tableCl);
+
+ tableCl.target()->bind(theTable->offset());
+ masm.addCodeLabel(tableCl);
+
+ masm.branchToComputedAddress(BaseIndex(scratch, switchValue, ScalePointer));
+#elif defined(JS_CODEGEN_ARM64)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 4);
+
+ ScratchI32 scratch(*this);
+
+ ARMRegister s(scratch, 64);
+ ARMRegister v(switchValue, 64);
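+    // Each table entry is an 8-byte code pointer; compute the entry's
+    // address, load it, and branch through it.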
+ masm.Adr(s, theTable);
+ masm.Add(s, s, Operand(v, vixl::LSL, 3));
+ masm.Ldr(s, MemOperand(s, 0));
+ masm.Br(s);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: tableSwitch");
+#endif
+ }
+
+ RegI32 captureReturnedI32() {
+ RegI32 r = RegI32(ReturnReg);
+ MOZ_ASSERT(isAvailableI32(r));
+ needI32(r);
+#if defined(JS_CODEGEN_X64)
+ if (JitOptions.spectreIndexMasking) {
+ masm.movl(r, r);
+ }
+#endif
+ return r;
+ }
+
+ RegI64 captureReturnedI64() {
+ RegI64 r = RegI64(ReturnReg64);
+ MOZ_ASSERT(isAvailableI64(r));
+ needI64(r);
+ return r;
+ }
+
+ RegF32 captureReturnedF32(const FunctionCall& call) {
+ RegF32 r = RegF32(ReturnFloat32Reg);
+ MOZ_ASSERT(isAvailableF32(r));
+ needF32(r);
+#if defined(JS_CODEGEN_ARM)
+ if (call.usesSystemAbi && !call.hardFP) {
+ masm.ma_vxfer(ReturnReg, r);
+ }
+#endif
+ return r;
+ }
+
+ RegF64 captureReturnedF64(const FunctionCall& call) {
+ RegF64 r = RegF64(ReturnDoubleReg);
+ MOZ_ASSERT(isAvailableF64(r));
+ needF64(r);
+#if defined(JS_CODEGEN_ARM)
+ if (call.usesSystemAbi && !call.hardFP) {
+ masm.ma_vxfer(ReturnReg64.low, ReturnReg64.high, r);
+ }
+#endif
+ return r;
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ RegV128 captureReturnedV128(const FunctionCall& call) {
+ RegV128 r = RegV128(ReturnSimd128Reg);
+ MOZ_ASSERT(isAvailableV128(r));
+ needV128(r);
+ return r;
+ }
+#endif
+
+ RegPtr captureReturnedRef() {
+ RegPtr r = RegPtr(ReturnReg);
+ MOZ_ASSERT(isAvailableRef(r));
+ needRef(r);
+ return r;
+ }
+
+ void checkDivideByZeroI32(RegI32 rhs) {
+ Label nonZero;
+ masm.branchTest32(Assembler::NonZero, rhs, rhs, &nonZero);
+ trap(Trap::IntegerDivideByZero);
+ masm.bind(&nonZero);
+ }
+
+ void checkDivideByZeroI64(RegI64 r) {
+ Label nonZero;
+ ScratchI32 scratch(*this);
+ masm.branchTest64(Assembler::NonZero, r, r, scratch, &nonZero);
+ trap(Trap::IntegerDivideByZero);
+ masm.bind(&nonZero);
+ }
+
+ void checkDivideSignedOverflowI32(RegI32 rhs, RegI32 srcDest, Label* done,
+ bool zeroOnOverflow) {
+ Label notMin;
+ masm.branch32(Assembler::NotEqual, srcDest, Imm32(INT32_MIN), &notMin);
+ if (zeroOnOverflow) {
+ masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notMin);
+ moveImm32(0, srcDest);
+ masm.jump(done);
+ } else {
+ masm.branch32(Assembler::NotEqual, rhs, Imm32(-1), &notMin);
+ trap(Trap::IntegerOverflow);
+ }
+ masm.bind(&notMin);
+ }
+
+ void checkDivideSignedOverflowI64(RegI64 rhs, RegI64 srcDest, Label* done,
+ bool zeroOnOverflow) {
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, srcDest, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notmin);
+ if (zeroOnOverflow) {
+ masm.xor64(srcDest, srcDest);
+ masm.jump(done);
+ } else {
+ trap(Trap::IntegerOverflow);
+ }
+ masm.bind(&notmin);
+ }
+
+#ifndef RABALDR_INT_DIV_I64_CALLOUT
+ void quotientI64(RegI64 rhs, RegI64 srcDest, RegI64 reserved,
+ IsUnsigned isUnsigned, bool isConst, int64_t c) {
+ Label done;
+
+ if (!isConst || c == 0) {
+ checkDivideByZeroI64(rhs);
+ }
+
+ if (!isUnsigned && (!isConst || c == -1)) {
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(false));
+ }
+
+# if defined(JS_CODEGEN_X64)
+ // The caller must set up the following situation.
+ MOZ_ASSERT(srcDest.reg == rax);
+ MOZ_ASSERT(reserved == specific_.rdx);
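+    // For unsigned division, rdx must be zeroed; for signed division, cqo
+    // sign-extends rax into rdx:rax before the idivq.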
+ if (isUnsigned) {
+ masm.xorq(rdx, rdx);
+ masm.udivq(rhs.reg);
+ } else {
+ masm.cqo();
+ masm.idivq(rhs.reg);
+ }
+# elif defined(JS_CODEGEN_MIPS64)
+ if (isUnsigned) {
+ masm.as_ddivu(srcDest.reg, rhs.reg);
+ } else {
+ masm.as_ddiv(srcDest.reg, rhs.reg);
+ }
+ masm.as_mflo(srcDest.reg);
+# elif defined(JS_CODEGEN_ARM64)
+ ARMRegister sd(srcDest.reg, 64);
+ ARMRegister r(rhs.reg, 64);
+ if (isUnsigned) {
+ masm.Udiv(sd, sd, r);
+ } else {
+ masm.Sdiv(sd, sd, r);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: quotientI64");
+# endif
+ masm.bind(&done);
+ }
+
+ void remainderI64(RegI64 rhs, RegI64 srcDest, RegI64 reserved,
+ IsUnsigned isUnsigned, bool isConst, int64_t c) {
+ Label done;
+
+ if (!isConst || c == 0) {
+ checkDivideByZeroI64(rhs);
+ }
+
+ if (!isUnsigned && (!isConst || c == -1)) {
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(true));
+ }
+
+# if defined(JS_CODEGEN_X64)
+ // The caller must set up the following situation.
+ MOZ_ASSERT(srcDest.reg == rax);
+ MOZ_ASSERT(reserved == specific_.rdx);
+
+ if (isUnsigned) {
+ masm.xorq(rdx, rdx);
+ masm.udivq(rhs.reg);
+ } else {
+ masm.cqo();
+ masm.idivq(rhs.reg);
+ }
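+    // The remainder is left in rdx; move it to rax, the result register.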
+ masm.movq(rdx, rax);
+# elif defined(JS_CODEGEN_MIPS64)
+ if (isUnsigned) {
+ masm.as_ddivu(srcDest.reg, rhs.reg);
+ } else {
+ masm.as_ddiv(srcDest.reg, rhs.reg);
+ }
+ masm.as_mfhi(srcDest.reg);
+# elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(reserved.isInvalid());
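+    // There is no integer remainder instruction on ARM64; compute
+    // srcDest - (srcDest / rhs) * rhs with a divide, a multiply, and a
+    // subtract.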
+ ARMRegister sd(srcDest.reg, 64);
+ ARMRegister r(rhs.reg, 64);
+ ScratchI32 temp(*this);
+ ARMRegister t(temp, 64);
+ if (isUnsigned) {
+ masm.Udiv(t, sd, r);
+ } else {
+ masm.Sdiv(t, sd, r);
+ }
+ masm.Mul(t, t, r);
+ masm.Sub(sd, sd, t);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: remainderI64");
+# endif
+ masm.bind(&done);
+ }
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+ RegI32 needRotate64Temp() {
+#if defined(JS_CODEGEN_X86)
+ return needI32();
+#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ return RegI32::Invalid();
+#else
+ MOZ_CRASH("BaseCompiler platform hook: needRotate64Temp");
+#endif
+ }
+
+ void maskShiftCount32(RegI32 r) {
+#if defined(JS_CODEGEN_ARM)
+ masm.and32(Imm32(31), r);
+#endif
+ }
+
+ RegI32 needPopcnt32Temp() {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ return AssemblerX86Shared::HasPOPCNT() ? RegI32::Invalid() : needI32();
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ return needI32();
+#else
+ MOZ_CRASH("BaseCompiler platform hook: needPopcnt32Temp");
+#endif
+ }
+
+ RegI32 needPopcnt64Temp() {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ return AssemblerX86Shared::HasPOPCNT() ? RegI32::Invalid() : needI32();
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ return needI32();
+#else
+ MOZ_CRASH("BaseCompiler platform hook: needPopcnt64Temp");
+#endif
+ }
+
+ class OutOfLineTruncateCheckF32OrF64ToI32 : public OutOfLineCode {
+ AnyReg src;
+ RegI32 dest;
+ TruncFlags flags;
+ BytecodeOffset off;
+
+ public:
+ OutOfLineTruncateCheckF32OrF64ToI32(AnyReg src, RegI32 dest,
+ TruncFlags flags, BytecodeOffset off)
+ : src(src), dest(dest), flags(flags), off(off) {}
+
+ virtual void generate(MacroAssembler* masm) override {
+ if (src.tag == AnyReg::F32) {
+ masm->oolWasmTruncateCheckF32ToI32(src.f32(), dest, flags, off,
+ rejoin());
+ } else if (src.tag == AnyReg::F64) {
+ masm->oolWasmTruncateCheckF64ToI32(src.f64(), dest, flags, off,
+ rejoin());
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ }
+ };
+
+ [[nodiscard]] bool truncateF32ToI32(RegF32 src, RegI32 dest,
+ TruncFlags flags) {
+ BytecodeOffset off = bytecodeOffset();
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI32(
+ AnyReg(src), dest, flags, off));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateFloat32ToUInt32(src, dest, isSaturating, ool->entry());
+ } else {
+ masm.wasmTruncateFloat32ToInt32(src, dest, isSaturating, ool->entry());
+ }
+ masm.bind(ool->rejoin());
+ return true;
+ }
+
+ [[nodiscard]] bool truncateF64ToI32(RegF64 src, RegI32 dest,
+ TruncFlags flags) {
+ BytecodeOffset off = bytecodeOffset();
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI32(
+ AnyReg(src), dest, flags, off));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateDoubleToUInt32(src, dest, isSaturating, ool->entry());
+ } else {
+ masm.wasmTruncateDoubleToInt32(src, dest, isSaturating, ool->entry());
+ }
+ masm.bind(ool->rejoin());
+ return true;
+ }
+
+ class OutOfLineTruncateCheckF32OrF64ToI64 : public OutOfLineCode {
+ AnyReg src;
+ RegI64 dest;
+ TruncFlags flags;
+ BytecodeOffset off;
+
+ public:
+ OutOfLineTruncateCheckF32OrF64ToI64(AnyReg src, RegI64 dest,
+ TruncFlags flags, BytecodeOffset off)
+ : src(src), dest(dest), flags(flags), off(off) {}
+
+ virtual void generate(MacroAssembler* masm) override {
+ if (src.tag == AnyReg::F32) {
+ masm->oolWasmTruncateCheckF32ToI64(src.f32(), dest, flags, off,
+ rejoin());
+ } else if (src.tag == AnyReg::F64) {
+ masm->oolWasmTruncateCheckF64ToI64(src.f64(), dest, flags, off,
+ rejoin());
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+ }
+ };
+
+#ifndef RABALDR_FLOAT_TO_I64_CALLOUT
+ [[nodiscard]] RegF64 needTempForFloatingToI64(TruncFlags flags) {
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (flags & TRUNC_UNSIGNED) {
+ return needF64();
+ }
+# endif
+ return RegF64::Invalid();
+ }
+
+ [[nodiscard]] bool truncateF32ToI64(RegF32 src, RegI64 dest, TruncFlags flags,
+ RegF64 temp) {
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(
+ AnyReg(src), dest, flags, bytecodeOffset()));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateFloat32ToUInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool truncateF64ToI64(RegF64 src, RegI64 dest, TruncFlags flags,
+ RegF64 temp) {
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(
+ AnyReg(src), dest, flags, bytecodeOffset()));
+ if (!ool) {
+ return false;
+ }
+ bool isSaturating = flags & TRUNC_SATURATING;
+ if (flags & TRUNC_UNSIGNED) {
+ masm.wasmTruncateDoubleToUInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ } else {
+ masm.wasmTruncateDoubleToInt64(src, dest, isSaturating, ool->entry(),
+ ool->rejoin(), temp);
+ }
+ return true;
+ }
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+ RegI32 needConvertI64ToFloatTemp(ValType to, bool isUnsigned) {
+ bool needs = false;
+ if (to == ValType::F64) {
+ needs = isUnsigned && masm.convertUInt64ToDoubleNeedsTemp();
+ } else {
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ needs = true;
+# endif
+ }
+ return needs ? needI32() : RegI32::Invalid();
+ }
+
+ void convertI64ToF32(RegI64 src, bool isUnsigned, RegF32 dest, RegI32 temp) {
+ if (isUnsigned) {
+ masm.convertUInt64ToFloat32(src, dest, temp);
+ } else {
+ masm.convertInt64ToFloat32(src, dest);
+ }
+ }
+
+ void convertI64ToF64(RegI64 src, bool isUnsigned, RegF64 dest, RegI32 temp) {
+ if (isUnsigned) {
+ masm.convertUInt64ToDouble(src, dest, temp);
+ } else {
+ masm.convertInt64ToDouble(src, dest);
+ }
+ }
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+ void cmp64Set(Assembler::Condition cond, RegI64 lhs, RegI64 rhs,
+ RegI32 dest) {
+#if defined(JS_PUNBOX64)
+ masm.cmpPtrSet(cond, lhs.reg, rhs.reg, dest);
+#elif defined(JS_CODEGEN_MIPS32)
+ masm.cmp64Set(cond, lhs, rhs, dest);
+#else
+    // TODO / OPTIMIZE (Bug 1316822): This is pretty branchy; we should be
+    // able to do better.
+ Label done, condTrue;
+ masm.branch64(cond, lhs, rhs, &condTrue);
+ moveImm32(0, dest);
+ masm.jump(&done);
+ masm.bind(&condTrue);
+ moveImm32(1, dest);
+ masm.bind(&done);
+#endif
+ }
+
+ void eqz64(RegI64 src, RegI32 dest) {
+#ifdef JS_PUNBOX64
+ masm.cmpPtrSet(Assembler::Equal, src.reg, ImmWord(0), dest);
+#else
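+    // On 32-bit platforms, OR the two halves together; the 64-bit value is
+    // zero exactly when the combined word is zero.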
+ masm.or32(src.high, src.low);
+ masm.cmp32Set(Assembler::Equal, src.low, Imm32(0), dest);
+#endif
+ }
+
+ [[nodiscard]] bool supportsRoundInstruction(RoundingMode mode) {
+ return Assembler::HasRoundInstruction(mode);
+ }
+
+ void roundF32(RoundingMode roundingMode, RegF32 f0) {
+ masm.nearbyIntFloat32(roundingMode, f0, f0);
+ }
+
+ void roundF64(RoundingMode roundingMode, RegF64 f0) {
+ masm.nearbyIntDouble(roundingMode, f0, f0);
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Global variable access.
+
+ Address addressOfGlobalVar(const GlobalDesc& global, RegI32 tmp) {
+ uint32_t globalToTlsOffset =
+ offsetof(TlsData, globalArea) + global.offset();
+ fr.loadTlsPtr(tmp);
+ if (global.isIndirect()) {
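+      // An indirect global's slot in the global area holds a pointer to the
+      // value, so load that pointer and address the value at offset 0.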
+ masm.loadPtr(Address(tmp, globalToTlsOffset), tmp);
+ return Address(tmp, 0);
+ }
+ return Address(tmp, globalToTlsOffset);
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Heap access.
+
+ void bceCheckLocal(MemoryAccessDesc* access, AccessCheck* check,
+ uint32_t local) {
+ if (local >= sizeof(BCESet) * 8) {
+ return;
+ }
+
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ if ((bceSafe_ & (BCESet(1) << local)) &&
+ access->offset() < offsetGuardLimit) {
+ check->omitBoundsCheck = true;
+ }
+
+ // The local becomes safe even if the offset is beyond the guard limit.
+ bceSafe_ |= (BCESet(1) << local);
+ }
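+
+  // For example, after a bounds-checked access through local 3, bit 3 of
+  // bceSafe_ is set, so a later access through local 3 with an offset below
+  // the guard limit can omit its bounds check; bceLocalIsUpdated() below
+  // clears that bit again when the local is assigned.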
+
+ void bceLocalIsUpdated(uint32_t local) {
+ if (local >= sizeof(BCESet) * 8) {
+ return;
+ }
+
+ bceSafe_ &= ~(BCESet(1) << local);
+ }
+
+ void prepareMemoryAccess(MemoryAccessDesc* access, AccessCheck* check,
+ RegI32 tls, RegI32 ptr) {
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ // Fold offset if necessary for further computations.
+ if (access->offset() >= offsetGuardLimit ||
+ (access->isAtomic() && !check->omitAlignmentCheck &&
+ !check->onlyPointerAlignment)) {
+ Label ok;
+ masm.branchAdd32(Assembler::CarryClear, Imm32(access->offset()), ptr,
+ &ok);
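+      // If the 32-bit add carries, the effective address cannot be in bounds,
+      // so trap.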
+ masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&ok);
+ access->clearOffset();
+ check->onlyPointerAlignment = true;
+ }
+
+ // Alignment check if required.
+
+ if (access->isAtomic() && !check->omitAlignmentCheck) {
+ MOZ_ASSERT(check->onlyPointerAlignment);
+ // We only care about the low pointer bits here.
+ Label ok;
+ masm.branchTest32(Assembler::Zero, ptr, Imm32(access->byteSize() - 1),
+ &ok);
+ masm.wasmTrap(Trap::UnalignedAccess, bytecodeOffset());
+ masm.bind(&ok);
+ }
+
+    // Ensure that no tls register is allocated if we don't need it.
+
+ if (moduleEnv_.hugeMemoryEnabled()) {
+      // We have HeapReg and no bounds checking, and we need to load neither
+      // memoryBase nor boundsCheckLimit from tls.
+ MOZ_ASSERT_IF(check->omitBoundsCheck, tls.isInvalid());
+ }
+#ifdef JS_CODEGEN_ARM
+ // We have HeapReg on ARM and don't need to load the memoryBase from tls.
+ MOZ_ASSERT_IF(check->omitBoundsCheck, tls.isInvalid());
+#endif
+
+ // Bounds check if required.
+
+ if (!moduleEnv_.hugeMemoryEnabled() && !check->omitBoundsCheck) {
+ Label ok;
+ masm.wasmBoundsCheck32(
+ Assembler::Below, ptr,
+ Address(tls, offsetof(TlsData, boundsCheckLimit32)), &ok);
+ masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&ok);
+ }
+ }
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ BaseIndex prepareAtomicMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check, RegI32 tls,
+ RegI32 ptr) {
+ MOZ_ASSERT(needTlsForAccess(*check) == tls.isValid());
+ prepareMemoryAccess(access, check, tls, ptr);
+ return BaseIndex(HeapReg, ptr, TimesOne, access->offset());
+ }
+#elif defined(JS_CODEGEN_X86)
+ // Some consumers depend on the address not retaining tls, as tls may be the
+ // scratch register.
+
+ Address prepareAtomicMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check, RegI32 tls,
+ RegI32 ptr) {
+ MOZ_ASSERT(needTlsForAccess(*check) == tls.isValid());
+ prepareMemoryAccess(access, check, tls, ptr);
+ masm.addPtr(Address(tls, offsetof(TlsData, memoryBase)), ptr);
+ return Address(ptr, access->offset());
+ }
+#else
+ Address prepareAtomicMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check, RegI32 tls,
+ RegI32 ptr) {
+ MOZ_CRASH("BaseCompiler platform hook: prepareAtomicMemoryAccess");
+ }
+#endif
+
+ void computeEffectiveAddress(MemoryAccessDesc* access) {
+ if (access->offset()) {
+ Label ok;
+ RegI32 ptr = popI32();
+ masm.branchAdd32(Assembler::CarryClear, Imm32(access->offset()), ptr,
+ &ok);
+ masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
+ masm.bind(&ok);
+ access->clearOffset();
+ pushI32(ptr);
+ }
+ }
+
+ void needLoadTemps(const MemoryAccessDesc& access, RegI32* temp1,
+ RegI32* temp2, RegI32* temp3) {
+#if defined(JS_CODEGEN_ARM)
+ if (IsUnaligned(access)) {
+ switch (access.type()) {
+ case Scalar::Float64:
+ *temp3 = needI32();
+ [[fallthrough]];
+ case Scalar::Float32:
+ *temp2 = needI32();
+ [[fallthrough]];
+ default:
+ *temp1 = needI32();
+ break;
+ }
+ }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ *temp1 = needI32();
+#endif
+ }
+
+ [[nodiscard]] bool needTlsForAccess(const AccessCheck& check) {
+#if defined(JS_CODEGEN_X86)
+ // x86 requires Tls for memory base
+ return true;
+#else
+ return !moduleEnv_.hugeMemoryEnabled() && !check.omitBoundsCheck;
+#endif
+ }
+
+ // ptr and dest may be the same iff dest is I32.
+ // This may destroy ptr even if ptr and dest are not the same.
+ [[nodiscard]] bool load(MemoryAccessDesc* access, AccessCheck* check,
+ RegI32 tls, RegI32 ptr, AnyReg dest, RegI32 temp1,
+ RegI32 temp2, RegI32 temp3) {
+ prepareMemoryAccess(access, check, tls, ptr);
+
+#if defined(JS_CODEGEN_X64)
+ Operand srcAddr(HeapReg, ptr, TimesOne, access->offset());
+
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, srcAddr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, srcAddr, dest.any());
+ }
+#elif defined(JS_CODEGEN_X86)
+ masm.addPtr(Address(tls, offsetof(TlsData, memoryBase)), ptr);
+ Operand srcAddr(ptr, access->offset());
+
+ if (dest.tag == AnyReg::I64) {
+ MOZ_ASSERT(dest.i64() == specific_.abiReturnRegI64);
+ masm.wasmLoadI64(*access, srcAddr, dest.i64());
+ } else {
+        // For 8-bit loads, this will generate movsbl or movzbl, so
+        // there's no constraint on what the output register may be.
+ masm.wasmLoad(*access, srcAddr, dest.any());
+ }
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ if (IsUnaligned(*access)) {
+ switch (dest.tag) {
+ case AnyReg::I64:
+ masm.wasmUnalignedLoadI64(*access, HeapReg, ptr, ptr, dest.i64(),
+ temp1);
+ break;
+ case AnyReg::F32:
+ masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f32(),
+ temp1, temp2, RegI32::Invalid());
+ break;
+ case AnyReg::F64:
+ masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f64(),
+ temp1, temp2, temp3);
+ break;
+ case AnyReg::I32:
+ masm.wasmUnalignedLoad(*access, HeapReg, ptr, ptr, dest.i32(), temp1);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, HeapReg, ptr, ptr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, HeapReg, ptr, ptr, dest.any());
+ }
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(*access, HeapReg, ptr, dest.i64());
+ } else {
+ masm.wasmLoad(*access, HeapReg, ptr, dest.any());
+ }
+#else
+ MOZ_CRASH("BaseCompiler platform hook: load");
+#endif
+
+ return true;
+ }
+
+ RegI32 needStoreTemp(const MemoryAccessDesc& access, ValType srcType) {
+#if defined(JS_CODEGEN_ARM)
+ if (IsUnaligned(access) && srcType != ValType::I32) {
+ return needI32();
+ }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ return needI32();
+#endif
+ return RegI32::Invalid();
+ }
+
+ // ptr and src must not be the same register.
+ // This may destroy ptr and src.
+ [[nodiscard]] bool store(MemoryAccessDesc* access, AccessCheck* check,
+ RegI32 tls, RegI32 ptr, AnyReg src, RegI32 temp) {
+ prepareMemoryAccess(access, check, tls, ptr);
+
+ // Emit the store
+#if defined(JS_CODEGEN_X64)
+ MOZ_ASSERT(temp.isInvalid());
+ Operand dstAddr(HeapReg, ptr, TimesOne, access->offset());
+
+ masm.wasmStore(*access, src.any(), dstAddr);
+#elif defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(temp.isInvalid());
+ masm.addPtr(Address(tls, offsetof(TlsData, memoryBase)), ptr);
+ Operand dstAddr(ptr, access->offset());
+
+ if (access->type() == Scalar::Int64) {
+ masm.wasmStoreI64(*access, src.i64(), dstAddr);
+ } else {
+ AnyRegister value;
+ ScratchI8 scratch(*this);
+ if (src.tag == AnyReg::I64) {
+ if (access->byteSize() == 1 && !ra.isSingleByteI32(src.i64().low)) {
+ masm.mov(src.i64().low, scratch);
+ value = AnyRegister(scratch);
+ } else {
+ value = AnyRegister(src.i64().low);
+ }
+ } else if (access->byteSize() == 1 && !ra.isSingleByteI32(src.i32())) {
+ masm.mov(src.i32(), scratch);
+ value = AnyRegister(scratch);
+ } else {
+ value = src.any();
+ }
+
+ masm.wasmStore(*access, value, dstAddr);
+ }
+#elif defined(JS_CODEGEN_ARM)
+ if (IsUnaligned(*access)) {
+ switch (src.tag) {
+ case AnyReg::I64:
+ masm.wasmUnalignedStoreI64(*access, src.i64(), HeapReg, ptr, ptr,
+ temp);
+ break;
+ case AnyReg::F32:
+ masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr,
+ temp);
+ break;
+ case AnyReg::F64:
+ masm.wasmUnalignedStoreFP(*access, src.f64(), HeapReg, ptr, ptr,
+ temp);
+ break;
+ case AnyReg::I32:
+ MOZ_ASSERT(temp.isInvalid());
+ masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr, temp);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ MOZ_ASSERT(temp.isInvalid());
+ if (access->type() == Scalar::Int64) {
+ masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
+ } else if (src.tag == AnyReg::I64) {
+ masm.wasmStore(*access, AnyRegister(src.i64().low), HeapReg, ptr, ptr);
+ } else {
+ masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
+ }
+ }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ if (IsUnaligned(*access)) {
+ switch (src.tag) {
+ case AnyReg::I64:
+ masm.wasmUnalignedStoreI64(*access, src.i64(), HeapReg, ptr, ptr,
+ temp);
+ break;
+ case AnyReg::F32:
+ masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr,
+ temp);
+ break;
+ case AnyReg::F64:
+ masm.wasmUnalignedStoreFP(*access, src.f64(), HeapReg, ptr, ptr,
+ temp);
+ break;
+ case AnyReg::I32:
+ masm.wasmUnalignedStore(*access, src.i32(), HeapReg, ptr, ptr, temp);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ } else {
+ if (src.tag == AnyReg::I64) {
+ masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
+ } else {
+ masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
+ }
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(temp.isInvalid());
+ if (access->type() == Scalar::Int64) {
+ masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr);
+ } else {
+ masm.wasmStore(*access, src.any(), HeapReg, ptr);
+ }
+#else
+ MOZ_CRASH("BaseCompiler platform hook: store");
+#endif
+
+ return true;
+ }
+
+ template <size_t Count>
+ struct Atomic32Temps : mozilla::Array<RegI32, Count> {
+ // Allocate all temp registers if 'allocate' is not specified.
+ void allocate(BaseCompiler* bc, size_t allocate = Count) {
+ static_assert(Count != 0);
+ for (size_t i = 0; i < allocate; ++i) {
+ this->operator[](i) = bc->needI32();
+ }
+ }
+ void maybeFree(BaseCompiler* bc) {
+ for (size_t i = 0; i < Count; ++i) {
+ bc->maybeFreeI32(this->operator[](i));
+ }
+ }
+ };
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ using AtomicRMW32Temps = Atomic32Temps<3>;
+#else
+ using AtomicRMW32Temps = Atomic32Temps<1>;
+#endif
+
+ template <typename T>
+ void atomicRMW32(const MemoryAccessDesc& access, T srcAddr, AtomicOp op,
+ RegI32 rv, RegI32 rd, const AtomicRMW32Temps& temps) {
+ switch (access.type()) {
+ case Scalar::Uint8:
+#ifdef JS_CODEGEN_X86
+ {
+ RegI32 temp = temps[0];
+ // The temp, if used, must be a byte register.
+ MOZ_ASSERT(temp.isInvalid());
+ ScratchI8 scratch(*this);
+ if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ temp = scratch;
+ }
+ masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temp, rd);
+ break;
+ }
+#endif
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps[0], temps[1],
+ temps[2], rd);
+#else
+ masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps[0], rd);
+#endif
+ break;
+ default: {
+ MOZ_CRASH("Bad type for atomic operation");
+ }
+ }
+ }
+
+ // On x86, V is Address. On other platforms, it is Register64.
+ // T is BaseIndex or Address.
+ template <typename T, typename V>
+ void atomicRMW64(const MemoryAccessDesc& access, const T& srcAddr,
+ AtomicOp op, V value, Register64 temp, Register64 rd) {
+ masm.wasmAtomicFetchOp64(access, op, value, srcAddr, temp, rd);
+ }
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ using AtomicCmpXchg32Temps = Atomic32Temps<3>;
+#else
+ using AtomicCmpXchg32Temps = Atomic32Temps<0>;
+#endif
+
+ template <typename T>
+ void atomicCmpXchg32(const MemoryAccessDesc& access, T srcAddr,
+ RegI32 rexpect, RegI32 rnew, RegI32 rd,
+ const AtomicCmpXchg32Temps& temps) {
+ switch (access.type()) {
+ case Scalar::Uint8:
+#if defined(JS_CODEGEN_X86)
+ {
+ ScratchI8 scratch(*this);
+ MOZ_ASSERT(rd == specific_.eax);
+ if (!ra.isSingleByteI32(rnew)) {
+ // The replacement value must have a byte persona.
+ masm.movl(rnew, scratch);
+ rnew = scratch;
+ }
+ masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd);
+ break;
+ }
+#endif
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, temps[0],
+ temps[1], temps[2], rd);
+#else
+ masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd);
+#endif
+ break;
+ default:
+ MOZ_CRASH("Bad type for atomic operation");
+ }
+ }
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ using AtomicXchg32Temps = Atomic32Temps<3>;
+#else
+ using AtomicXchg32Temps = Atomic32Temps<0>;
+#endif
+
+ template <typename T>
+ void atomicXchg32(const MemoryAccessDesc& access, T srcAddr, RegI32 rv,
+ RegI32 rd, const AtomicXchg32Temps& temps) {
+ switch (access.type()) {
+ case Scalar::Uint8:
+#if defined(JS_CODEGEN_X86)
+ {
+ if (!ra.isSingleByteI32(rd)) {
+ ScratchI8 scratch(*this);
+ // The output register must have a byte persona.
+ masm.wasmAtomicExchange(access, srcAddr, rv, scratch);
+ masm.movl(scratch, rd);
+ } else {
+ masm.wasmAtomicExchange(access, srcAddr, rv, rd);
+ }
+ break;
+ }
+#endif
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.wasmAtomicExchange(access, srcAddr, rv, temps[0], temps[1],
+ temps[2], rd);
+#else
+ masm.wasmAtomicExchange(access, srcAddr, rv, rd);
+#endif
+ break;
+ default:
+ MOZ_CRASH("Bad type for atomic operation");
+ }
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Generally speaking, ABOVE this point there should be no
+ // value stack manipulation (calls to popI32 etc).
+ //
+ ////////////////////////////////////////////////////////////
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Platform-specific popping and register targeting.
+ //
+ // These fall into two groups, popping methods for simple needs, and RAII
+ // wrappers for more complex behavior.
+
+ // The simple popping methods pop values into targeted registers; the caller
+ // can free registers using standard functions. These are always called
+ // popXForY where X says something about types and Y something about the
+ // operation being targeted.
+
+ void pop2xI32ForMulDivI32(RegI32* r0, RegI32* r1, RegI32* reserved) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r0 must be eax, and edx will be clobbered.
+ need2xI32(specific_.eax, specific_.edx);
+ *r1 = popI32();
+ *r0 = popI32ToSpecific(specific_.eax);
+ *reserved = specific_.edx;
+#else
+ pop2xI32(r0, r1);
+#endif
+ }
+
+ void pop2xI64ForMulI64(RegI64* r0, RegI64* r1, RegI32* temp,
+ RegI64* reserved) {
+#if defined(JS_CODEGEN_X64)
+ // r0 must be rax, and rdx will be clobbered.
+ need2xI64(specific_.rax, specific_.rdx);
+ *r1 = popI64();
+ *r0 = popI64ToSpecific(specific_.rax);
+ *reserved = specific_.rdx;
+#elif defined(JS_CODEGEN_X86)
+ // As for x64, though edx is part of r0.
+ need2xI32(specific_.eax, specific_.edx);
+ *r1 = popI64();
+ *r0 = popI64ToSpecific(specific_.edx_eax);
+ *temp = needI32();
+#elif defined(JS_CODEGEN_MIPS64)
+ pop2xI64(r0, r1);
+#elif defined(JS_CODEGEN_MIPS32)
+ pop2xI64(r0, r1);
+ *temp = needI32();
+#elif defined(JS_CODEGEN_ARM)
+ pop2xI64(r0, r1);
+ *temp = needI32();
+#elif defined(JS_CODEGEN_ARM64)
+ pop2xI64(r0, r1);
+#else
+ MOZ_CRASH("BaseCompiler porting interface: pop2xI64ForMulI64");
+#endif
+ }
+
+ void pop2xI64ForDivI64(RegI64* r0, RegI64* r1, RegI64* reserved) {
+#if defined(JS_CODEGEN_X64)
+ // r0 must be rax, and rdx will be clobbered.
+ need2xI64(specific_.rax, specific_.rdx);
+ *r1 = popI64();
+ *r0 = popI64ToSpecific(specific_.rax);
+ *reserved = specific_.rdx;
+#else
+ pop2xI64(r0, r1);
+#endif
+ }
+
+ void pop2xI32ForShift(RegI32* r0, RegI32* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r1 must be ecx for a variable shift, unless BMI2 is available.
+ if (!Assembler::HasBMI2()) {
+ *r1 = popI32(specific_.ecx);
+ *r0 = popI32();
+ return;
+ }
+#endif
+ pop2xI32(r0, r1);
+ }
+
+ void pop2xI64ForShift(RegI64* r0, RegI64* r1) {
+#if defined(JS_CODEGEN_X86)
+ // r1 must be ecx for a variable shift.
+ needI32(specific_.ecx);
+ *r1 = popI64ToSpecific(widenI32(specific_.ecx));
+ *r0 = popI64();
+#else
+# if defined(JS_CODEGEN_X64)
+ // r1 must be rcx for a variable shift, unless BMI2 is available.
+ if (!Assembler::HasBMI2()) {
+ needI64(specific_.rcx);
+ *r1 = popI64ToSpecific(specific_.rcx);
+ *r0 = popI64();
+ return;
+ }
+# endif
+ pop2xI64(r0, r1);
+#endif
+ }
+
+ void pop2xI32ForRotate(RegI32* r0, RegI32* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r1 must be ecx for a variable rotate.
+ *r1 = popI32(specific_.ecx);
+ *r0 = popI32();
+#else
+ pop2xI32(r0, r1);
+#endif
+ }
+
+ void pop2xI64ForRotate(RegI64* r0, RegI64* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // r1 must be ecx for a variable rotate.
+ needI32(specific_.ecx);
+ *r1 = popI64ToSpecific(widenI32(specific_.ecx));
+ *r0 = popI64();
+#else
+ pop2xI64(r0, r1);
+#endif
+ }
+
+ void popI32ForSignExtendI64(RegI64* r0) {
+#if defined(JS_CODEGEN_X86)
+ // r0 must be edx:eax for cdq
+ need2xI32(specific_.edx, specific_.eax);
+ *r0 = specific_.edx_eax;
+ popI32ToSpecific(specific_.eax);
+#else
+ *r0 = widenI32(popI32());
+#endif
+ }
+
+ void popI64ForSignExtendI64(RegI64* r0) {
+#if defined(JS_CODEGEN_X86)
+ // r0 must be edx:eax for cdq
+ need2xI32(specific_.edx, specific_.eax);
+ // Low on top, high underneath
+ *r0 = popI64ToSpecific(specific_.edx_eax);
+#else
+ *r0 = popI64();
+#endif
+ }
+
+  // The RAII wrappers are used because we sometimes have to free partial
+  // registers (as when part of a register is the scratch register that has
+  // been used temporarily), or must not free a register at all (as when the
+  // register is the same as the destination register, but only on some
+  // platforms, not on all).  These are called PopX{32,64}Regs where X is the
+  // operation being targeted.
+
+ // Utility struct that holds the BaseCompiler and the destination, and frees
+ // the destination if it has not been extracted.
+
+ template <typename T>
+ class PopBase {
+ T rd_;
+
+ void maybeFree(RegI32 r) { bc->maybeFreeI32(r); }
+ void maybeFree(RegI64 r) { bc->maybeFreeI64(r); }
+
+ protected:
+ BaseCompiler* const bc;
+
+ void setRd(T r) {
+ MOZ_ASSERT(rd_.isInvalid());
+ rd_ = r;
+ }
+ T getRd() const {
+ MOZ_ASSERT(rd_.isValid());
+ return rd_;
+ }
+
+ public:
+ explicit PopBase(BaseCompiler* bc) : bc(bc) {}
+ ~PopBase() { maybeFree(rd_); }
+
+ // Take and clear the Rd - use this when pushing Rd.
+ T takeRd() {
+ MOZ_ASSERT(rd_.isValid());
+ T r = rd_;
+ rd_ = T::Invalid();
+ return r;
+ }
+ };
+
+ friend class PopAtomicCmpXchg32Regs;
+ class PopAtomicCmpXchg32Regs : public PopBase<RegI32> {
+ using Base = PopBase<RegI32>;
+ RegI32 rexpect, rnew;
+ AtomicCmpXchg32Temps temps;
+
+ public:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ // For cmpxchg, the expected value and the result are both in eax.
+ bc->needI32(bc->specific_.eax);
+ if (type == ValType::I64) {
+ rnew = bc->popI64ToI32();
+ rexpect = bc->popI64ToSpecificI32(bc->specific_.eax);
+ } else {
+ rnew = bc->popI32();
+ rexpect = bc->popI32ToSpecific(bc->specific_.eax);
+ }
+ setRd(rexpect);
+ }
+ ~PopAtomicCmpXchg32Regs() { bc->freeI32(rnew); }
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ if (type == ValType::I64) {
+ rnew = bc->popI64ToI32();
+ rexpect = bc->popI64ToI32();
+ } else {
+ rnew = bc->popI32();
+ rexpect = bc->popI32();
+ }
+ setRd(bc->needI32());
+ }
+ ~PopAtomicCmpXchg32Regs() {
+ bc->freeI32(rnew);
+ bc->freeI32(rexpect);
+ }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ if (type == ValType::I64) {
+ rnew = bc->popI64ToI32();
+ rexpect = bc->popI64ToI32();
+ } else {
+ rnew = bc->popI32();
+ rexpect = bc->popI32();
+ }
+ if (Scalar::byteSize(viewType) < 4) {
+ temps.allocate(bc);
+ }
+ setRd(bc->needI32());
+ }
+ ~PopAtomicCmpXchg32Regs() {
+ bc->freeI32(rnew);
+ bc->freeI32(rexpect);
+ temps.maybeFree(bc);
+ }
+#else
+ explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ MOZ_CRASH("BaseCompiler porting interface: PopAtomicCmpXchg32Regs");
+ }
+#endif
+
+ template <typename T>
+ void atomicCmpXchg32(const MemoryAccessDesc& access, T srcAddr) {
+ bc->atomicCmpXchg32(access, srcAddr, rexpect, rnew, getRd(), temps);
+ }
+ };
+
+ friend class PopAtomicCmpXchg64Regs;
+ class PopAtomicCmpXchg64Regs : public PopBase<RegI64> {
+ using Base = PopBase<RegI64>;
+ RegI64 rexpect, rnew;
+
+ public:
+#ifdef JS_CODEGEN_X64
+ explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ // For cmpxchg, the expected value and the result are both in rax.
+ bc->needI64(bc->specific_.rax);
+ rnew = bc->popI64();
+ rexpect = bc->popI64ToSpecific(bc->specific_.rax);
+ setRd(rexpect);
+ }
+ ~PopAtomicCmpXchg64Regs() { bc->freeI64(rnew); }
+#elif defined(JS_CODEGEN_X86)
+ explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ // For cmpxchg8b, the expected value and the result are both in
+ // edx:eax, and the replacement value is in ecx:ebx. But we can't
+ // allocate ebx here, so instead we allocate a temp to hold the low
+ // word of 'new'.
+ bc->needI64(bc->specific_.edx_eax);
+ bc->needI32(bc->specific_.ecx);
+
+ rnew = bc->popI64ToSpecific(
+ RegI64(Register64(bc->specific_.ecx, bc->needI32())));
+ rexpect = bc->popI64ToSpecific(bc->specific_.edx_eax);
+ setRd(rexpect);
+ }
+ ~PopAtomicCmpXchg64Regs() { bc->freeI64(rnew); }
+#elif defined(JS_CODEGEN_ARM)
+ explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ // The replacement value and the result must both be odd/even pairs.
+ rnew = bc->popI64Pair();
+ rexpect = bc->popI64();
+ setRd(bc->needI64Pair());
+ }
+ ~PopAtomicCmpXchg64Regs() {
+ bc->freeI64(rexpect);
+ bc->freeI64(rnew);
+ }
+#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ rnew = bc->popI64();
+ rexpect = bc->popI64();
+ setRd(bc->needI64());
+ }
+ ~PopAtomicCmpXchg64Regs() {
+ bc->freeI64(rexpect);
+ bc->freeI64(rnew);
+ }
+#else
+ explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ MOZ_CRASH("BaseCompiler porting interface: PopAtomicCmpXchg64Regs");
+ }
+#endif
+
+#ifdef JS_CODEGEN_X86
+ template <typename T>
+ void atomicCmpXchg64(const MemoryAccessDesc& access, T srcAddr,
+ RegI32 ebx) {
+ MOZ_ASSERT(ebx == js::jit::ebx);
+ bc->masm.move32(rnew.low, ebx);
+ bc->masm.wasmCompareExchange64(access, srcAddr, rexpect,
+ bc->specific_.ecx_ebx, getRd());
+ }
+#else
+ template <typename T>
+ void atomicCmpXchg64(const MemoryAccessDesc& access, T srcAddr) {
+ bc->masm.wasmCompareExchange64(access, srcAddr, rexpect, rnew, getRd());
+ }
+#endif
+ };
+
+#ifndef JS_64BIT
+ class PopAtomicLoad64Regs : public PopBase<RegI64> {
+ using Base = PopBase<RegI64>;
+
+ public:
+# if defined(JS_CODEGEN_X86)
+ explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
+ // The result is in edx:eax, and we need ecx:ebx as a temp. But we
+ // can't reserve ebx yet, so we'll accept it as an argument to the
+ // operation (below).
+ bc->needI32(bc->specific_.ecx);
+ bc->needI64(bc->specific_.edx_eax);
+ setRd(bc->specific_.edx_eax);
+ }
+ ~PopAtomicLoad64Regs() { bc->freeI32(bc->specific_.ecx); }
+# elif defined(JS_CODEGEN_ARM)
+ explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
+ setRd(bc->needI64Pair());
+ }
+# elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
+ setRd(bc->needI64());
+ }
+# else
+ explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
+ MOZ_CRASH("BaseCompiler porting interface: PopAtomicLoad64Regs");
+ }
+# endif
+
+# ifdef JS_CODEGEN_X86
+ template <typename T>
+ void atomicLoad64(const MemoryAccessDesc& access, T srcAddr, RegI32 ebx) {
+ MOZ_ASSERT(ebx == js::jit::ebx);
+ bc->masm.wasmAtomicLoad64(access, srcAddr, bc->specific_.ecx_ebx,
+ getRd());
+ }
+# else // ARM, MIPS32
+ template <typename T>
+ void atomicLoad64(const MemoryAccessDesc& access, T srcAddr) {
+ bc->masm.wasmAtomicLoad64(access, srcAddr, RegI64::Invalid(), getRd());
+ }
+# endif
+ };
+#endif // JS_64BIT
+
+ friend class PopAtomicRMW32Regs;
+ class PopAtomicRMW32Regs : public PopBase<RegI32> {
+ using Base = PopBase<RegI32>;
+ RegI32 rv;
+ AtomicRMW32Temps temps;
+
+ public:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, AtomicOp op)
+ : Base(bc) {
+ bc->needI32(bc->specific_.eax);
+ if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ // We use xadd, so source and destination are the same. Using
+ // eax here is overconstraining, but for byte operations on x86
+ // we do need something with a byte register.
+ if (type == ValType::I64) {
+ rv = bc->popI64ToSpecificI32(bc->specific_.eax);
+ } else {
+ rv = bc->popI32ToSpecific(bc->specific_.eax);
+ }
+ setRd(rv);
+ } else {
+ // We use a cmpxchg loop. The output must be eax; the input
+ // must be in a separate register since it may be used several
+ // times.
+ if (type == ValType::I64) {
+ rv = bc->popI64ToI32();
+ } else {
+ rv = bc->popI32();
+ }
+ setRd(bc->specific_.eax);
+# if defined(JS_CODEGEN_X86)
+        // Single-byte is a special case handled very locally with
+        // ScratchReg; see atomicRMW32 above.
+ if (Scalar::byteSize(viewType) > 1) {
+ temps.allocate(bc);
+ }
+# else
+ temps.allocate(bc);
+# endif
+ }
+ }
+ ~PopAtomicRMW32Regs() {
+ if (rv != bc->specific_.eax) {
+ bc->freeI32(rv);
+ }
+ temps.maybeFree(bc);
+ }
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, AtomicOp op)
+ : Base(bc) {
+ rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
+ temps.allocate(bc);
+ setRd(bc->needI32());
+ }
+ ~PopAtomicRMW32Regs() {
+ bc->freeI32(rv);
+ temps.maybeFree(bc);
+ }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, AtomicOp op)
+ : Base(bc) {
+ rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
+ if (Scalar::byteSize(viewType) < 4) {
+ temps.allocate(bc);
+ }
+
+ setRd(bc->needI32());
+ }
+ ~PopAtomicRMW32Regs() {
+ bc->freeI32(rv);
+ temps.maybeFree(bc);
+ }
+#else
+ explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType, AtomicOp op)
+ : Base(bc) {
+ MOZ_CRASH("BaseCompiler porting interface: PopAtomicRMW32Regs");
+ }
+#endif
+
+ template <typename T>
+ void atomicRMW32(const MemoryAccessDesc& access, T srcAddr, AtomicOp op) {
+ bc->atomicRMW32(access, srcAddr, op, rv, getRd(), temps);
+ }
+ };
+
+ friend class PopAtomicRMW64Regs;
+ class PopAtomicRMW64Regs : public PopBase<RegI64> {
+ using Base = PopBase<RegI64>;
+#if defined(JS_CODEGEN_X64)
+ AtomicOp op;
+#endif
+ RegI64 rv, temp;
+
+ public:
+#if defined(JS_CODEGEN_X64)
+ explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp op)
+ : Base(bc), op(op) {
+ if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ // We use xaddq, so input and output must be the same register.
+ rv = bc->popI64();
+ setRd(rv);
+ } else {
+ // We use a cmpxchgq loop, so the output must be rax.
+ bc->needI64(bc->specific_.rax);
+ rv = bc->popI64();
+ temp = bc->needI64();
+ setRd(bc->specific_.rax);
+ }
+ }
+ ~PopAtomicRMW64Regs() {
+ bc->maybeFreeI64(temp);
+ if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ bc->freeI64(rv);
+ }
+ }
+#elif defined(JS_CODEGEN_X86)
+ // We'll use cmpxchg8b, so rv must be in ecx:ebx, and rd must be
+ // edx:eax. But we can't reserve ebx here because we need it later, so
+ // use a separate temp and set up ebx when we perform the operation.
+ explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) {
+ bc->needI32(bc->specific_.ecx);
+ bc->needI64(bc->specific_.edx_eax);
+
+ temp = RegI64(Register64(bc->specific_.ecx, bc->needI32()));
+ bc->popI64ToSpecific(temp);
+
+ setRd(bc->specific_.edx_eax);
+ }
+ ~PopAtomicRMW64Regs() { bc->freeI64(temp); }
+ RegI32 valueHigh() const { return RegI32(temp.high); }
+ RegI32 valueLow() const { return RegI32(temp.low); }
+#elif defined(JS_CODEGEN_ARM)
+ explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) {
+      // We use an ldrexd/strexd loop, so the temp and the output must be
+      // odd/even pairs.
+ rv = bc->popI64();
+ temp = bc->needI64Pair();
+ setRd(bc->needI64Pair());
+ }
+ ~PopAtomicRMW64Regs() {
+ bc->freeI64(rv);
+ bc->freeI64(temp);
+ }
+#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) {
+ rv = bc->popI64();
+ temp = bc->needI64();
+ setRd(bc->needI64());
+ }
+ ~PopAtomicRMW64Regs() {
+ bc->freeI64(rv);
+ bc->freeI64(temp);
+ }
+#else
+ explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) {
+ MOZ_CRASH("BaseCompiler porting interface: PopAtomicRMW64Regs");
+ }
+#endif
+
+#ifdef JS_CODEGEN_X86
+ template <typename T, typename V>
+ void atomicRMW64(const MemoryAccessDesc& access, T srcAddr, AtomicOp op,
+ const V& value, RegI32 ebx) {
+ MOZ_ASSERT(ebx == js::jit::ebx);
+ bc->atomicRMW64(access, srcAddr, op, value, bc->specific_.ecx_ebx,
+ getRd());
+ }
+#else
+ template <typename T>
+ void atomicRMW64(const MemoryAccessDesc& access, T srcAddr, AtomicOp op) {
+ bc->atomicRMW64(access, srcAddr, op, rv, temp, getRd());
+ }
+#endif
+ };
+
+ friend class PopAtomicXchg32Regs;
+ class PopAtomicXchg32Regs : public PopBase<RegI32> {
+ using Base = PopBase<RegI32>;
+ RegI32 rv;
+ AtomicXchg32Temps temps;
+
+ public:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ // The xchg instruction reuses rv as rd.
+ rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
+ setRd(rv);
+ }
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
+ setRd(bc->needI32());
+ }
+ ~PopAtomicXchg32Regs() { bc->freeI32(rv); }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
+ if (Scalar::byteSize(viewType) < 4) {
+ temps.allocate(bc);
+ }
+ setRd(bc->needI32());
+ }
+ ~PopAtomicXchg32Regs() {
+ temps.maybeFree(bc);
+ bc->freeI32(rv);
+ }
+#else
+ explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type,
+ Scalar::Type viewType)
+ : Base(bc) {
+ MOZ_CRASH("BaseCompiler porting interface: PopAtomicXchg32Regs");
+ }
+#endif
+
+ template <typename T>
+ void atomicXchg32(const MemoryAccessDesc& access, T srcAddr) {
+ bc->atomicXchg32(access, srcAddr, rv, getRd(), temps);
+ }
+ };
+
+ friend class PopAtomicXchg64Regs;
+ class PopAtomicXchg64Regs : public PopBase<RegI64> {
+ using Base = PopBase<RegI64>;
+ RegI64 rv;
+
+ public:
+#if defined(JS_CODEGEN_X64)
+ explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ rv = bc->popI64();
+ setRd(rv);
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ rv = bc->popI64();
+ setRd(bc->needI64());
+ }
+ ~PopAtomicXchg64Regs() { bc->freeI64(rv); }
+#elif defined(JS_CODEGEN_X86)
+ // We'll use cmpxchg8b, so rv must be in ecx:ebx, and rd must be
+ // edx:eax. But we can't reserve ebx here because we need it later, so
+ // use a separate temp and set up ebx when we perform the operation.
+ explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ bc->needI32(bc->specific_.ecx);
+ bc->needI64(bc->specific_.edx_eax);
+
+ rv = RegI64(Register64(bc->specific_.ecx, bc->needI32()));
+ bc->popI64ToSpecific(rv);
+
+ setRd(bc->specific_.edx_eax);
+ }
+ ~PopAtomicXchg64Regs() { bc->freeI64(rv); }
+#elif defined(JS_CODEGEN_ARM)
+ // Both rv and rd must be odd/even pairs.
+ explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ rv = bc->popI64ToSpecific(bc->needI64Pair());
+ setRd(bc->needI64Pair());
+ }
+ ~PopAtomicXchg64Regs() { bc->freeI64(rv); }
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ rv = bc->popI64ToSpecific(bc->needI64());
+ setRd(bc->needI64());
+ }
+ ~PopAtomicXchg64Regs() { bc->freeI64(rv); }
+#else
+ explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
+ MOZ_CRASH("BaseCompiler porting interface: xchg64");
+ }
+#endif
+
+#ifdef JS_CODEGEN_X86
+ template <typename T>
+ void atomicXchg64(const MemoryAccessDesc& access, T srcAddr,
+ RegI32 ebx) const {
+ MOZ_ASSERT(ebx == js::jit::ebx);
+ bc->masm.move32(rv.low, ebx);
+ bc->masm.wasmAtomicExchange64(access, srcAddr, bc->specific_.ecx_ebx,
+ getRd());
+ }
+#else
+ template <typename T>
+ void atomicXchg64(const MemoryAccessDesc& access, T srcAddr) const {
+ bc->masm.wasmAtomicExchange64(access, srcAddr, rv, getRd());
+ }
+#endif
+ };
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Generally speaking, BELOW this point there should be no
+ // platform dependencies. We make very occasional exceptions
+ // when doing so doesn't become messy and further abstraction is
+ // not desirable.
+ //
+ ////////////////////////////////////////////////////////////
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Sundry wrappers.
+
+ void pop2xI32(RegI32* r0, RegI32* r1) {
+ *r1 = popI32();
+ *r0 = popI32();
+ }
+
+ RegI32 popI32ToSpecific(RegI32 specific) {
+ freeI32(specific);
+ return popI32(specific);
+ }
+
+ void pop2xI64(RegI64* r0, RegI64* r1) {
+ *r1 = popI64();
+ *r0 = popI64();
+ }
+
+ RegI64 popI64ToSpecific(RegI64 specific) {
+ freeI64(specific);
+ return popI64(specific);
+ }
+
+#ifdef JS_CODEGEN_ARM
+ RegI64 popI64Pair() {
+ RegI64 r = needI64Pair();
+ popI64ToSpecific(r);
+ return r;
+ }
+#endif
+
+ void pop2xF32(RegF32* r0, RegF32* r1) {
+ *r1 = popF32();
+ *r0 = popF32();
+ }
+
+ void pop2xF64(RegF64* r0, RegF64* r1) {
+ *r1 = popF64();
+ *r0 = popF64();
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ void pop2xV128(RegV128* r0, RegV128* r1) {
+ *r1 = popV128();
+ *r0 = popV128();
+ }
+#endif
+
+ void pop2xRef(RegPtr* r0, RegPtr* r1) {
+ *r1 = popRef();
+ *r0 = popRef();
+ }
+
+ RegI32 popI64ToI32() {
+ RegI64 r = popI64();
+ return narrowI64(r);
+ }
+
+ RegI32 popI64ToSpecificI32(RegI32 specific) {
+ RegI64 rd = widenI32(specific);
+ popI64ToSpecific(rd);
+ return narrowI64(rd);
+ }
+
+ void pushU32AsI64(RegI32 rs) {
+ RegI64 rd = widenI32(rs);
+ masm.move32To64ZeroExtend(rs, rd);
+ pushI64(rd);
+ }
+
+ RegI32 popMemoryAccess(MemoryAccessDesc* access, AccessCheck* check);
+
+ void pushHeapBase();
+
+ template <typename RegType>
+ RegType pop();
+ template <typename RegType>
+ RegType need();
+ template <typename RegType>
+ void free(RegType r);
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Sundry helpers.
+
+ uint32_t readCallSiteLineOrBytecode() {
+ if (!func_.callSiteLineNums.empty()) {
+ return func_.callSiteLineNums[lastReadCallSite_++];
+ }
+ return iter_.lastOpcodeOffset();
+ }
+
+ bool done() const { return iter_.done(); }
+
+ BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
+
+ void trap(Trap t) const { masm.wasmTrap(t, bytecodeOffset()); }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Object support.
+
+ // This emits a GC pre-write barrier. The pre-barrier is needed when we
+ // replace a member field with a new value, and the previous field value
+ // might have no other referents, and incremental GC is ongoing. The field
+ // might belong to an object or be a stack slot or a register or a heap
+ // allocated value.
+ //
+ // let obj = { field: previousValue };
+ // obj.field = newValue; // previousValue must be marked with a pre-barrier.
+ //
+ // The `valueAddr` is the address of the location that we are about to
+ // update. This function preserves that register.
+
+ void emitPreBarrier(RegPtr valueAddr) {
+ Label skipBarrier;
+ ScratchPtr scratch(*this);
+
+ fr.loadTlsPtr(scratch);
+ EmitWasmPreBarrierGuard(masm, scratch, scratch, valueAddr, &skipBarrier);
+
+ fr.loadTlsPtr(scratch);
+#ifdef JS_CODEGEN_ARM64
+ // The prebarrier stub assumes the PseudoStackPointer is set up. It is OK
+ // to just move the sp to x28 here because x28 is not being used by the
+ // baseline compiler and need not be saved or restored.
+ MOZ_ASSERT(!GeneralRegisterSet::All().hasRegisterIndex(x28.asUnsized()));
+ masm.Mov(x28, sp);
+#endif
+ EmitWasmPreBarrierCall(masm, scratch, scratch, valueAddr);
+
+ masm.bind(&skipBarrier);
+ }
+
+ // This frees the register `valueAddr`.
+
+ [[nodiscard]] bool emitPostBarrierCall(RegPtr valueAddr) {
+ uint32_t bytecodeOffset = iter_.lastOpcodeOffset();
+
+ // The `valueAddr` is a raw pointer to the cell within some GC object or
+ // TLS area, and we guarantee that the GC will not run while the
+ // postbarrier call is active, so push a uintptr_t value.
+#ifdef JS_64BIT
+ pushI64(RegI64(Register64(valueAddr)));
+#else
+ pushI32(RegI32(valueAddr));
+#endif
+ if (!emitInstanceCall(bytecodeOffset, SASigPostBarrier,
+ /*pushReturnedValue=*/false)) {
+ return false;
+ }
+ return true;
+ }
+
+ [[nodiscard]] bool emitBarrieredStore(const Maybe<RegPtr>& object,
+ RegPtr valueAddr, RegPtr value) {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ emitPreBarrier(valueAddr); // Preserves valueAddr
+ masm.storePtr(value, Address(valueAddr, 0));
+
+ Label skipBarrier;
+ sync();
+
+ RegPtr otherScratch = needRef();
+ EmitWasmPostBarrierGuard(masm, object, otherScratch, value, &skipBarrier);
+ freeRef(otherScratch);
+
+ if (!emitPostBarrierCall(valueAddr)) {
+ return false;
+ }
+ masm.bind(&skipBarrier);
+ return true;
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Machinery for optimized conditional branches.
+ //
+ // To disable this optimization it is enough always to return false from
+ // sniffConditionalControl{Cmp,Eqz}.
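+ //
+ // As an illustration (the wat below is a generic example, not taken from
+ // this file), for a compare that feeds directly into a branch, such as
+ //
+ //   (br_if $out (i32.lt_s (local.get 0) (i32.const 10)))
+ //
+ // the comparison is not materialized into a boolean; sniffConditionalControl
+ // records it as a latent compare, and emitBranchPerform() later emits a
+ // single compare-and-branch.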
+
+ struct BranchState {
+ union {
+ struct {
+ RegI32 lhs;
+ RegI32 rhs;
+ int32_t imm;
+ bool rhsImm;
+ } i32;
+ struct {
+ RegI64 lhs;
+ RegI64 rhs;
+ int64_t imm;
+ bool rhsImm;
+ } i64;
+ struct {
+ RegF32 lhs;
+ RegF32 rhs;
+ } f32;
+ struct {
+ RegF64 lhs;
+ RegF64 rhs;
+ } f64;
+ };
+
+ Label* const label; // The target of the branch, never NULL
+ const StackHeight stackHeight; // The stack base above which to place
+ // stack-spilled block results, if
+ // hasBlockResults().
+ const bool invertBranch; // If true, invert the sense of the branch
+ const ResultType resultType; // The result propagated along the edges
+
+ explicit BranchState(Label* label)
+ : label(label),
+ stackHeight(StackHeight::Invalid()),
+ invertBranch(false),
+ resultType(ResultType::Empty()) {}
+
+ BranchState(Label* label, bool invertBranch)
+ : label(label),
+ stackHeight(StackHeight::Invalid()),
+ invertBranch(invertBranch),
+ resultType(ResultType::Empty()) {}
+
+ BranchState(Label* label, StackHeight stackHeight, bool invertBranch,
+ ResultType resultType)
+ : label(label),
+ stackHeight(stackHeight),
+ invertBranch(invertBranch),
+ resultType(resultType) {}
+
+ bool hasBlockResults() const { return stackHeight.isValid(); }
+ };
+
+ void setLatentCompare(Assembler::Condition compareOp, ValType operandType) {
+ latentOp_ = LatentOp::Compare;
+ latentType_ = operandType;
+ latentIntCmp_ = compareOp;
+ }
+
+ void setLatentCompare(Assembler::DoubleCondition compareOp,
+ ValType operandType) {
+ latentOp_ = LatentOp::Compare;
+ latentType_ = operandType;
+ latentDoubleCmp_ = compareOp;
+ }
+
+ void setLatentEqz(ValType operandType) {
+ latentOp_ = LatentOp::Eqz;
+ latentType_ = operandType;
+ }
+
+ bool hasLatentOp() const { return latentOp_ != LatentOp::None; }
+
+ void resetLatentOp() { latentOp_ = LatentOp::None; }
+
+ void branchTo(Assembler::DoubleCondition c, RegF64 lhs, RegF64 rhs,
+ Label* l) {
+ masm.branchDouble(c, lhs, rhs, l);
+ }
+
+ void branchTo(Assembler::DoubleCondition c, RegF32 lhs, RegF32 rhs,
+ Label* l) {
+ masm.branchFloat(c, lhs, rhs, l);
+ }
+
+ void branchTo(Assembler::Condition c, RegI32 lhs, RegI32 rhs, Label* l) {
+ masm.branch32(c, lhs, rhs, l);
+ }
+
+ void branchTo(Assembler::Condition c, RegI32 lhs, Imm32 rhs, Label* l) {
+ masm.branch32(c, lhs, rhs, l);
+ }
+
+ void branchTo(Assembler::Condition c, RegI64 lhs, RegI64 rhs, Label* l) {
+ masm.branch64(c, lhs, rhs, l);
+ }
+
+ void branchTo(Assembler::Condition c, RegI64 lhs, Imm64 rhs, Label* l) {
+ masm.branch64(c, lhs, rhs, l);
+ }
+
+ void branchTo(Assembler::Condition c, RegPtr lhs, ImmWord rhs, Label* l) {
+ masm.branchPtr(c, lhs, rhs, l);
+ }
+
+ // Emit a conditional branch that optionally and optimally cleans up the CPU
+ // stack before we branch.
+ //
+ // Cond is either Assembler::Condition or Assembler::DoubleCondition.
+ //
+ // Lhs is RegI32, RegI64, RegF32, RegF64, or RegPtr.
+ //
+ // Rhs is either the same as Lhs, or an immediate expression compatible with
+ // Lhs "when applicable".
+
+ template <typename Cond, typename Lhs, typename Rhs>
+ [[nodiscard]] bool jumpConditionalWithResults(BranchState* b, Cond cond,
+ Lhs lhs, Rhs rhs) {
+ if (b->hasBlockResults()) {
+ StackHeight resultsBase(0);
+ if (!topBranchParams(b->resultType, &resultsBase)) {
+ return false;
+ }
+ if (b->stackHeight != resultsBase) {
+ Label notTaken;
+ branchTo(b->invertBranch ? cond : Assembler::InvertCondition(cond), lhs,
+ rhs, &notTaken);
+
+ // Shuffle stack args.
+ shuffleStackResultsBeforeBranch(resultsBase, b->stackHeight,
+ b->resultType);
+ masm.jump(b->label);
+ masm.bind(&notTaken);
+ return true;
+ }
+ }
+
+ branchTo(b->invertBranch ? Assembler::InvertCondition(cond) : cond, lhs,
+ rhs, b->label);
+ return true;
+ }
+
+ // sniffConditionalControl{Cmp,Eqz} may modify the latentWhatever_ state in
+ // the BaseCompiler so that a subsequent conditional branch can be compiled
+ // optimally. emitBranchSetup() and emitBranchPerform() will consume that
+ // state. If the latter methods are not called because deadCode_ is true
+ // then the compiler MUST instead call resetLatentOp() to reset the state.
+
+ template <typename Cond>
+ [[nodiscard]] bool sniffConditionalControlCmp(Cond compareOp,
+ ValType operandType);
+ [[nodiscard]] bool sniffConditionalControlEqz(ValType operandType);
+ void emitBranchSetup(BranchState* b);
+ [[nodiscard]] bool emitBranchPerform(BranchState* b);
+
+ //////////////////////////////////////////////////////////////////////
+
+ [[nodiscard]] bool emitBody();
+ [[nodiscard]] bool emitBlock();
+ [[nodiscard]] bool emitLoop();
+ [[nodiscard]] bool emitIf();
+ [[nodiscard]] bool emitElse();
+#ifdef ENABLE_WASM_EXCEPTIONS
+ [[nodiscard]] bool emitTry();
+ [[nodiscard]] bool emitCatch();
+ [[nodiscard]] bool emitThrow();
+#endif
+ [[nodiscard]] bool emitEnd();
+ [[nodiscard]] bool emitBr();
+ [[nodiscard]] bool emitBrIf();
+ [[nodiscard]] bool emitBrTable();
+ [[nodiscard]] bool emitDrop();
+ [[nodiscard]] bool emitReturn();
+
+ enum class CalleeOnStack {
+ // After the arguments to the call, there is a callee pushed onto value
+ // stack. This is only the case for callIndirect. To get the arguments to
+ // the call, emitCallArgs has to reach one element deeper into the value
+ // stack, to skip the callee.
+ True,
+
+ // No callee on the stack.
+ False
+ };
+
+ [[nodiscard]] bool emitCallArgs(const ValTypeVector& args,
+ const StackResultsLoc& results,
+ FunctionCall* baselineCall,
+ CalleeOnStack calleeOnStack);
+
+ [[nodiscard]] bool emitCall();
+ [[nodiscard]] bool emitCallIndirect();
+ [[nodiscard]] bool emitUnaryMathBuiltinCall(SymbolicAddress callee,
+ ValType operandType);
+ [[nodiscard]] bool emitGetLocal();
+ [[nodiscard]] bool emitSetLocal();
+ [[nodiscard]] bool emitTeeLocal();
+ [[nodiscard]] bool emitGetGlobal();
+ [[nodiscard]] bool emitSetGlobal();
+ [[nodiscard]] RegI32 maybeLoadTlsForAccess(const AccessCheck& check);
+ [[nodiscard]] RegI32 maybeLoadTlsForAccess(const AccessCheck& check,
+ RegI32 specific);
+ [[nodiscard]] bool emitLoad(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool loadCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType type);
+ [[nodiscard]] bool emitStore(ValType resultType, Scalar::Type viewType);
+ [[nodiscard]] bool storeCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType resultType);
+ [[nodiscard]] bool emitSelect(bool typed);
+
+ template <bool isSetLocal>
+ [[nodiscard]] bool emitSetOrTeeLocal(uint32_t slot);
+
+ [[nodiscard]] bool endBlock(ResultType type);
+ [[nodiscard]] bool endIfThen(ResultType type);
+ [[nodiscard]] bool endIfThenElse(ResultType type);
+
+ void doReturn(ContinuationKind kind);
+ void pushReturnValueOfCall(const FunctionCall& call, MIRType type);
+
+ [[nodiscard]] bool pushStackResultsForCall(const ResultType& type, RegPtr temp,
+ StackResultsLoc* loc);
+ void popStackResultsAfterCall(const StackResultsLoc& results,
+ uint32_t stackArgBytes);
+
+ void emitCompareI32(Assembler::Condition compareOp, ValType compareType);
+ void emitCompareI64(Assembler::Condition compareOp, ValType compareType);
+ void emitCompareF32(Assembler::DoubleCondition compareOp,
+ ValType compareType);
+ void emitCompareF64(Assembler::DoubleCondition compareOp,
+ ValType compareType);
+ void emitCompareRef(Assembler::Condition compareOp, ValType compareType);
+
+ void emitAddI32();
+ void emitAddI64();
+ void emitAddF64();
+ void emitAddF32();
+ void emitSubtractI32();
+ void emitSubtractI64();
+ void emitSubtractF32();
+ void emitSubtractF64();
+ void emitMultiplyI32();
+ void emitMultiplyI64();
+ void emitMultiplyF32();
+ void emitMultiplyF64();
+ void emitQuotientI32();
+ void emitQuotientU32();
+ void emitRemainderI32();
+ void emitRemainderU32();
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ [[nodiscard]] bool emitDivOrModI64BuiltinCall(SymbolicAddress callee,
+ ValType operandType);
+#else
+ void emitQuotientI64();
+ void emitQuotientU64();
+ void emitRemainderI64();
+ void emitRemainderU64();
+#endif
+ void emitDivideF32();
+ void emitDivideF64();
+ void emitMinF32();
+ void emitMaxF32();
+ void emitMinF64();
+ void emitMaxF64();
+ void emitCopysignF32();
+ void emitCopysignF64();
+ void emitOrI32();
+ void emitOrI64();
+ void emitAndI32();
+ void emitAndI64();
+ void emitXorI32();
+ void emitXorI64();
+ void emitShlI32();
+ void emitShlI64();
+ void emitShrI32();
+ void emitShrI64();
+ void emitShrU32();
+ void emitShrU64();
+ void emitRotrI32();
+ void emitRotrI64();
+ void emitRotlI32();
+ void emitRotlI64();
+ void emitEqzI32();
+ void emitEqzI64();
+ void emitClzI32();
+ void emitClzI64();
+ void emitCtzI32();
+ void emitCtzI64();
+ void emitPopcntI32();
+ void emitPopcntI64();
+ void emitAbsF32();
+ void emitAbsF64();
+ void emitNegateF32();
+ void emitNegateF64();
+ void emitSqrtF32();
+ void emitSqrtF64();
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF32ToI32();
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF64ToI32();
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ [[nodiscard]] bool emitConvertFloatingToInt64Callout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType);
+#else
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF32ToI64();
+ template <TruncFlags flags>
+ [[nodiscard]] bool emitTruncateF64ToI64();
+#endif
+ void emitWrapI64ToI32();
+ void emitExtendI32_8();
+ void emitExtendI32_16();
+ void emitExtendI64_8();
+ void emitExtendI64_16();
+ void emitExtendI64_32();
+ void emitExtendI32ToI64();
+ void emitExtendU32ToI64();
+ void emitReinterpretF32AsI32();
+ void emitReinterpretF64AsI64();
+ void emitConvertF64ToF32();
+ void emitConvertI32ToF32();
+ void emitConvertU32ToF32();
+ void emitConvertF32ToF64();
+ void emitConvertI32ToF64();
+ void emitConvertU32ToF64();
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ [[nodiscard]] bool emitConvertInt64ToFloatingCallout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType);
+#else
+ void emitConvertI64ToF32();
+ void emitConvertU64ToF32();
+ void emitConvertI64ToF64();
+ void emitConvertU64ToF64();
+#endif
+ void emitReinterpretI32AsF32();
+ void emitReinterpretI64AsF64();
+ void emitRound(RoundingMode roundingMode, ValType operandType);
+ [[nodiscard]] bool emitInstanceCall(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& builtin,
+ bool pushReturnedValue = true);
+ [[nodiscard]] bool emitMemoryGrow();
+ [[nodiscard]] bool emitMemorySize();
+
+ [[nodiscard]] bool emitRefFunc();
+ [[nodiscard]] bool emitRefNull();
+ [[nodiscard]] bool emitRefIsNull();
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ [[nodiscard]] bool emitRefAsNonNull();
+ [[nodiscard]] bool emitBrOnNull();
+#endif
+
+ [[nodiscard]] bool emitAtomicCmpXchg(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitAtomicLoad(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitAtomicRMW(ValType type, Scalar::Type viewType,
+ AtomicOp op);
+ [[nodiscard]] bool emitAtomicStore(ValType type, Scalar::Type viewType);
+ [[nodiscard]] bool emitWait(ValType type, uint32_t byteSize);
+ [[nodiscard]] bool emitWake();
+ [[nodiscard]] bool emitFence();
+ [[nodiscard]] bool emitAtomicXchg(ValType type, Scalar::Type viewType);
+ void emitAtomicXchg64(MemoryAccessDesc* access, WantResult wantResult);
+ [[nodiscard]] bool emitMemCopy();
+ [[nodiscard]] bool emitMemCopyCall(uint32_t lineOrBytecode);
+ [[nodiscard]] bool emitMemCopyInline();
+ [[nodiscard]] bool emitTableCopy();
+ [[nodiscard]] bool emitDataOrElemDrop(bool isData);
+ [[nodiscard]] bool emitMemFill();
+ [[nodiscard]] bool emitMemFillCall(uint32_t lineOrBytecode);
+ [[nodiscard]] bool emitMemFillInline();
+ [[nodiscard]] bool emitMemOrTableInit(bool isMem);
+#ifdef ENABLE_WASM_REFTYPES
+ [[nodiscard]] bool emitTableFill();
+ [[nodiscard]] bool emitTableGet();
+ [[nodiscard]] bool emitTableGrow();
+ [[nodiscard]] bool emitTableSet();
+ [[nodiscard]] bool emitTableSize();
+#endif
+ [[nodiscard]] bool emitStructNew();
+ [[nodiscard]] bool emitStructGet();
+ [[nodiscard]] bool emitStructSet();
+ [[nodiscard]] bool emitStructNarrow();
+#ifdef ENABLE_WASM_SIMD
+ template <typename SourceType, typename DestType>
+ void emitVectorUnop(void (*op)(MacroAssembler& masm, SourceType rs,
+ DestType rd));
+
+ template <typename SourceType, typename DestType, typename TempType>
+ void emitVectorUnop(void (*op)(MacroAssembler& masm, SourceType rs,
+ DestType rd, TempType temp));
+
+ template <typename SourceType, typename DestType, typename ImmType>
+ void emitVectorUnop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
+ SourceType, DestType));
+
+ template <typename RhsType, typename LhsDestType>
+ void emitVectorBinop(void (*op)(MacroAssembler& masm, RhsType src,
+ LhsDestType srcDest));
+
+ template <typename RhsDestType, typename LhsType>
+ void emitVectorBinop(void (*op)(MacroAssembler& masm, RhsDestType src,
+ LhsType srcDest, RhsDestOp));
+
+ template <typename RhsType, typename LhsDestType, typename TempType>
+ void emitVectorBinop(void (*)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType temp));
+
+ template <typename RhsType, typename LhsDestType, typename TempType1,
+ typename TempType2>
+ void emitVectorBinop(void (*)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType1 temp1,
+ TempType2 temp2));
+
+ template <typename RhsType, typename LhsDestType, typename ImmType>
+ void emitVectorBinop(ImmType immediate, void (*op)(MacroAssembler&, ImmType,
+ RhsType, LhsDestType));
+
+ template <typename RhsType, typename LhsDestType, typename ImmType,
+ typename TempType1, typename TempType2>
+ void emitVectorBinop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType, RhsType,
+ LhsDestType, TempType1 temp1,
+ TempType2 temp2));
+
+ void emitVectorAndNot();
+
+ [[nodiscard]] bool emitLoadSplat(Scalar::Type viewType);
+ [[nodiscard]] bool emitLoadZero(Scalar::Type viewType);
+ [[nodiscard]] bool emitLoadExtend(Scalar::Type viewType);
+ [[nodiscard]] bool emitBitselect();
+ [[nodiscard]] bool emitVectorShuffle();
+ [[nodiscard]] bool emitVectorShiftRightI64x2(bool isUnsigned);
+ [[nodiscard]] bool emitVectorMulI64x2();
+#endif
+};
+
+// TODO: We want these to be inlined for sure; do we need an `inline` somewhere?
+
+template <>
+RegI32 BaseCompiler::need<RegI32>() {
+ return needI32();
+}
+template <>
+RegI64 BaseCompiler::need<RegI64>() {
+ return needI64();
+}
+template <>
+RegF32 BaseCompiler::need<RegF32>() {
+ return needF32();
+}
+template <>
+RegF64 BaseCompiler::need<RegF64>() {
+ return needF64();
+}
+
+template <>
+RegI32 BaseCompiler::pop<RegI32>() {
+ return popI32();
+}
+template <>
+RegI64 BaseCompiler::pop<RegI64>() {
+ return popI64();
+}
+template <>
+RegF32 BaseCompiler::pop<RegF32>() {
+ return popF32();
+}
+template <>
+RegF64 BaseCompiler::pop<RegF64>() {
+ return popF64();
+}
+
+template <>
+void BaseCompiler::free<RegI32>(RegI32 r) {
+ freeI32(r);
+}
+template <>
+void BaseCompiler::free<RegI64>(RegI64 r) {
+ freeI64(r);
+}
+template <>
+void BaseCompiler::free<RegF32>(RegF32 r) {
+ freeF32(r);
+}
+template <>
+void BaseCompiler::free<RegF64>(RegF64 r) {
+ freeF64(r);
+}
+
+#ifdef ENABLE_WASM_SIMD
+template <>
+RegV128 BaseCompiler::need<RegV128>() {
+ return needV128();
+}
+template <>
+RegV128 BaseCompiler::pop<RegV128>() {
+ return popV128();
+}
+template <>
+void BaseCompiler::free<RegV128>(RegV128 r) {
+ freeV128(r);
+}
+#endif
+
+void BaseCompiler::emitAddI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.add32(Imm32(c), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ masm.add32(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitAddI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.add64(Imm64(c), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ masm.add64(rs, r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitAddF64() {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ masm.addDouble(rs, r);
+ freeF64(rs);
+ pushF64(r);
+}
+
+void BaseCompiler::emitAddF32() {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ masm.addFloat32(rs, r);
+ freeF32(rs);
+ pushF32(r);
+}
+
+void BaseCompiler::emitSubtractI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.sub32(Imm32(c), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ masm.sub32(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitSubtractI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.sub64(Imm64(c), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ masm.sub64(rs, r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitSubtractF32() {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ masm.subFloat32(rs, r);
+ freeF32(rs);
+ pushF32(r);
+}
+
+void BaseCompiler::emitSubtractF64() {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ masm.subDouble(rs, r);
+ freeF64(rs);
+ pushF64(r);
+}
+
+void BaseCompiler::emitMultiplyI32() {
+ RegI32 r, rs, reserved;
+ pop2xI32ForMulDivI32(&r, &rs, &reserved);
+ masm.mul32(rs, r);
+ maybeFreeI32(reserved);
+ freeI32(rs);
+ pushI32(r);
+}
+
+void BaseCompiler::emitMultiplyI64() {
+ RegI64 r, rs, reserved;
+ RegI32 temp;
+ pop2xI64ForMulI64(&r, &rs, &temp, &reserved);
+ masm.mul64(rs, r, temp);
+ maybeFreeI64(reserved);
+ maybeFreeI32(temp);
+ freeI64(rs);
+ pushI64(r);
+}
+
+void BaseCompiler::emitMultiplyF32() {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ masm.mulFloat32(rs, r);
+ freeF32(rs);
+ pushF32(r);
+}
+
+void BaseCompiler::emitMultiplyF64() {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ masm.mulDouble(rs, r);
+ freeF64(rs);
+ pushF64(r);
+}
+
+void BaseCompiler::emitQuotientI32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI32(&c, &power, 0)) {
+ if (power != 0) {
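+ // Signed division by 2^power: add (2^power - 1) to negative dividends
+ // before the arithmetic right shift so the quotient rounds toward zero
+ // rather than toward negative infinity.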
+ RegI32 r = popI32();
+ Label positive;
+ masm.branchTest32(Assembler::NotSigned, r, r, &positive);
+ masm.add32(Imm32(c - 1), r);
+ masm.bind(&positive);
+
+ masm.rshift32Arithmetic(Imm32(power & 31), r);
+ pushI32(r);
+ }
+ } else {
+ bool isConst = peekConstI32(&c);
+ RegI32 r, rs, reserved;
+ pop2xI32ForMulDivI32(&r, &rs, &reserved);
+
+ if (!isConst || c == 0) {
+ checkDivideByZeroI32(rs);
+ }
+
+ Label done;
+ if (!isConst || c == -1) {
+ checkDivideSignedOverflowI32(rs, r, &done, ZeroOnOverflow(false));
+ }
+ masm.quotient32(rs, r, IsUnsigned(false));
+ masm.bind(&done);
+
+ maybeFreeI32(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitQuotientU32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI32(&c, &power, 0)) {
+ if (power != 0) {
+ RegI32 r = popI32();
+ masm.rshift32(Imm32(power & 31), r);
+ pushI32(r);
+ }
+ } else {
+ bool isConst = peekConstI32(&c);
+ RegI32 r, rs, reserved;
+ pop2xI32ForMulDivI32(&r, &rs, &reserved);
+
+ if (!isConst || c == 0) {
+ checkDivideByZeroI32(rs);
+ }
+ masm.quotient32(rs, r, IsUnsigned(true));
+
+ maybeFreeI32(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitRemainderI32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI32(&c, &power, 1)) {
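+ // Signed remainder by 2^power: compute the truncating quotient (with the
+ // same (2^power - 1) adjustment for negative dividends as in
+ // emitQuotientI32), scale it back up with a left shift, and subtract it
+ // from the dividend.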
+ RegI32 r = popI32();
+ RegI32 temp = needI32();
+ moveI32(r, temp);
+
+ Label positive;
+ masm.branchTest32(Assembler::NotSigned, temp, temp, &positive);
+ masm.add32(Imm32(c - 1), temp);
+ masm.bind(&positive);
+
+ masm.rshift32Arithmetic(Imm32(power & 31), temp);
+ masm.lshift32(Imm32(power & 31), temp);
+ masm.sub32(temp, r);
+ freeI32(temp);
+
+ pushI32(r);
+ } else {
+ bool isConst = peekConstI32(&c);
+ RegI32 r, rs, reserved;
+ pop2xI32ForMulDivI32(&r, &rs, &reserved);
+
+ if (!isConst || c == 0) {
+ checkDivideByZeroI32(rs);
+ }
+
+ Label done;
+ if (!isConst || c == -1) {
+ checkDivideSignedOverflowI32(rs, r, &done, ZeroOnOverflow(true));
+ }
+ masm.remainder32(rs, r, IsUnsigned(false));
+ masm.bind(&done);
+
+ maybeFreeI32(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitRemainderU32() {
+ int32_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI32(&c, &power, 1)) {
+ RegI32 r = popI32();
+ masm.and32(Imm32(c - 1), r);
+ pushI32(r);
+ } else {
+ bool isConst = peekConstI32(&c);
+ RegI32 r, rs, reserved;
+ pop2xI32ForMulDivI32(&r, &rs, &reserved);
+
+ if (!isConst || c == 0) {
+ checkDivideByZeroI32(rs);
+ }
+ masm.remainder32(rs, r, IsUnsigned(true));
+
+ maybeFreeI32(reserved);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+#ifndef RABALDR_INT_DIV_I64_CALLOUT
+void BaseCompiler::emitQuotientI64() {
+# ifdef JS_64BIT
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI64(&c, &power, 0)) {
+ if (power != 0) {
+ RegI64 r = popI64();
+ Label positive;
+ masm.branchTest64(Assembler::NotSigned, r, r, RegI32::Invalid(),
+ &positive);
+ masm.add64(Imm64(c - 1), r);
+ masm.bind(&positive);
+
+ masm.rshift64Arithmetic(Imm32(power & 63), r);
+ pushI64(r);
+ }
+ } else {
+ bool isConst = peekConstI64(&c);
+ RegI64 r, rs, reserved;
+ pop2xI64ForDivI64(&r, &rs, &reserved);
+ quotientI64(rs, r, reserved, IsUnsigned(false), isConst, c);
+ maybeFreeI64(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitQuotientI64");
+# endif
+}
+
+void BaseCompiler::emitQuotientU64() {
+# ifdef JS_64BIT
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI64(&c, &power, 0)) {
+ if (power != 0) {
+ RegI64 r = popI64();
+ masm.rshift64(Imm32(power & 63), r);
+ pushI64(r);
+ }
+ } else {
+ bool isConst = peekConstI64(&c);
+ RegI64 r, rs, reserved;
+ pop2xI64ForDivI64(&r, &rs, &reserved);
+ quotientI64(rs, r, reserved, IsUnsigned(true), isConst, c);
+ maybeFreeI64(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitQuotientU64");
+# endif
+}
+
+void BaseCompiler::emitRemainderI64() {
+# ifdef JS_64BIT
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI64(&c, &power, 1)) {
+ RegI64 r = popI64();
+ RegI64 temp = needI64();
+ moveI64(r, temp);
+
+ Label positive;
+ masm.branchTest64(Assembler::NotSigned, temp, temp, RegI32::Invalid(),
+ &positive);
+ masm.add64(Imm64(c - 1), temp);
+ masm.bind(&positive);
+
+ masm.rshift64Arithmetic(Imm32(power & 63), temp);
+ masm.lshift64(Imm32(power & 63), temp);
+ masm.sub64(temp, r);
+ freeI64(temp);
+
+ pushI64(r);
+ } else {
+ bool isConst = peekConstI64(&c);
+ RegI64 r, rs, reserved;
+ pop2xI64ForDivI64(&r, &rs, &reserved);
+ remainderI64(rs, r, reserved, IsUnsigned(false), isConst, c);
+ maybeFreeI64(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitRemainderI64");
+# endif
+}
+
+void BaseCompiler::emitRemainderU64() {
+# ifdef JS_64BIT
+ int64_t c;
+ uint_fast8_t power;
+ if (popConstPositivePowerOfTwoI64(&c, &power, 1)) {
+ RegI64 r = popI64();
+ masm.and64(Imm64(c - 1), r);
+ pushI64(r);
+ } else {
+ bool isConst = peekConstI64(&c);
+ RegI64 r, rs, reserved;
+ pop2xI64ForDivI64(&r, &rs, &reserved);
+ remainderI64(rs, r, reserved, IsUnsigned(true), isConst, c);
+ maybeFreeI64(reserved);
+ freeI64(rs);
+ pushI64(r);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitRemainderU64");
+# endif
+}
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+void BaseCompiler::emitDivideF32() {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ masm.divFloat32(rs, r);
+ freeF32(rs);
+ pushF32(r);
+}
+
+void BaseCompiler::emitDivideF64() {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ masm.divDouble(rs, r);
+ freeF64(rs);
+ pushF64(r);
+}
+
+void BaseCompiler::emitMinF32() {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ // Convert signaling NaN to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): Don't do this if one of the operands
+ // is known to be a constant.
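+ //
+ // Subtracting +0.0 leaves ordinary values (including -0.0) unchanged but
+ // turns a signaling NaN into a quiet NaN.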
+ ScratchF32 zero(*this);
+ moveImmF32(0.f, zero);
+ masm.subFloat32(zero, r);
+ masm.subFloat32(zero, rs);
+ masm.minFloat32(rs, r, HandleNaNSpecially(true));
+ freeF32(rs);
+ pushF32(r);
+}
+
+void BaseCompiler::emitMaxF32() {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ // Convert signaling NaN to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+ ScratchF32 zero(*this);
+ moveImmF32(0.f, zero);
+ masm.subFloat32(zero, r);
+ masm.subFloat32(zero, rs);
+ masm.maxFloat32(rs, r, HandleNaNSpecially(true));
+ freeF32(rs);
+ pushF32(r);
+}
+
+void BaseCompiler::emitMinF64() {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ // Convert signaling NaN to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+ ScratchF64 zero(*this);
+ moveImmF64(0, zero);
+ masm.subDouble(zero, r);
+ masm.subDouble(zero, rs);
+ masm.minDouble(rs, r, HandleNaNSpecially(true));
+ freeF64(rs);
+ pushF64(r);
+}
+
+void BaseCompiler::emitMaxF64() {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ // Convert signaling NaN to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+ ScratchF64 zero(*this);
+ moveImmF64(0, zero);
+ masm.subDouble(zero, r);
+ masm.subDouble(zero, rs);
+ masm.maxDouble(rs, r, HandleNaNSpecially(true));
+ freeF64(rs);
+ pushF64(r);
+}
+
+void BaseCompiler::emitCopysignF32() {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
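+ // Combine the magnitude bits of r (masked with INT32_MAX) with the sign bit
+ // of rs (masked with INT32_MIN).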
+ RegI32 temp0 = needI32();
+ RegI32 temp1 = needI32();
+ masm.moveFloat32ToGPR(r, temp0);
+ masm.moveFloat32ToGPR(rs, temp1);
+ masm.and32(Imm32(INT32_MAX), temp0);
+ masm.and32(Imm32(INT32_MIN), temp1);
+ masm.or32(temp1, temp0);
+ masm.moveGPRToFloat32(temp0, r);
+ freeI32(temp0);
+ freeI32(temp1);
+ freeF32(rs);
+ pushF32(r);
+}
+
+void BaseCompiler::emitCopysignF64() {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ RegI64 temp0 = needI64();
+ RegI64 temp1 = needI64();
+ masm.moveDoubleToGPR64(r, temp0);
+ masm.moveDoubleToGPR64(rs, temp1);
+ masm.and64(Imm64(INT64_MAX), temp0);
+ masm.and64(Imm64(INT64_MIN), temp1);
+ masm.or64(temp1, temp0);
+ masm.moveGPR64ToDouble(temp0, r);
+ freeI64(temp0);
+ freeI64(temp1);
+ freeF64(rs);
+ pushF64(r);
+}
+
+void BaseCompiler::emitOrI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.or32(Imm32(c), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ masm.or32(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitOrI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.or64(Imm64(c), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ masm.or64(rs, r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitAndI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.and32(Imm32(c), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ masm.and32(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitAndI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.and64(Imm64(c), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ masm.and64(rs, r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitXorI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.xor32(Imm32(c), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ masm.xor32(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitXorI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.xor64(Imm64(c), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ masm.xor64(rs, r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitShlI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.lshift32(Imm32(c & 31), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32ForShift(&r, &rs);
+ maskShiftCount32(rs);
+ masm.lshift32(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitShlI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.lshift64(Imm32(c & 63), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64ForShift(&r, &rs);
+ masm.lshift64(lowPart(rs), r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitShrI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.rshift32Arithmetic(Imm32(c & 31), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32ForShift(&r, &rs);
+ maskShiftCount32(rs);
+ masm.rshift32Arithmetic(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitShrI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.rshift64Arithmetic(Imm32(c & 63), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64ForShift(&r, &rs);
+ masm.rshift64Arithmetic(lowPart(rs), r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitShrU32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.rshift32(Imm32(c & 31), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32ForShift(&r, &rs);
+ maskShiftCount32(rs);
+ masm.rshift32(rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitShrU64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ masm.rshift64(Imm32(c & 63), r);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64ForShift(&r, &rs);
+ masm.rshift64(lowPart(rs), r);
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitRotrI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.rotateRight(Imm32(c & 31), r, r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32ForRotate(&r, &rs);
+ masm.rotateRight(rs, r, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitRotrI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ RegI32 temp = needRotate64Temp();
+ masm.rotateRight64(Imm32(c & 63), r, r, temp);
+ maybeFreeI32(temp);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64ForRotate(&r, &rs);
+ masm.rotateRight64(lowPart(rs), r, r, maybeHighPart(rs));
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitRotlI32() {
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.rotateLeft(Imm32(c & 31), r, r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32ForRotate(&r, &rs);
+ masm.rotateLeft(rs, r, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitRotlI64() {
+ int64_t c;
+ if (popConstI64(&c)) {
+ RegI64 r = popI64();
+ RegI32 temp = needRotate64Temp();
+ masm.rotateLeft64(Imm32(c & 63), r, r, temp);
+ maybeFreeI32(temp);
+ pushI64(r);
+ } else {
+ RegI64 r, rs;
+ pop2xI64ForRotate(&r, &rs);
+ masm.rotateLeft64(lowPart(rs), r, r, maybeHighPart(rs));
+ freeI64(rs);
+ pushI64(r);
+ }
+}
+
+void BaseCompiler::emitEqzI32() {
+ if (sniffConditionalControlEqz(ValType::I32)) {
+ return;
+ }
+
+ RegI32 r = popI32();
+ masm.cmp32Set(Assembler::Equal, r, Imm32(0), r);
+ pushI32(r);
+}
+
+void BaseCompiler::emitEqzI64() {
+ if (sniffConditionalControlEqz(ValType::I64)) {
+ return;
+ }
+
+ RegI64 rs = popI64();
+ RegI32 rd = fromI64(rs);
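+ // rd aliases part of rs, so freeI64Except releases rs while keeping rd live.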
+ eqz64(rs, rd);
+ freeI64Except(rs, rd);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitClzI32() {
+ RegI32 r = popI32();
+ masm.clz32(r, r, IsKnownNotZero(false));
+ pushI32(r);
+}
+
+void BaseCompiler::emitClzI64() {
+ RegI64 r = popI64();
+ masm.clz64(r, lowPart(r));
+ maybeClearHighPart(r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitCtzI32() {
+ RegI32 r = popI32();
+ masm.ctz32(r, r, IsKnownNotZero(false));
+ pushI32(r);
+}
+
+void BaseCompiler::emitCtzI64() {
+ RegI64 r = popI64();
+ masm.ctz64(r, lowPart(r));
+ maybeClearHighPart(r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitPopcntI32() {
+ RegI32 r = popI32();
+ RegI32 temp = needPopcnt32Temp();
+ masm.popcnt32(r, r, temp);
+ maybeFreeI32(temp);
+ pushI32(r);
+}
+
+void BaseCompiler::emitPopcntI64() {
+ RegI64 r = popI64();
+ RegI32 temp = needPopcnt64Temp();
+ masm.popcnt64(r, r, temp);
+ maybeFreeI32(temp);
+ pushI64(r);
+}
+
+void BaseCompiler::emitAbsF32() {
+ RegF32 r = popF32();
+ masm.absFloat32(r, r);
+ pushF32(r);
+}
+
+void BaseCompiler::emitAbsF64() {
+ RegF64 r = popF64();
+ masm.absDouble(r, r);
+ pushF64(r);
+}
+
+void BaseCompiler::emitNegateF32() {
+ RegF32 r = popF32();
+ masm.negateFloat(r);
+ pushF32(r);
+}
+
+void BaseCompiler::emitNegateF64() {
+ RegF64 r = popF64();
+ masm.negateDouble(r);
+ pushF64(r);
+}
+
+void BaseCompiler::emitSqrtF32() {
+ RegF32 r = popF32();
+ masm.sqrtFloat32(r, r);
+ pushF32(r);
+}
+
+void BaseCompiler::emitSqrtF64() {
+ RegF64 r = popF64();
+ masm.sqrtDouble(r, r);
+ pushF64(r);
+}
+
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF32ToI32() {
+ RegF32 rs = popF32();
+ RegI32 rd = needI32();
+ if (!truncateF32ToI32(rs, rd, flags)) {
+ return false;
+ }
+ freeF32(rs);
+ pushI32(rd);
+ return true;
+}
+
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF64ToI32() {
+ RegF64 rs = popF64();
+ RegI32 rd = needI32();
+ if (!truncateF64ToI32(rs, rd, flags)) {
+ return false;
+ }
+ freeF64(rs);
+ pushI32(rd);
+ return true;
+}
+
+#ifndef RABALDR_FLOAT_TO_I64_CALLOUT
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF32ToI64() {
+ RegF32 rs = popF32();
+ RegI64 rd = needI64();
+ RegF64 temp = needTempForFloatingToI64(flags);
+ if (!truncateF32ToI64(rs, rd, flags, temp)) {
+ return false;
+ }
+ maybeFreeF64(temp);
+ freeF32(rs);
+ pushI64(rd);
+ return true;
+}
+
+template <TruncFlags flags>
+bool BaseCompiler::emitTruncateF64ToI64() {
+ RegF64 rs = popF64();
+ RegI64 rd = needI64();
+ RegF64 temp = needTempForFloatingToI64(flags);
+ if (!truncateF64ToI64(rs, rd, flags, temp)) {
+ return false;
+ }
+ maybeFreeF64(temp);
+ freeF64(rs);
+ pushI64(rd);
+ return true;
+}
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+void BaseCompiler::emitWrapI64ToI32() {
+ RegI64 rs = popI64();
+ RegI32 rd = fromI64(rs);
+ masm.move64To32(rs, rd);
+ freeI64Except(rs, rd);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitExtendI32_8() {
+ RegI32 r = popI32();
+#ifdef JS_CODEGEN_X86
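+ // move8SignExtend requires a byte-addressable source register on x86; if r
+ // is not one, go through a byte scratch register.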
+ if (!ra.isSingleByteI32(r)) {
+ ScratchI8 scratch(*this);
+ moveI32(r, scratch);
+ masm.move8SignExtend(scratch, r);
+ pushI32(r);
+ return;
+ }
+#endif
+ masm.move8SignExtend(r, r);
+ pushI32(r);
+}
+
+void BaseCompiler::emitExtendI32_16() {
+ RegI32 r = popI32();
+ masm.move16SignExtend(r, r);
+ pushI32(r);
+}
+
+void BaseCompiler::emitExtendI64_8() {
+ RegI64 r;
+ popI64ForSignExtendI64(&r);
+ masm.move8To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendI64_16() {
+ RegI64 r;
+ popI64ForSignExtendI64(&r);
+ masm.move16To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendI64_32() {
+ RegI64 r;
+ popI64ForSignExtendI64(&r);
+ masm.move32To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendI32ToI64() {
+ RegI64 r;
+ popI32ForSignExtendI64(&r);
+ masm.move32To64SignExtend(lowPart(r), r);
+ pushI64(r);
+}
+
+void BaseCompiler::emitExtendU32ToI64() {
+ RegI32 rs = popI32();
+ RegI64 rd = widenI32(rs);
+ masm.move32To64ZeroExtend(rs, rd);
+ pushI64(rd);
+}
+
+void BaseCompiler::emitReinterpretF32AsI32() {
+ RegF32 rs = popF32();
+ RegI32 rd = needI32();
+ masm.moveFloat32ToGPR(rs, rd);
+ freeF32(rs);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitReinterpretF64AsI64() {
+ RegF64 rs = popF64();
+ RegI64 rd = needI64();
+ masm.moveDoubleToGPR64(rs, rd);
+ freeF64(rs);
+ pushI64(rd);
+}
+
+void BaseCompiler::emitConvertF64ToF32() {
+ RegF64 rs = popF64();
+ RegF32 rd = needF32();
+ masm.convertDoubleToFloat32(rs, rd);
+ freeF64(rs);
+ pushF32(rd);
+}
+
+void BaseCompiler::emitConvertI32ToF32() {
+ RegI32 rs = popI32();
+ RegF32 rd = needF32();
+ masm.convertInt32ToFloat32(rs, rd);
+ freeI32(rs);
+ pushF32(rd);
+}
+
+void BaseCompiler::emitConvertU32ToF32() {
+ RegI32 rs = popI32();
+ RegF32 rd = needF32();
+ masm.convertUInt32ToFloat32(rs, rd);
+ freeI32(rs);
+ pushF32(rd);
+}
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+void BaseCompiler::emitConvertI64ToF32() {
+ RegI64 rs = popI64();
+ RegF32 rd = needF32();
+ convertI64ToF32(rs, IsUnsigned(false), rd, RegI32());
+ freeI64(rs);
+ pushF32(rd);
+}
+
+void BaseCompiler::emitConvertU64ToF32() {
+ RegI64 rs = popI64();
+ RegF32 rd = needF32();
+ RegI32 temp = needConvertI64ToFloatTemp(ValType::F32, IsUnsigned(true));
+ convertI64ToF32(rs, IsUnsigned(true), rd, temp);
+ maybeFreeI32(temp);
+ freeI64(rs);
+ pushF32(rd);
+}
+#endif
+
+void BaseCompiler::emitConvertF32ToF64() {
+ RegF32 rs = popF32();
+ RegF64 rd = needF64();
+ masm.convertFloat32ToDouble(rs, rd);
+ freeF32(rs);
+ pushF64(rd);
+}
+
+void BaseCompiler::emitConvertI32ToF64() {
+ RegI32 rs = popI32();
+ RegF64 rd = needF64();
+ masm.convertInt32ToDouble(rs, rd);
+ freeI32(rs);
+ pushF64(rd);
+}
+
+void BaseCompiler::emitConvertU32ToF64() {
+ RegI32 rs = popI32();
+ RegF64 rd = needF64();
+ masm.convertUInt32ToDouble(rs, rd);
+ freeI32(rs);
+ pushF64(rd);
+}
+
+#ifndef RABALDR_I64_TO_FLOAT_CALLOUT
+void BaseCompiler::emitConvertI64ToF64() {
+ RegI64 rs = popI64();
+ RegF64 rd = needF64();
+ convertI64ToF64(rs, IsUnsigned(false), rd, RegI32());
+ freeI64(rs);
+ pushF64(rd);
+}
+
+void BaseCompiler::emitConvertU64ToF64() {
+ RegI64 rs = popI64();
+ RegF64 rd = needF64();
+ RegI32 temp = needConvertI64ToFloatTemp(ValType::F64, IsUnsigned(true));
+ convertI64ToF64(rs, IsUnsigned(true), rd, temp);
+ maybeFreeI32(temp);
+ freeI64(rs);
+ pushF64(rd);
+}
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+void BaseCompiler::emitReinterpretI32AsF32() {
+ RegI32 rs = popI32();
+ RegF32 rd = needF32();
+ masm.moveGPRToFloat32(rs, rd);
+ freeI32(rs);
+ pushF32(rd);
+}
+
+void BaseCompiler::emitReinterpretI64AsF64() {
+ RegI64 rs = popI64();
+ RegF64 rd = needF64();
+ masm.moveGPR64ToDouble(rs, rd);
+ freeI64(rs);
+ pushF64(rd);
+}
+
+template <typename Cond>
+bool BaseCompiler::sniffConditionalControlCmp(Cond compareOp,
+ ValType operandType) {
+ MOZ_ASSERT(latentOp_ == LatentOp::None,
+ "Latent comparison state not properly reset");
+
+#ifdef JS_CODEGEN_X86
+ // On x86, latent i64 binary comparisons use too many registers: the
+ // reserved join register and the lhs and rhs operands require six, but we
+ // only have five.
+ if (operandType == ValType::I64) {
+ return false;
+ }
+#endif
+
+ // No optimization for pointer compares yet.
+ if (operandType.isReference()) {
+ return false;
+ }
+
+ OpBytes op;
+ iter_.peekOp(&op);
+ switch (op.b0) {
+ case uint16_t(Op::BrIf):
+ case uint16_t(Op::If):
+ case uint16_t(Op::SelectNumeric):
+ case uint16_t(Op::SelectTyped):
+ setLatentCompare(compareOp, operandType);
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool BaseCompiler::sniffConditionalControlEqz(ValType operandType) {
+ MOZ_ASSERT(latentOp_ == LatentOp::None,
+ "Latent comparison state not properly reset");
+
+ OpBytes op;
+ iter_.peekOp(&op);
+ switch (op.b0) {
+ case uint16_t(Op::BrIf):
+ case uint16_t(Op::SelectNumeric):
+ case uint16_t(Op::SelectTyped):
+ case uint16_t(Op::If):
+ setLatentEqz(operandType);
+ return true;
+ default:
+ return false;
+ }
+}
+
+void BaseCompiler::emitBranchSetup(BranchState* b) {
+ // Avoid allocating the operands of latentOp_ to the result registers.
+ if (b->hasBlockResults()) {
+ needResultRegisters(b->resultType);
+ }
+
+ // Set up fields so that emitBranchPerform() need not switch on latentOp_.
+ switch (latentOp_) {
+ case LatentOp::None: {
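+ // No latent compare or eqz: the branch condition is an ordinary i32
+ // value on the stack, and we branch when it is nonzero.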
+ latentIntCmp_ = Assembler::NotEqual;
+ latentType_ = ValType::I32;
+ b->i32.lhs = popI32();
+ b->i32.rhsImm = true;
+ b->i32.imm = 0;
+ break;
+ }
+ case LatentOp::Compare: {
+ switch (latentType_.kind()) {
+ case ValType::I32: {
+ if (popConstI32(&b->i32.imm)) {
+ b->i32.lhs = popI32();
+ b->i32.rhsImm = true;
+ } else {
+ pop2xI32(&b->i32.lhs, &b->i32.rhs);
+ b->i32.rhsImm = false;
+ }
+ break;
+ }
+ case ValType::I64: {
+ pop2xI64(&b->i64.lhs, &b->i64.rhs);
+ b->i64.rhsImm = false;
+ break;
+ }
+ case ValType::F32: {
+ pop2xF32(&b->f32.lhs, &b->f32.rhs);
+ break;
+ }
+ case ValType::F64: {
+ pop2xF64(&b->f64.lhs, &b->f64.rhs);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected type for LatentOp::Compare");
+ }
+ }
+ break;
+ }
+ case LatentOp::Eqz: {
+ switch (latentType_.kind()) {
+ case ValType::I32: {
+ latentIntCmp_ = Assembler::Equal;
+ b->i32.lhs = popI32();
+ b->i32.rhsImm = true;
+ b->i32.imm = 0;
+ break;
+ }
+ case ValType::I64: {
+ latentIntCmp_ = Assembler::Equal;
+ b->i64.lhs = popI64();
+ b->i64.rhsImm = true;
+ b->i64.imm = 0;
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected type for LatentOp::Eqz");
+ }
+ }
+ break;
+ }
+ }
+
+ if (b->hasBlockResults()) {
+ freeResultRegisters(b->resultType);
+ }
+}
+
+bool BaseCompiler::emitBranchPerform(BranchState* b) {
+ switch (latentType_.kind()) {
+ case ValType::I32: {
+ if (b->i32.rhsImm) {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i32.lhs,
+ Imm32(b->i32.imm))) {
+ return false;
+ }
+ } else {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i32.lhs,
+ b->i32.rhs)) {
+ return false;
+ }
+ freeI32(b->i32.rhs);
+ }
+ freeI32(b->i32.lhs);
+ break;
+ }
+ case ValType::I64: {
+ if (b->i64.rhsImm) {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i64.lhs,
+ Imm64(b->i64.imm))) {
+ return false;
+ }
+ } else {
+ if (!jumpConditionalWithResults(b, latentIntCmp_, b->i64.lhs,
+ b->i64.rhs)) {
+ return false;
+ }
+ freeI64(b->i64.rhs);
+ }
+ freeI64(b->i64.lhs);
+ break;
+ }
+ case ValType::F32: {
+ if (!jumpConditionalWithResults(b, latentDoubleCmp_, b->f32.lhs,
+ b->f32.rhs)) {
+ return false;
+ }
+ freeF32(b->f32.lhs);
+ freeF32(b->f32.rhs);
+ break;
+ }
+ case ValType::F64: {
+ if (!jumpConditionalWithResults(b, latentDoubleCmp_, b->f64.lhs,
+ b->f64.rhs)) {
+ return false;
+ }
+ freeF64(b->f64.lhs);
+ freeF64(b->f64.rhs);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected type for LatentOp::Compare");
+ }
+ }
+ resetLatentOp();
+ return true;
+}
+
+// For blocks and loops and ifs:
+//
+// - Sync the value stack before going into the block in order to simplify exit
+// from the block: all exits from the block can assume that there are no
+// live registers except the one carrying the exit value.
+// - The block can accumulate a number of dead values on the stacks, so when
+// branching out of the block or falling out at the end be sure to
+// pop the appropriate stacks back to where they were on entry, while
+// preserving the exit value.
+// - A continue branch in a loop is much like an exit branch, but the branch
+// value must not be preserved.
+// - The exit value is always in a designated join register (type dependent).
+
+bool BaseCompiler::emitBlock() {
+ ResultType params;
+ if (!iter_.readBlock(&params)) {
+ return false;
+ }
+
+ if (!deadCode_) {
+ sync(); // Simplifies branching out from block
+ }
+
+ initControl(controlItem(), params);
+
+ return true;
+}
+
+bool BaseCompiler::endBlock(ResultType type) {
+ Control& block = controlItem();
+
+ if (deadCode_) {
+ // Block does not fall through; reset stack.
+ fr.resetStackHeight(block.stackHeight, type);
+ popValueStackTo(block.stackSize);
+ } else {
+ // If the block label is used, we have a control join, so we need to shuffle
+ // fallthrough values into place. Otherwise we can leave the value stack
+ // alone.
+ MOZ_ASSERT(stk_.length() == block.stackSize + type.length());
+ if (block.label.used()) {
+ popBlockResults(type, block.stackHeight, ContinuationKind::Fallthrough);
+ }
+ block.bceSafeOnExit &= bceSafe_;
+ }
+
+ // Bind after cleanup: branches out will have popped the stack.
+ if (block.label.used()) {
+ masm.bind(&block.label);
+ if (deadCode_) {
+ captureResultRegisters(type);
+ deadCode_ = false;
+ }
+ if (!pushBlockResults(type)) {
+ return false;
+ }
+ }
+
+ bceSafe_ = block.bceSafeOnExit;
+
+ return true;
+}
+
+bool BaseCompiler::emitLoop() {
+ ResultType params;
+ if (!iter_.readLoop(&params)) {
+ return false;
+ }
+
+ if (!deadCode_) {
+ sync(); // Simplifies branching out from block
+ }
+
+ initControl(controlItem(), params);
+ bceSafe_ = 0;
+
+ if (!deadCode_) {
+ // Loop entry is a control join, so shuffle the entry parameters into the
+ // well-known locations.
+ if (!topBlockParams(params)) {
+ return false;
+ }
+ masm.nopAlign(CodeAlignment);
+ masm.bind(&controlItem(0).label);
+ // The interrupt check barfs if there are live registers.
+ sync();
+ if (!addInterruptCheck()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// The bodies of the "then" and "else" arms can be arbitrary sequences
+// of expressions; they push control, increment the nesting, and can
+// even be targeted by jumps. A branch to the "if" block branches to
+// the exit of the if, i.e., it's like "break". Consider:
+//
+// (func (result i32)
+// (if (i32.const 1)
+// (begin (br 1) (unreachable))
+// (begin (unreachable)))
+// (i32.const 1))
+//
+// The branch causes neither of the unreachable expressions to be
+// evaluated.
+
+bool BaseCompiler::emitIf() {
+ ResultType params;
+ Nothing unused_cond;
+ if (!iter_.readIf(&params, &unused_cond)) {
+ return false;
+ }
+
+ BranchState b(&controlItem().otherLabel, InvertBranch(true));
+ if (!deadCode_) {
+ needResultRegisters(params);
+ emitBranchSetup(&b);
+ freeResultRegisters(params);
+ sync();
+ } else {
+ resetLatentOp();
+ }
+
+ initControl(controlItem(), params);
+
+ if (!deadCode_) {
+ // Because params can flow immediately to results in the case of an empty
+ // "then" or "else" block, and the result of an if/then is a join in
+ // general, we shuffle params eagerly to the result allocations.
+ if (!topBlockParams(params)) {
+ return false;
+ }
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::endIfThen(ResultType type) {
+ Control& ifThen = controlItem();
+
+ // The parameters to the "if" logically flow to both the "then" and "else"
+ // blocks, but the "else" block is empty. Since we know that the "if"
+ // type-checks, that means that the "else" parameters are the "else" results,
+ // and that the "if"'s result type is the same as its parameter type.
+
+ if (deadCode_) {
+ // "then" arm does not fall through; reset stack.
+ fr.resetStackHeight(ifThen.stackHeight, type);
+ popValueStackTo(ifThen.stackSize);
+ if (!ifThen.deadOnArrival) {
+ captureResultRegisters(type);
+ }
+ } else {
+ MOZ_ASSERT(stk_.length() == ifThen.stackSize + type.length());
+ // Assume we have a control join, so place results in block result
+ // allocations.
+ popBlockResults(type, ifThen.stackHeight, ContinuationKind::Fallthrough);
+ MOZ_ASSERT(!ifThen.deadOnArrival);
+ }
+
+ if (ifThen.otherLabel.used()) {
+ masm.bind(&ifThen.otherLabel);
+ }
+
+ if (ifThen.label.used()) {
+ masm.bind(&ifThen.label);
+ }
+
+ if (!deadCode_) {
+ ifThen.bceSafeOnExit &= bceSafe_;
+ }
+
+ deadCode_ = ifThen.deadOnArrival;
+ if (!deadCode_) {
+ if (!pushBlockResults(type)) {
+ return false;
+ }
+ }
+
+ bceSafe_ = ifThen.bceSafeOnExit & ifThen.bceSafeOnEntry;
+
+ return true;
+}
+
+bool BaseCompiler::emitElse() {
+ ResultType params, results;
+ NothingVector unused_thenValues;
+
+ if (!iter_.readElse(&params, &results, &unused_thenValues)) {
+ return false;
+ }
+
+ Control& ifThenElse = controlItem(0);
+
+ // See comment in endIfThenElse, below.
+
+ // Exit the "then" branch.
+
+ ifThenElse.deadThenBranch = deadCode_;
+
+ if (deadCode_) {
+ fr.resetStackHeight(ifThenElse.stackHeight, results);
+ popValueStackTo(ifThenElse.stackSize);
+ } else {
+ MOZ_ASSERT(stk_.length() == ifThenElse.stackSize + results.length());
+ popBlockResults(results, ifThenElse.stackHeight, ContinuationKind::Jump);
+ freeResultRegisters(results);
+ MOZ_ASSERT(!ifThenElse.deadOnArrival);
+ }
+
+ if (!deadCode_) {
+ masm.jump(&ifThenElse.label);
+ }
+
+ if (ifThenElse.otherLabel.used()) {
+ masm.bind(&ifThenElse.otherLabel);
+ }
+
+ // Reset to the "else" branch.
+
+ if (!deadCode_) {
+ ifThenElse.bceSafeOnExit &= bceSafe_;
+ }
+
+ deadCode_ = ifThenElse.deadOnArrival;
+ bceSafe_ = ifThenElse.bceSafeOnEntry;
+
+ fr.resetStackHeight(ifThenElse.stackHeight, params);
+
+ if (!deadCode_) {
+ captureResultRegisters(params);
+ if (!pushBlockResults(params)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::endIfThenElse(ResultType type) {
+ Control& ifThenElse = controlItem();
+
+ // The expression type is not a reliable guide to what we'll find
+ // on the stack; we could have (if E (i32.const 1) (unreachable)),
+ // in which case the "else" arm is AnyType but the type of the
+ // full expression is I32. So restore whatever's there, not what
+ // we want to find there. The "then" arm has the same constraint.
+
+ if (deadCode_) {
+ // "then" arm does not fall through; reset stack.
+ fr.resetStackHeight(ifThenElse.stackHeight, type);
+ popValueStackTo(ifThenElse.stackSize);
+ } else {
+ MOZ_ASSERT(stk_.length() == ifThenElse.stackSize + type.length());
+ // Assume we have a control join, so place results in block result
+ // allocations.
+ popBlockResults(type, ifThenElse.stackHeight,
+ ContinuationKind::Fallthrough);
+ ifThenElse.bceSafeOnExit &= bceSafe_;
+ MOZ_ASSERT(!ifThenElse.deadOnArrival);
+ }
+
+ if (ifThenElse.label.used()) {
+ masm.bind(&ifThenElse.label);
+ }
+
+ bool joinLive =
+ !ifThenElse.deadOnArrival &&
+ (!ifThenElse.deadThenBranch || !deadCode_ || ifThenElse.label.bound());
+
+ if (joinLive) {
+ // No values were provided by the "then" path, but capture the values
+ // provided by the "else" path.
+ if (deadCode_) {
+ captureResultRegisters(type);
+ }
+ deadCode_ = false;
+ }
+
+ bceSafe_ = ifThenElse.bceSafeOnExit;
+
+ if (!deadCode_) {
+ if (!pushBlockResults(type)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitEnd() {
+ LabelKind kind;
+ ResultType type;
+ NothingVector unused_values;
+ if (!iter_.readEnd(&kind, &type, &unused_values, &unused_values)) {
+ return false;
+ }
+
+ switch (kind) {
+ case LabelKind::Body:
+ if (!endBlock(type)) {
+ return false;
+ }
+ doReturn(ContinuationKind::Fallthrough);
+ iter_.popEnd();
+ MOZ_ASSERT(iter_.controlStackEmpty());
+ return iter_.readFunctionEnd(iter_.end());
+ case LabelKind::Block:
+ if (!endBlock(type)) {
+ return false;
+ }
+ break;
+ case LabelKind::Loop:
+ // The end of a loop isn't a branch target, so we can just leave its
+ // results on the expression stack to be consumed by the outer block.
+ break;
+ case LabelKind::Then:
+ if (!endIfThen(type)) {
+ return false;
+ }
+ break;
+ case LabelKind::Else:
+ if (!endIfThenElse(type)) {
+ return false;
+ }
+ break;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case LabelKind::Try:
+ MOZ_CRASH("NYI");
+ break;
+ case LabelKind::Catch:
+ MOZ_CRASH("NYI");
+ break;
+#endif
+ }
+
+ iter_.popEnd();
+
+ return true;
+}
+
+bool BaseCompiler::emitBr() {
+ uint32_t relativeDepth;
+ ResultType type;
+ NothingVector unused_values;
+ if (!iter_.readBr(&relativeDepth, &type, &unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ Control& target = controlItem(relativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ // Save any values in the designated join registers, as if the target block
+ // returned normally.
+
+ popBlockResults(type, target.stackHeight, ContinuationKind::Jump);
+ masm.jump(&target.label);
+
+ // The registers holding the join values are free for the remainder of this
+ // block.
+
+ freeResultRegisters(type);
+
+ deadCode_ = true;
+
+ return true;
+}
+
+bool BaseCompiler::emitBrIf() {
+ uint32_t relativeDepth;
+ ResultType type;
+ NothingVector unused_values;
+ Nothing unused_condition;
+ if (!iter_.readBrIf(&relativeDepth, &type, &unused_values,
+ &unused_condition)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ resetLatentOp();
+ return true;
+ }
+
+ Control& target = controlItem(relativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ BranchState b(&target.label, target.stackHeight, InvertBranch(false), type);
+ emitBranchSetup(&b);
+ return emitBranchPerform(&b);
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+bool BaseCompiler::emitBrOnNull() {
+ MOZ_ASSERT(!hasLatentOp());
+
+ uint32_t relativeDepth;
+ ResultType type;
+ NothingVector unused_values;
+ Nothing unused_condition;
+ if (!iter_.readBrOnNull(&relativeDepth, &type, &unused_values,
+ &unused_condition)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ Control& target = controlItem(relativeDepth);
+ target.bceSafeOnExit &= bceSafe_;
+
+ BranchState b(&target.label, target.stackHeight, InvertBranch(false), type);
+ if (b.hasBlockResults()) {
+ needResultRegisters(b.resultType);
+ }
+ RegPtr rp = popRef();
+ if (b.hasBlockResults()) {
+ freeResultRegisters(b.resultType);
+ }
+ if (!jumpConditionalWithResults(&b, Assembler::Equal, rp,
+ ImmWord(NULLREF_VALUE))) {
+ return false;
+ }
+ pushRef(rp);
+
+ return true;
+}
+#endif
+
+bool BaseCompiler::emitBrTable() {
+ Uint32Vector depths;
+ uint32_t defaultDepth;
+ ResultType branchParams;
+ NothingVector unused_values;
+ Nothing unused_index;
+  // N.B., `branchParams' gets set to the type of the default branch target.
+  // In the presence of subtyping, the different branch targets could have
+  // different types. Here we rely on the assumption that, in the baseline
+  // compiler, the value representations (e.g. Stk value types) of all branch
+  // target types are the same. Notably, this means that all Ref types should
+  // be represented the same.
+ if (!iter_.readBrTable(&depths, &defaultDepth, &branchParams, &unused_values,
+ &unused_index)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Don't use param registers for rc
+ needIntegerResultRegisters(branchParams);
+
+ // Table switch value always on top.
+ RegI32 rc = popI32();
+
+ freeIntegerResultRegisters(branchParams);
+
+ StackHeight resultsBase(0);
+ if (!topBranchParams(branchParams, &resultsBase)) {
+ return false;
+ }
+
+ Label dispatchCode;
+ masm.branch32(Assembler::Below, rc, Imm32(depths.length()), &dispatchCode);
+
+ // This is the out-of-range stub. rc is dead here but we don't need it.
+
+ shuffleStackResultsBeforeBranch(
+ resultsBase, controlItem(defaultDepth).stackHeight, branchParams);
+ controlItem(defaultDepth).bceSafeOnExit &= bceSafe_;
+ masm.jump(&controlItem(defaultDepth).label);
+
+ // Emit stubs. rc is dead in all of these but we don't need it.
+ //
+ // The labels in the vector are in the TempAllocator and will
+ // be freed by and by.
+ //
+ // TODO / OPTIMIZE (Bug 1316804): Branch directly to the case code if we
+ // can, don't emit an intermediate stub.
+
+ LabelVector stubs;
+ if (!stubs.reserve(depths.length())) {
+ return false;
+ }
+
+ for (uint32_t depth : depths) {
+ stubs.infallibleEmplaceBack(NonAssertingLabel());
+ masm.bind(&stubs.back());
+ shuffleStackResultsBeforeBranch(resultsBase, controlItem(depth).stackHeight,
+ branchParams);
+ controlItem(depth).bceSafeOnExit &= bceSafe_;
+ masm.jump(&controlItem(depth).label);
+ }
+
+ // Emit table.
+
+ Label theTable;
+ jumpTable(stubs, &theTable);
+
+ // Emit indirect jump. rc is live here.
+
+ tableSwitch(&theTable, rc, &dispatchCode);
+
+ deadCode_ = true;
+
+ // Clean up.
+
+ freeI32(rc);
+ popValueStackBy(branchParams.length());
+
+ return true;
+}
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+bool BaseCompiler::emitTry() {
+ ResultType params;
+ if (!iter_.readTry(&params)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MOZ_CRASH("NYI");
+}
+
+bool BaseCompiler::emitCatch() {
+ LabelKind kind;
+ uint32_t eventIndex;
+ ResultType paramType, resultType;
+ NothingVector unused_tryValues;
+
+ if (!iter_.readCatch(&kind, &eventIndex, &paramType, &resultType,
+ &unused_tryValues)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MOZ_CRASH("NYI");
+}
+
+bool BaseCompiler::emitThrow() {
+ uint32_t exnIndex;
+ NothingVector unused_argValues;
+
+ if (!iter_.readThrow(&exnIndex, &unused_argValues)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MOZ_CRASH("NYI");
+}
+#endif
+
+bool BaseCompiler::emitDrop() {
+ if (!iter_.readDrop()) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ dropValue();
+ return true;
+}
+
+void BaseCompiler::doReturn(ContinuationKind kind) {
+ if (deadCode_) {
+ return;
+ }
+
+ StackHeight height = controlOutermost().stackHeight;
+ ResultType type = ResultType::Vector(funcType().results());
+ popBlockResults(type, height, kind);
+ masm.jump(&returnLabel_);
+ freeResultRegisters(type);
+}
+
+bool BaseCompiler::emitReturn() {
+ NothingVector unused_values;
+ if (!iter_.readReturn(&unused_values)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ doReturn(ContinuationKind::Jump);
+ deadCode_ = true;
+
+ return true;
+}
+
+bool BaseCompiler::emitCallArgs(const ValTypeVector& argTypes,
+ const StackResultsLoc& results,
+ FunctionCall* baselineCall,
+ CalleeOnStack calleeOnStack) {
+ MOZ_ASSERT(!deadCode_);
+
+ ArgTypeVector args(argTypes, results.stackResults());
+ uint32_t naturalArgCount = argTypes.length();
+ uint32_t abiArgCount = args.lengthWithStackResults();
+ startCallArgs(StackArgAreaSizeUnaligned(args), baselineCall);
+
+ // Args are deeper on the stack than the stack result area, if any.
+ size_t argsDepth = results.count();
+ // They're deeper than the callee too, for callIndirect.
+ if (calleeOnStack == CalleeOnStack::True) {
+ argsDepth++;
+ }
+
+ for (size_t i = 0; i < abiArgCount; ++i) {
+ if (args.isNaturalArg(i)) {
+ size_t naturalIndex = args.naturalIndex(i);
+ size_t stackIndex = naturalArgCount - 1 - naturalIndex + argsDepth;
+ passArg(argTypes[naturalIndex], peek(stackIndex), baselineCall);
+ } else {
+ // The synthetic stack result area pointer.
+ ABIArg argLoc = baselineCall->abi.next(MIRType::Pointer);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchPtr scratch(*this);
+ fr.computeOutgoingStackResultAreaPtr(results, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ argLoc.offsetFromArgBase()));
+ } else {
+ fr.computeOutgoingStackResultAreaPtr(results, RegPtr(argLoc.gpr()));
+ }
+ }
+ }
+
+ fr.loadTlsPtr(WasmTlsReg);
+ return true;
+}
+
+void BaseCompiler::pushReturnValueOfCall(const FunctionCall& call,
+ MIRType type) {
+ switch (type) {
+ case MIRType::Int32: {
+ RegI32 rv = captureReturnedI32();
+ pushI32(rv);
+ break;
+ }
+ case MIRType::Int64: {
+ RegI64 rv = captureReturnedI64();
+ pushI64(rv);
+ break;
+ }
+ case MIRType::Float32: {
+ RegF32 rv = captureReturnedF32(call);
+ pushF32(rv);
+ break;
+ }
+ case MIRType::Double: {
+ RegF64 rv = captureReturnedF64(call);
+ pushF64(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128: {
+ RegV128 rv = captureReturnedV128(call);
+ pushV128(rv);
+ break;
+ }
+#endif
+ case MIRType::RefOrNull: {
+ RegPtr rv = captureReturnedRef();
+ pushRef(rv);
+ break;
+ }
+ default:
+ // In particular, passing |type| as MIRType::Void or MIRType::Pointer to
+ // this function is an error.
+ MOZ_CRASH("Function return type");
+ }
+}
+
+bool BaseCompiler::pushStackResultsForCall(const ResultType& type, RegPtr temp,
+ StackResultsLoc* loc) {
+ if (!ABIResultIter::HasStackResults(type)) {
+ return true;
+ }
+
+ // This method is the only one in the class that can increase stk_.length() by
+ // an unbounded amount, so it's the only one that requires an allocation.
+ // (The general case is handled in emitBody.)
+ if (!stk_.reserve(stk_.length() + type.length())) {
+ return false;
+ }
+
+ // Measure stack results.
+ ABIResultIter i(type);
+ size_t count = 0;
+ for (; !i.done(); i.next()) {
+ if (i.cur().onStack()) {
+ count++;
+ }
+ }
+ uint32_t bytes = i.stackBytesConsumedSoFar();
+
+ // Reserve space for the stack results.
+ StackHeight resultsBase = fr.stackHeight();
+ uint32_t height = fr.prepareStackResultArea(resultsBase, bytes);
+
+ // Push Stk values onto the value stack, and zero out Ref values.
+ for (i.switchToPrev(); !i.done(); i.prev()) {
+ const ABIResult& result = i.cur();
+ if (result.onStack()) {
+ Stk v = captureStackResult(result, resultsBase, bytes);
+ push(v);
+ if (v.kind() == Stk::MemRef) {
+ stackMapGenerator_.memRefsOnStk++;
+ fr.storeImmediatePtrToStack(intptr_t(0), v.offs(), temp);
+ }
+ }
+ }
+
+ *loc = StackResultsLoc(bytes, count, height);
+
+ return true;
+}
+
+// After a call, some results may be written to the stack result locations that
+// are pushed on the machine stack after any stack args. If there are stack
+// args and stack results, these results need to be shuffled down, as the args
+// are "consumed" by the call.
+void BaseCompiler::popStackResultsAfterCall(const StackResultsLoc& results,
+ uint32_t stackArgBytes) {
+ if (results.bytes() != 0) {
+ popValueStackBy(results.count());
+ if (stackArgBytes != 0) {
+ uint32_t srcHeight = results.height();
+ MOZ_ASSERT(srcHeight >= stackArgBytes + results.bytes());
+ uint32_t destHeight = srcHeight - stackArgBytes;
+
+ fr.shuffleStackResultsTowardFP(srcHeight, destHeight, results.bytes(),
+ ABINonArgReturnVolatileReg);
+ }
+ }
+}
+
+// For now, always sync() at the beginning of the call to easily save live
+// values.
+//
+// TODO / OPTIMIZE (Bug 1316806): We may be able to avoid a full sync(), since
+// all we want is to save live registers that won't be saved by the callee or
+// that we need for outgoing args - we don't need to sync the locals. We can
+// just push the necessary registers, it'll be like a lightweight sync.
+//
+// Even some of the pushing may be unnecessary if the registers will be consumed
+// by the call, because then what we want is parallel assignment to the argument
+// registers or onto the stack for outgoing arguments. A sync() is just
+// simpler.
+
+bool BaseCompiler::emitCall() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t funcIndex;
+ NothingVector args_;
+ if (!iter_.readCall(&funcIndex, &args_)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ sync();
+
+ const FuncType& funcType = *moduleEnv_.funcs[funcIndex].type;
+ bool import = moduleEnv_.funcIsImport(funcIndex);
+
+ uint32_t numArgs = funcType.args().length();
+ size_t stackArgBytes = stackConsumed(numArgs);
+
+ ResultType resultType(ResultType::Vector(funcType.results()));
+ StackResultsLoc results;
+ if (!pushStackResultsForCall(resultType, RegPtr(ABINonArgReg0), &results)) {
+ return false;
+ }
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::Wasm,
+ import ? InterModule::True : InterModule::False);
+
+ if (!emitCallArgs(funcType.args(), results, &baselineCall,
+ CalleeOnStack::False)) {
+ return false;
+ }
+
+ CodeOffset raOffset;
+ if (import) {
+ raOffset = callImport(moduleEnv_.funcImportGlobalDataOffsets[funcIndex],
+ baselineCall);
+ } else {
+ raOffset = callDefinition(funcIndex, baselineCall);
+ }
+
+ if (!createStackMap("emitCall", raOffset)) {
+ return false;
+ }
+
+ popStackResultsAfterCall(results, stackArgBytes);
+
+ endCall(baselineCall, stackArgBytes);
+
+ popValueStackBy(numArgs);
+
+ captureCallResultRegisters(resultType);
+ return pushCallResults(baselineCall, resultType, results);
+}
+
+bool BaseCompiler::emitCallIndirect() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t funcTypeIndex;
+ uint32_t tableIndex;
+ Nothing callee_;
+ NothingVector args_;
+ if (!iter_.readCallIndirect(&funcTypeIndex, &tableIndex, &callee_, &args_)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ sync();
+
+ const FuncType& funcType = moduleEnv_.types[funcTypeIndex].funcType();
+
+ // Stack: ... arg1 .. argn callee
+
+ uint32_t numArgs = funcType.args().length() + 1;
+ size_t stackArgBytes = stackConsumed(numArgs);
+
+ ResultType resultType(ResultType::Vector(funcType.results()));
+ StackResultsLoc results;
+ if (!pushStackResultsForCall(resultType, RegPtr(ABINonArgReg0), &results)) {
+ return false;
+ }
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::Wasm, InterModule::True);
+
+ if (!emitCallArgs(funcType.args(), results, &baselineCall,
+ CalleeOnStack::True)) {
+ return false;
+ }
+
+ const Stk& callee = peek(results.count());
+ CodeOffset raOffset =
+ callIndirect(funcTypeIndex, tableIndex, callee, baselineCall);
+ if (!createStackMap("emitCallIndirect", raOffset)) {
+ return false;
+ }
+
+ popStackResultsAfterCall(results, stackArgBytes);
+
+ endCall(baselineCall, stackArgBytes);
+
+ popValueStackBy(numArgs);
+
+ captureCallResultRegisters(resultType);
+ return pushCallResults(baselineCall, resultType, results);
+}
+
+void BaseCompiler::emitRound(RoundingMode roundingMode, ValType operandType) {
+ if (operandType == ValType::F32) {
+ RegF32 f0 = popF32();
+ roundF32(roundingMode, f0);
+ pushF32(f0);
+ } else if (operandType == ValType::F64) {
+ RegF64 f0 = popF64();
+ roundF64(roundingMode, f0);
+ pushF64(f0);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+bool BaseCompiler::emitUnaryMathBuiltinCall(SymbolicAddress callee,
+ ValType operandType) {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ Nothing operand_;
+ if (!iter_.readUnary(operandType, &operand_)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RoundingMode roundingMode;
+ if (IsRoundingFunction(callee, &roundingMode) &&
+ supportsRoundInstruction(roundingMode)) {
+ emitRound(roundingMode, operandType);
+ return true;
+ }
+
+ sync();
+
+ ValTypeVector& signature = operandType == ValType::F32 ? SigF_ : SigD_;
+ ValType retType = operandType;
+ uint32_t numArgs = signature.length();
+ size_t stackSpace = stackConsumed(numArgs);
+ StackResultsLoc noStackResults;
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::Builtin, InterModule::False);
+
+ if (!emitCallArgs(signature, noStackResults, &baselineCall,
+ CalleeOnStack::False)) {
+ return false;
+ }
+
+ CodeOffset raOffset = builtinCall(callee, baselineCall);
+ if (!createStackMap("emitUnaryMathBuiltin[..]", raOffset)) {
+ return false;
+ }
+
+ endCall(baselineCall, stackSpace);
+
+ popValueStackBy(numArgs);
+
+ pushReturnValueOfCall(baselineCall, ToMIRType(retType));
+
+ return true;
+}
+
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+bool BaseCompiler::emitDivOrModI64BuiltinCall(SymbolicAddress callee,
+ ValType operandType) {
+ MOZ_ASSERT(operandType == ValType::I64);
+ MOZ_ASSERT(!deadCode_);
+
+ sync();
+
+ needI64(specific_.abiReturnRegI64);
+
+ RegI64 rhs = popI64();
+ RegI64 srcDest = popI64ToSpecific(specific_.abiReturnRegI64);
+
+ Label done;
+
+ checkDivideByZeroI64(rhs);
+
+ if (callee == SymbolicAddress::DivI64) {
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(false));
+ } else if (callee == SymbolicAddress::ModI64) {
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(true));
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(srcDest.high);
+ masm.passABIArg(srcDest.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+ CodeOffset raOffset = masm.callWithABI(bytecodeOffset(), callee,
+ mozilla::Some(fr.getTlsPtrOffset()));
+ if (!createStackMap("emitDivOrModI64Bui[..]", raOffset)) {
+ return false;
+ }
+
+ masm.bind(&done);
+
+ freeI64(rhs);
+ pushI64(srcDest);
+ return true;
+}
+#endif // RABALDR_INT_DIV_I64_CALLOUT
+
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+bool BaseCompiler::emitConvertInt64ToFloatingCallout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType) {
+ sync();
+
+ RegI64 input = popI64();
+
+ FunctionCall call(0);
+
+ masm.setupWasmABICall();
+# ifdef JS_PUNBOX64
+ MOZ_CRASH("BaseCompiler platform hook: emitConvertInt64ToFloatingCallout");
+# else
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+# endif
+ CodeOffset raOffset = masm.callWithABI(
+ bytecodeOffset(), callee, mozilla::Some(fr.getTlsPtrOffset()),
+ resultType == ValType::F32 ? MoveOp::FLOAT32 : MoveOp::DOUBLE);
+ if (!createStackMap("emitConvertInt64To[..]", raOffset)) {
+ return false;
+ }
+
+ freeI64(input);
+
+ if (resultType == ValType::F32) {
+ pushF32(captureReturnedF32(call));
+ } else {
+ pushF64(captureReturnedF64(call));
+ }
+
+ return true;
+}
+#endif // RABALDR_I64_TO_FLOAT_CALLOUT
+
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+// `Callee` always takes a double, so a float32 input must be converted.
+bool BaseCompiler::emitConvertFloatingToInt64Callout(SymbolicAddress callee,
+ ValType operandType,
+ ValType resultType) {
+ RegF64 doubleInput;
+ if (operandType == ValType::F32) {
+ doubleInput = needF64();
+ RegF32 input = popF32();
+ masm.convertFloat32ToDouble(input, doubleInput);
+ freeF32(input);
+ } else {
+ doubleInput = popF64();
+ }
+
+ // We may need the value after the call for the ool check.
+ RegF64 otherReg = needF64();
+ moveF64(doubleInput, otherReg);
+ pushF64(otherReg);
+
+ sync();
+
+ FunctionCall call(0);
+
+ masm.setupWasmABICall();
+ masm.passABIArg(doubleInput, MoveOp::DOUBLE);
+ CodeOffset raOffset = masm.callWithABI(bytecodeOffset(), callee,
+ mozilla::Some(fr.getTlsPtrOffset()));
+ if (!createStackMap("emitConvertFloatin[..]", raOffset)) {
+ return false;
+ }
+
+ freeF64(doubleInput);
+
+ RegI64 rv = captureReturnedI64();
+
+ RegF64 inputVal = popF64();
+
+ TruncFlags flags = 0;
+ if (callee == SymbolicAddress::TruncateDoubleToUint64) {
+ flags |= TRUNC_UNSIGNED;
+ }
+ if (callee == SymbolicAddress::SaturatingTruncateDoubleToInt64 ||
+ callee == SymbolicAddress::SaturatingTruncateDoubleToUint64) {
+ flags |= TRUNC_SATURATING;
+ }
+
+  // If we're saturating, the callout will always produce the final result
+  // value. Otherwise, the callout returns 0x8000000000000000 to signal
+  // failure, but that bit pattern is also a legitimate result, so the
+  // out-of-line check re-examines the saved input and traps only if the
+  // conversion really was invalid.
+ OutOfLineCode* ool = nullptr;
+ if (!(flags & TRUNC_SATURATING)) {
+ // The OOL check just succeeds or fails, it does not generate a value.
+ ool = addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(
+ AnyReg(inputVal), rv, flags, bytecodeOffset()));
+ if (!ool) {
+ return false;
+ }
+
+ masm.branch64(Assembler::Equal, rv, Imm64(0x8000000000000000),
+ ool->entry());
+ masm.bind(ool->rejoin());
+ }
+
+ pushI64(rv);
+ freeF64(inputVal);
+
+ return true;
+}
+#endif // RABALDR_FLOAT_TO_I64_CALLOUT
+
+bool BaseCompiler::emitGetLocal() {
+ uint32_t slot;
+ if (!iter_.readGetLocal(locals_, &slot)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+  // Local loads are pushed unresolved, i.e., they may be deferred until
+  // needed, until a store might affect them, or until a sync. This is
+  // intended to reduce register pressure.
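+  //
+  // For example (illustrative), in a sequence such as
+  //   (local.get 0) (local.get 0) (i32.add)
+  // neither get needs a register until the add pops its operands.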
+
+ switch (locals_[slot].kind()) {
+ case ValType::I32:
+ pushLocalI32(slot);
+ break;
+ case ValType::I64:
+ pushLocalI64(slot);
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ pushLocalV128(slot);
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ case ValType::F64:
+ pushLocalF64(slot);
+ break;
+ case ValType::F32:
+ pushLocalF32(slot);
+ break;
+ case ValType::Ref:
+ pushLocalRef(slot);
+ break;
+ }
+
+ return true;
+}
+
+template <bool isSetLocal>
+bool BaseCompiler::emitSetOrTeeLocal(uint32_t slot) {
+ if (deadCode_) {
+ return true;
+ }
+
+ bceLocalIsUpdated(slot);
+ switch (locals_[slot].kind()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ syncLocal(slot);
+ fr.storeLocalI32(rv, localFromSlot(slot, MIRType::Int32));
+ if (isSetLocal) {
+ freeI32(rv);
+ } else {
+ pushI32(rv);
+ }
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ syncLocal(slot);
+ fr.storeLocalI64(rv, localFromSlot(slot, MIRType::Int64));
+ if (isSetLocal) {
+ freeI64(rv);
+ } else {
+ pushI64(rv);
+ }
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ syncLocal(slot);
+ fr.storeLocalF64(rv, localFromSlot(slot, MIRType::Double));
+ if (isSetLocal) {
+ freeF64(rv);
+ } else {
+ pushF64(rv);
+ }
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ syncLocal(slot);
+ fr.storeLocalF32(rv, localFromSlot(slot, MIRType::Float32));
+ if (isSetLocal) {
+ freeF32(rv);
+ } else {
+ pushF32(rv);
+ }
+ break;
+ }
+ case ValType::V128: {
+#ifdef ENABLE_WASM_SIMD
+ RegV128 rv = popV128();
+ syncLocal(slot);
+ fr.storeLocalV128(rv, localFromSlot(slot, MIRType::Simd128));
+ if (isSetLocal) {
+ freeV128(rv);
+ } else {
+ pushV128(rv);
+ }
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ }
+ case ValType::Ref: {
+ RegPtr rv = popRef();
+ syncLocal(slot);
+ fr.storeLocalPtr(rv, localFromSlot(slot, MIRType::RefOrNull));
+ if (isSetLocal) {
+ freeRef(rv);
+ } else {
+ pushRef(rv);
+ }
+ break;
+ }
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitSetLocal() {
+ uint32_t slot;
+ Nothing unused_value;
+ if (!iter_.readSetLocal(locals_, &slot, &unused_value)) {
+ return false;
+ }
+ return emitSetOrTeeLocal<true>(slot);
+}
+
+bool BaseCompiler::emitTeeLocal() {
+ uint32_t slot;
+ Nothing unused_value;
+ if (!iter_.readTeeLocal(locals_, &slot, &unused_value)) {
+ return false;
+ }
+ return emitSetOrTeeLocal<false>(slot);
+}
+
+bool BaseCompiler::emitGetGlobal() {
+ uint32_t id;
+ if (!iter_.readGetGlobal(&id)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const GlobalDesc& global = moduleEnv_.globals[id];
+
+ if (global.isConstant()) {
+ LitVal value = global.constantValue();
+ switch (value.type().kind()) {
+ case ValType::I32:
+ pushI32(value.i32());
+ break;
+ case ValType::I64:
+ pushI64(value.i64());
+ break;
+ case ValType::F32:
+ pushF32(value.f32());
+ break;
+ case ValType::F64:
+ pushF64(value.f64());
+ break;
+ case ValType::Ref:
+ pushRef(intptr_t(value.ref().forCompiledCode()));
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+ pushV128(value.v128());
+ break;
+#endif
+ default:
+ MOZ_CRASH("Global constant type");
+ }
+ return true;
+ }
+
+ switch (global.type().kind()) {
+ case ValType::I32: {
+ RegI32 rv = needI32();
+ ScratchI32 tmp(*this);
+ masm.load32(addressOfGlobalVar(global, tmp), rv);
+ pushI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = needI64();
+ ScratchI32 tmp(*this);
+ masm.load64(addressOfGlobalVar(global, tmp), rv);
+ pushI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = needF32();
+ ScratchI32 tmp(*this);
+ masm.loadFloat32(addressOfGlobalVar(global, tmp), rv);
+ pushF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = needF64();
+ ScratchI32 tmp(*this);
+ masm.loadDouble(addressOfGlobalVar(global, tmp), rv);
+ pushF64(rv);
+ break;
+ }
+ case ValType::Ref: {
+ RegPtr rv = needRef();
+ ScratchI32 tmp(*this);
+ masm.loadPtr(addressOfGlobalVar(global, tmp), rv);
+ pushRef(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 rv = needV128();
+ ScratchI32 tmp(*this);
+ masm.loadUnalignedSimd128(addressOfGlobalVar(global, tmp), rv);
+ pushV128(rv);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ return true;
+}
+
+bool BaseCompiler::emitSetGlobal() {
+ uint32_t id;
+ Nothing unused_value;
+ if (!iter_.readSetGlobal(&id, &unused_value)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const GlobalDesc& global = moduleEnv_.globals[id];
+
+ switch (global.type().kind()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ ScratchI32 tmp(*this);
+ masm.store32(rv, addressOfGlobalVar(global, tmp));
+ freeI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ ScratchI32 tmp(*this);
+ masm.store64(rv, addressOfGlobalVar(global, tmp));
+ freeI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ ScratchI32 tmp(*this);
+ masm.storeFloat32(rv, addressOfGlobalVar(global, tmp));
+ freeF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ ScratchI32 tmp(*this);
+ masm.storeDouble(rv, addressOfGlobalVar(global, tmp));
+ freeF64(rv);
+ break;
+ }
+ case ValType::Ref: {
+ RegPtr valueAddr(PreBarrierReg);
+ needRef(valueAddr);
+ {
+ ScratchI32 tmp(*this);
+ masm.computeEffectiveAddress(addressOfGlobalVar(global, tmp),
+ valueAddr);
+ }
+ RegPtr rv = popRef();
+ // emitBarrieredStore consumes valueAddr
+ if (!emitBarrieredStore(Nothing(), valueAddr, rv)) {
+ return false;
+ }
+ freeRef(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 rv = popV128();
+ ScratchI32 tmp(*this);
+ masm.storeUnalignedSimd128(rv, addressOfGlobalVar(global, tmp));
+ freeV128(rv);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ return true;
+}
+
+// Bounds check elimination.
+//
+// We perform BCE on two kinds of address expressions: on constant heap pointers
+// that are known to be in the heap or will be handled by the out-of-bounds trap
+// handler; and on local variables that have been checked in dominating code
+// without being updated since.
+//
+// For an access through a constant heap pointer + an offset we can eliminate
+// the bounds check if the sum of the address and offset is below the sum of the
+// minimum memory length and the offset guard length.
+//
+// For an access through a local variable + an offset we can eliminate the
+// bounds check if the local variable has already been checked and has not been
+// updated since, and the offset is less than the guard limit.
+//
+// To track locals for which we can eliminate checks we use a bit vector
+// bceSafe_ that has a bit set for those locals whose bounds have been checked
+// and which have not subsequently been set. Initially this vector is zero.
+//
+// In straight-line code a bit is set when we perform a bounds check on an
+// access via the local and is reset when the variable is updated.
+//
+// In control flow, the bit vector is manipulated as follows. Each ControlItem
+// has a value bceSafeOnEntry, which is the value of bceSafe_ on entry to the
+// item, and a value bceSafeOnExit, which is initially ~0. On a branch (br,
+// brIf, brTable), we always AND the branch target's bceSafeOnExit with the
+// value of bceSafe_ at the branch point. On exiting an item by falling out of
+// it, provided we're not in dead code, we AND the current value of bceSafe_
+// into the item's bceSafeOnExit. Additional processing depends on the item
+// type:
+//
+// - After a block, set bceSafe_ to the block's bceSafeOnExit.
+//
+//  - On loop entry, after pushing the ControlItem, set bceSafe_ to zero; the
+//    back edges would otherwise require us to iterate to a fixed point.
+//
+// - After a loop, the bceSafe_ is left unchanged, because only fallthrough
+// control flow will reach that point and the bceSafe_ value represents the
+// correct state of the fallthrough path.
+//
+// - Set bceSafe_ to the ControlItem's bceSafeOnEntry at both the 'then' branch
+// and the 'else' branch.
+//
+// - After an if-then-else, set bceSafe_ to the if-then-else's bceSafeOnExit.
+//
+// - After an if-then, set bceSafe_ to the if-then's bceSafeOnExit AND'ed with
+// the if-then's bceSafeOnEntry.
+//
+// Finally, when the debugger allows locals to be mutated we must disable BCE
+// for memory references made via a local, by returning immediately from
+// bceCheckLocal if compilerEnv_.debugEnabled() is true.
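+//
+// As a rough sketch (pseudo-code, not the actual implementation), the BCE
+// bookkeeping described above amounts to:
+//
+//   bounds check performed via local L:  bceSafe_ |= (1 << L)
+//   local L updated (set/tee):           bceSafe_ &= ~(1 << L)
+//   br/br_if/br_table to target T:       T.bceSafeOnExit &= bceSafe_
+//   falling out of item I (not dead):    I.bceSafeOnExit &= bceSafe_
+//   after a block B:                     bceSafe_ = B.bceSafeOnExit
+//   on loop entry:                       bceSafe_ = 0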
+//
+//
+// Alignment check elimination.
+//
+// Alignment checks for atomic operations can be omitted if the pointer is a
+// constant and the pointer + offset is aligned. Alignment checking that can't
+// be omitted can still be simplified by checking only the pointer if the offset
+// is aligned.
+//
+// (In addition, alignment checking of the pointer can be omitted if the pointer
+// has been checked in dominating code, but we don't do that yet.)
+
+// TODO / OPTIMIZE (bug 1329576): There are opportunities to generate better
+// code by not moving a constant address with a zero offset into a register.
+
+RegI32 BaseCompiler::popMemoryAccess(MemoryAccessDesc* access,
+ AccessCheck* check) {
+ check->onlyPointerAlignment =
+ (access->offset() & (access->byteSize() - 1)) == 0;
+
+ int32_t addrTemp;
+ if (popConstI32(&addrTemp)) {
+ uint32_t addr = addrTemp;
+
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ uint64_t ea = uint64_t(addr) + uint64_t(access->offset());
+ uint64_t limit = moduleEnv_.minMemoryLength + offsetGuardLimit;
+
+ check->omitBoundsCheck = ea < limit;
+ check->omitAlignmentCheck = (ea & (access->byteSize() - 1)) == 0;
+
+ // Fold the offset into the pointer if we can, as this is always
+ // beneficial.
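+    // For example (illustrative values): addr = 4 and offset = 12 give
+    // ea = 16, which fits in 32 bits, so we use address 16 with offset 0.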
+
+ if (ea <= UINT32_MAX) {
+ addr = uint32_t(ea);
+ access->clearOffset();
+ }
+
+ RegI32 r = needI32();
+ moveImm32(int32_t(addr), r);
+ return r;
+ }
+
+ uint32_t local;
+ if (peekLocalI32(&local)) {
+ bceCheckLocal(access, check, local);
+ }
+
+ return popI32();
+}
+
+void BaseCompiler::pushHeapBase() {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS64)
+ RegI64 heapBase = needI64();
+ moveI64(RegI64(Register64(HeapReg)), heapBase);
+ pushI64(heapBase);
+#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
+ RegI32 heapBase = needI32();
+ moveI32(RegI32(HeapReg), heapBase);
+ pushI32(heapBase);
+#elif defined(JS_CODEGEN_X86)
+ RegI32 heapBase = needI32();
+ fr.loadTlsPtr(heapBase);
+ masm.loadPtr(Address(heapBase, offsetof(TlsData, memoryBase)), heapBase);
+ pushI32(heapBase);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: pushHeapBase");
+#endif
+}
+
+RegI32 BaseCompiler::maybeLoadTlsForAccess(const AccessCheck& check) {
+ RegI32 tls;
+ if (needTlsForAccess(check)) {
+ tls = needI32();
+ fr.loadTlsPtr(tls);
+ }
+ return tls;
+}
+
+RegI32 BaseCompiler::maybeLoadTlsForAccess(const AccessCheck& check,
+ RegI32 specific) {
+ if (needTlsForAccess(check)) {
+ fr.loadTlsPtr(specific);
+ return specific;
+ }
+ return RegI32::Invalid();
+}
+
+bool BaseCompiler::loadCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType type) {
+ RegI32 tls, temp1, temp2, temp3;
+ needLoadTemps(*access, &temp1, &temp2, &temp3);
+
+ switch (type.kind()) {
+ case ValType::I32: {
+ RegI32 rp = popMemoryAccess(access, &check);
+#ifdef JS_CODEGEN_ARM
+ RegI32 rv = IsUnaligned(*access) ? needI32() : rp;
+#else
+ RegI32 rv = rp;
+#endif
+ tls = maybeLoadTlsForAccess(check);
+ if (!load(access, &check, tls, rp, AnyReg(rv), temp1, temp2, temp3)) {
+ return false;
+ }
+ pushI32(rv);
+ if (rp != rv) {
+ freeI32(rp);
+ }
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv;
+ RegI32 rp;
+#ifdef JS_CODEGEN_X86
+ rv = specific_.abiReturnRegI64;
+ needI64(rv);
+ rp = popMemoryAccess(access, &check);
+#else
+ rp = popMemoryAccess(access, &check);
+ rv = needI64();
+#endif
+ tls = maybeLoadTlsForAccess(check);
+ if (!load(access, &check, tls, rp, AnyReg(rv), temp1, temp2, temp3)) {
+ return false;
+ }
+ pushI64(rv);
+ freeI32(rp);
+ break;
+ }
+ case ValType::F32: {
+ RegI32 rp = popMemoryAccess(access, &check);
+ RegF32 rv = needF32();
+ tls = maybeLoadTlsForAccess(check);
+ if (!load(access, &check, tls, rp, AnyReg(rv), temp1, temp2, temp3)) {
+ return false;
+ }
+ pushF32(rv);
+ freeI32(rp);
+ break;
+ }
+ case ValType::F64: {
+ RegI32 rp = popMemoryAccess(access, &check);
+ RegF64 rv = needF64();
+ tls = maybeLoadTlsForAccess(check);
+ if (!load(access, &check, tls, rp, AnyReg(rv), temp1, temp2, temp3)) {
+ return false;
+ }
+ pushF64(rv);
+ freeI32(rp);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegI32 rp = popMemoryAccess(access, &check);
+ RegV128 rv = needV128();
+ tls = maybeLoadTlsForAccess(check);
+ if (!load(access, &check, tls, rp, AnyReg(rv), temp1, temp2, temp3)) {
+ return false;
+ }
+ pushV128(rv);
+ freeI32(rp);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("load type");
+ break;
+ }
+
+ maybeFreeI32(tls);
+ maybeFreeI32(temp1);
+ maybeFreeI32(temp2);
+ maybeFreeI32(temp3);
+
+ return true;
+}
+
+bool BaseCompiler::emitLoad(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoad(type, Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ return loadCommon(&access, AccessCheck(), type);
+}
+
+bool BaseCompiler::storeCommon(MemoryAccessDesc* access, AccessCheck check,
+ ValType resultType) {
+ RegI32 tls;
+ RegI32 temp = needStoreTemp(*access, resultType);
+
+ switch (resultType.kind()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ RegI32 rp = popMemoryAccess(access, &check);
+ tls = maybeLoadTlsForAccess(check);
+ if (!store(access, &check, tls, rp, AnyReg(rv), temp)) {
+ return false;
+ }
+ freeI32(rp);
+ freeI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ RegI32 rp = popMemoryAccess(access, &check);
+ tls = maybeLoadTlsForAccess(check);
+ if (!store(access, &check, tls, rp, AnyReg(rv), temp)) {
+ return false;
+ }
+ freeI32(rp);
+ freeI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ RegI32 rp = popMemoryAccess(access, &check);
+ tls = maybeLoadTlsForAccess(check);
+ if (!store(access, &check, tls, rp, AnyReg(rv), temp)) {
+ return false;
+ }
+ freeI32(rp);
+ freeF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ RegI32 rp = popMemoryAccess(access, &check);
+ tls = maybeLoadTlsForAccess(check);
+ if (!store(access, &check, tls, rp, AnyReg(rv), temp)) {
+ return false;
+ }
+ freeI32(rp);
+ freeF64(rv);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 rv = popV128();
+ RegI32 rp = popMemoryAccess(access, &check);
+ tls = maybeLoadTlsForAccess(check);
+ if (!store(access, &check, tls, rp, AnyReg(rv), temp)) {
+ return false;
+ }
+ freeI32(rp);
+ freeV128(rv);
+ break;
+ }
+#endif
+ default:
+ MOZ_CRASH("store type");
+ break;
+ }
+
+ maybeFreeI32(tls);
+ maybeFreeI32(temp);
+
+ return true;
+}
+
+bool BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr,
+ &unused_value)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ return storeCommon(&access, AccessCheck(), resultType);
+}
+
+bool BaseCompiler::emitSelect(bool typed) {
+ StackType type;
+ Nothing unused_trueValue;
+ Nothing unused_falseValue;
+ Nothing unused_condition;
+ if (!iter_.readSelect(typed, &type, &unused_trueValue, &unused_falseValue,
+ &unused_condition)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ resetLatentOp();
+ return true;
+ }
+
+ // I32 condition on top, then false, then true.
+
+ Label done;
+ BranchState b(&done);
+ emitBranchSetup(&b);
+
+ switch (type.valType().kind()) {
+ case ValType::I32: {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveI32(rs, r);
+ masm.bind(&done);
+ freeI32(rs);
+ pushI32(r);
+ break;
+ }
+ case ValType::I64: {
+#ifdef JS_CODEGEN_X86
+ // There may be as many as four Int64 values in registers at a time: two
+ // for the latent branch operands, and two for the true/false values we
+ // normally pop before executing the branch. On x86 this is one value
+ // too many, so we need to generate more complicated code here, and for
+ // simplicity's sake we do so even if the branch operands are not Int64.
+ // However, the resulting control flow diamond is complicated since the
+ // arms of the diamond will have to stay synchronized with respect to
+ // their evaluation stack and regalloc state. To simplify further, we
+ // use a double branch and a temporary boolean value for now.
+ RegI32 temp = needI32();
+ moveImm32(0, temp);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveImm32(1, temp);
+ masm.bind(&done);
+
+ Label trueValue;
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ masm.branch32(Assembler::Equal, temp, Imm32(0), &trueValue);
+ moveI64(rs, r);
+ masm.bind(&trueValue);
+ freeI32(temp);
+ freeI64(rs);
+ pushI64(r);
+#else
+ RegI64 r, rs;
+ pop2xI64(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveI64(rs, r);
+ masm.bind(&done);
+ freeI64(rs);
+ pushI64(r);
+#endif
+ break;
+ }
+ case ValType::F32: {
+ RegF32 r, rs;
+ pop2xF32(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveF32(rs, r);
+ masm.bind(&done);
+ freeF32(rs);
+ pushF32(r);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 r, rs;
+ pop2xF64(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveF64(rs, r);
+ masm.bind(&done);
+ freeF64(rs);
+ pushF64(r);
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128: {
+ RegV128 r, rs;
+ pop2xV128(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveV128(rs, r);
+ masm.bind(&done);
+ freeV128(rs);
+ pushV128(r);
+ break;
+ }
+#endif
+ case ValType::Ref: {
+ RegPtr r, rs;
+ pop2xRef(&r, &rs);
+ if (!emitBranchPerform(&b)) {
+ return false;
+ }
+ moveRef(rs, r);
+ masm.bind(&done);
+ freeRef(rs);
+ pushRef(r);
+ break;
+ }
+ default: {
+ MOZ_CRASH("select type");
+ }
+ }
+
+ return true;
+}
+
+void BaseCompiler::emitCompareI32(Assembler::Condition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::I32);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ int32_t c;
+ if (popConstI32(&c)) {
+ RegI32 r = popI32();
+ masm.cmp32Set(compareOp, r, Imm32(c), r);
+ pushI32(r);
+ } else {
+ RegI32 r, rs;
+ pop2xI32(&r, &rs);
+ masm.cmp32Set(compareOp, r, rs, r);
+ freeI32(rs);
+ pushI32(r);
+ }
+}
+
+void BaseCompiler::emitCompareI64(Assembler::Condition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::I64);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ RegI64 rs0, rs1;
+ pop2xI64(&rs0, &rs1);
+ RegI32 rd(fromI64(rs0));
+ cmp64Set(compareOp, rs0, rs1, rd);
+ freeI64(rs1);
+ freeI64Except(rs0, rd);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitCompareF32(Assembler::DoubleCondition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::F32);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ Label across;
+ RegF32 rs0, rs1;
+ pop2xF32(&rs0, &rs1);
+ RegI32 rd = needI32();
+ moveImm32(1, rd);
+ masm.branchFloat(compareOp, rs0, rs1, &across);
+ moveImm32(0, rd);
+ masm.bind(&across);
+ freeF32(rs0);
+ freeF32(rs1);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitCompareF64(Assembler::DoubleCondition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(compareType == ValType::F64);
+
+ if (sniffConditionalControlCmp(compareOp, compareType)) {
+ return;
+ }
+
+ Label across;
+ RegF64 rs0, rs1;
+ pop2xF64(&rs0, &rs1);
+ RegI32 rd = needI32();
+ moveImm32(1, rd);
+ masm.branchDouble(compareOp, rs0, rs1, &across);
+ moveImm32(0, rd);
+ masm.bind(&across);
+ freeF64(rs0);
+ freeF64(rs1);
+ pushI32(rd);
+}
+
+void BaseCompiler::emitCompareRef(Assembler::Condition compareOp,
+ ValType compareType) {
+ MOZ_ASSERT(!sniffConditionalControlCmp(compareOp, compareType));
+
+ RegPtr rs1, rs2;
+ pop2xRef(&rs1, &rs2);
+ RegI32 rd = needI32();
+ masm.cmpPtrSet(compareOp, rs1, rs2, rd);
+ freeRef(rs1);
+ freeRef(rs2);
+ pushI32(rd);
+}
+
+bool BaseCompiler::emitInstanceCall(uint32_t lineOrBytecode,
+ const SymbolicAddressSignature& builtin,
+ bool pushReturnedValue /*=true*/) {
+ const MIRType* argTypes = builtin.argTypes;
+ MOZ_ASSERT(argTypes[0] == MIRType::Pointer);
+
+ sync();
+
+ uint32_t numNonInstanceArgs = builtin.numArgs - 1 /* instance */;
+ size_t stackSpace = stackConsumed(numNonInstanceArgs);
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::System, InterModule::True);
+
+ ABIArg instanceArg = reservePointerArgument(&baselineCall);
+
+ startCallArgs(StackArgAreaSizeUnaligned(builtin), &baselineCall);
+ for (uint32_t i = 1; i < builtin.numArgs; i++) {
+ ValType t;
+ switch (argTypes[i]) {
+ case MIRType::Int32:
+ t = ValType::I32;
+ break;
+ case MIRType::Int64:
+ t = ValType::I64;
+ break;
+ case MIRType::RefOrNull:
+ t = RefType::extern_();
+ break;
+ case MIRType::Pointer:
+        // Instance function args can now be uninterpreted pointers (e.g.,
+        // for PostBarrier and PostBarrierFilter), so we simply treat them
+        // like the equivalently sized integer.
+ t = sizeof(void*) == 4 ? ValType::I32 : ValType::I64;
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ passArg(t, peek(numNonInstanceArgs - i), &baselineCall);
+ }
+ CodeOffset raOffset =
+ builtinInstanceMethodCall(builtin, instanceArg, baselineCall);
+ if (!createStackMap("emitInstanceCall", raOffset)) {
+ return false;
+ }
+
+ endCall(baselineCall, stackSpace);
+
+ popValueStackBy(numNonInstanceArgs);
+
+ // Note, many clients of emitInstanceCall currently assume that pushing the
+ // result here does not destroy ReturnReg.
+ //
+ // Furthermore, clients assume that if builtin.retType != MIRType::None, the
+ // callee will have returned a result and left it in ReturnReg for us to
+ // find, and that that register will not be destroyed here (or above).
+
+ if (pushReturnedValue) {
+ // For the return type only, MIRType::None is used to indicate that the
+ // call doesn't return a result, that is, returns a C/C++ "void".
+ MOZ_ASSERT(builtin.retType != MIRType::None);
+ pushReturnValueOfCall(baselineCall, builtin.retType);
+ }
+ return true;
+}
+
+bool BaseCompiler::emitMemoryGrow() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ Nothing arg;
+ if (!iter_.readMemoryGrow(&arg)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ return emitInstanceCall(lineOrBytecode, SASigMemoryGrow);
+}
+
+bool BaseCompiler::emitMemorySize() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ if (!iter_.readMemorySize()) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ return emitInstanceCall(lineOrBytecode, SASigMemorySize);
+}
+
+bool BaseCompiler::emitRefFunc() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+ uint32_t funcIndex;
+ if (!iter_.readRefFunc(&funcIndex)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+
+ pushI32(funcIndex);
+ return emitInstanceCall(lineOrBytecode, SASigRefFunc);
+}
+
+bool BaseCompiler::emitRefNull() {
+ if (!iter_.readRefNull()) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ pushRef(NULLREF_VALUE);
+ return true;
+}
+
+bool BaseCompiler::emitRefIsNull() {
+ Nothing nothing;
+ if (!iter_.readRefIsNull(&nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegPtr r = popRef();
+ RegI32 rd = narrowPtr(r);
+
+ masm.cmpPtrSet(Assembler::Equal, r, ImmWord(NULLREF_VALUE), rd);
+ pushI32(rd);
+ return true;
+}
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+bool BaseCompiler::emitRefAsNonNull() {
+ Nothing nothing;
+ if (!iter_.readRefAsNonNull(&nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegPtr rp = popRef();
+ Label ok;
+ masm.branchTestPtr(Assembler::NonZero, rp, rp, &ok);
+ trap(Trap::NullPointerDereference);
+ masm.bind(&ok);
+ pushRef(rp);
+
+ return true;
+}
+#endif
+
+bool BaseCompiler::emitAtomicCmpXchg(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused;
+
+ if (!iter_.readAtomicCmpXchg(&addr, type, Scalar::byteSize(viewType), &unused,
+ &unused)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Full());
+
+ if (Scalar::byteSize(viewType) <= 4) {
+ PopAtomicCmpXchg32Regs regs(this, type, viewType);
+
+ AccessCheck check;
+ RegI32 rp = popMemoryAccess(&access, &check);
+ RegI32 tls = maybeLoadTlsForAccess(check);
+
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicCmpXchg32(access, memaddr);
+
+ maybeFreeI32(tls);
+ freeI32(rp);
+
+ if (type == ValType::I64) {
+ pushU32AsI64(regs.takeRd());
+ } else {
+ pushI32(regs.takeRd());
+ }
+
+ return true;
+ }
+
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+ PopAtomicCmpXchg64Regs regs(this);
+
+ AccessCheck check;
+ RegI32 rp = popMemoryAccess(&access, &check);
+
+#ifdef JS_CODEGEN_X86
+ ScratchEBX ebx(*this);
+ RegI32 tls = maybeLoadTlsForAccess(check, ebx);
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicCmpXchg64(access, memaddr, ebx);
+#else
+ RegI32 tls = maybeLoadTlsForAccess(check);
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicCmpXchg64(access, memaddr);
+ maybeFreeI32(tls);
+#endif
+
+ freeI32(rp);
+
+ pushI64(regs.takeRd());
+ return true;
+}
+
+bool BaseCompiler::emitAtomicLoad(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readAtomicLoad(&addr, type, Scalar::byteSize(viewType))) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Load());
+
+ if (Scalar::byteSize(viewType) <= sizeof(void*)) {
+ return loadCommon(&access, AccessCheck(), type);
+ }
+
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#if defined(JS_64BIT)
+ MOZ_CRASH("Should not happen");
+#else
+ PopAtomicLoad64Regs regs(this);
+
+ AccessCheck check;
+ RegI32 rp = popMemoryAccess(&access, &check);
+
+# ifdef JS_CODEGEN_X86
+ ScratchEBX ebx(*this);
+ RegI32 tls = maybeLoadTlsForAccess(check, ebx);
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicLoad64(access, memaddr, ebx);
+# else
+ RegI32 tls = maybeLoadTlsForAccess(check);
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicLoad64(access, memaddr);
+ maybeFreeI32(tls);
+# endif
+
+ freeI32(rp);
+
+ pushI64(regs.takeRd());
+ return true;
+#endif // JS_64BIT
+}
+
+bool BaseCompiler::emitAtomicRMW(ValType type, Scalar::Type viewType,
+ AtomicOp op) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType),
+ &unused_value)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Full());
+
+ if (Scalar::byteSize(viewType) <= 4) {
+ PopAtomicRMW32Regs regs(this, type, viewType, op);
+
+ AccessCheck check;
+ RegI32 rp = popMemoryAccess(&access, &check);
+ RegI32 tls = maybeLoadTlsForAccess(check);
+
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicRMW32(access, memaddr, op);
+
+ maybeFreeI32(tls);
+ freeI32(rp);
+
+ if (type == ValType::I64) {
+ pushU32AsI64(regs.takeRd());
+ } else {
+ pushI32(regs.takeRd());
+ }
+ return true;
+ }
+
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+ PopAtomicRMW64Regs regs(this, op);
+
+ AccessCheck check;
+ RegI32 rp = popMemoryAccess(&access, &check);
+
+#ifdef JS_CODEGEN_X86
+ ScratchEBX ebx(*this);
+ RegI32 tls = maybeLoadTlsForAccess(check, ebx);
+
+ fr.pushPtr(regs.valueHigh());
+ fr.pushPtr(regs.valueLow());
+ Address value(esp, 0);
+
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicRMW64(access, memaddr, op, value, ebx);
+
+ fr.popBytes(8);
+#else
+ RegI32 tls = maybeLoadTlsForAccess(check);
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicRMW64(access, memaddr, op);
+ maybeFreeI32(tls);
+#endif
+
+ freeI32(rp);
+
+ pushI64(regs.takeRd());
+ return true;
+}
+
+bool BaseCompiler::emitAtomicStore(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readAtomicStore(&addr, type, Scalar::byteSize(viewType),
+ &unused_value)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Store());
+
+ if (Scalar::byteSize(viewType) <= sizeof(void*)) {
+ return storeCommon(&access, AccessCheck(), type);
+ }
+
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+#ifdef JS_64BIT
+ MOZ_CRASH("Should not happen");
+#else
+ emitAtomicXchg64(&access, WantResult(false));
+ return true;
+#endif
+}
+
+bool BaseCompiler::emitAtomicXchg(ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readAtomicRMW(&addr, type, Scalar::byteSize(viewType),
+ &unused_value)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ AccessCheck check;
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset(),
+ Synchronization::Full());
+
+ if (Scalar::byteSize(viewType) <= 4) {
+ PopAtomicXchg32Regs regs(this, type, viewType);
+ RegI32 rp = popMemoryAccess(&access, &check);
+ RegI32 tls = maybeLoadTlsForAccess(check);
+
+ auto memaddr = prepareAtomicMemoryAccess(&access, &check, tls, rp);
+ regs.atomicXchg32(access, memaddr);
+
+ maybeFreeI32(tls);
+ freeI32(rp);
+
+ if (type == ValType::I64) {
+ pushU32AsI64(regs.takeRd());
+ } else {
+ pushI32(regs.takeRd());
+ }
+ return true;
+ }
+
+ MOZ_ASSERT(type == ValType::I64 && Scalar::byteSize(viewType) == 8);
+
+ emitAtomicXchg64(&access, WantResult(true));
+ return true;
+}
+
+void BaseCompiler::emitAtomicXchg64(MemoryAccessDesc* access,
+ WantResult wantResult) {
+ PopAtomicXchg64Regs regs(this);
+
+ AccessCheck check;
+ RegI32 rp = popMemoryAccess(access, &check);
+
+#ifdef JS_CODEGEN_X86
+ ScratchEBX ebx(*this);
+ RegI32 tls = maybeLoadTlsForAccess(check, ebx);
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, tls, rp);
+ regs.atomicXchg64(*access, memaddr, ebx);
+#else
+ RegI32 tls = maybeLoadTlsForAccess(check);
+ auto memaddr = prepareAtomicMemoryAccess(access, &check, tls, rp);
+ regs.atomicXchg64(*access, memaddr);
+ maybeFreeI32(tls);
+#endif
+
+ freeI32(rp);
+
+ if (wantResult) {
+ pushI64(regs.takeRd());
+ }
+}
+
+bool BaseCompiler::emitWait(ValType type, uint32_t byteSize) {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ Nothing nothing;
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readWait(&addr, type, byteSize, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ switch (type.kind()) {
+ case ValType::I32: {
+ RegI64 timeout = popI64();
+ RegI32 val = popI32();
+
+ MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
+ bytecodeOffset());
+ computeEffectiveAddress(&access);
+
+ pushI32(val);
+ pushI64(timeout);
+
+ if (!emitInstanceCall(lineOrBytecode, SASigWaitI32)) {
+ return false;
+ }
+ break;
+ }
+ case ValType::I64: {
+ RegI64 timeout = popI64();
+ RegI64 val = popI64();
+
+ MemoryAccessDesc access(Scalar::Int64, addr.align, addr.offset,
+ bytecodeOffset());
+ computeEffectiveAddress(&access);
+
+ pushI64(val);
+ pushI64(timeout);
+
+ if (!emitInstanceCall(lineOrBytecode, SASigWaitI64)) {
+ return false;
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH();
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitWake() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ Nothing nothing;
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readWake(&addr, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegI32 count = popI32();
+
+ MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
+ bytecodeOffset());
+ computeEffectiveAddress(&access);
+
+ pushI32(count);
+
+ return emitInstanceCall(lineOrBytecode, SASigWake);
+}
+
+bool BaseCompiler::emitFence() {
+ if (!iter_.readFence()) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+
+ masm.memoryBarrier(MembarFull);
+ return true;
+}
+
+bool BaseCompiler::emitMemCopy() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t dstMemOrTableIndex = 0;
+ uint32_t srcMemOrTableIndex = 0;
+ Nothing nothing;
+ if (!iter_.readMemOrTableCopy(true, &dstMemOrTableIndex, &nothing,
+ &srcMemOrTableIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ int32_t signedLength;
+ if (MacroAssembler::SupportsFastUnalignedAccesses() &&
+ peekConstI32(&signedLength) && signedLength != 0 &&
+ uint32_t(signedLength) <= MaxInlineMemoryCopyLength) {
+ return emitMemCopyInline();
+ }
+
+ return emitMemCopyCall(lineOrBytecode);
+}
+
+bool BaseCompiler::emitMemCopyCall(uint32_t lineOrBytecode) {
+ pushHeapBase();
+ if (!emitInstanceCall(lineOrBytecode,
+ usesSharedMemory() ? SASigMemCopyShared : SASigMemCopy,
+ /*pushReturnedValue=*/false)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitMemCopyInline() {
+ MOZ_ASSERT(MaxInlineMemoryCopyLength != 0);
+
+ int32_t signedLength;
+ MOZ_ALWAYS_TRUE(popConstI32(&signedLength));
+ uint32_t length = signedLength;
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);
+
+ RegI32 src = popI32();
+ RegI32 dest = popI32();
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
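+
+  // For example (illustrative): length = 15 on a 64-bit system decomposes
+  // into one 8-byte, one 4-byte, one 2-byte, and one 1-byte copy.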
+
+ // Load all source bytes onto the value stack from low to high using the
+ // widest transfer width we can for the system. We will trap without writing
+ // anything if any source byte is out-of-bounds.
+ bool omitBoundsCheck = false;
+ size_t offset = 0;
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!loadCommon(&access, check, ValType::I64)) {
+ return false;
+ }
+
+ offset += sizeof(uint64_t);
+ omitBoundsCheck = true;
+ }
+#endif
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!loadCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ offset += sizeof(uint32_t);
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies2) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!loadCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ offset += sizeof(uint16_t);
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies1) {
+ RegI32 temp = needI32();
+ moveI32(src, temp);
+ pushI32(temp);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!loadCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+ }
+
+ // Store all source bytes from the value stack to the destination from
+ // high to low. We will trap without writing anything on the first store
+ // if any dest byte is out-of-bounds.
+ offset = length;
+ omitBoundsCheck = false;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ RegI32 value = popI32();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(value);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ if (!storeCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ RegI32 value = popI32();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(value);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!storeCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ RegI32 value = popI32();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(value);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!storeCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ RegI64 value = popI64();
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI64(value);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!storeCommon(&access, check, ValType::I64)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+#endif
+
+ freeI32(dest);
+ freeI32(src);
+ return true;
+}
+
+bool BaseCompiler::emitTableCopy() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t dstMemOrTableIndex = 0;
+ uint32_t srcMemOrTableIndex = 0;
+ Nothing nothing;
+ if (!iter_.readMemOrTableCopy(false, &dstMemOrTableIndex, &nothing,
+ &srcMemOrTableIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ pushI32(dstMemOrTableIndex);
+ pushI32(srcMemOrTableIndex);
+ if (!emitInstanceCall(lineOrBytecode, SASigTableCopy,
+ /*pushReturnedValue=*/false)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool BaseCompiler::emitDataOrElemDrop(bool isData) {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t segIndex = 0;
+ if (!iter_.readDataOrElemDrop(isData, &segIndex)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Despite the cast to int32_t, the callee regards the value as unsigned.
+ pushI32(int32_t(segIndex));
+
+ return emitInstanceCall(lineOrBytecode,
+ isData ? SASigDataDrop : SASigElemDrop,
+ /*pushReturnedValue=*/false);
+}
+
+bool BaseCompiler::emitMemFill() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ Nothing nothing;
+ if (!iter_.readMemFill(&nothing, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
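+  // The fill is inlined only when both the length and the byte value are
+  // compile-time constants, the length is small and nonzero, and the target
+  // supports fast unaligned accesses; otherwise we call into the instance.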
+ int32_t signedLength;
+ int32_t signedValue;
+ if (MacroAssembler::SupportsFastUnalignedAccesses() &&
+ peek2xI32(&signedLength, &signedValue) && signedLength != 0 &&
+ uint32_t(signedLength) <= MaxInlineMemoryFillLength) {
+ return emitMemFillInline();
+ }
+ return emitMemFillCall(lineOrBytecode);
+}
+
+bool BaseCompiler::emitMemFillCall(uint32_t lineOrBytecode) {
+ pushHeapBase();
+ return emitInstanceCall(
+ lineOrBytecode, usesSharedMemory() ? SASigMemFillShared : SASigMemFill,
+ /*pushReturnedValue=*/false);
+}
+
+bool BaseCompiler::emitMemFillInline() {
+ MOZ_ASSERT(MaxInlineMemoryFillLength != 0);
+
+ int32_t signedLength;
+ int32_t signedValue;
+ MOZ_ALWAYS_TRUE(popConstI32(&signedLength));
+ MOZ_ALWAYS_TRUE(popConstI32(&signedValue));
+ uint32_t length = uint32_t(signedLength);
+ uint32_t value = uint32_t(signedValue);
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
+
+ RegI32 dest = popI32();
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
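+  // For example, a 15-byte fill on a 64-bit target becomes one store each of
+  // 8, 4, 2, and 1 bytes.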
+
+ MOZ_ASSERT(numCopies2 <= 1 && numCopies1 <= 1);
+
+ // Generate splatted definitions for wider fills as needed
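+  // (e.g., value 0xAB gives val2 == 0xABAB and val4 == 0xABABABAB).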
+#ifdef JS_64BIT
+ uint64_t val8 = SplatByteToUInt<uint64_t>(value, 8);
+#endif
+ uint32_t val4 = SplatByteToUInt<uint32_t>(value, 4);
+ uint32_t val2 = SplatByteToUInt<uint32_t>(value, 2);
+ uint32_t val1 = value;
+
+ // Store the fill value to the destination from high to low. We will trap
+ // without writing anything on the first store if any dest byte is
+ // out-of-bounds.
+ size_t offset = length;
+ bool omitBoundsCheck = false;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(val1);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ if (!storeCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(val2);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!storeCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI32(val4);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!storeCommon(&access, check, ValType::I32)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ RegI32 temp = needI32();
+ moveI32(dest, temp);
+ pushI32(temp);
+ pushI64(val8);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, bytecodeOffset());
+ AccessCheck check;
+ check.omitBoundsCheck = omitBoundsCheck;
+ if (!storeCommon(&access, check, ValType::I64)) {
+ return false;
+ }
+
+ omitBoundsCheck = true;
+ }
+#endif
+
+ freeI32(dest);
+ return true;
+}
+
+bool BaseCompiler::emitMemOrTableInit(bool isMem) {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t segIndex = 0;
+ uint32_t dstTableIndex = 0;
+ Nothing nothing;
+ if (!iter_.readMemOrTableInit(isMem, &segIndex, &dstTableIndex, &nothing,
+ &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ pushI32(int32_t(segIndex));
+ if (isMem) {
+ if (!emitInstanceCall(lineOrBytecode, SASigMemInit,
+ /*pushReturnedValue=*/false)) {
+ return false;
+ }
+ } else {
+ pushI32(dstTableIndex);
+ if (!emitInstanceCall(lineOrBytecode, SASigTableInit,
+ /*pushReturnedValue=*/false)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+#ifdef ENABLE_WASM_REFTYPES
+[[nodiscard]] bool BaseCompiler::emitTableFill() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ Nothing nothing;
+ uint32_t tableIndex;
+ if (!iter_.readTableFill(&tableIndex, &nothing, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // fill(start:u32, val:ref, len:u32, table:u32) -> u32
+ pushI32(tableIndex);
+ return emitInstanceCall(lineOrBytecode, SASigTableFill,
+ /*pushReturnedValue=*/false);
+}
+
+[[nodiscard]] bool BaseCompiler::emitTableGet() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+ Nothing index;
+ uint32_t tableIndex;
+ if (!iter_.readTableGet(&tableIndex, &index)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ // get(index:u32, table:u32) -> uintptr_t(AnyRef)
+ pushI32(tableIndex);
+ if (!emitInstanceCall(lineOrBytecode, SASigTableGet,
+ /*pushReturnedValue=*/false)) {
+ return false;
+ }
+
+ // Push the resulting anyref back on the eval stack. NOTE: needRef() must
+ // not kill the value in the register.
+ RegPtr r = RegPtr(ReturnReg);
+ needRef(r);
+ pushRef(r);
+
+ return true;
+}
+
+[[nodiscard]] bool BaseCompiler::emitTableGrow() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+ Nothing delta;
+ Nothing initValue;
+ uint32_t tableIndex;
+ if (!iter_.readTableGrow(&tableIndex, &initValue, &delta)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ // grow(initValue:anyref, delta:u32, table:u32) -> u32
+ pushI32(tableIndex);
+ return emitInstanceCall(lineOrBytecode, SASigTableGrow);
+}
+
+[[nodiscard]] bool BaseCompiler::emitTableSet() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+ Nothing index, value;
+ uint32_t tableIndex;
+ if (!iter_.readTableSet(&tableIndex, &index, &value)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ // set(index:u32, value:ref, table:u32) -> i32
+ pushI32(tableIndex);
+ return emitInstanceCall(lineOrBytecode, SASigTableSet,
+ /*pushReturnedValue=*/false);
+}
+
+[[nodiscard]] bool BaseCompiler::emitTableSize() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+ uint32_t tableIndex;
+ if (!iter_.readTableSize(&tableIndex)) {
+ return false;
+ }
+ if (deadCode_) {
+ return true;
+ }
+ // size(table:u32) -> u32
+ pushI32(tableIndex);
+ return emitInstanceCall(lineOrBytecode, SASigTableSize);
+}
+#endif
+
+bool BaseCompiler::emitStructNew() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t typeIndex;
+ NothingVector args;
+ if (!iter_.readStructNew(&typeIndex, &args)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // Allocate zeroed storage. The parameter to StructNew is an index into a
+ // descriptor table that the instance has.
+ //
+ // Returns null on OOM.
+
+ const StructType& structType = moduleEnv_.types[typeIndex].structType();
+ const TypeIdDesc& structTypeId = moduleEnv_.typeIds[typeIndex];
+ RegPtr rst = needRef();
+ fr.loadTlsPtr(WasmTlsReg);
+ masm.loadWasmGlobalPtr(structTypeId.globalDataOffset(), rst);
+ pushRef(rst);
+
+ if (!emitInstanceCall(lineOrBytecode, SASigStructNew)) {
+ return false;
+ }
+
+ // Optimization opportunity: Iterate backward to pop arguments off the
+ // stack. This will generate more instructions than we want, since we
+ // really only need to pop the stack once at the end, not for every element,
+ // but to do better we need a bit more machinery to load elements off the
+ // stack into registers.
+
+ RegPtr rp = popRef();
+ RegPtr rdata = rp;
+
+ if (!structType.isInline_) {
+ rdata = needRef();
+ masm.loadPtr(Address(rp, OutlineTypedObject::offsetOfData()), rdata);
+ }
+
+  // Optimization opportunity: when the value being stored is a known
+  // zero/null we need not store anything. This case may be somewhat common
+  // because struct.new forces a value to be specified for every field.
+
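+  // Pop the field values in reverse declaration order (the last field is on
+  // top of the value stack) and store each into the object's data area;
+  // reference fields additionally get a post-barrier, emitted below.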
+ uint32_t fieldNo = structType.fields_.length();
+ while (fieldNo-- > 0) {
+ uint32_t offs = structType.objectBaseFieldOffset(fieldNo);
+ switch (structType.fields_[fieldNo].type.kind()) {
+ case ValType::I32: {
+ RegI32 r = popI32();
+ masm.store32(r, Address(rdata, offs));
+ freeI32(r);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 r = popI64();
+ masm.store64(r, Address(rdata, offs));
+ freeI64(r);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 r = popF32();
+ masm.storeFloat32(r, Address(rdata, offs));
+ freeF32(r);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 r = popF64();
+ masm.storeDouble(r, Address(rdata, offs));
+ freeF64(r);
+ break;
+ }
+ case ValType::Ref: {
+ RegPtr value = popRef();
+ masm.storePtr(value, Address(rdata, offs));
+
+ // A write barrier is needed here for the extremely unlikely case
+ // that the object is allocated in the tenured area - a result of
+ // a GC artifact.
+
+ Label skipBarrier;
+
+ sync();
+
+ RegPtr rowner = rp;
+ if (!structType.isInline_) {
+ rowner = needRef();
+ masm.loadPtr(Address(rp, OutlineTypedObject::offsetOfOwner()),
+ rowner);
+ }
+
+ RegPtr otherScratch = needRef();
+ EmitWasmPostBarrierGuard(masm, Some(rowner), otherScratch, value,
+ &skipBarrier);
+ freeRef(otherScratch);
+
+ if (!structType.isInline_) {
+ freeRef(rowner);
+ }
+
+ freeRef(value);
+
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ pushRef(rp); // Save rp across the call
+ RegPtr valueAddr = needRef();
+ masm.computeEffectiveAddress(Address(rdata, offs), valueAddr);
+ if (!emitPostBarrierCall(valueAddr)) { // Consumes valueAddr
+ return false;
+ }
+ popRef(rp); // Restore rp
+ if (!structType.isInline_) {
+ masm.loadPtr(Address(rp, OutlineTypedObject::offsetOfData()), rdata);
+ }
+
+ masm.bind(&skipBarrier);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected field type");
+ }
+ }
+ }
+
+ if (!structType.isInline_) {
+ freeRef(rdata);
+ }
+
+ pushRef(rp);
+
+ return true;
+}
+
+bool BaseCompiler::emitStructGet() {
+ uint32_t typeIndex;
+ uint32_t fieldIndex;
+ Nothing nothing;
+ if (!iter_.readStructGet(&typeIndex, &fieldIndex, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const StructType& structType = moduleEnv_.types[typeIndex].structType();
+
+ RegPtr rp = popRef();
+
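+  // Trap on a null struct reference before accessing its fields.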
+ Label ok;
+ masm.branchTestPtr(Assembler::NonZero, rp, rp, &ok);
+ trap(Trap::NullPointerDereference);
+ masm.bind(&ok);
+
+ if (!structType.isInline_) {
+ masm.loadPtr(Address(rp, OutlineTypedObject::offsetOfData()), rp);
+ }
+
+ uint32_t offs = structType.objectBaseFieldOffset(fieldIndex);
+ switch (structType.fields_[fieldIndex].type.kind()) {
+ case ValType::I32: {
+ RegI32 r = needI32();
+ masm.load32(Address(rp, offs), r);
+ pushI32(r);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 r = needI64();
+ masm.load64(Address(rp, offs), r);
+ pushI64(r);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 r = needF32();
+ masm.loadFloat32(Address(rp, offs), r);
+ pushF32(r);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 r = needF64();
+ masm.loadDouble(Address(rp, offs), r);
+ pushF64(r);
+ break;
+ }
+ case ValType::Ref: {
+ RegPtr r = needRef();
+ masm.loadPtr(Address(rp, offs), r);
+ pushRef(r);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected field type");
+ }
+ }
+
+ freeRef(rp);
+
+ return true;
+}
+
+bool BaseCompiler::emitStructSet() {
+ uint32_t typeIndex;
+ uint32_t fieldIndex;
+ Nothing nothing;
+ if (!iter_.readStructSet(&typeIndex, &fieldIndex, &nothing, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ const StructType& structType = moduleEnv_.types[typeIndex].structType();
+
+ RegI32 ri;
+ RegI64 rl;
+ RegF32 rf;
+ RegF64 rd;
+ RegPtr rr;
+
+ // Reserve this register early if we will need it so that it is not taken by
+ // rr or rp.
+ RegPtr valueAddr;
+ if (structType.fields_[fieldIndex].type.isReference()) {
+ valueAddr = RegPtr(PreBarrierReg);
+ needRef(valueAddr);
+ }
+
+ switch (structType.fields_[fieldIndex].type.kind()) {
+ case ValType::I32:
+ ri = popI32();
+ break;
+ case ValType::I64:
+ rl = popI64();
+ break;
+ case ValType::F32:
+ rf = popF32();
+ break;
+ case ValType::F64:
+ rd = popF64();
+ break;
+ case ValType::Ref:
+ rr = popRef();
+ break;
+ default:
+ MOZ_CRASH("Unexpected field type");
+ }
+
+ RegPtr rp = popRef();
+
+ Label ok;
+ masm.branchTestPtr(Assembler::NonZero, rp, rp, &ok);
+ trap(Trap::NullPointerDereference);
+ masm.bind(&ok);
+
+ if (!structType.isInline_) {
+ masm.loadPtr(Address(rp, OutlineTypedObject::offsetOfData()), rp);
+ }
+
+ uint32_t offs = structType.objectBaseFieldOffset(fieldIndex);
+ switch (structType.fields_[fieldIndex].type.kind()) {
+ case ValType::I32: {
+ masm.store32(ri, Address(rp, offs));
+ freeI32(ri);
+ break;
+ }
+ case ValType::I64: {
+ masm.store64(rl, Address(rp, offs));
+ freeI64(rl);
+ break;
+ }
+ case ValType::F32: {
+ masm.storeFloat32(rf, Address(rp, offs));
+ freeF32(rf);
+ break;
+ }
+ case ValType::F64: {
+ masm.storeDouble(rd, Address(rp, offs));
+ freeF64(rd);
+ break;
+ }
+ case ValType::Ref: {
+ masm.computeEffectiveAddress(Address(rp, offs), valueAddr);
+
+ // Bug 1617908. Ensure that if a TypedObject is not inline, then its
+ // underlying ArrayBuffer also is not inline, or the barrier logic fails.
+ static_assert(InlineTypedObject::MaxInlineBytes >=
+ ArrayBufferObject::MaxInlineBytes);
+
+ // emitBarrieredStore consumes valueAddr
+ if (!emitBarrieredStore(structType.isInline_ ? Some(rp) : Nothing(),
+ valueAddr, rr)) {
+ return false;
+ }
+ freeRef(rr);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Unexpected field type");
+ }
+ }
+
+ freeRef(rp);
+
+ return true;
+}
+
+bool BaseCompiler::emitStructNarrow() {
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ ValType inputType, outputType;
+ Nothing nothing;
+ if (!iter_.readStructNarrow(&inputType, &outputType, &nothing)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ // struct.narrow validation ensures that these hold.
+
+ MOZ_ASSERT(inputType.isEqRef() ||
+ moduleEnv_.types.isStructType(inputType.refType()));
+ MOZ_ASSERT(outputType.isEqRef() ||
+ moduleEnv_.types.isStructType(outputType.refType()));
+ MOZ_ASSERT_IF(outputType.isEqRef(), inputType.isEqRef());
+
+ // EqRef -> EqRef is a no-op, just leave the value on the stack.
+
+ if (inputType.isEqRef() && outputType.isEqRef()) {
+ return true;
+ }
+
+ RegPtr rp = popRef();
+
+ // Dynamic downcast eqref|(optref T) -> (optref U), leaves rp or null
+ const TypeIdDesc& outputStructTypeId =
+ moduleEnv_.typeIds[outputType.refType().typeIndex()];
+ RegPtr rst = needRef();
+ fr.loadTlsPtr(WasmTlsReg);
+ masm.loadWasmGlobalPtr(outputStructTypeId.globalDataOffset(), rst);
+ pushRef(rst);
+
+ pushRef(rp);
+ return emitInstanceCall(lineOrBytecode, SASigStructNarrow);
+}
+
+#ifdef ENABLE_WASM_SIMD
+
+// Emitter trampolines used by abstracted SIMD operations. Naming here follows
+// the SIMD spec pretty closely.
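+//
+// Each trampoline simply forwards to the corresponding MacroAssembler
+// operation; the per-platform variants differ mainly in the temporaries and
+// operand conventions they require.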
+
+static void AndV128(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.bitwiseAndSimd128(rs, rsd);
+}
+
+static void OrV128(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.bitwiseOrSimd128(rs, rsd);
+}
+
+static void XorV128(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.bitwiseXorSimd128(rs, rsd);
+}
+
+static void AddI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt8x16(rs, rsd);
+}
+
+static void AddI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt16x8(rs, rsd);
+}
+
+static void AddI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt32x4(rs, rsd);
+}
+
+static void AddF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addFloat32x4(rs, rsd);
+}
+
+static void AddI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addInt64x2(rs, rsd);
+}
+
+static void AddF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addFloat64x2(rs, rsd);
+}
+
+static void AddSatI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addSatInt8x16(rs, rsd);
+}
+
+static void AddSatUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAddSatInt8x16(rs, rsd);
+}
+
+static void AddSatI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.addSatInt16x8(rs, rsd);
+}
+
+static void AddSatUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAddSatInt16x8(rs, rsd);
+}
+
+static void SubI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt8x16(rs, rsd);
+}
+
+static void SubI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt16x8(rs, rsd);
+}
+
+static void SubI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt32x4(rs, rsd);
+}
+
+static void SubF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subFloat32x4(rs, rsd);
+}
+
+static void SubI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subInt64x2(rs, rsd);
+}
+
+static void SubF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subFloat64x2(rs, rsd);
+}
+
+static void SubSatI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subSatInt8x16(rs, rsd);
+}
+
+static void SubSatUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedSubSatInt8x16(rs, rsd);
+}
+
+static void SubSatI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.subSatInt16x8(rs, rsd);
+}
+
+static void SubSatUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedSubSatInt16x8(rs, rsd);
+}
+
+static void MulI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulInt16x8(rs, rsd);
+}
+
+static void MulI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulInt32x4(rs, rsd);
+}
+
+static void MulF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulFloat32x4(rs, rsd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void MulI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.mulInt64x2(rs, rsd, temp);
+}
+# endif
+
+static void MulF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.mulFloat64x2(rs, rsd);
+}
+
+static void DivF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.divFloat32x4(rs, rsd);
+}
+
+static void DivF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.divFloat64x2(rs, rsd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void MinF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.minFloat32x4(rs, rsd, temp1, temp2);
+}
+
+static void MinF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.minFloat64x2(rs, rsd, temp1, temp2);
+}
+
+static void MaxF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.maxFloat32x4(rs, rsd, temp1, temp2);
+}
+
+static void MaxF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp1, RegV128 temp2) {
+ masm.maxFloat64x2(rs, rsd, temp1, temp2);
+}
+
+static void PMinF32x4(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMinFloat32x4(rsd, rs);
+}
+
+static void PMinF64x2(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMinFloat64x2(rsd, rs);
+}
+
+static void PMaxF32x4(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMaxFloat32x4(rsd, rs);
+}
+
+static void PMaxF64x2(MacroAssembler& masm, RegV128 rsd, RegV128 rs,
+ RhsDestOp) {
+ masm.pseudoMaxFloat64x2(rsd, rs);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void MinF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minFloat32x4(rs, rsd);
+}
+
+static void MinF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minFloat64x2(rs, rsd);
+}
+
+static void MaxF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxFloat32x4(rs, rsd);
+}
+
+static void MaxF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxFloat64x2(rs, rsd);
+}
+
+static void PMinF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMinFloat32x4(rs, rsd);
+}
+
+static void PMinF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMinFloat64x2(rs, rsd);
+}
+
+static void PMaxF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMaxFloat32x4(rs, rsd);
+}
+
+static void PMaxF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.pseudoMaxFloat64x2(rs, rsd);
+}
+# endif
+
+static void DotI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.widenDotInt16x8(rs, rsd);
+}
+
+static void CmpI8x16(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt8x16(cond, rs, rsd);
+}
+
+static void CmpI16x8(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt16x8(cond, rs, rsd);
+}
+
+static void CmpI32x4(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt32x4(cond, rs, rsd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void CmpUI8x16(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd, RegV128 temp1, RegV128 temp2) {
+ masm.unsignedCompareInt8x16(cond, rs, rsd, temp1, temp2);
+}
+
+static void CmpUI16x8(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd, RegV128 temp1, RegV128 temp2) {
+ masm.unsignedCompareInt16x8(cond, rs, rsd, temp1, temp2);
+}
+
+static void CmpUI32x4(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd, RegV128 temp1, RegV128 temp2) {
+ masm.unsignedCompareInt32x4(cond, rs, rsd, temp1, temp2);
+}
+# else
+static void CmpUI8x16(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt8x16(cond, rs, rsd);
+}
+
+static void CmpUI16x8(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt16x8(cond, rs, rsd);
+}
+
+static void CmpUI32x4(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareInt32x4(cond, rs, rsd);
+}
+# endif
+
+static void CmpF32x4(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareFloat32x4(cond, rs, rsd);
+}
+
+static void CmpF64x2(MacroAssembler& masm, Assembler::Condition cond,
+ RegV128 rs, RegV128 rsd) {
+ masm.compareFloat64x2(cond, rs, rsd);
+}
+
+static void NegI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt8x16(rs, rd);
+}
+
+static void NegI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt16x8(rs, rd);
+}
+
+static void NegI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt32x4(rs, rd);
+}
+
+static void NegI64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negInt64x2(rs, rd);
+}
+
+static void NegF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negFloat32x4(rs, rd);
+}
+
+static void NegF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.negFloat64x2(rs, rd);
+}
+
+static void AbsF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absFloat32x4(rs, rd);
+}
+
+static void AbsF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absFloat64x2(rs, rd);
+}
+
+static void SqrtF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.sqrtFloat32x4(rs, rd);
+}
+
+static void SqrtF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.sqrtFloat64x2(rs, rd);
+}
+
+static void CeilF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.ceilFloat32x4(rs, rd);
+}
+
+static void FloorF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.floorFloat32x4(rs, rd);
+}
+
+static void TruncF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.truncFloat32x4(rs, rd);
+}
+
+static void NearestF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.nearestFloat32x4(rs, rd);
+}
+
+static void CeilF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.ceilFloat64x2(rs, rd);
+}
+
+static void FloorF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.floorFloat64x2(rs, rd);
+}
+
+static void TruncF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.truncFloat64x2(rs, rd);
+}
+
+static void NearestF64x2(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.nearestFloat64x2(rs, rd);
+}
+
+static void NotV128(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.bitwiseNotSimd128(rs, rd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void ShiftLeftI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp1, RegV128 temp2) {
+ masm.leftShiftInt8x16(rs, rsd, temp1, temp2);
+}
+
+static void ShiftLeftI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.leftShiftInt16x8(rs, rsd, temp);
+}
+
+static void ShiftLeftI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.leftShiftInt32x4(rs, rsd, temp);
+}
+
+static void ShiftLeftI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.leftShiftInt64x2(rs, rsd, temp);
+}
+
+static void ShiftRightI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp1, RegV128 temp2) {
+ masm.rightShiftInt8x16(rs, rsd, temp1, temp2);
+}
+
+static void ShiftRightUI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp1, RegV128 temp2) {
+ masm.unsignedRightShiftInt8x16(rs, rsd, temp1, temp2);
+}
+
+static void ShiftRightI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.rightShiftInt16x8(rs, rsd, temp);
+}
+
+static void ShiftRightUI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.unsignedRightShiftInt16x8(rs, rsd, temp);
+}
+
+static void ShiftRightI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.rightShiftInt32x4(rs, rsd, temp);
+}
+
+static void ShiftRightUI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.unsignedRightShiftInt32x4(rs, rsd, temp);
+}
+
+static void ShiftRightUI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegI32 temp) {
+ masm.unsignedRightShiftInt64x2(rs, rsd, temp);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void ShiftLeftI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd) {
+ masm.leftShiftInt8x16(rs, rsd);
+}
+
+static void ShiftLeftI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd) {
+ masm.leftShiftInt16x8(rs, rsd);
+}
+
+static void ShiftLeftI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd) {
+ masm.leftShiftInt32x4(rs, rsd);
+}
+
+static void ShiftLeftI64x2(MacroAssembler& masm, RegI32 rs, RegV128 rsd) {
+ masm.leftShiftInt64x2(rs, rsd);
+}
+
+static void ShiftRightI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.rightShiftInt8x16(rs, rsd, temp);
+}
+
+static void ShiftRightUI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.unsignedRightShiftInt8x16(rs, rsd, temp);
+}
+
+static void ShiftRightI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.rightShiftInt16x8(rs, rsd, temp);
+}
+
+static void ShiftRightUI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.unsignedRightShiftInt16x8(rs, rsd, temp);
+}
+
+static void ShiftRightI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.rightShiftInt32x4(rs, rsd, temp);
+}
+
+static void ShiftRightUI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.unsignedRightShiftInt32x4(rs, rsd, temp);
+}
+# endif
+
+static void AverageUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAverageInt8x16(rs, rsd);
+}
+
+static void AverageUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedAverageInt16x8(rs, rsd);
+}
+
+static void MinI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minInt8x16(rs, rsd);
+}
+
+static void MinUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMinInt8x16(rs, rsd);
+}
+
+static void MaxI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxInt8x16(rs, rsd);
+}
+
+static void MaxUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMaxInt8x16(rs, rsd);
+}
+
+static void MinI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minInt16x8(rs, rsd);
+}
+
+static void MinUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMinInt16x8(rs, rsd);
+}
+
+static void MaxI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxInt16x8(rs, rsd);
+}
+
+static void MaxUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMaxInt16x8(rs, rsd);
+}
+
+static void MinI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.minInt32x4(rs, rsd);
+}
+
+static void MinUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMinInt32x4(rs, rsd);
+}
+
+static void MaxI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.maxInt32x4(rs, rsd);
+}
+
+static void MaxUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedMaxInt32x4(rs, rsd);
+}
+
+static void NarrowI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.narrowInt16x8(rs, rsd);
+}
+
+static void NarrowUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedNarrowInt16x8(rs, rsd);
+}
+
+static void NarrowI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.narrowInt32x4(rs, rsd);
+}
+
+static void NarrowUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.unsignedNarrowInt32x4(rs, rsd);
+}
+
+static void WidenLowI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenLowInt8x16(rs, rd);
+}
+
+static void WidenHighI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenHighInt8x16(rs, rd);
+}
+
+static void WidenLowUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenLowInt8x16(rs, rd);
+}
+
+static void WidenHighUI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenHighInt8x16(rs, rd);
+}
+
+static void WidenLowI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenLowInt16x8(rs, rd);
+}
+
+static void WidenHighI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.widenHighInt16x8(rs, rd);
+}
+
+static void WidenLowUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenLowInt16x8(rs, rd);
+}
+
+static void WidenHighUI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedWidenHighInt16x8(rs, rd);
+}
+
+static void AbsI8x16(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absInt8x16(rs, rd);
+}
+
+static void AbsI16x8(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absInt16x8(rs, rd);
+}
+
+static void AbsI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.absInt32x4(rs, rd);
+}
+
+static void ExtractLaneI8x16(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.extractLaneInt8x16(laneIndex, rs, rd);
+}
+
+static void ExtractLaneUI8x16(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.unsignedExtractLaneInt8x16(laneIndex, rs, rd);
+}
+
+static void ExtractLaneI16x8(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.extractLaneInt16x8(laneIndex, rs, rd);
+}
+
+static void ExtractLaneUI16x8(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.unsignedExtractLaneInt16x8(laneIndex, rs, rd);
+}
+
+static void ExtractLaneI32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI32 rd) {
+ masm.extractLaneInt32x4(laneIndex, rs, rd);
+}
+
+static void ExtractLaneI64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegI64 rd) {
+ masm.extractLaneInt64x2(laneIndex, rs, rd);
+}
+
+static void ExtractLaneF32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegF32 rd) {
+ masm.extractLaneFloat32x4(laneIndex, rs, rd);
+}
+
+static void ExtractLaneF64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegV128 rs, RegF64 rd) {
+ masm.extractLaneFloat64x2(laneIndex, rs, rd);
+}
+
+static void ReplaceLaneI8x16(MacroAssembler& masm, uint32_t laneIndex,
+ RegI32 rs, RegV128 rsd) {
+ masm.replaceLaneInt8x16(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneI16x8(MacroAssembler& masm, uint32_t laneIndex,
+ RegI32 rs, RegV128 rsd) {
+ masm.replaceLaneInt16x8(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneI32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegI32 rs, RegV128 rsd) {
+ masm.replaceLaneInt32x4(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneI64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegI64 rs, RegV128 rsd) {
+ masm.replaceLaneInt64x2(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneF32x4(MacroAssembler& masm, uint32_t laneIndex,
+ RegF32 rs, RegV128 rsd) {
+ masm.replaceLaneFloat32x4(laneIndex, rs, rsd);
+}
+
+static void ReplaceLaneF64x2(MacroAssembler& masm, uint32_t laneIndex,
+ RegF64 rs, RegV128 rsd) {
+ masm.replaceLaneFloat64x2(laneIndex, rs, rsd);
+}
+
+static void SplatI8x16(MacroAssembler& masm, RegI32 rs, RegV128 rd) {
+ masm.splatX16(rs, rd);
+}
+
+static void SplatI16x8(MacroAssembler& masm, RegI32 rs, RegV128 rd) {
+ masm.splatX8(rs, rd);
+}
+
+static void SplatI32x4(MacroAssembler& masm, RegI32 rs, RegV128 rd) {
+ masm.splatX4(rs, rd);
+}
+
+static void SplatI64x2(MacroAssembler& masm, RegI64 rs, RegV128 rd) {
+ masm.splatX2(rs, rd);
+}
+
+static void SplatF32x4(MacroAssembler& masm, RegF32 rs, RegV128 rd) {
+ masm.splatX4(rs, rd);
+}
+
+static void SplatF64x2(MacroAssembler& masm, RegF64 rs, RegV128 rd) {
+ masm.splatX2(rs, rd);
+}
+
+// This is the same op independent of lanes: it tests for any nonzero bit.
+static void AnyTrue(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.anyTrueSimd128(rs, rd);
+}
+
+static void AllTrueI8x16(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.allTrueInt8x16(rs, rd);
+}
+
+static void AllTrueI16x8(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.allTrueInt16x8(rs, rd);
+}
+
+static void AllTrueI32x4(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.allTrueInt32x4(rs, rd);
+}
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+static void BitmaskI8x16(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.bitmaskInt8x16(rs, rd);
+}
+
+static void BitmaskI16x8(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.bitmaskInt16x8(rs, rd);
+}
+
+static void BitmaskI32x4(MacroAssembler& masm, RegV128 rs, RegI32 rd) {
+ masm.bitmaskInt32x4(rs, rd);
+}
+
+static void Swizzle(MacroAssembler& masm, RegV128 rs, RegV128 rsd,
+ RegV128 temp) {
+ masm.swizzleInt8x16(rs, rsd, temp);
+}
+# elif defined(JS_CODEGEN_ARM64)
+static void BitmaskI8x16(MacroAssembler& masm, RegV128 rs, RegI32 rd,
+ RegV128 temp) {
+ masm.bitmaskInt8x16(rs, rd, temp);
+}
+
+static void BitmaskI16x8(MacroAssembler& masm, RegV128 rs, RegI32 rd,
+ RegV128 temp) {
+ masm.bitmaskInt16x8(rs, rd, temp);
+}
+
+static void BitmaskI32x4(MacroAssembler& masm, RegV128 rs, RegI32 rd,
+ RegV128 temp) {
+ masm.bitmaskInt32x4(rs, rd, temp);
+}
+
+static void Swizzle(MacroAssembler& masm, RegV128 rs, RegV128 rsd) {
+ masm.swizzleInt8x16(rs, rsd);
+}
+# endif
+
+static void ConvertI32x4ToF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.convertInt32x4ToFloat32x4(rs, rd);
+}
+
+static void ConvertUI32x4ToF32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.unsignedConvertInt32x4ToFloat32x4(rs, rd);
+}
+
+static void ConvertF32x4ToI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd) {
+ masm.truncSatFloat32x4ToInt32x4(rs, rd);
+}
+
+static void ConvertF32x4ToUI32x4(MacroAssembler& masm, RegV128 rs, RegV128 rd,
+ RegV128 temp) {
+ masm.unsignedTruncSatFloat32x4ToInt32x4(rs, rd, temp);
+}
+
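+// Templated emitters for the SIMD operations above. Each pops its operand(s)
+// from the value stack, allocates any result and temp registers it needs,
+// invokes the MacroAssembler callback, frees the consumed registers, and
+// pushes the result.
+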
+template <typename SourceType, typename DestType>
+void BaseCompiler::emitVectorUnop(void (*op)(MacroAssembler& masm,
+ SourceType rs, DestType rd)) {
+ SourceType rs = pop<SourceType>();
+ DestType rd = need<DestType>();
+ op(masm, rs, rd);
+ free(rs);
+ push(rd);
+}
+
+template <typename SourceType, typename DestType, typename TempType>
+void BaseCompiler::emitVectorUnop(void (*op)(MacroAssembler& masm,
+ SourceType rs, DestType rd,
+ TempType temp)) {
+ SourceType rs = pop<SourceType>();
+ DestType rd = need<DestType>();
+ TempType temp = need<TempType>();
+ op(masm, rs, rd, temp);
+ free(rs);
+ free(temp);
+ push(rd);
+}
+
+template <typename SourceType, typename DestType, typename ImmType>
+void BaseCompiler::emitVectorUnop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType,
+ SourceType, DestType)) {
+ SourceType rs = pop<SourceType>();
+ DestType rd = need<DestType>();
+ op(masm, immediate, rs, rd);
+ free(rs);
+ push(rd);
+}
+
+template <typename RhsType, typename LhsDestType>
+void BaseCompiler::emitVectorBinop(void (*op)(MacroAssembler& masm, RhsType src,
+ LhsDestType srcDest)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ op(masm, rs, rsd);
+ free(rs);
+ push(rsd);
+}
+
+template <typename RhsDestType, typename LhsType>
+void BaseCompiler::emitVectorBinop(void (*op)(MacroAssembler& masm,
+ RhsDestType src, LhsType srcDest,
+ RhsDestOp)) {
+ RhsDestType rsd = pop<RhsDestType>();
+ LhsType rs = pop<LhsType>();
+ op(masm, rsd, rs, RhsDestOp::True);
+ free(rs);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename TempType>
+void BaseCompiler::emitVectorBinop(void (*op)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType temp)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ TempType temp = need<TempType>();
+ op(masm, rs, rsd, temp);
+ free(rs);
+ free(temp);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename TempType1,
+ typename TempType2>
+void BaseCompiler::emitVectorBinop(void (*op)(MacroAssembler& masm, RhsType rs,
+ LhsDestType rsd, TempType1 temp1,
+ TempType2 temp2)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ TempType1 temp1 = need<TempType1>();
+ TempType2 temp2 = need<TempType2>();
+ op(masm, rs, rsd, temp1, temp2);
+ free(rs);
+ free(temp1);
+ free(temp2);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename ImmType>
+void BaseCompiler::emitVectorBinop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType, RhsType,
+ LhsDestType)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ op(masm, immediate, rs, rsd);
+ free(rs);
+ push(rsd);
+}
+
+template <typename RhsType, typename LhsDestType, typename ImmType,
+ typename TempType1, typename TempType2>
+void BaseCompiler::emitVectorBinop(ImmType immediate,
+ void (*op)(MacroAssembler&, ImmType, RhsType,
+ LhsDestType, TempType1 temp1,
+ TempType2 temp2)) {
+ RhsType rs = pop<RhsType>();
+ LhsDestType rsd = pop<LhsDestType>();
+ TempType1 temp1 = need<TempType1>();
+ TempType2 temp2 = need<TempType2>();
+ op(masm, immediate, rs, rsd, temp1, temp2);
+ free(rs);
+ free(temp1);
+ free(temp2);
+ push(rsd);
+}
+
+void BaseCompiler::emitVectorAndNot() {
+ // We want x & ~y but the available operation is ~x & y, so reverse the
+ // operands.
+ RegV128 r, rs;
+ pop2xV128(&r, &rs);
+ masm.bitwiseNotAndSimd128(r, rs);
+ freeV128(r);
+ pushV128(rs);
+}
+
+bool BaseCompiler::emitLoadSplat(Scalar::Type viewType) {
+  // We can implement loadSplat mostly as load + splat because the push of the
+  // result onto the value stack in loadCommon normally will not generate any
+  // code; it will leave the value in a register which we will consume.
+
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoadSplat(Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+  // We use uint types when we can, on the general assumption that unsigned
+  // loads might be smaller/faster on some platforms, because no sign
+  // extension needs to be done after the sub-register load.
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ switch (viewType) {
+ case Scalar::Uint8:
+ if (!loadCommon(&access, AccessCheck(), ValType::I32)) {
+ return false;
+ }
+ emitVectorUnop(SplatI8x16);
+ break;
+ case Scalar::Uint16:
+ if (!loadCommon(&access, AccessCheck(), ValType::I32)) {
+ return false;
+ }
+ emitVectorUnop(SplatI16x8);
+ break;
+ case Scalar::Uint32:
+ if (!loadCommon(&access, AccessCheck(), ValType::I32)) {
+ return false;
+ }
+ emitVectorUnop(SplatI32x4);
+ break;
+ case Scalar::Int64:
+ if (!loadCommon(&access, AccessCheck(), ValType::I64)) {
+ return false;
+ }
+ emitVectorUnop(SplatI64x2);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ return true;
+}
+
+bool BaseCompiler::emitLoadZero(Scalar::Type viewType) {
+ // LoadZero has the structure of LoadSplat
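+  // but, instead of splatting, the loaded scalar is zero-extended to a full
+  // 128-bit vector.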
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoadSplat(Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, bytecodeOffset());
+ access.setZeroExtendSimd128Load();
+ return loadCommon(&access, AccessCheck(), ValType::V128);
+}
+
+bool BaseCompiler::emitLoadExtend(Scalar::Type viewType) {
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoadExtend(&addr)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ MemoryAccessDesc access(Scalar::Int64, addr.align, addr.offset,
+ bytecodeOffset());
+ if (!loadCommon(&access, AccessCheck(), ValType::I64)) {
+ return false;
+ }
+
+ RegI64 rs = popI64();
+ RegV128 rd = needV128();
+ masm.moveGPR64ToDouble(rs, rd);
+ switch (viewType) {
+ case Scalar::Int8:
+ masm.widenLowInt8x16(rd, rd);
+ break;
+ case Scalar::Uint8:
+ masm.unsignedWidenLowInt8x16(rd, rd);
+ break;
+ case Scalar::Int16:
+ masm.widenLowInt16x8(rd, rd);
+ break;
+ case Scalar::Uint16:
+ masm.unsignedWidenLowInt16x8(rd, rd);
+ break;
+ case Scalar::Int32:
+ masm.widenLowInt32x4(rd, rd);
+ break;
+ case Scalar::Uint32:
+ masm.unsignedWidenLowInt32x4(rd, rd);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ freeI64(rs);
+ pushV128(rd);
+
+ return true;
+}
+
+bool BaseCompiler::emitBitselect() {
+ Nothing unused_a, unused_b, unused_c;
+
+ if (!iter_.readVectorSelect(&unused_a, &unused_b, &unused_c)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegV128 rs3 = popV128(); // Control
+ RegV128 rs2 = popV128(); // 'false' vector
+ RegV128 rs1 = popV128(); // 'true' vector
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // On x86, certain register assignments will result in more compact code: we
+ // want output=rs1 and tmp=rs3. Attend to this after we see what other
+ // platforms want/need.
+ RegV128 tmp = needV128(); // Distinguished tmp, for now
+ masm.bitwiseSelectSimd128(rs3, rs1, rs2, rs1, tmp);
+ freeV128(rs2);
+ freeV128(rs3);
+ freeV128(tmp);
+ pushV128(rs1);
+# elif defined(JS_CODEGEN_ARM64)
+ // Note register conventions differ significantly from x86.
+ masm.bitwiseSelectSimd128(rs1, rs2, rs3);
+ freeV128(rs1);
+ freeV128(rs2);
+ pushV128(rs3);
+# else
+ MOZ_CRASH("NYI");
+# endif
+ return true;
+}
+
+bool BaseCompiler::emitVectorShuffle() {
+ Nothing unused_a, unused_b;
+ V128 shuffleMask;
+
+ if (!iter_.readVectorShuffle(&unused_a, &unused_b, &shuffleMask)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+ RegV128 rd, rs;
+ pop2xV128(&rd, &rs);
+ masm.shuffleInt8x16(shuffleMask.bytes, rs, rd);
+ freeV128(rs);
+ pushV128(rd);
+
+ return true;
+}
+
+// Signed case must be scalarized on x86/x64 and requires CL.
+// Signed and unsigned cases must be scalarized on ARM64.
+bool BaseCompiler::emitVectorShiftRightI64x2(bool isUnsigned) {
+ Nothing unused_a, unused_b;
+
+ if (!iter_.readVectorShift(&unused_a, &unused_b)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (isUnsigned) {
+ emitVectorBinop(ShiftRightUI64x2);
+ return true;
+ }
+# endif
+
+# if defined(JS_CODEGEN_X86)
+ needI32(specific_.ecx);
+ RegI32 count = popI32ToSpecific(specific_.ecx);
+# elif defined(JS_CODEGEN_X64)
+ RegI32 count;
+ if (Assembler::HasBMI2()) {
+ count = popI32();
+ } else {
+ needI32(specific_.ecx);
+ count = popI32ToSpecific(specific_.ecx);
+ }
+# elif defined(JS_CODEGEN_ARM64)
+ RegI32 count = popI32();
+# endif
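+  // Scalarize: mask the shift count to the 0..63 range, then extract, shift,
+  // and reinsert each 64-bit lane in turn.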
+ RegV128 lhsDest = popV128();
+ RegI64 tmp = needI64();
+ masm.and32(Imm32(63), count);
+ masm.extractLaneInt64x2(0, lhsDest, tmp);
+ if (isUnsigned) {
+ masm.rshift64(count, tmp);
+ } else {
+ masm.rshift64Arithmetic(count, tmp);
+ }
+ masm.replaceLaneInt64x2(0, tmp, lhsDest);
+ masm.extractLaneInt64x2(1, lhsDest, tmp);
+ if (isUnsigned) {
+ masm.rshift64(count, tmp);
+ } else {
+ masm.rshift64Arithmetic(count, tmp);
+ }
+ masm.replaceLaneInt64x2(1, tmp, lhsDest);
+ freeI64(tmp);
+ freeI32(count);
+ pushV128(lhsDest);
+
+ return true;
+}
+
+// Must be scalarized on ARM64.
+bool BaseCompiler::emitVectorMulI64x2() {
+ Nothing unused_a, unused_b;
+
+ if (!iter_.readBinary(ValType::V128, &unused_a, &unused_b)) {
+ return false;
+ }
+
+ if (deadCode_) {
+ return true;
+ }
+
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ emitVectorBinop(MulI64x2);
+# elif defined(JS_CODEGEN_ARM64)
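+  // Scalarize: multiply the two 64-bit lanes one at a time with the scalar
+  // multiplier and write each product back into its lane.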
+ RegV128 r, rs;
+ pop2xV128(&r, &rs);
+ RegI64 temp1 = needI64();
+ RegI64 temp2 = needI64();
+ masm.extractLaneInt64x2(0, r, temp1);
+ masm.extractLaneInt64x2(0, rs, temp2);
+ masm.mul64(temp2, temp1, Register::Invalid());
+ masm.replaceLaneInt64x2(0, temp1, r);
+ masm.extractLaneInt64x2(1, r, temp1);
+ masm.extractLaneInt64x2(1, rs, temp2);
+ masm.mul64(temp2, temp1, Register::Invalid());
+ masm.replaceLaneInt64x2(1, temp1, r);
+ freeI64(temp1);
+ freeI64(temp2);
+ freeV128(rs);
+ pushV128(r);
+# else
+ MOZ_CRASH("NYI");
+# endif
+
+ return true;
+}
+#endif
+
+bool BaseCompiler::emitBody() {
+ MOZ_ASSERT(stackMapGenerator_.framePushedAtEntryToBody.isSome());
+
+ if (!iter_.readFunctionStart(func_.index)) {
+ return false;
+ }
+
+ initControl(controlItem(), ResultType::Empty());
+
+ for (;;) {
+ Nothing unused_a, unused_b;
+
+#ifdef DEBUG
+ performRegisterLeakCheck();
+ assertStackInvariants();
+#endif
+
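+    // Each dispatch* macro below reads and validates its operands via the
+    // opcode iterator and then, unless we are in dead code, runs the given
+    // emitter. The whole expression is false when the read fails or, for the
+    // fallible emitters, when the emitter itself fails.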
+#define dispatchBinary(doEmit, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && \
+ (deadCode_ || (doEmit(), true))
+
+#define dispatchUnary(doEmit, type) \
+ iter_.readUnary(type, &unused_a) && (deadCode_ || (doEmit(), true))
+
+#define dispatchComparison(doEmit, operandType, compareOp) \
+ iter_.readComparison(operandType, &unused_a, &unused_b) && \
+ (deadCode_ || (doEmit(compareOp, operandType), true))
+
+#define dispatchConversion(doEmit, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && \
+ (deadCode_ || (doEmit(), true))
+
+#define dispatchConversionOOM(doEmit, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && (deadCode_ || doEmit())
+
+#define dispatchCalloutConversionOOM(doEmit, symbol, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && \
+ (deadCode_ || doEmit(symbol, inType, outType))
+
+#define dispatchIntDivCallout(doEmit, symbol, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && \
+ (deadCode_ || doEmit(symbol, type))
+
+#define dispatchVectorBinary(op) \
+ iter_.readBinary(ValType::V128, &unused_a, &unused_b) && \
+ (deadCode_ || (emitVectorBinop(op), true))
+
+#define dispatchVectorUnary(op) \
+ iter_.readUnary(ValType::V128, &unused_a) && \
+ (deadCode_ || (emitVectorUnop(op), true))
+
+#define dispatchVectorComparison(op, compareOp) \
+ iter_.readBinary(ValType::V128, &unused_a, &unused_b) && \
+ (deadCode_ || (emitVectorBinop(compareOp, op), true))
+
+#define dispatchVectorVariableShift(op) \
+ iter_.readVectorShift(&unused_a, &unused_b) && \
+ (deadCode_ || (emitVectorBinop(op), true))
+
+#define dispatchExtractLane(op, outType, laneLimit) \
+ iter_.readExtractLane(outType, laneLimit, &laneIndex, &unused_a) && \
+ (deadCode_ || (emitVectorUnop(laneIndex, op), true))
+
+#define dispatchReplaceLane(op, inType, laneLimit) \
+ iter_.readReplaceLane(inType, laneLimit, &laneIndex, &unused_a, \
+ &unused_b) && \
+ (deadCode_ || (emitVectorBinop(laneIndex, op), true))
+
+#define dispatchSplat(op, inType) \
+ iter_.readConversion(inType, ValType::V128, &unused_a) && \
+ (deadCode_ || (emitVectorUnop(op), true))
+
+#define dispatchVectorReduction(op) \
+ iter_.readConversion(ValType::V128, ValType::I32, &unused_a) && \
+ (deadCode_ || (emitVectorUnop(op), true))
+
+#ifdef DEBUG
+ // Check that the number of ref-typed entries in the operand stack matches
+ // reality.
+# define CHECK_POINTER_COUNT \
+ do { \
+ MOZ_ASSERT(countMemRefsOnStk() == stackMapGenerator_.memRefsOnStk); \
+ } while (0)
+#else
+# define CHECK_POINTER_COUNT \
+ do { \
+ } while (0)
+#endif
+
+#ifdef ENABLE_WASM_SIMD_EXPERIMENTAL
+# define CHECK_SIMD_EXPERIMENTAL() (void)(0)
+#else
+# define CHECK_SIMD_EXPERIMENTAL() break
+#endif
+
+#define CHECK(E) \
+ if (!(E)) return false
+#define NEXT() \
+ { \
+ CHECK_POINTER_COUNT; \
+ continue; \
+ }
+#define CHECK_NEXT(E) \
+ if (!(E)) return false; \
+ { \
+ CHECK_POINTER_COUNT; \
+ continue; \
+ }
+
+ CHECK(stk_.reserve(stk_.length() + MaxPushesPerOpcode));
+
+ OpBytes op;
+ CHECK(iter_.readOp(&op));
+
+    // When compilerEnv_.debugEnabled(), every operator except Op::End has a
+    // breakpoint site.
+ if (compilerEnv_.debugEnabled() && op.b0 != (uint16_t)Op::End) {
+ // TODO sync only registers that can be clobbered by the exit
+ // prologue/epilogue or disable these registers for use in
+ // baseline compiler when compilerEnv_.debugEnabled() is set.
+ sync();
+
+ insertBreakablePoint(CallSiteDesc::Breakpoint);
+ if (!createStackMap("debug: per insn")) {
+ return false;
+ }
+ }
+
+ // Going below framePushedAtEntryToBody would imply that we've
+ // popped off the machine stack, part of the frame created by
+ // beginFunction().
+ MOZ_ASSERT(masm.framePushed() >=
+ stackMapGenerator_.framePushedAtEntryToBody.value());
+
+ // At this point we're definitely not generating code for a function call.
+ MOZ_ASSERT(
+ stackMapGenerator_.framePushedExcludingOutboundCallArgs.isNothing());
+
+ switch (op.b0) {
+ case uint16_t(Op::End):
+ if (!emitEnd()) {
+ return false;
+ }
+ if (iter_.controlStackEmpty()) {
+ return true;
+ }
+ NEXT();
+
+ // Control opcodes
+ case uint16_t(Op::Nop):
+ CHECK_NEXT(iter_.readNop());
+ case uint16_t(Op::Drop):
+ CHECK_NEXT(emitDrop());
+ case uint16_t(Op::Block):
+ CHECK_NEXT(emitBlock());
+ case uint16_t(Op::Loop):
+ CHECK_NEXT(emitLoop());
+ case uint16_t(Op::If):
+ CHECK_NEXT(emitIf());
+ case uint16_t(Op::Else):
+ CHECK_NEXT(emitElse());
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case uint16_t(Op::Try):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitTry());
+ case uint16_t(Op::Catch):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitCatch());
+ case uint16_t(Op::Throw):
+ if (!moduleEnv_.exceptionsEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitThrow());
+#endif
+ case uint16_t(Op::Br):
+ CHECK_NEXT(emitBr());
+ case uint16_t(Op::BrIf):
+ CHECK_NEXT(emitBrIf());
+ case uint16_t(Op::BrTable):
+ CHECK_NEXT(emitBrTable());
+ case uint16_t(Op::Return):
+ CHECK_NEXT(emitReturn());
+ case uint16_t(Op::Unreachable):
+ CHECK(iter_.readUnreachable());
+ if (!deadCode_) {
+ trap(Trap::Unreachable);
+ deadCode_ = true;
+ }
+ NEXT();
+
+ // Calls
+ case uint16_t(Op::Call):
+ CHECK_NEXT(emitCall());
+ case uint16_t(Op::CallIndirect):
+ CHECK_NEXT(emitCallIndirect());
+
+ // Locals and globals
+ case uint16_t(Op::GetLocal):
+ CHECK_NEXT(emitGetLocal());
+ case uint16_t(Op::SetLocal):
+ CHECK_NEXT(emitSetLocal());
+ case uint16_t(Op::TeeLocal):
+ CHECK_NEXT(emitTeeLocal());
+ case uint16_t(Op::GetGlobal):
+ CHECK_NEXT(emitGetGlobal());
+ case uint16_t(Op::SetGlobal):
+ CHECK_NEXT(emitSetGlobal());
+#ifdef ENABLE_WASM_REFTYPES
+ case uint16_t(Op::TableGet):
+ CHECK_NEXT(emitTableGet());
+ case uint16_t(Op::TableSet):
+ CHECK_NEXT(emitTableSet());
+#endif
+
+ // Select
+ case uint16_t(Op::SelectNumeric):
+ CHECK_NEXT(emitSelect(/*typed*/ false));
+ case uint16_t(Op::SelectTyped):
+ if (!moduleEnv_.refTypesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitSelect(/*typed*/ true));
+
+ // I32
+ case uint16_t(Op::I32Const): {
+ int32_t i32;
+ CHECK(iter_.readI32Const(&i32));
+ if (!deadCode_) {
+ pushI32(i32);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::I32Add):
+ CHECK_NEXT(dispatchBinary(emitAddI32, ValType::I32));
+ case uint16_t(Op::I32Sub):
+ CHECK_NEXT(dispatchBinary(emitSubtractI32, ValType::I32));
+ case uint16_t(Op::I32Mul):
+ CHECK_NEXT(dispatchBinary(emitMultiplyI32, ValType::I32));
+ case uint16_t(Op::I32DivS):
+ CHECK_NEXT(dispatchBinary(emitQuotientI32, ValType::I32));
+ case uint16_t(Op::I32DivU):
+ CHECK_NEXT(dispatchBinary(emitQuotientU32, ValType::I32));
+ case uint16_t(Op::I32RemS):
+ CHECK_NEXT(dispatchBinary(emitRemainderI32, ValType::I32));
+ case uint16_t(Op::I32RemU):
+ CHECK_NEXT(dispatchBinary(emitRemainderU32, ValType::I32));
+ case uint16_t(Op::I32Eqz):
+ CHECK_NEXT(dispatchConversion(emitEqzI32, ValType::I32, ValType::I32));
+ case uint16_t(Op::I32TruncSF32):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI32<0>, ValType::F32,
+ ValType::I32));
+ case uint16_t(Op::I32TruncUF32):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI32<TRUNC_UNSIGNED>,
+ ValType::F32, ValType::I32));
+ case uint16_t(Op::I32TruncSF64):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI32<0>, ValType::F64,
+ ValType::I32));
+ case uint16_t(Op::I32TruncUF64):
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI32<TRUNC_UNSIGNED>,
+ ValType::F64, ValType::I32));
+ case uint16_t(Op::I32WrapI64):
+ CHECK_NEXT(
+ dispatchConversion(emitWrapI64ToI32, ValType::I64, ValType::I32));
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK_NEXT(dispatchConversion(emitReinterpretF32AsI32, ValType::F32,
+ ValType::I32));
+ case uint16_t(Op::I32Clz):
+ CHECK_NEXT(dispatchUnary(emitClzI32, ValType::I32));
+ case uint16_t(Op::I32Ctz):
+ CHECK_NEXT(dispatchUnary(emitCtzI32, ValType::I32));
+ case uint16_t(Op::I32Popcnt):
+ CHECK_NEXT(dispatchUnary(emitPopcntI32, ValType::I32));
+ case uint16_t(Op::I32Or):
+ CHECK_NEXT(dispatchBinary(emitOrI32, ValType::I32));
+ case uint16_t(Op::I32And):
+ CHECK_NEXT(dispatchBinary(emitAndI32, ValType::I32));
+ case uint16_t(Op::I32Xor):
+ CHECK_NEXT(dispatchBinary(emitXorI32, ValType::I32));
+ case uint16_t(Op::I32Shl):
+ CHECK_NEXT(dispatchBinary(emitShlI32, ValType::I32));
+ case uint16_t(Op::I32ShrS):
+ CHECK_NEXT(dispatchBinary(emitShrI32, ValType::I32));
+ case uint16_t(Op::I32ShrU):
+ CHECK_NEXT(dispatchBinary(emitShrU32, ValType::I32));
+ case uint16_t(Op::I32Load8S):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Load8U):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint8));
+ case uint16_t(Op::I32Load16S):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Load16U):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint16));
+ case uint16_t(Op::I32Load):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I32Store8):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Store16):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Store):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I32Rotr):
+ CHECK_NEXT(dispatchBinary(emitRotrI32, ValType::I32));
+ case uint16_t(Op::I32Rotl):
+ CHECK_NEXT(dispatchBinary(emitRotlI32, ValType::I32));
+
+ // I64
+ case uint16_t(Op::I64Const): {
+ int64_t i64;
+ CHECK(iter_.readI64Const(&i64));
+ if (!deadCode_) {
+ pushI64(i64);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::I64Add):
+ CHECK_NEXT(dispatchBinary(emitAddI64, ValType::I64));
+ case uint16_t(Op::I64Sub):
+ CHECK_NEXT(dispatchBinary(emitSubtractI64, ValType::I64));
+ case uint16_t(Op::I64Mul):
+ CHECK_NEXT(dispatchBinary(emitMultiplyI64, ValType::I64));
+ case uint16_t(Op::I64DivS):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(
+ emitDivOrModI64BuiltinCall, SymbolicAddress::DivI64, ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary(emitQuotientI64, ValType::I64));
+#endif
+ case uint16_t(Op::I64DivU):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(emitDivOrModI64BuiltinCall,
+ SymbolicAddress::UDivI64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary(emitQuotientU64, ValType::I64));
+#endif
+ case uint16_t(Op::I64RemS):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(
+ emitDivOrModI64BuiltinCall, SymbolicAddress::ModI64, ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary(emitRemainderI64, ValType::I64));
+#endif
+ case uint16_t(Op::I64RemU):
+#ifdef RABALDR_INT_DIV_I64_CALLOUT
+ CHECK_NEXT(dispatchIntDivCallout(emitDivOrModI64BuiltinCall,
+ SymbolicAddress::UModI64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchBinary(emitRemainderU64, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncSF32):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(
+ dispatchCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToInt64,
+ ValType::F32, ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI64<0>, ValType::F32,
+ ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncUF32):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToUint64, ValType::F32,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF32ToI64<TRUNC_UNSIGNED>,
+ ValType::F32, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncSF64):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(
+ dispatchCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToInt64,
+ ValType::F64, ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI64<0>, ValType::F64,
+ ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncUF64):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToUint64, ValType::F64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(emitTruncateF64ToI64<TRUNC_UNSIGNED>,
+ ValType::F64, ValType::I64));
+#endif
+ case uint16_t(Op::I64ExtendSI32):
+ CHECK_NEXT(
+ dispatchConversion(emitExtendI32ToI64, ValType::I32, ValType::I64));
+ case uint16_t(Op::I64ExtendUI32):
+ CHECK_NEXT(
+ dispatchConversion(emitExtendU32ToI64, ValType::I32, ValType::I64));
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK_NEXT(dispatchConversion(emitReinterpretF64AsI64, ValType::F64,
+ ValType::I64));
+ case uint16_t(Op::I64Or):
+ CHECK_NEXT(dispatchBinary(emitOrI64, ValType::I64));
+ case uint16_t(Op::I64And):
+ CHECK_NEXT(dispatchBinary(emitAndI64, ValType::I64));
+ case uint16_t(Op::I64Xor):
+ CHECK_NEXT(dispatchBinary(emitXorI64, ValType::I64));
+ case uint16_t(Op::I64Shl):
+ CHECK_NEXT(dispatchBinary(emitShlI64, ValType::I64));
+ case uint16_t(Op::I64ShrS):
+ CHECK_NEXT(dispatchBinary(emitShrI64, ValType::I64));
+ case uint16_t(Op::I64ShrU):
+ CHECK_NEXT(dispatchBinary(emitShrU64, ValType::I64));
+ case uint16_t(Op::I64Rotr):
+ CHECK_NEXT(dispatchBinary(emitRotrI64, ValType::I64));
+ case uint16_t(Op::I64Rotl):
+ CHECK_NEXT(dispatchBinary(emitRotlI64, ValType::I64));
+ case uint16_t(Op::I64Clz):
+ CHECK_NEXT(dispatchUnary(emitClzI64, ValType::I64));
+ case uint16_t(Op::I64Ctz):
+ CHECK_NEXT(dispatchUnary(emitCtzI64, ValType::I64));
+ case uint16_t(Op::I64Popcnt):
+ CHECK_NEXT(dispatchUnary(emitPopcntI64, ValType::I64));
+ case uint16_t(Op::I64Eqz):
+ CHECK_NEXT(dispatchConversion(emitEqzI64, ValType::I64, ValType::I32));
+ case uint16_t(Op::I64Load8S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Load16S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Load32S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Load8U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint8));
+ case uint16_t(Op::I64Load16U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint16));
+ case uint16_t(Op::I64Load32U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint32));
+ case uint16_t(Op::I64Load):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int64));
+ case uint16_t(Op::I64Store8):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Store16):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Store32):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Store):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int64));
+
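+      // Editorial note: the RABALDR_INT_DIV_I64_CALLOUT and
+      // RABALDR_FLOAT_TO_I64_CALLOUT branches above appear to route 64-bit
+      // division/remainder and float-to-int64 truncation through builtin
+      // callouts (SymbolicAddress::DivI64 and friends) rather than emitting
+      // the operation inline; the matching #undefs are at the end of the
+      // file.
+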
+ // F32
+ case uint16_t(Op::F32Const): {
+ float f32;
+ CHECK(iter_.readF32Const(&f32));
+ if (!deadCode_) {
+ pushF32(f32);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::F32Add):
+ CHECK_NEXT(dispatchBinary(emitAddF32, ValType::F32));
+ case uint16_t(Op::F32Sub):
+ CHECK_NEXT(dispatchBinary(emitSubtractF32, ValType::F32));
+ case uint16_t(Op::F32Mul):
+ CHECK_NEXT(dispatchBinary(emitMultiplyF32, ValType::F32));
+ case uint16_t(Op::F32Div):
+ CHECK_NEXT(dispatchBinary(emitDivideF32, ValType::F32));
+ case uint16_t(Op::F32Min):
+ CHECK_NEXT(dispatchBinary(emitMinF32, ValType::F32));
+ case uint16_t(Op::F32Max):
+ CHECK_NEXT(dispatchBinary(emitMaxF32, ValType::F32));
+ case uint16_t(Op::F32Neg):
+ CHECK_NEXT(dispatchUnary(emitNegateF32, ValType::F32));
+ case uint16_t(Op::F32Abs):
+ CHECK_NEXT(dispatchUnary(emitAbsF32, ValType::F32));
+ case uint16_t(Op::F32Sqrt):
+ CHECK_NEXT(dispatchUnary(emitSqrtF32, ValType::F32));
+ case uint16_t(Op::F32Ceil):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::CeilF, ValType::F32));
+ case uint16_t(Op::F32Floor):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::FloorF, ValType::F32));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK_NEXT(dispatchConversion(emitConvertF64ToF32, ValType::F64,
+ ValType::F32));
+ case uint16_t(Op::F32ConvertSI32):
+ CHECK_NEXT(dispatchConversion(emitConvertI32ToF32, ValType::I32,
+ ValType::F32));
+ case uint16_t(Op::F32ConvertUI32):
+ CHECK_NEXT(dispatchConversion(emitConvertU32ToF32, ValType::I32,
+ ValType::F32));
+ case uint16_t(Op::F32ConvertSI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Int64ToFloat32,
+ ValType::I64, ValType::F32));
+#else
+ CHECK_NEXT(dispatchConversion(emitConvertI64ToF32, ValType::I64,
+ ValType::F32));
+#endif
+ case uint16_t(Op::F32ConvertUI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Uint64ToFloat32,
+ ValType::I64, ValType::F32));
+#else
+ CHECK_NEXT(dispatchConversion(emitConvertU64ToF32, ValType::I64,
+ ValType::F32));
+#endif
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK_NEXT(dispatchConversion(emitReinterpretI32AsF32, ValType::I32,
+ ValType::F32));
+ case uint16_t(Op::F32Load):
+ CHECK_NEXT(emitLoad(ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F32Store):
+ CHECK_NEXT(emitStore(ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F32CopySign):
+ CHECK_NEXT(dispatchBinary(emitCopysignF32, ValType::F32));
+ case uint16_t(Op::F32Nearest):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntF,
+ ValType::F32));
+ case uint16_t(Op::F32Trunc):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::TruncF, ValType::F32));
+
+ // F64
+ case uint16_t(Op::F64Const): {
+ double f64;
+ CHECK(iter_.readF64Const(&f64));
+ if (!deadCode_) {
+ pushF64(f64);
+ }
+ NEXT();
+ }
+ case uint16_t(Op::F64Add):
+ CHECK_NEXT(dispatchBinary(emitAddF64, ValType::F64));
+ case uint16_t(Op::F64Sub):
+ CHECK_NEXT(dispatchBinary(emitSubtractF64, ValType::F64));
+ case uint16_t(Op::F64Mul):
+ CHECK_NEXT(dispatchBinary(emitMultiplyF64, ValType::F64));
+ case uint16_t(Op::F64Div):
+ CHECK_NEXT(dispatchBinary(emitDivideF64, ValType::F64));
+ case uint16_t(Op::F64Min):
+ CHECK_NEXT(dispatchBinary(emitMinF64, ValType::F64));
+ case uint16_t(Op::F64Max):
+ CHECK_NEXT(dispatchBinary(emitMaxF64, ValType::F64));
+ case uint16_t(Op::F64Neg):
+ CHECK_NEXT(dispatchUnary(emitNegateF64, ValType::F64));
+ case uint16_t(Op::F64Abs):
+ CHECK_NEXT(dispatchUnary(emitAbsF64, ValType::F64));
+ case uint16_t(Op::F64Sqrt):
+ CHECK_NEXT(dispatchUnary(emitSqrtF64, ValType::F64));
+ case uint16_t(Op::F64Ceil):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::CeilD, ValType::F64));
+ case uint16_t(Op::F64Floor):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::FloorD, ValType::F64));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK_NEXT(dispatchConversion(emitConvertF32ToF64, ValType::F32,
+ ValType::F64));
+ case uint16_t(Op::F64ConvertSI32):
+ CHECK_NEXT(dispatchConversion(emitConvertI32ToF64, ValType::I32,
+ ValType::F64));
+ case uint16_t(Op::F64ConvertUI32):
+ CHECK_NEXT(dispatchConversion(emitConvertU32ToF64, ValType::I32,
+ ValType::F64));
+ case uint16_t(Op::F64ConvertSI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Int64ToDouble,
+ ValType::I64, ValType::F64));
+#else
+ CHECK_NEXT(dispatchConversion(emitConvertI64ToF64, ValType::I64,
+ ValType::F64));
+#endif
+ case uint16_t(Op::F64ConvertUI64):
+#ifdef RABALDR_I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertInt64ToFloatingCallout, SymbolicAddress::Uint64ToDouble,
+ ValType::I64, ValType::F64));
+#else
+ CHECK_NEXT(dispatchConversion(emitConvertU64ToF64, ValType::I64,
+ ValType::F64));
+#endif
+ case uint16_t(Op::F64Load):
+ CHECK_NEXT(emitLoad(ValType::F64, Scalar::Float64));
+ case uint16_t(Op::F64Store):
+ CHECK_NEXT(emitStore(ValType::F64, Scalar::Float64));
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK_NEXT(dispatchConversion(emitReinterpretI64AsF64, ValType::I64,
+ ValType::F64));
+ case uint16_t(Op::F64CopySign):
+ CHECK_NEXT(dispatchBinary(emitCopysignF64, ValType::F64));
+ case uint16_t(Op::F64Nearest):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntD,
+ ValType::F64));
+ case uint16_t(Op::F64Trunc):
+ CHECK_NEXT(
+ emitUnaryMathBuiltinCall(SymbolicAddress::TruncD, ValType::F64));
+
+ // Comparisons
+ case uint16_t(Op::I32Eq):
+ CHECK_NEXT(
+ dispatchComparison(emitCompareI32, ValType::I32, Assembler::Equal));
+ case uint16_t(Op::I32Ne):
+ CHECK_NEXT(dispatchComparison(emitCompareI32, ValType::I32,
+ Assembler::NotEqual));
+ case uint16_t(Op::I32LtS):
+ CHECK_NEXT(dispatchComparison(emitCompareI32, ValType::I32,
+ Assembler::LessThan));
+ case uint16_t(Op::I32LeS):
+ CHECK_NEXT(dispatchComparison(emitCompareI32, ValType::I32,
+ Assembler::LessThanOrEqual));
+ case uint16_t(Op::I32GtS):
+ CHECK_NEXT(dispatchComparison(emitCompareI32, ValType::I32,
+ Assembler::GreaterThan));
+ case uint16_t(Op::I32GeS):
+ CHECK_NEXT(dispatchComparison(emitCompareI32, ValType::I32,
+ Assembler::GreaterThanOrEqual));
+ case uint16_t(Op::I32LtU):
+ CHECK_NEXT(
+ dispatchComparison(emitCompareI32, ValType::I32, Assembler::Below));
+ case uint16_t(Op::I32LeU):
+ CHECK_NEXT(dispatchComparison(emitCompareI32, ValType::I32,
+ Assembler::BelowOrEqual));
+ case uint16_t(Op::I32GtU):
+ CHECK_NEXT(
+ dispatchComparison(emitCompareI32, ValType::I32, Assembler::Above));
+ case uint16_t(Op::I32GeU):
+ CHECK_NEXT(dispatchComparison(emitCompareI32, ValType::I32,
+ Assembler::AboveOrEqual));
+ case uint16_t(Op::I64Eq):
+ CHECK_NEXT(
+ dispatchComparison(emitCompareI64, ValType::I64, Assembler::Equal));
+ case uint16_t(Op::I64Ne):
+ CHECK_NEXT(dispatchComparison(emitCompareI64, ValType::I64,
+ Assembler::NotEqual));
+ case uint16_t(Op::I64LtS):
+ CHECK_NEXT(dispatchComparison(emitCompareI64, ValType::I64,
+ Assembler::LessThan));
+ case uint16_t(Op::I64LeS):
+ CHECK_NEXT(dispatchComparison(emitCompareI64, ValType::I64,
+ Assembler::LessThanOrEqual));
+ case uint16_t(Op::I64GtS):
+ CHECK_NEXT(dispatchComparison(emitCompareI64, ValType::I64,
+ Assembler::GreaterThan));
+ case uint16_t(Op::I64GeS):
+ CHECK_NEXT(dispatchComparison(emitCompareI64, ValType::I64,
+ Assembler::GreaterThanOrEqual));
+ case uint16_t(Op::I64LtU):
+ CHECK_NEXT(
+ dispatchComparison(emitCompareI64, ValType::I64, Assembler::Below));
+ case uint16_t(Op::I64LeU):
+ CHECK_NEXT(dispatchComparison(emitCompareI64, ValType::I64,
+ Assembler::BelowOrEqual));
+ case uint16_t(Op::I64GtU):
+ CHECK_NEXT(
+ dispatchComparison(emitCompareI64, ValType::I64, Assembler::Above));
+ case uint16_t(Op::I64GeU):
+ CHECK_NEXT(dispatchComparison(emitCompareI64, ValType::I64,
+ Assembler::AboveOrEqual));
+ case uint16_t(Op::F32Eq):
+ CHECK_NEXT(dispatchComparison(emitCompareF32, ValType::F32,
+ Assembler::DoubleEqual));
+ case uint16_t(Op::F32Ne):
+ CHECK_NEXT(dispatchComparison(emitCompareF32, ValType::F32,
+ Assembler::DoubleNotEqualOrUnordered));
+ case uint16_t(Op::F32Lt):
+ CHECK_NEXT(dispatchComparison(emitCompareF32, ValType::F32,
+ Assembler::DoubleLessThan));
+ case uint16_t(Op::F32Le):
+ CHECK_NEXT(dispatchComparison(emitCompareF32, ValType::F32,
+ Assembler::DoubleLessThanOrEqual));
+ case uint16_t(Op::F32Gt):
+ CHECK_NEXT(dispatchComparison(emitCompareF32, ValType::F32,
+ Assembler::DoubleGreaterThan));
+ case uint16_t(Op::F32Ge):
+ CHECK_NEXT(dispatchComparison(emitCompareF32, ValType::F32,
+ Assembler::DoubleGreaterThanOrEqual));
+ case uint16_t(Op::F64Eq):
+ CHECK_NEXT(dispatchComparison(emitCompareF64, ValType::F64,
+ Assembler::DoubleEqual));
+ case uint16_t(Op::F64Ne):
+ CHECK_NEXT(dispatchComparison(emitCompareF64, ValType::F64,
+ Assembler::DoubleNotEqualOrUnordered));
+ case uint16_t(Op::F64Lt):
+ CHECK_NEXT(dispatchComparison(emitCompareF64, ValType::F64,
+ Assembler::DoubleLessThan));
+ case uint16_t(Op::F64Le):
+ CHECK_NEXT(dispatchComparison(emitCompareF64, ValType::F64,
+ Assembler::DoubleLessThanOrEqual));
+ case uint16_t(Op::F64Gt):
+ CHECK_NEXT(dispatchComparison(emitCompareF64, ValType::F64,
+ Assembler::DoubleGreaterThan));
+ case uint16_t(Op::F64Ge):
+ CHECK_NEXT(dispatchComparison(emitCompareF64, ValType::F64,
+ Assembler::DoubleGreaterThanOrEqual));
+
+ // Sign extensions
+ case uint16_t(Op::I32Extend8S):
+ CHECK_NEXT(
+ dispatchConversion(emitExtendI32_8, ValType::I32, ValType::I32));
+ case uint16_t(Op::I32Extend16S):
+ CHECK_NEXT(
+ dispatchConversion(emitExtendI32_16, ValType::I32, ValType::I32));
+ case uint16_t(Op::I64Extend8S):
+ CHECK_NEXT(
+ dispatchConversion(emitExtendI64_8, ValType::I64, ValType::I64));
+ case uint16_t(Op::I64Extend16S):
+ CHECK_NEXT(
+ dispatchConversion(emitExtendI64_16, ValType::I64, ValType::I64));
+ case uint16_t(Op::I64Extend32S):
+ CHECK_NEXT(
+ dispatchConversion(emitExtendI64_32, ValType::I64, ValType::I64));
+
+ // Memory Related
+ case uint16_t(Op::MemoryGrow):
+ CHECK_NEXT(emitMemoryGrow());
+ case uint16_t(Op::MemorySize):
+ CHECK_NEXT(emitMemorySize());
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint16_t(Op::RefAsNonNull):
+ if (!moduleEnv_.functionReferencesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitRefAsNonNull());
+ case uint16_t(Op::BrOnNull):
+ if (!moduleEnv_.functionReferencesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(emitBrOnNull());
+#endif
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::RefEq):
+ if (!moduleEnv_.gcTypesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ CHECK_NEXT(dispatchComparison(emitCompareRef, RefType::eq(),
+ Assembler::Equal));
+#endif
+#ifdef ENABLE_WASM_REFTYPES
+ case uint16_t(Op::RefFunc):
+ CHECK_NEXT(emitRefFunc());
+ break;
+ case uint16_t(Op::RefNull):
+ CHECK_NEXT(emitRefNull());
+ break;
+ case uint16_t(Op::RefIsNull):
+ CHECK_NEXT(emitRefIsNull());
+ break;
+#endif
+
+#ifdef ENABLE_WASM_GC
+ // "GC" operations
+ case uint16_t(Op::GcPrefix): {
+ if (!moduleEnv_.gcTypesEnabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew):
+ CHECK_NEXT(emitStructNew());
+ case uint32_t(GcOp::StructGet):
+ CHECK_NEXT(emitStructGet());
+ case uint32_t(GcOp::StructSet):
+ CHECK_NEXT(emitStructSet());
+ case uint32_t(GcOp::StructNarrow):
+ CHECK_NEXT(emitStructNarrow());
+ default:
+ break;
+ } // switch (op.b1)
+ return iter_.unrecognizedOpcode(&op);
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ // SIMD operations
+ case uint16_t(Op::SimdPrefix): {
+ uint32_t laneIndex;
+ if (!moduleEnv_.v128Enabled()) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(SimdOp::I8x16ExtractLaneS):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI8x16, ValType::I32, 16));
+ case uint32_t(SimdOp::I8x16ExtractLaneU):
+ CHECK_NEXT(
+ dispatchExtractLane(ExtractLaneUI8x16, ValType::I32, 16));
+ case uint32_t(SimdOp::I16x8ExtractLaneS):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI16x8, ValType::I32, 8));
+ case uint32_t(SimdOp::I16x8ExtractLaneU):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneUI16x8, ValType::I32, 8));
+ case uint32_t(SimdOp::I32x4ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI32x4, ValType::I32, 4));
+ case uint32_t(SimdOp::I64x2ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneI64x2, ValType::I64, 2));
+ case uint32_t(SimdOp::F32x4ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneF32x4, ValType::F32, 4));
+ case uint32_t(SimdOp::F64x2ExtractLane):
+ CHECK_NEXT(dispatchExtractLane(ExtractLaneF64x2, ValType::F64, 2));
+ case uint32_t(SimdOp::I8x16Splat):
+ CHECK_NEXT(dispatchSplat(SplatI8x16, ValType::I32));
+ case uint32_t(SimdOp::I16x8Splat):
+ CHECK_NEXT(dispatchSplat(SplatI16x8, ValType::I32));
+ case uint32_t(SimdOp::I32x4Splat):
+ CHECK_NEXT(dispatchSplat(SplatI32x4, ValType::I32));
+ case uint32_t(SimdOp::I64x2Splat):
+ CHECK_NEXT(dispatchSplat(SplatI64x2, ValType::I64));
+ case uint32_t(SimdOp::F32x4Splat):
+ CHECK_NEXT(dispatchSplat(SplatF32x4, ValType::F32));
+ case uint32_t(SimdOp::F64x2Splat):
+ CHECK_NEXT(dispatchSplat(SplatF64x2, ValType::F64));
+ case uint32_t(SimdOp::I8x16AnyTrue):
+ case uint32_t(SimdOp::I16x8AnyTrue):
+ case uint32_t(SimdOp::I32x4AnyTrue):
+ CHECK_NEXT(dispatchVectorReduction(AnyTrue));
+ case uint32_t(SimdOp::I8x16AllTrue):
+ CHECK_NEXT(dispatchVectorReduction(AllTrueI8x16));
+ case uint32_t(SimdOp::I16x8AllTrue):
+ CHECK_NEXT(dispatchVectorReduction(AllTrueI16x8));
+ case uint32_t(SimdOp::I32x4AllTrue):
+ CHECK_NEXT(dispatchVectorReduction(AllTrueI32x4));
+ case uint32_t(SimdOp::I8x16Bitmask):
+ CHECK_NEXT(dispatchVectorReduction(BitmaskI8x16));
+ case uint32_t(SimdOp::I16x8Bitmask):
+ CHECK_NEXT(dispatchVectorReduction(BitmaskI16x8));
+ case uint32_t(SimdOp::I32x4Bitmask):
+ CHECK_NEXT(dispatchVectorReduction(BitmaskI32x4));
+ case uint32_t(SimdOp::I8x16ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI8x16, ValType::I32, 16));
+ case uint32_t(SimdOp::I16x8ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI16x8, ValType::I32, 8));
+ case uint32_t(SimdOp::I32x4ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI32x4, ValType::I32, 4));
+ case uint32_t(SimdOp::I64x2ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneI64x2, ValType::I64, 2));
+ case uint32_t(SimdOp::F32x4ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneF32x4, ValType::F32, 4));
+ case uint32_t(SimdOp::F64x2ReplaceLane):
+ CHECK_NEXT(dispatchReplaceLane(ReplaceLaneF64x2, ValType::F64, 2));
+ case uint32_t(SimdOp::I8x16Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16, Assembler::Equal));
+ case uint32_t(SimdOp::I8x16Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16, Assembler::NotEqual));
+ case uint32_t(SimdOp::I8x16LtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16, Assembler::LessThan));
+ case uint32_t(SimdOp::I8x16LtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI8x16, Assembler::Below));
+ case uint32_t(SimdOp::I8x16GtS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI8x16, Assembler::GreaterThan));
+ case uint32_t(SimdOp::I8x16GtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI8x16, Assembler::Above));
+ case uint32_t(SimdOp::I8x16LeS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI8x16, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::I8x16LeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI8x16, Assembler::BelowOrEqual));
+ case uint32_t(SimdOp::I8x16GeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI8x16,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::I8x16GeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI8x16, Assembler::AboveOrEqual));
+ case uint32_t(SimdOp::I16x8Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8, Assembler::Equal));
+ case uint32_t(SimdOp::I16x8Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8, Assembler::NotEqual));
+ case uint32_t(SimdOp::I16x8LtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8, Assembler::LessThan));
+ case uint32_t(SimdOp::I16x8LtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI16x8, Assembler::Below));
+ case uint32_t(SimdOp::I16x8GtS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI16x8, Assembler::GreaterThan));
+ case uint32_t(SimdOp::I16x8GtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI16x8, Assembler::Above));
+ case uint32_t(SimdOp::I16x8LeS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI16x8, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::I16x8LeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI16x8, Assembler::BelowOrEqual));
+ case uint32_t(SimdOp::I16x8GeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI16x8,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::I16x8GeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI16x8, Assembler::AboveOrEqual));
+ case uint32_t(SimdOp::I32x4Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4, Assembler::Equal));
+ case uint32_t(SimdOp::I32x4Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4, Assembler::NotEqual));
+ case uint32_t(SimdOp::I32x4LtS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4, Assembler::LessThan));
+ case uint32_t(SimdOp::I32x4LtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI32x4, Assembler::Below));
+ case uint32_t(SimdOp::I32x4GtS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI32x4, Assembler::GreaterThan));
+ case uint32_t(SimdOp::I32x4GtU):
+ CHECK_NEXT(dispatchVectorComparison(CmpUI32x4, Assembler::Above));
+ case uint32_t(SimdOp::I32x4LeS):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpI32x4, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::I32x4LeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI32x4, Assembler::BelowOrEqual));
+ case uint32_t(SimdOp::I32x4GeS):
+ CHECK_NEXT(dispatchVectorComparison(CmpI32x4,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::I32x4GeU):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpUI32x4, Assembler::AboveOrEqual));
+ case uint32_t(SimdOp::F32x4Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4, Assembler::Equal));
+ case uint32_t(SimdOp::F32x4Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4, Assembler::NotEqual));
+ case uint32_t(SimdOp::F32x4Lt):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4, Assembler::LessThan));
+ case uint32_t(SimdOp::F32x4Gt):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF32x4, Assembler::GreaterThan));
+ case uint32_t(SimdOp::F32x4Le):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF32x4, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::F32x4Ge):
+ CHECK_NEXT(dispatchVectorComparison(CmpF32x4,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::F64x2Eq):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2, Assembler::Equal));
+ case uint32_t(SimdOp::F64x2Ne):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2, Assembler::NotEqual));
+ case uint32_t(SimdOp::F64x2Lt):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2, Assembler::LessThan));
+ case uint32_t(SimdOp::F64x2Gt):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF64x2, Assembler::GreaterThan));
+ case uint32_t(SimdOp::F64x2Le):
+ CHECK_NEXT(
+ dispatchVectorComparison(CmpF64x2, Assembler::LessThanOrEqual));
+ case uint32_t(SimdOp::F64x2Ge):
+ CHECK_NEXT(dispatchVectorComparison(CmpF64x2,
+ Assembler::GreaterThanOrEqual));
+ case uint32_t(SimdOp::V128And):
+ CHECK_NEXT(dispatchVectorBinary(AndV128));
+ case uint32_t(SimdOp::V128Or):
+ CHECK_NEXT(dispatchVectorBinary(OrV128));
+ case uint32_t(SimdOp::V128Xor):
+ CHECK_NEXT(dispatchVectorBinary(XorV128));
+ case uint32_t(SimdOp::V128AndNot):
+ CHECK_NEXT(dispatchBinary(emitVectorAndNot, ValType::V128));
+ case uint32_t(SimdOp::I8x16AvgrU):
+ CHECK_NEXT(dispatchVectorBinary(AverageUI8x16));
+ case uint32_t(SimdOp::I16x8AvgrU):
+ CHECK_NEXT(dispatchVectorBinary(AverageUI16x8));
+ case uint32_t(SimdOp::I8x16Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI8x16));
+ case uint32_t(SimdOp::I8x16AddSaturateS):
+ CHECK_NEXT(dispatchVectorBinary(AddSatI8x16));
+ case uint32_t(SimdOp::I8x16AddSaturateU):
+ CHECK_NEXT(dispatchVectorBinary(AddSatUI8x16));
+ case uint32_t(SimdOp::I8x16Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI8x16));
+ case uint32_t(SimdOp::I8x16SubSaturateS):
+ CHECK_NEXT(dispatchVectorBinary(SubSatI8x16));
+ case uint32_t(SimdOp::I8x16SubSaturateU):
+ CHECK_NEXT(dispatchVectorBinary(SubSatUI8x16));
+ case uint32_t(SimdOp::I8x16MinS):
+ CHECK_NEXT(dispatchVectorBinary(MinI8x16));
+ case uint32_t(SimdOp::I8x16MinU):
+ CHECK_NEXT(dispatchVectorBinary(MinUI8x16));
+ case uint32_t(SimdOp::I8x16MaxS):
+ CHECK_NEXT(dispatchVectorBinary(MaxI8x16));
+ case uint32_t(SimdOp::I8x16MaxU):
+ CHECK_NEXT(dispatchVectorBinary(MaxUI8x16));
+ case uint32_t(SimdOp::I16x8Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI16x8));
+ case uint32_t(SimdOp::I16x8AddSaturateS):
+ CHECK_NEXT(dispatchVectorBinary(AddSatI16x8));
+ case uint32_t(SimdOp::I16x8AddSaturateU):
+ CHECK_NEXT(dispatchVectorBinary(AddSatUI16x8));
+ case uint32_t(SimdOp::I16x8Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI16x8));
+ case uint32_t(SimdOp::I16x8SubSaturateS):
+ CHECK_NEXT(dispatchVectorBinary(SubSatI16x8));
+ case uint32_t(SimdOp::I16x8SubSaturateU):
+ CHECK_NEXT(dispatchVectorBinary(SubSatUI16x8));
+ case uint32_t(SimdOp::I16x8Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulI16x8));
+ case uint32_t(SimdOp::I16x8MinS):
+ CHECK_NEXT(dispatchVectorBinary(MinI16x8));
+ case uint32_t(SimdOp::I16x8MinU):
+ CHECK_NEXT(dispatchVectorBinary(MinUI16x8));
+ case uint32_t(SimdOp::I16x8MaxS):
+ CHECK_NEXT(dispatchVectorBinary(MaxI16x8));
+ case uint32_t(SimdOp::I16x8MaxU):
+ CHECK_NEXT(dispatchVectorBinary(MaxUI16x8));
+ case uint32_t(SimdOp::I32x4Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI32x4));
+ case uint32_t(SimdOp::I32x4Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI32x4));
+ case uint32_t(SimdOp::I32x4Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulI32x4));
+ case uint32_t(SimdOp::I32x4MinS):
+ CHECK_NEXT(dispatchVectorBinary(MinI32x4));
+ case uint32_t(SimdOp::I32x4MinU):
+ CHECK_NEXT(dispatchVectorBinary(MinUI32x4));
+ case uint32_t(SimdOp::I32x4MaxS):
+ CHECK_NEXT(dispatchVectorBinary(MaxI32x4));
+ case uint32_t(SimdOp::I32x4MaxU):
+ CHECK_NEXT(dispatchVectorBinary(MaxUI32x4));
+ case uint32_t(SimdOp::I64x2Add):
+ CHECK_NEXT(dispatchVectorBinary(AddI64x2));
+ case uint32_t(SimdOp::I64x2Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubI64x2));
+ case uint32_t(SimdOp::I64x2Mul):
+ CHECK_NEXT(emitVectorMulI64x2());
+ case uint32_t(SimdOp::F32x4Add):
+ CHECK_NEXT(dispatchVectorBinary(AddF32x4));
+ case uint32_t(SimdOp::F32x4Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubF32x4));
+ case uint32_t(SimdOp::F32x4Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulF32x4));
+ case uint32_t(SimdOp::F32x4Div):
+ CHECK_NEXT(dispatchVectorBinary(DivF32x4));
+ case uint32_t(SimdOp::F32x4Min):
+ CHECK_NEXT(dispatchVectorBinary(MinF32x4));
+ case uint32_t(SimdOp::F32x4Max):
+ CHECK_NEXT(dispatchVectorBinary(MaxF32x4));
+ case uint32_t(SimdOp::F64x2Add):
+ CHECK_NEXT(dispatchVectorBinary(AddF64x2));
+ case uint32_t(SimdOp::F64x2Sub):
+ CHECK_NEXT(dispatchVectorBinary(SubF64x2));
+ case uint32_t(SimdOp::F64x2Mul):
+ CHECK_NEXT(dispatchVectorBinary(MulF64x2));
+ case uint32_t(SimdOp::F64x2Div):
+ CHECK_NEXT(dispatchVectorBinary(DivF64x2));
+ case uint32_t(SimdOp::F64x2Min):
+ CHECK_NEXT(dispatchVectorBinary(MinF64x2));
+ case uint32_t(SimdOp::F64x2Max):
+ CHECK_NEXT(dispatchVectorBinary(MaxF64x2));
+ case uint32_t(SimdOp::I8x16NarrowSI16x8):
+ CHECK_NEXT(dispatchVectorBinary(NarrowI16x8));
+ case uint32_t(SimdOp::I8x16NarrowUI16x8):
+ CHECK_NEXT(dispatchVectorBinary(NarrowUI16x8));
+ case uint32_t(SimdOp::I16x8NarrowSI32x4):
+ CHECK_NEXT(dispatchVectorBinary(NarrowI32x4));
+ case uint32_t(SimdOp::I16x8NarrowUI32x4):
+ CHECK_NEXT(dispatchVectorBinary(NarrowUI32x4));
+ case uint32_t(SimdOp::V8x16Swizzle):
+ CHECK_NEXT(dispatchVectorBinary(Swizzle));
+ case uint32_t(SimdOp::F32x4PMax):
+ CHECK_NEXT(dispatchVectorBinary(PMaxF32x4));
+ case uint32_t(SimdOp::F32x4PMin):
+ CHECK_NEXT(dispatchVectorBinary(PMinF32x4));
+ case uint32_t(SimdOp::F64x2PMax):
+ CHECK_NEXT(dispatchVectorBinary(PMaxF64x2));
+ case uint32_t(SimdOp::F64x2PMin):
+ CHECK_NEXT(dispatchVectorBinary(PMinF64x2));
+ case uint32_t(SimdOp::I32x4DotSI16x8):
+ CHECK_NEXT(dispatchVectorBinary(DotI16x8));
+ case uint32_t(SimdOp::I8x16Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI8x16));
+ case uint32_t(SimdOp::I16x8Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI16x8));
+ case uint32_t(SimdOp::I16x8WidenLowSI8x16):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowI8x16));
+ case uint32_t(SimdOp::I16x8WidenHighSI8x16):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighI8x16));
+ case uint32_t(SimdOp::I16x8WidenLowUI8x16):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowUI8x16));
+ case uint32_t(SimdOp::I16x8WidenHighUI8x16):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighUI8x16));
+ case uint32_t(SimdOp::I32x4Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI32x4));
+ case uint32_t(SimdOp::I32x4WidenLowSI16x8):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowI16x8));
+ case uint32_t(SimdOp::I32x4WidenHighSI16x8):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighI16x8));
+ case uint32_t(SimdOp::I32x4WidenLowUI16x8):
+ CHECK_NEXT(dispatchVectorUnary(WidenLowUI16x8));
+ case uint32_t(SimdOp::I32x4WidenHighUI16x8):
+ CHECK_NEXT(dispatchVectorUnary(WidenHighUI16x8));
+ case uint32_t(SimdOp::I32x4TruncSSatF32x4):
+ CHECK_NEXT(dispatchVectorUnary(ConvertF32x4ToI32x4));
+ case uint32_t(SimdOp::I32x4TruncUSatF32x4):
+ CHECK_NEXT(dispatchVectorUnary(ConvertF32x4ToUI32x4));
+ case uint32_t(SimdOp::I64x2Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegI64x2));
+ case uint32_t(SimdOp::F32x4Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsF32x4));
+ case uint32_t(SimdOp::F32x4Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegF32x4));
+ case uint32_t(SimdOp::F32x4Sqrt):
+ CHECK_NEXT(dispatchVectorUnary(SqrtF32x4));
+ case uint32_t(SimdOp::F32x4ConvertSI32x4):
+ CHECK_NEXT(dispatchVectorUnary(ConvertI32x4ToF32x4));
+ case uint32_t(SimdOp::F32x4ConvertUI32x4):
+ CHECK_NEXT(dispatchVectorUnary(ConvertUI32x4ToF32x4));
+ case uint32_t(SimdOp::F64x2Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsF64x2));
+ case uint32_t(SimdOp::F64x2Neg):
+ CHECK_NEXT(dispatchVectorUnary(NegF64x2));
+ case uint32_t(SimdOp::F64x2Sqrt):
+ CHECK_NEXT(dispatchVectorUnary(SqrtF64x2));
+ case uint32_t(SimdOp::V128Not):
+ CHECK_NEXT(dispatchVectorUnary(NotV128));
+ case uint32_t(SimdOp::I8x16Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsI8x16));
+ case uint32_t(SimdOp::I16x8Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsI16x8));
+ case uint32_t(SimdOp::I32x4Abs):
+ CHECK_NEXT(dispatchVectorUnary(AbsI32x4));
+ case uint32_t(SimdOp::F32x4Ceil):
+ CHECK_NEXT(dispatchVectorUnary(CeilF32x4));
+ case uint32_t(SimdOp::F32x4Floor):
+ CHECK_NEXT(dispatchVectorUnary(FloorF32x4));
+ case uint32_t(SimdOp::F32x4Trunc):
+ CHECK_NEXT(dispatchVectorUnary(TruncF32x4));
+ case uint32_t(SimdOp::F32x4Nearest):
+ CHECK_NEXT(dispatchVectorUnary(NearestF32x4));
+ case uint32_t(SimdOp::F64x2Ceil):
+ CHECK_NEXT(dispatchVectorUnary(CeilF64x2));
+ case uint32_t(SimdOp::F64x2Floor):
+ CHECK_NEXT(dispatchVectorUnary(FloorF64x2));
+ case uint32_t(SimdOp::F64x2Trunc):
+ CHECK_NEXT(dispatchVectorUnary(TruncF64x2));
+ case uint32_t(SimdOp::F64x2Nearest):
+ CHECK_NEXT(dispatchVectorUnary(NearestF64x2));
+ case uint32_t(SimdOp::I8x16Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI8x16));
+ case uint32_t(SimdOp::I8x16ShrS):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightI8x16));
+ case uint32_t(SimdOp::I8x16ShrU):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightUI8x16));
+ case uint32_t(SimdOp::I16x8Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI16x8));
+ case uint32_t(SimdOp::I16x8ShrS):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightI16x8));
+ case uint32_t(SimdOp::I16x8ShrU):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightUI16x8));
+ case uint32_t(SimdOp::I32x4Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI32x4));
+ case uint32_t(SimdOp::I32x4ShrS):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightI32x4));
+ case uint32_t(SimdOp::I32x4ShrU):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftRightUI32x4));
+ case uint32_t(SimdOp::I64x2Shl):
+ CHECK_NEXT(dispatchVectorVariableShift(ShiftLeftI64x2));
+ case uint32_t(SimdOp::I64x2ShrS):
+ CHECK_NEXT(emitVectorShiftRightI64x2(/* isUnsigned */ false));
+ case uint32_t(SimdOp::I64x2ShrU):
+ CHECK_NEXT(emitVectorShiftRightI64x2(/* isUnsigned */ true));
+ case uint32_t(SimdOp::V128Bitselect):
+ CHECK_NEXT(emitBitselect());
+ case uint32_t(SimdOp::V8x16Shuffle):
+ CHECK_NEXT(emitVectorShuffle());
+ case uint32_t(SimdOp::V128Const): {
+ V128 v128;
+ CHECK(iter_.readV128Const(&v128));
+ if (!deadCode_) {
+ pushV128(v128);
+ }
+ NEXT();
+ }
+ case uint32_t(SimdOp::V128Load):
+ CHECK_NEXT(emitLoad(ValType::V128, Scalar::Simd128));
+ case uint32_t(SimdOp::V8x16LoadSplat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Uint8));
+ case uint32_t(SimdOp::V16x8LoadSplat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Uint16));
+ case uint32_t(SimdOp::V32x4LoadSplat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Uint32));
+ case uint32_t(SimdOp::V64x2LoadSplat):
+ CHECK_NEXT(emitLoadSplat(Scalar::Int64));
+ case uint32_t(SimdOp::I16x8LoadS8x8):
+ CHECK_NEXT(emitLoadExtend(Scalar::Int8));
+ case uint32_t(SimdOp::I16x8LoadU8x8):
+ CHECK_NEXT(emitLoadExtend(Scalar::Uint8));
+ case uint32_t(SimdOp::I32x4LoadS16x4):
+ CHECK_NEXT(emitLoadExtend(Scalar::Int16));
+ case uint32_t(SimdOp::I32x4LoadU16x4):
+ CHECK_NEXT(emitLoadExtend(Scalar::Uint16));
+ case uint32_t(SimdOp::I64x2LoadS32x2):
+ CHECK_NEXT(emitLoadExtend(Scalar::Int32));
+ case uint32_t(SimdOp::I64x2LoadU32x2):
+ CHECK_NEXT(emitLoadExtend(Scalar::Uint32));
+ case uint32_t(SimdOp::V128Load32Zero):
+ CHECK_NEXT(emitLoadZero(Scalar::Float32));
+ case uint32_t(SimdOp::V128Load64Zero):
+ CHECK_NEXT(emitLoadZero(Scalar::Float64));
+ case uint32_t(SimdOp::V128Store):
+ CHECK_NEXT(emitStore(ValType::V128, Scalar::Simd128));
+ default:
+ break;
+ } // switch (op.b1)
+ return iter_.unrecognizedOpcode(&op);
+ }
+#endif // ENABLE_WASM_SIMD
+
+ // "Miscellaneous" operations
+ case uint16_t(Op::MiscPrefix): {
+ switch (op.b1) {
+ case uint32_t(MiscOp::I32TruncSSatF32):
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF32ToI32<TRUNC_SATURATING>,
+ ValType::F32, ValType::I32));
+ case uint32_t(MiscOp::I32TruncUSatF32):
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF32ToI32<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F32, ValType::I32));
+ case uint32_t(MiscOp::I32TruncSSatF64):
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF64ToI32<TRUNC_SATURATING>,
+ ValType::F64, ValType::I32));
+ case uint32_t(MiscOp::I32TruncUSatF64):
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF64ToI32<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F64, ValType::I32));
+ case uint32_t(MiscOp::I64TruncSSatF32):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToInt64, ValType::F32,
+ ValType::I64));
+#else
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF32ToI64<TRUNC_SATURATING>,
+ ValType::F32, ValType::I64));
+#endif
+ case uint32_t(MiscOp::I64TruncUSatF32):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToUint64, ValType::F32,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF32ToI64<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F32, ValType::I64));
+#endif
+ case uint32_t(MiscOp::I64TruncSSatF64):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToInt64, ValType::F64,
+ ValType::I64));
+#else
+ CHECK_NEXT(
+ dispatchConversionOOM(emitTruncateF64ToI64<TRUNC_SATURATING>,
+ ValType::F64, ValType::I64));
+#endif
+ case uint32_t(MiscOp::I64TruncUSatF64):
+#ifdef RABALDR_FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(dispatchCalloutConversionOOM(
+ emitConvertFloatingToInt64Callout,
+ SymbolicAddress::SaturatingTruncateDoubleToUint64, ValType::F64,
+ ValType::I64));
+#else
+ CHECK_NEXT(dispatchConversionOOM(
+ emitTruncateF64ToI64<TRUNC_UNSIGNED | TRUNC_SATURATING>,
+ ValType::F64, ValType::I64));
+#endif
+ case uint32_t(MiscOp::MemCopy):
+ CHECK_NEXT(emitMemCopy());
+ case uint32_t(MiscOp::DataDrop):
+ CHECK_NEXT(emitDataOrElemDrop(/*isData=*/true));
+ case uint32_t(MiscOp::MemFill):
+ CHECK_NEXT(emitMemFill());
+ case uint32_t(MiscOp::MemInit):
+ CHECK_NEXT(emitMemOrTableInit(/*isMem=*/true));
+ case uint32_t(MiscOp::TableCopy):
+ CHECK_NEXT(emitTableCopy());
+ case uint32_t(MiscOp::ElemDrop):
+ CHECK_NEXT(emitDataOrElemDrop(/*isData=*/false));
+ case uint32_t(MiscOp::TableInit):
+ CHECK_NEXT(emitMemOrTableInit(/*isMem=*/false));
+#ifdef ENABLE_WASM_REFTYPES
+ case uint32_t(MiscOp::TableFill):
+ CHECK_NEXT(emitTableFill());
+ case uint32_t(MiscOp::TableGrow):
+ CHECK_NEXT(emitTableGrow());
+ case uint32_t(MiscOp::TableSize):
+ CHECK_NEXT(emitTableSize());
+#endif
+ default:
+ break;
+ } // switch (op.b1)
+ return iter_.unrecognizedOpcode(&op);
+ }
+
+ // Thread operations
+ case uint16_t(Op::ThreadPrefix): {
+ if (moduleEnv_.sharedMemoryEnabled() == Shareable::False) {
+ return iter_.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(ThreadOp::Wake):
+ CHECK_NEXT(emitWake());
+
+ case uint32_t(ThreadOp::I32Wait):
+ CHECK_NEXT(emitWait(ValType::I32, 4));
+ case uint32_t(ThreadOp::I64Wait):
+ CHECK_NEXT(emitWait(ValType::I64, 8));
+ case uint32_t(ThreadOp::Fence):
+ CHECK_NEXT(emitFence());
+
+ case uint32_t(ThreadOp::I32AtomicLoad):
+ CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicLoad):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicLoad8U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicLoad16U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad8U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicLoad16U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad32U):
+ CHECK_NEXT(emitAtomicLoad(ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicStore):
+ CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicStore):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicStore8U):
+ CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicStore16U):
+ CHECK_NEXT(emitAtomicStore(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore8U):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicStore16U):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore32U):
+ CHECK_NEXT(emitAtomicStore(ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicAdd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAddOp));
+
+ case uint32_t(ThreadOp::I32AtomicSub):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchSubOp));
+
+ case uint32_t(ThreadOp::I32AtomicAnd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAndOp));
+
+ case uint32_t(ThreadOp::I32AtomicOr):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchOrOp));
+
+ case uint32_t(ThreadOp::I32AtomicXor):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor8U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor16U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor32U):
+ CHECK_NEXT(
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchXorOp));
+
+ case uint32_t(ThreadOp::I32AtomicXchg):
+ CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicXchg):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicXchg8U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicXchg16U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg8U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicXchg16U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg32U):
+ CHECK_NEXT(emitAtomicXchg(ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicCmpXchg):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
+ CHECK_NEXT(emitAtomicCmpXchg(ValType::I64, Scalar::Uint32));
+
+ default:
+ return iter_.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ // asm.js and other private operations
+ case uint16_t(Op::MozPrefix):
+ return iter_.unrecognizedOpcode(&op);
+
+ default:
+ return iter_.unrecognizedOpcode(&op);
+ }
+
+#undef CHECK
+#undef NEXT
+#undef CHECK_NEXT
+#undef CHECK_POINTER_COUNT
+#undef CHECK_SIMD_EXPERIMENTAL
+#undef dispatchBinary
+#undef dispatchUnary
+#undef dispatchComparison
+#undef dispatchConversion
+#undef dispatchConversionOOM
+#undef dispatchCalloutConversionOOM
+#undef dispatchIntDivCallout
+#undef dispatchVectorBinary
+#undef dispatchVectorUnary
+#undef dispatchVectorComparison
+#undef dispatchExtractLane
+#undef dispatchReplaceLane
+#undef dispatchSplat
+#undef dispatchVectorReduction
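+
+// Editorial note: CHECK, NEXT, CHECK_NEXT and the dispatch* names #undef'd
+// above are function-local helper macros, presumably defined near the top of
+// emitBody(); the CHECK_NEXT-style uses in the opcode switch appear to bail
+// out on failure and otherwise fall through to decoding the next opcode.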
+
+ MOZ_CRASH("unreachable");
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+bool BaseCompiler::emitFunction() {
+ if (!beginFunction()) {
+ return false;
+ }
+
+ if (!emitBody()) {
+ return false;
+ }
+
+ if (!endFunction()) {
+ return false;
+ }
+
+ return true;
+}
+
+BaseCompiler::BaseCompiler(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ const FuncCompileInput& func,
+ const ValTypeVector& locals,
+ const MachineState& trapExitLayout,
+ size_t trapExitLayoutNumWords, Decoder& decoder,
+ StkVector& stkSource, TempAllocator* alloc,
+ MacroAssembler* masm, StackMaps* stackMaps)
+ : moduleEnv_(moduleEnv),
+ compilerEnv_(compilerEnv),
+ iter_(moduleEnv, decoder),
+ func_(func),
+ lastReadCallSite_(0),
+ alloc_(alloc->fallible()),
+ locals_(locals),
+ deadCode_(false),
+ bceSafe_(0),
+ latentOp_(LatentOp::None),
+ latentType_(ValType::I32),
+ latentIntCmp_(Assembler::Equal),
+ latentDoubleCmp_(Assembler::DoubleEqual),
+ masm(*masm),
+ fr(*masm),
+ stackMapGenerator_(stackMaps, trapExitLayout, trapExitLayoutNumWords,
+ *masm),
+ stkSource_(stkSource) {
+  // Our caller, BaselineCompileFunctions, will lend us the vector contents to
+  // use for the eval stack. To get hold of those contents, we'll temporarily
+  // install an empty one in its place.
+ MOZ_ASSERT(stk_.empty());
+ stk_.swap(stkSource_);
+
+  // Assuming that previously processed wasm functions are well formed, the
+  // eval stack should now be empty. But empty it anyway; any non-emptiness
+  // at this point will cause chaos.
+ stk_.clear();
+}
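+
+// Editorial aside: the constructor above and the destructor below implement a
+// "borrow preallocated storage via swap" pattern. A minimal standalone sketch
+// of the same idea, with purely illustrative names (not part of this patch):
+//
+//   #include <vector>
+//
+//   struct Borrower {
+//     std::vector<int>& source;  // caller's long-lived, preallocated vector
+//     std::vector<int> work;     // starts empty
+//     explicit Borrower(std::vector<int>& src) : source(src) {
+//       work.swap(source);       // take the capacity without allocating
+//       work.clear();            // defend against a non-empty source
+//     }
+//     ~Borrower() { work.swap(source); }  // hand the capacity back
+//   };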
+
+BaseCompiler::~BaseCompiler() {
+ stk_.swap(stkSource_);
+ // We've returned the eval stack vector contents to our caller,
+ // BaselineCompileFunctions. We expect the vector we get in return to be
+ // empty since that's what we swapped for the stack vector in our
+ // constructor.
+ MOZ_ASSERT(stk_.empty());
+}
+
+bool BaseCompiler::init() {
+ ra.init(this);
+
+ if (!SigD_.append(ValType::F64)) {
+ return false;
+ }
+ if (!SigF_.append(ValType::F32)) {
+ return false;
+ }
+
+ ArgTypeVector args(funcType());
+ if (!fr.setupLocals(locals_, args, compilerEnv_.debugEnabled(),
+ &localInfo_)) {
+ return false;
+ }
+
+ return true;
+}
+
+FuncOffsets BaseCompiler::finish() {
+ MOZ_ASSERT(done(), "all bytes must be consumed");
+ MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
+
+ MOZ_ASSERT(stk_.empty());
+ MOZ_ASSERT(stackMapGenerator_.memRefsOnStk == 0);
+
+ masm.flushBuffer();
+
+ return offsets_;
+}
+
+} // namespace wasm
+} // namespace js
+
+bool js::wasm::BaselinePlatformSupport() {
+#if defined(JS_CODEGEN_ARM)
+ // Simplifying assumption: require SDIV and UDIV.
+ //
+ // I have no good data on ARM populations allowing me to say that
+ // X% of devices in the market implement SDIV and UDIV. However,
+ // they are definitely implemented on the Cortex-A7 and Cortex-A15
+ // and on all ARMv8 systems.
+ if (!HasIDIV()) {
+ return false;
+ }
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
+ defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo,
+ const FuncCompileInputVector& inputs,
+ CompiledCode* code,
+ UniqueChars* error) {
+ MOZ_ASSERT(compilerEnv.tier() == Tier::Baseline);
+ MOZ_ASSERT(moduleEnv.kind == ModuleKind::Wasm);
+
+ // The MacroAssembler will sometimes access the jitContext.
+
+ TempAllocator alloc(&lifo);
+ JitContext jitContext(&alloc);
+ MOZ_ASSERT(IsCompilingWasm());
+ WasmMacroAssembler masm(alloc, moduleEnv);
+
+ // Swap in already-allocated empty vectors to avoid malloc/free.
+ MOZ_ASSERT(code->empty());
+ if (!code->swap(masm)) {
+ return false;
+ }
+
+ // Create a description of the stack layout created by GenerateTrapExit().
+ MachineState trapExitLayout;
+ size_t trapExitLayoutNumWords;
+ GenerateTrapExitMachineState(&trapExitLayout, &trapExitLayoutNumWords);
+
+ // The compiler's operand stack. We reuse it across all functions so as to
+ // avoid malloc/free. Presize it to 128 elements in the hope of avoiding
+ // reallocation later.
+ StkVector stk;
+ if (!stk.reserve(128)) {
+ return false;
+ }
+
+ for (const FuncCompileInput& func : inputs) {
+ Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+ // Build the local types vector.
+
+ ValTypeVector locals;
+ if (!locals.appendAll(moduleEnv.funcs[func.index].type->args())) {
+ return false;
+ }
+ if (!DecodeLocalEntries(d, moduleEnv.types, moduleEnv.features, &locals)) {
+ return false;
+ }
+
+ // One-pass baseline compilation.
+
+ BaseCompiler f(moduleEnv, compilerEnv, func, locals, trapExitLayout,
+ trapExitLayoutNumWords, d, stk, &alloc, &masm,
+ &code->stackMaps);
+ if (!f.init()) {
+ return false;
+ }
+ if (!f.emitFunction()) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode,
+ f.finish())) {
+ return false;
+ }
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
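+
+// Editorial summary of BaselineCompileFunctions above: for each function,
+// decode its local declarations, run the one-pass BaseCompiler
+// (init / emitFunction / finish), record the resulting FuncOffsets as a code
+// range, and finally hand the assembled buffers back to |code| via swap.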
+
+#ifdef DEBUG
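+// Editorial note: IsValidStackMapKey below accepts |nextPC| only when the
+// immediately preceding instruction is one after which a stackmap may be
+// recorded: a trap instruction (ud2/udf/hlt), a direct or indirect call, or,
+// in debug-enabled code, the nop the compiler emits there.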
+bool js::wasm::IsValidStackMapKey(bool debugEnabled, const uint8_t* nextPC) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ const uint8_t* insn = nextPC;
+ return (insn[-2] == 0x0F && insn[-1] == 0x0B) || // ud2
+ (insn[-2] == 0xFF && (insn[-1] & 0xF8) == 0xD0) || // call *%r_
+ insn[-5] == 0xE8 || // call simm32
+ (debugEnabled && insn[-5] == 0x0F && insn[-4] == 0x1F &&
+ insn[-3] == 0x44 && insn[-2] == 0x00 &&
+ insn[-1] == 0x00); // nop_five
+
+# elif defined(JS_CODEGEN_ARM)
+ const uint32_t* insn = (const uint32_t*)nextPC;
+ return ((uintptr_t(insn) & 3) == 0) && // must be ARM, not Thumb
+ (insn[-1] == 0xe7f000f0 || // udf
+ (insn[-1] & 0xfffffff0) == 0xe12fff30 || // blx reg (ARM, enc A1)
+ (insn[-1] & 0xff000000) == 0xeb000000 || // bl simm24 (ARM, enc A1)
+ (debugEnabled && insn[-1] == 0xe320f000)); // "as_nop"
+
+# elif defined(JS_CODEGEN_ARM64)
+ const uint32_t hltInsn = 0xd4a00000;
+ const uint32_t* insn = (const uint32_t*)nextPC;
+ return ((uintptr_t(insn) & 3) == 0) &&
+ (insn[-1] == hltInsn || // hlt
+ (insn[-1] & 0xfffffc1f) == 0xd63f0000 || // blr reg
+ (insn[-1] & 0xfc000000) == 0x94000000 || // bl simm26
+ (debugEnabled && insn[-1] == 0xd503201f)); // nop
+
+# else
+ MOZ_CRASH("IsValidStackMapKey: requires implementation on this platform");
+# endif
+}
+#endif
+
+#undef RABALDR_INT_DIV_I64_CALLOUT
+#undef RABALDR_I64_TO_FLOAT_CALLOUT
+#undef RABALDR_FLOAT_TO_I64_CALLOUT
diff --git a/js/src/wasm/WasmBaselineCompile.h b/js/src/wasm/WasmBaselineCompile.h
new file mode 100644
index 0000000000..13c205e247
--- /dev/null
+++ b/js/src/wasm/WasmBaselineCompile.h
@@ -0,0 +1,103 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef asmjs_wasm_baseline_compile_h
+#define asmjs_wasm_baseline_compile_h
+
+#include "wasm/WasmGenerator.h"
+
+namespace js {
+namespace wasm {
+
+// Return whether BaselineCompileFunctions can generate code on the current
+// device. Usually you do *not* want to call this directly; you want
+// BaselineAvailable().
+[[nodiscard]] bool BaselinePlatformSupport();
+
+// Generate adequate code quickly.
+[[nodiscard]] bool BaselineCompileFunctions(
+ const ModuleEnvironment& moduleEnv, const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo, const FuncCompileInputVector& inputs, CompiledCode* code,
+ UniqueChars* error);
+
+class BaseLocalIter {
+ private:
+ using ConstValTypeRange = mozilla::Range<const ValType>;
+
+ const ValTypeVector& locals_;
+ const ArgTypeVector& args_;
+ jit::WasmABIArgIter<ArgTypeVector> argsIter_;
+ size_t index_;
+ int32_t frameSize_;
+ int32_t nextFrameSize_;
+ int32_t frameOffset_;
+ int32_t stackResultPointerOffset_;
+ jit::MIRType mirType_;
+ bool done_;
+
+ void settle();
+ int32_t pushLocal(size_t nbytes);
+
+ public:
+ BaseLocalIter(const ValTypeVector& locals, const ArgTypeVector& args,
+ bool debugEnabled);
+ void operator++(int);
+ bool done() const { return done_; }
+
+ jit::MIRType mirType() const {
+ MOZ_ASSERT(!done_);
+ return mirType_;
+ }
+ int32_t frameOffset() const {
+ MOZ_ASSERT(!done_);
+ MOZ_ASSERT(frameOffset_ != INT32_MAX);
+ return frameOffset_;
+ }
+ size_t index() const {
+ MOZ_ASSERT(!done_);
+ return index_;
+ }
+ // The size in bytes taken up by the previous `index_` locals, also including
+ // fixed allocations like the DebugFrame and "hidden" locals like a spilled
+ // stack results pointer.
+ int32_t frameSize() const { return frameSize_; }
+
+ int32_t stackResultPointerOffset() const {
+ MOZ_ASSERT(args_.hasSyntheticStackResultPointerArg());
+ MOZ_ASSERT(stackResultPointerOffset_ != INT32_MAX);
+ return stackResultPointerOffset_;
+ }
+
+#ifdef DEBUG
+ bool isArg() const {
+ MOZ_ASSERT(!done_);
+ return !argsIter_.done();
+ }
+#endif
+};
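+
+// Editorial usage sketch for BaseLocalIter (the surrounding setup and
+// allocateLocal() are hypothetical, for illustration only):
+//
+//   ArgTypeVector args(funcType);
+//   BaseLocalIter iter(locals, args, /*debugEnabled=*/false);
+//   for (; !iter.done(); iter++) {
+//     allocateLocal(iter.index(), iter.mirType(), iter.frameOffset());
+//   }
+//   int32_t totalFrameBytes = iter.frameSize();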
+
+#ifdef DEBUG
+// Check whether |nextPC| is a valid code address for a stackmap created by
+// this compiler.
+bool IsValidStackMapKey(bool debugEnabled, const uint8_t* nextPC);
+#endif
+
+} // namespace wasm
+} // namespace js
+
+#endif // asmjs_wasm_baseline_compile_h
diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
new file mode 100644
index 0000000000..71aec1d955
--- /dev/null
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -0,0 +1,1576 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBuiltins.h"
+
+#include "mozilla/Atomics.h"
+
+#include "fdlibm.h"
+#include "jslibmath.h"
+#include "jsmath.h"
+
+#include "gc/Allocator.h"
+#include "jit/AtomicOperations.h"
+#include "jit/InlinableNatives.h"
+#include "jit/MacroAssembler.h"
+#include "jit/Simulator.h"
+#include "js/experimental/JitInfo.h" // JSJitInfo
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/friend/StackLimits.h" // js::CheckRecursionLimit
+#include "threading/Mutex.h"
+#include "util/Memory.h"
+#include "util/Poison.h"
+#include "vm/BigIntType.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmTypes.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace jit;
+using namespace wasm;
+
+using mozilla::HashGeneric;
+using mozilla::IsNaN;
+using mozilla::MakeEnumeratedRange;
+
+static const unsigned BUILTIN_THUNK_LIFO_SIZE = 64 * 1024;
+
+// ============================================================================
+// WebAssembly builtin C++ functions called from wasm code to implement internal
+// wasm operations: type descriptions.
+
+// Some abbreviations, for the sake of conciseness.
+#define _F64 MIRType::Double
+#define _F32 MIRType::Float32
+#define _I32 MIRType::Int32
+#define _I64 MIRType::Int64
+#define _PTR MIRType::Pointer
+#define _RoN MIRType::RefOrNull
+#define _VOID MIRType::None
+#define _END MIRType::None
+#define _Infallible FailureMode::Infallible
+#define _FailOnNegI32 FailureMode::FailOnNegI32
+#define _FailOnNullPtr FailureMode::FailOnNullPtr
+#define _FailOnInvalidRef FailureMode::FailOnInvalidRef
+
+namespace js {
+namespace wasm {
+
+const SymbolicAddressSignature SASigSinD = {
+ SymbolicAddress::SinD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigCosD = {
+ SymbolicAddress::CosD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigTanD = {
+ SymbolicAddress::TanD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigASinD = {
+ SymbolicAddress::ASinD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigACosD = {
+ SymbolicAddress::ACosD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigATanD = {
+ SymbolicAddress::ATanD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigCeilD = {
+ SymbolicAddress::CeilD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigCeilF = {
+ SymbolicAddress::CeilF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigFloorD = {
+ SymbolicAddress::FloorD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigFloorF = {
+ SymbolicAddress::FloorF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigTruncD = {
+ SymbolicAddress::TruncD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigTruncF = {
+ SymbolicAddress::TruncF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigNearbyIntD = {
+ SymbolicAddress::NearbyIntD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigNearbyIntF = {
+ SymbolicAddress::NearbyIntF, _F32, _Infallible, 1, {_F32, _END}};
+const SymbolicAddressSignature SASigExpD = {
+ SymbolicAddress::ExpD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigLogD = {
+ SymbolicAddress::LogD, _F64, _Infallible, 1, {_F64, _END}};
+const SymbolicAddressSignature SASigPowD = {
+ SymbolicAddress::PowD, _F64, _Infallible, 2, {_F64, _F64, _END}};
+const SymbolicAddressSignature SASigATan2D = {
+ SymbolicAddress::ATan2D, _F64, _Infallible, 2, {_F64, _F64, _END}};
+const SymbolicAddressSignature SASigMemoryGrow = {
+ SymbolicAddress::MemoryGrow, _I32, _Infallible, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigMemorySize = {
+ SymbolicAddress::MemorySize, _I32, _Infallible, 1, {_PTR, _END}};
+const SymbolicAddressSignature SASigWaitI32 = {SymbolicAddress::WaitI32,
+ _I32,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _I32, _I64, _END}};
+const SymbolicAddressSignature SASigWaitI64 = {SymbolicAddress::WaitI64,
+ _I32,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _I64, _I64, _END}};
+const SymbolicAddressSignature SASigWake = {
+ SymbolicAddress::Wake, _I32, _FailOnNegI32, 3, {_PTR, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigMemCopy = {
+ SymbolicAddress::MemCopy,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemCopyShared = {
+ SymbolicAddress::MemCopyShared,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigDataDrop = {
+ SymbolicAddress::DataDrop, _VOID, _FailOnNegI32, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigMemFill = {
+ SymbolicAddress::MemFill,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemFillShared = {
+ SymbolicAddress::MemFillShared,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _PTR, _END}};
+const SymbolicAddressSignature SASigMemInit = {
+ SymbolicAddress::MemInit,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _I32, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableCopy = {
+ SymbolicAddress::TableCopy,
+ _VOID,
+ _FailOnNegI32,
+ 6,
+ {_PTR, _I32, _I32, _I32, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigElemDrop = {
+ SymbolicAddress::ElemDrop, _VOID, _FailOnNegI32, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigTableFill = {
+ SymbolicAddress::TableFill,
+ _VOID,
+ _FailOnNegI32,
+ 5,
+ {_PTR, _I32, _RoN, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableGet = {SymbolicAddress::TableGet,
+ _RoN,
+ _FailOnInvalidRef,
+ 3,
+ {_PTR, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableGrow = {
+ SymbolicAddress::TableGrow,
+ _I32,
+ _Infallible,
+ 4,
+ {_PTR, _RoN, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableInit = {
+ SymbolicAddress::TableInit,
+ _VOID,
+ _FailOnNegI32,
+ 6,
+ {_PTR, _I32, _I32, _I32, _I32, _I32, _END}};
+const SymbolicAddressSignature SASigTableSet = {SymbolicAddress::TableSet,
+ _VOID,
+ _FailOnNegI32,
+ 4,
+ {_PTR, _I32, _RoN, _I32, _END}};
+const SymbolicAddressSignature SASigTableSize = {
+ SymbolicAddress::TableSize, _I32, _Infallible, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigRefFunc = {
+ SymbolicAddress::RefFunc, _RoN, _FailOnInvalidRef, 2, {_PTR, _I32, _END}};
+const SymbolicAddressSignature SASigPreBarrierFiltering = {
+ SymbolicAddress::PreBarrierFiltering,
+ _VOID,
+ _Infallible,
+ 2,
+ {_PTR, _PTR, _END}};
+const SymbolicAddressSignature SASigPostBarrier = {
+ SymbolicAddress::PostBarrier, _VOID, _Infallible, 2, {_PTR, _PTR, _END}};
+const SymbolicAddressSignature SASigPostBarrierFiltering = {
+ SymbolicAddress::PostBarrierFiltering,
+ _VOID,
+ _Infallible,
+ 2,
+ {_PTR, _PTR, _END}};
+const SymbolicAddressSignature SASigStructNew = {
+ SymbolicAddress::StructNew, _RoN, _FailOnNullPtr, 2, {_PTR, _RoN, _END}};
+const SymbolicAddressSignature SASigStructNarrow = {
+ SymbolicAddress::StructNarrow,
+ _RoN,
+ _Infallible,
+ 3,
+ {_PTR, _RoN, _RoN, _END}};
+
+} // namespace wasm
+} // namespace js
+
+#undef _F64
+#undef _F32
+#undef _I32
+#undef _I64
+#undef _PTR
+#undef _RoN
+#undef _VOID
+#undef _END
+#undef _Infallible
+#undef _FailOnNegI32
+#undef _FailOnNullPtr
+
+#ifdef DEBUG
+ABIArgType ToABIType(FailureMode mode) {
+ switch (mode) {
+ case FailureMode::FailOnNegI32:
+ return ArgType_Int32;
+ case FailureMode::FailOnNullPtr:
+ case FailureMode::FailOnInvalidRef:
+ return ArgType_General;
+ default:
+ MOZ_CRASH("unexpected failure mode");
+ }
+}
+
+ABIArgType ToABIType(MIRType type) {
+ switch (type) {
+ case MIRType::None:
+ case MIRType::Int32:
+ return ArgType_Int32;
+ case MIRType::Int64:
+ return ArgType_Int64;
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ return ArgType_General;
+ case MIRType::Float32:
+ return ArgType_Float32;
+ case MIRType::Double:
+ return ArgType_Float64;
+ default:
+ MOZ_CRASH("unexpected type");
+ }
+}
+
+ABIFunctionType ToABIType(const SymbolicAddressSignature& sig) {
+ MOZ_ASSERT_IF(sig.failureMode != FailureMode::Infallible,
+ ToABIType(sig.failureMode) == ToABIType(sig.retType));
+ int abiType = ToABIType(sig.retType) << RetType_Shift;
+ for (int i = 0; i < sig.numArgs; i++) {
+ abiType |= (ToABIType(sig.argTypes[i]) << (ArgType_Shift * (i + 1)));
+ }
+ return ABIFunctionType(abiType);
+}
+#endif
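+
+// Worked example (illustrative; it follows the shift/packing scheme above):
+// for SASigPowD, which returns _F64 and takes two _F64 arguments, the packed
+// value is
+//
+//   (ArgType_Float64 << RetType_Shift) |
+//   (ArgType_Float64 << (ArgType_Shift * 1)) |
+//   (ArgType_Float64 << (ArgType_Shift * 2))
+//
+// which is expected to equal Args_Double_DoubleDouble, the abiType selected
+// for SymbolicAddress::PowD in AddressOf() below.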
+
+// ============================================================================
+// WebAssembly builtin C++ functions called from wasm code to implement internal
+// wasm operations: implementations.
+
+#if defined(JS_CODEGEN_ARM)
+extern "C" {
+
+extern MOZ_EXPORT int64_t __aeabi_idivmod(int, int);
+
+extern MOZ_EXPORT int64_t __aeabi_uidivmod(int, int);
+}
+#endif
+
+// This utility function can only be called for builtins that are called
+// directly from wasm code.
+static JitActivation* CallingActivation() {
+ Activation* act = TlsContext.get()->activation();
+ MOZ_ASSERT(act->asJit()->hasWasmExitFP());
+ return act->asJit();
+}
+
+static bool WasmHandleDebugTrap() {
+ JitActivation* activation = CallingActivation();
+ JSContext* cx = activation->cx();
+ Frame* fp = activation->wasmExitFP();
+ Instance* instance = GetNearestEffectiveTls(fp)->instance;
+ const Code& code = instance->code();
+ MOZ_ASSERT(code.metadata().debugEnabled);
+
+  // The debug trap stub is the innermost frame. Its return address is the
+  // actual trap site.
+ const CallSite* site = code.lookupCallSite(fp->returnAddress());
+ MOZ_ASSERT(site);
+
+ // Advance to the actual trapping frame.
+ fp = fp->wasmCaller();
+ DebugFrame* debugFrame = DebugFrame::from(fp);
+
+ if (site->kind() == CallSite::EnterFrame) {
+ if (!instance->debug().enterFrameTrapsEnabled()) {
+ return true;
+ }
+ debugFrame->setIsDebuggee();
+ debugFrame->observe(cx);
+ if (!DebugAPI::onEnterFrame(cx, debugFrame)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // Ignoring forced return because changing code execution order is
+ // not yet implemented in the wasm baseline.
+ // TODO properly handle forced return and resume wasm execution.
+ JS_ReportErrorASCII(cx,
+ "Unexpected resumption value from onEnterFrame");
+ }
+ return false;
+ }
+ return true;
+ }
+ if (site->kind() == CallSite::LeaveFrame) {
+ if (!debugFrame->updateReturnJSValue(cx)) {
+ return false;
+ }
+ bool ok = DebugAPI::onLeaveFrame(cx, debugFrame, nullptr, true);
+ debugFrame->leave(cx);
+ return ok;
+ }
+
+ DebugState& debug = instance->debug();
+ MOZ_ASSERT(debug.hasBreakpointTrapAtOffset(site->lineOrBytecode()));
+ if (debug.stepModeEnabled(debugFrame->funcIndex())) {
+ if (!DebugAPI::onSingleStep(cx)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // TODO properly handle forced return.
+ JS_ReportErrorASCII(cx,
+ "Unexpected resumption value from onSingleStep");
+ }
+ return false;
+ }
+ }
+ if (debug.hasBreakpointSite(site->lineOrBytecode())) {
+ if (!DebugAPI::onTrap(cx)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // TODO properly handle forced return.
+ JS_ReportErrorASCII(
+ cx, "Unexpected resumption value from breakpoint handler");
+ }
+ return false;
+ }
+ }
+ return true;
+}
+
+// Unwind the entire activation in response to a thrown exception. This function
+// is responsible for notifying the debugger of each unwound frame. The return
+// value is the new stack address which the calling stub will set the sp
+// register to before executing a return instruction.
+
+void* wasm::HandleThrow(JSContext* cx, WasmFrameIter& iter) {
+ // WasmFrameIter iterates down wasm frames in the activation starting at
+ // JitActivation::wasmExitFP(). Calling WasmFrameIter::startUnwinding pops
+ // JitActivation::wasmExitFP() once each time WasmFrameIter is incremented,
+ // ultimately leaving exit FP null when the WasmFrameIter is done(). This
+ // is necessary to prevent a DebugFrame from being observed again after we
+ // just called onLeaveFrame (which would lead to the frame being re-added
+ // to the map of live frames, right as it becomes trash).
+
+ MOZ_ASSERT(CallingActivation() == iter.activation());
+ MOZ_ASSERT(!iter.done());
+ iter.setUnwind(WasmFrameIter::Unwind::True);
+
+ // Live wasm code on the stack is kept alive (in TraceJitActivation) by
+ // marking the instance of every wasm::Frame found by WasmFrameIter.
+ // However, as explained above, we're popping frames while iterating which
+ // means that a GC during this loop could collect the code of frames whose
+ // code is still on the stack. This is actually mostly fine: as soon as we
+ // return to the throw stub, the entire stack will be popped as a whole,
+ // returning to the C++ caller. However, we must keep the throw stub alive
+ // itself which is owned by the innermost instance.
+ RootedWasmInstanceObject keepAlive(cx, iter.instance()->object());
+
+ for (; !iter.done(); ++iter) {
+ // Wasm code can enter same-compartment realms, so reset cx->realm to
+ // this frame's realm.
+ cx->setRealmForJitExceptionHandler(iter.instance()->realm());
+
+ if (!iter.debugEnabled()) {
+ continue;
+ }
+
+ DebugFrame* frame = iter.debugFrame();
+ frame->clearReturnJSValue();
+
+ // Assume ResumeMode::Terminate if no exception is pending --
+    // no onExceptionUnwind handlers need to be fired.
+ if (cx->isExceptionPending()) {
+ if (!DebugAPI::onExceptionUnwind(cx, frame)) {
+ if (cx->isPropagatingForcedReturn()) {
+ cx->clearPropagatingForcedReturn();
+ // Unexpected trap return -- raising error since throw recovery
+ // is not yet implemented in the wasm baseline.
+ // TODO properly handle forced return and resume wasm execution.
+ JS_ReportErrorASCII(
+ cx, "Unexpected resumption value from onExceptionUnwind");
+ }
+ }
+ }
+
+ bool ok = DebugAPI::onLeaveFrame(cx, frame, nullptr, false);
+ if (ok) {
+ // Unexpected success from the handler onLeaveFrame -- raising error
+ // since throw recovery is not yet implemented in the wasm baseline.
+ // TODO properly handle success and resume wasm execution.
+ JS_ReportErrorASCII(cx, "Unexpected success from onLeaveFrame");
+ }
+ frame->leave(cx);
+ }
+
+ MOZ_ASSERT(!cx->activation()->asJit()->isWasmTrapping(),
+ "unwinding clears the trapping state");
+
+ return iter.unwoundAddressOfReturnAddress();
+}
+
+static void* WasmHandleThrow() {
+ JitActivation* activation = CallingActivation();
+ JSContext* cx = activation->cx();
+ WasmFrameIter iter(activation);
+ return HandleThrow(cx, iter);
+}
+
+// Unconditionally returns nullptr per calling convention of HandleTrap().
+static void* ReportError(JSContext* cx, unsigned errorNumber) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
+ return nullptr;
+}
+
+// Has the same return-value convention as HandleTrap().
+static void* CheckInterrupt(JSContext* cx, JitActivation* activation) {
+ ResetInterruptState(cx);
+
+ if (!CheckForInterrupt(cx)) {
+ return nullptr;
+ }
+
+ void* resumePC = activation->wasmTrapData().resumePC;
+ activation->finishWasmTrap();
+ return resumePC;
+}
+
+// The calling convention between this function and its caller in the stub
+// generated by GenerateTrapExit() is:
+// - return nullptr if the stub should jump to the throw stub to unwind
+// the activation;
+//   - return the (non-null) resumePC that should be jumped to if execution
+//     should resume after the trap.
+static void* WasmHandleTrap() {
+ JitActivation* activation = CallingActivation();
+ JSContext* cx = activation->cx();
+
+ switch (activation->wasmTrapData().trap) {
+ case Trap::Unreachable:
+ return ReportError(cx, JSMSG_WASM_UNREACHABLE);
+ case Trap::IntegerOverflow:
+ return ReportError(cx, JSMSG_WASM_INTEGER_OVERFLOW);
+ case Trap::InvalidConversionToInteger:
+ return ReportError(cx, JSMSG_WASM_INVALID_CONVERSION);
+ case Trap::IntegerDivideByZero:
+ return ReportError(cx, JSMSG_WASM_INT_DIVIDE_BY_ZERO);
+ case Trap::IndirectCallToNull:
+ return ReportError(cx, JSMSG_WASM_IND_CALL_TO_NULL);
+ case Trap::IndirectCallBadSig:
+ return ReportError(cx, JSMSG_WASM_IND_CALL_BAD_SIG);
+ case Trap::NullPointerDereference:
+ return ReportError(cx, JSMSG_WASM_DEREF_NULL);
+ case Trap::OutOfBounds:
+ return ReportError(cx, JSMSG_WASM_OUT_OF_BOUNDS);
+ case Trap::UnalignedAccess:
+ return ReportError(cx, JSMSG_WASM_UNALIGNED_ACCESS);
+ case Trap::CheckInterrupt:
+ return CheckInterrupt(cx, activation);
+ case Trap::StackOverflow:
+ // TlsData::setInterrupt() causes a fake stack overflow. Since
+ // TlsData::setInterrupt() is called racily, it's possible for a real
+ // stack overflow to trap, followed by a racy call to setInterrupt().
+ // Thus, we must check for a real stack overflow first before we
+ // CheckInterrupt() and possibly resume execution.
+ if (!CheckRecursionLimit(cx)) {
+ return nullptr;
+ }
+ if (activation->wasmExitTls()->isInterrupted()) {
+ return CheckInterrupt(cx, activation);
+ }
+ return ReportError(cx, JSMSG_OVER_RECURSED);
+ case Trap::ThrowReported:
+ // Error was already reported under another name.
+ return nullptr;
+ case Trap::Limit:
+ break;
+ }
+
+ MOZ_CRASH("unexpected trap");
+}
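+
+// Sketch of how the trap exit stub consumes the value returned above
+// (pseudocode, for illustration only; the real stub is emitted by
+// GenerateTrapExit()):
+//
+//   void* resumePC = WasmHandleTrap();
+//   if (!resumePC) {
+//     // jump to the throw stub, unwinding the activation
+//   } else {
+//     // jump to resumePC and resume execution after the trap
+//   }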
+
+static void WasmReportV128JSCall() {
+ JSContext* cx = TlsContext.get();
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+}
+
+static int32_t CoerceInPlace_ToInt32(Value* rawVal) {
+ JSContext* cx = TlsContext.get();
+
+ int32_t i32;
+ RootedValue val(cx, *rawVal);
+ if (!ToInt32(cx, val, &i32)) {
+ *rawVal = PoisonedObjectValue(0x42);
+ return false;
+ }
+
+ *rawVal = Int32Value(i32);
+ return true;
+}
+
+static int32_t CoerceInPlace_ToBigInt(Value* rawVal) {
+ JSContext* cx = TlsContext.get();
+
+ RootedValue val(cx, *rawVal);
+ BigInt* bi = ToBigInt(cx, val);
+ if (!bi) {
+ *rawVal = PoisonedObjectValue(0x43);
+ return false;
+ }
+
+ *rawVal = BigIntValue(bi);
+ return true;
+}
+
+static int32_t CoerceInPlace_ToNumber(Value* rawVal) {
+ JSContext* cx = TlsContext.get();
+
+ double dbl;
+ RootedValue val(cx, *rawVal);
+ if (!ToNumber(cx, val, &dbl)) {
+ *rawVal = PoisonedObjectValue(0x42);
+ return false;
+ }
+
+ *rawVal = DoubleValue(dbl);
+ return true;
+}
+
+static void* BoxValue_Anyref(Value* rawVal) {
+ JSContext* cx = TlsContext.get();
+ RootedValue val(cx, *rawVal);
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!BoxAnyRef(cx, val, &result)) {
+ return nullptr;
+ }
+ return result.get().forCompiledCode();
+}
+
+static int32_t CoerceInPlace_JitEntry(int funcExportIndex, TlsData* tlsData,
+ Value* argv) {
+ JSContext* cx = CallingActivation()->cx();
+
+ const Code& code = tlsData->instance->code();
+ const FuncExport& fe =
+ code.metadata(code.stableTier()).funcExports[funcExportIndex];
+
+ for (size_t i = 0; i < fe.funcType().args().length(); i++) {
+ HandleValue arg = HandleValue::fromMarkedLocation(&argv[i]);
+ switch (fe.funcType().args()[i].kind()) {
+ case ValType::I32: {
+ int32_t i32;
+ if (!ToInt32(cx, arg, &i32)) {
+ return false;
+ }
+ argv[i] = Int32Value(i32);
+ break;
+ }
+ case ValType::I64: {
+ // In this case we store a BigInt value as there is no value type
+ // corresponding directly to an I64. The conversion to I64 happens
+ // in the JIT entry stub.
+ BigInt* bigint = ToBigInt(cx, arg);
+ if (!bigint) {
+ return false;
+ }
+ argv[i] = BigIntValue(bigint);
+ break;
+ }
+ case ValType::F32:
+ case ValType::F64: {
+ double dbl;
+ if (!ToNumber(cx, arg, &dbl)) {
+ return false;
+ }
+ // No need to convert double-to-float for f32, it's done inline
+ // in the wasm stub later.
+ argv[i] = DoubleValue(dbl);
+ break;
+ }
+ case ValType::Ref: {
+ switch (fe.funcType().args()[i].refTypeKind()) {
+ case RefType::Extern:
+ // Leave Object and Null alone, we will unbox inline. All we need
+ // to do is convert other values to an Object representation.
+ if (!arg.isObjectOrNull()) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!BoxAnyRef(cx, arg, &result)) {
+ return false;
+ }
+ argv[i].setObject(*result.get().asJSObject());
+ }
+ break;
+ case RefType::Func:
+ case RefType::Eq:
+ case RefType::TypeIndex:
+ // Guarded against by temporarilyUnsupportedReftypeForEntry()
+ MOZ_CRASH("unexpected input argument in CoerceInPlace_JitEntry");
+ }
+ break;
+ }
+ case ValType::V128: {
+ // Guarded against by hasV128ArgOrRet()
+ MOZ_CRASH("unexpected input argument in CoerceInPlace_JitEntry");
+ }
+ default: {
+ MOZ_CRASH("unexpected input argument in CoerceInPlace_JitEntry");
+ }
+ }
+ }
+
+ return true;
+}
+
+// Allocate a BigInt without GC, corresponds to the similar VMFunction.
+static BigInt* AllocateBigIntTenuredNoGC() {
+ JSContext* cx = TlsContext.get();
+
+ return js::AllocateBigInt<NoGC>(cx, gc::TenuredHeap);
+}
+
+static int64_t DivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ int64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ int64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(x != INT64_MIN || y != -1);
+ MOZ_ASSERT(y != 0);
+ return x / y;
+}
+
+static int64_t UDivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(y != 0);
+ return x / y;
+}
+
+static int64_t ModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ int64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ int64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(x != INT64_MIN || y != -1);
+ MOZ_ASSERT(y != 0);
+ return x % y;
+}
+
+static int64_t UModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi,
+ uint32_t y_lo) {
+ uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(y != 0);
+ return x % y;
+}
+
+static int64_t TruncateDoubleToInt64(double input) {
+  // Note: INT64_MAX is not exactly representable as a double;
+  // double(INT64_MAX) rounds up to INT64_MAX + 1, so an input equal to that
+  // boundary is out of range and also yields the failure value.
+ if (input >= double(INT64_MAX) || input < double(INT64_MIN) || IsNaN(input)) {
+ return 0x8000000000000000;
+ }
+ return int64_t(input);
+}
+
+static uint64_t TruncateDoubleToUint64(double input) {
+  // Note: UINT64_MAX is not exactly representable as a double;
+  // double(UINT64_MAX) rounds up to UINT64_MAX + 1, so an input equal to that
+  // boundary is out of range and also yields the failure value.
+ if (input >= double(UINT64_MAX) || input <= -1.0 || IsNaN(input)) {
+ return 0x8000000000000000;
+ }
+ return uint64_t(input);
+}
+
+static int64_t SaturatingTruncateDoubleToInt64(double input) {
+ // Handle in-range values (except INT64_MIN).
+ if (fabs(input) < -double(INT64_MIN)) {
+ return int64_t(input);
+ }
+ // Handle NaN.
+ if (IsNaN(input)) {
+ return 0;
+ }
+ // Handle positive overflow.
+ if (input > 0) {
+ return INT64_MAX;
+ }
+ // Handle negative overflow.
+ return INT64_MIN;
+}
+
+static uint64_t SaturatingTruncateDoubleToUint64(double input) {
+ // Handle positive overflow.
+ if (input >= -double(INT64_MIN) * 2.0) {
+ return UINT64_MAX;
+ }
+ // Handle in-range values.
+ if (input > -1.0) {
+ return uint64_t(input);
+ }
+ // Handle NaN and negative overflow.
+ return 0;
+}
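+
+// For illustration, with the saturation rules above:
+//   SaturatingTruncateDoubleToInt64(1e30)   == INT64_MAX
+//   SaturatingTruncateDoubleToInt64(-1e30)  == INT64_MIN
+//   SaturatingTruncateDoubleToInt64(NaN)    == 0
+//   SaturatingTruncateDoubleToUint64(-0.25) == 0  (in range, truncates to 0)
+//   SaturatingTruncateDoubleToUint64(-2.0)  == 0  (negative overflow)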
+
+static double Int64ToDouble(int32_t x_hi, uint32_t x_lo) {
+ int64_t x = int64_t((uint64_t(x_hi) << 32)) + int64_t(x_lo);
+ return double(x);
+}
+
+static float Int64ToFloat32(int32_t x_hi, uint32_t x_lo) {
+ int64_t x = int64_t((uint64_t(x_hi) << 32)) + int64_t(x_lo);
+ return float(x);
+}
+
+static double Uint64ToDouble(int32_t x_hi, uint32_t x_lo) {
+ uint64_t x = (uint64_t(x_hi) << 32) + uint64_t(x_lo);
+ return double(x);
+}
+
+static float Uint64ToFloat32(int32_t x_hi, uint32_t x_lo) {
+ uint64_t x = (uint64_t(x_hi) << 32) + uint64_t(x_lo);
+ return float(x);
+}
+
+template <class F>
+static inline void* FuncCast(F* funcPtr, ABIFunctionType abiType) {
+ void* pf = JS_FUNC_TO_DATA_PTR(void*, funcPtr);
+#ifdef JS_SIMULATOR
+ pf = Simulator::RedirectNativeFunction(pf, abiType);
+#endif
+ return pf;
+}
+
+#ifdef WASM_CODEGEN_DEBUG
+void wasm::PrintI32(int32_t val) { fprintf(stderr, "i32(%d) ", val); }
+
+void wasm::PrintPtr(uint8_t* val) { fprintf(stderr, "ptr(%p) ", val); }
+
+void wasm::PrintF32(float val) { fprintf(stderr, "f32(%f) ", val); }
+
+void wasm::PrintF64(double val) { fprintf(stderr, "f64(%lf) ", val); }
+
+void wasm::PrintText(const char* out) { fprintf(stderr, "%s", out); }
+#endif
+
+void* wasm::AddressOf(SymbolicAddress imm, ABIFunctionType* abiType) {
+ switch (imm) {
+ case SymbolicAddress::HandleDebugTrap:
+ *abiType = Args_General0;
+ return FuncCast(WasmHandleDebugTrap, *abiType);
+ case SymbolicAddress::HandleThrow:
+ *abiType = Args_General0;
+ return FuncCast(WasmHandleThrow, *abiType);
+ case SymbolicAddress::HandleTrap:
+ *abiType = Args_General0;
+ return FuncCast(WasmHandleTrap, *abiType);
+ case SymbolicAddress::ReportV128JSCall:
+ *abiType = Args_General0;
+ return FuncCast(WasmReportV128JSCall, *abiType);
+ case SymbolicAddress::CallImport_General:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Int32, ArgType_General});
+ return FuncCast(Instance::callImport_general, *abiType);
+ case SymbolicAddress::CoerceInPlace_ToInt32:
+ *abiType = Args_General1;
+ return FuncCast(CoerceInPlace_ToInt32, *abiType);
+ case SymbolicAddress::CoerceInPlace_ToBigInt:
+ *abiType = Args_General1;
+ return FuncCast(CoerceInPlace_ToBigInt, *abiType);
+ case SymbolicAddress::CoerceInPlace_ToNumber:
+ *abiType = Args_General1;
+ return FuncCast(CoerceInPlace_ToNumber, *abiType);
+ case SymbolicAddress::CoerceInPlace_JitEntry:
+ *abiType = Args_General3;
+ return FuncCast(CoerceInPlace_JitEntry, *abiType);
+ case SymbolicAddress::ToInt32:
+ *abiType = Args_Int_Double;
+ return FuncCast<int32_t(double)>(JS::ToInt32, *abiType);
+ case SymbolicAddress::BoxValue_Anyref:
+ *abiType = Args_General1;
+ return FuncCast(BoxValue_Anyref, *abiType);
+ case SymbolicAddress::AllocateBigInt:
+ *abiType = Args_General0;
+ return FuncCast(AllocateBigIntTenuredNoGC, *abiType);
+ case SymbolicAddress::DivI64:
+ *abiType = Args_General4;
+ return FuncCast(DivI64, *abiType);
+ case SymbolicAddress::UDivI64:
+ *abiType = Args_General4;
+ return FuncCast(UDivI64, *abiType);
+ case SymbolicAddress::ModI64:
+ *abiType = Args_General4;
+ return FuncCast(ModI64, *abiType);
+ case SymbolicAddress::UModI64:
+ *abiType = Args_General4;
+ return FuncCast(UModI64, *abiType);
+ case SymbolicAddress::TruncateDoubleToUint64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(TruncateDoubleToUint64, *abiType);
+ case SymbolicAddress::TruncateDoubleToInt64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(TruncateDoubleToInt64, *abiType);
+ case SymbolicAddress::SaturatingTruncateDoubleToUint64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(SaturatingTruncateDoubleToUint64, *abiType);
+ case SymbolicAddress::SaturatingTruncateDoubleToInt64:
+ *abiType = Args_Int64_Double;
+ return FuncCast(SaturatingTruncateDoubleToInt64, *abiType);
+ case SymbolicAddress::Uint64ToDouble:
+ *abiType = Args_Double_IntInt;
+ return FuncCast(Uint64ToDouble, *abiType);
+ case SymbolicAddress::Uint64ToFloat32:
+ *abiType = Args_Float32_IntInt;
+ return FuncCast(Uint64ToFloat32, *abiType);
+ case SymbolicAddress::Int64ToDouble:
+ *abiType = Args_Double_IntInt;
+ return FuncCast(Int64ToDouble, *abiType);
+ case SymbolicAddress::Int64ToFloat32:
+ *abiType = Args_Float32_IntInt;
+ return FuncCast(Int64ToFloat32, *abiType);
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ *abiType = Args_General2;
+ return FuncCast(__aeabi_idivmod, *abiType);
+ case SymbolicAddress::aeabi_uidivmod:
+ *abiType = Args_General2;
+ return FuncCast(__aeabi_uidivmod, *abiType);
+#endif
+ case SymbolicAddress::ModD:
+ *abiType = Args_Double_DoubleDouble;
+ return FuncCast(NumberMod, *abiType);
+ case SymbolicAddress::SinD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(sin, *abiType);
+ case SymbolicAddress::CosD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(cos, *abiType);
+ case SymbolicAddress::TanD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(tan, *abiType);
+ case SymbolicAddress::ASinD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::asin, *abiType);
+ case SymbolicAddress::ACosD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::acos, *abiType);
+ case SymbolicAddress::ATanD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::atan, *abiType);
+ case SymbolicAddress::CeilD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::ceil, *abiType);
+ case SymbolicAddress::CeilF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::ceilf, *abiType);
+ case SymbolicAddress::FloorD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::floor, *abiType);
+ case SymbolicAddress::FloorF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::floorf, *abiType);
+ case SymbolicAddress::TruncD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::trunc, *abiType);
+ case SymbolicAddress::TruncF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::truncf, *abiType);
+ case SymbolicAddress::NearbyIntD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::nearbyint, *abiType);
+ case SymbolicAddress::NearbyIntF:
+ *abiType = Args_Float32_Float32;
+ return FuncCast<float(float)>(fdlibm::nearbyintf, *abiType);
+ case SymbolicAddress::ExpD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::exp, *abiType);
+ case SymbolicAddress::LogD:
+ *abiType = Args_Double_Double;
+ return FuncCast<double(double)>(fdlibm::log, *abiType);
+ case SymbolicAddress::PowD:
+ *abiType = Args_Double_DoubleDouble;
+ return FuncCast(ecmaPow, *abiType);
+ case SymbolicAddress::ATan2D:
+ *abiType = Args_Double_DoubleDouble;
+ return FuncCast(ecmaAtan2, *abiType);
+
+ case SymbolicAddress::MemoryGrow:
+ *abiType =
+ MakeABIFunctionType(ArgType_Int32, {ArgType_General, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemoryGrow));
+ return FuncCast(Instance::memoryGrow_i32, *abiType);
+ case SymbolicAddress::MemorySize:
+ *abiType = MakeABIFunctionType(ArgType_Int32, {ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemorySize));
+ return FuncCast(Instance::memorySize_i32, *abiType);
+ case SymbolicAddress::WaitI32:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Int32, ArgType_Int64});
+ MOZ_ASSERT(*abiType == ToABIType(SASigWaitI32));
+ return FuncCast(Instance::wait_i32, *abiType);
+ case SymbolicAddress::WaitI64:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_Int64, ArgType_Int64});
+ MOZ_ASSERT(*abiType == ToABIType(SASigWaitI64));
+ return FuncCast(Instance::wait_i64, *abiType);
+ case SymbolicAddress::Wake:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigWake));
+ return FuncCast(Instance::wake, *abiType);
+ case SymbolicAddress::MemCopy:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemCopy));
+ return FuncCast(Instance::memCopy, *abiType);
+ case SymbolicAddress::MemCopyShared:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemCopyShared));
+ return FuncCast(Instance::memCopyShared, *abiType);
+ case SymbolicAddress::DataDrop:
+ *abiType =
+ MakeABIFunctionType(ArgType_Int32, {ArgType_General, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigDataDrop));
+ return FuncCast(Instance::dataDrop, *abiType);
+ case SymbolicAddress::MemFill:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemFill));
+ return FuncCast(Instance::memFill, *abiType);
+ case SymbolicAddress::MemFillShared:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemFillShared));
+ return FuncCast(Instance::memFillShared, *abiType);
+ case SymbolicAddress::MemInit:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigMemInit));
+ return FuncCast(Instance::memInit, *abiType);
+ case SymbolicAddress::TableCopy:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableCopy));
+ return FuncCast(Instance::tableCopy, *abiType);
+ case SymbolicAddress::ElemDrop:
+ *abiType =
+ MakeABIFunctionType(ArgType_Int32, {ArgType_General, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigElemDrop));
+ return FuncCast(Instance::elemDrop, *abiType);
+ case SymbolicAddress::TableFill:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_General,
+ ArgType_Int32, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableFill));
+ return FuncCast(Instance::tableFill, *abiType);
+ case SymbolicAddress::TableInit:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32, {ArgType_General, ArgType_Int32, ArgType_Int32,
+ ArgType_Int32, ArgType_Int32, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableInit));
+ return FuncCast(Instance::tableInit, *abiType);
+ case SymbolicAddress::TableGet:
+ *abiType = MakeABIFunctionType(
+ ArgType_General, {ArgType_General, ArgType_Int32, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableGet));
+ return FuncCast(Instance::tableGet, *abiType);
+ case SymbolicAddress::TableGrow:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_General, ArgType_Int32, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableGrow));
+ return FuncCast(Instance::tableGrow, *abiType);
+ case SymbolicAddress::TableSet:
+ *abiType = MakeABIFunctionType(
+ ArgType_Int32,
+ {ArgType_General, ArgType_Int32, ArgType_General, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableSet));
+ return FuncCast(Instance::tableSet, *abiType);
+ case SymbolicAddress::TableSize:
+ *abiType =
+ MakeABIFunctionType(ArgType_Int32, {ArgType_General, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigTableSize));
+ return FuncCast(Instance::tableSize, *abiType);
+ case SymbolicAddress::RefFunc:
+ *abiType = MakeABIFunctionType(ArgType_General,
+ {ArgType_General, ArgType_Int32});
+ MOZ_ASSERT(*abiType == ToABIType(SASigRefFunc));
+ return FuncCast(Instance::refFunc, *abiType);
+ case SymbolicAddress::PostBarrier:
+ *abiType = MakeABIFunctionType(ArgType_Int32,
+ {ArgType_General, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigPostBarrier));
+ return FuncCast(Instance::postBarrier, *abiType);
+ case SymbolicAddress::PreBarrierFiltering:
+ *abiType = MakeABIFunctionType(ArgType_Int32,
+ {ArgType_General, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigPreBarrierFiltering));
+ return FuncCast(Instance::preBarrierFiltering, *abiType);
+ case SymbolicAddress::PostBarrierFiltering:
+ *abiType = MakeABIFunctionType(ArgType_Int32,
+ {ArgType_General, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigPostBarrierFiltering));
+ return FuncCast(Instance::postBarrierFiltering, *abiType);
+ case SymbolicAddress::StructNew:
+ *abiType = MakeABIFunctionType(ArgType_General,
+ {ArgType_General, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigStructNew));
+ return FuncCast(Instance::structNew, *abiType);
+ case SymbolicAddress::StructNarrow:
+ *abiType = MakeABIFunctionType(
+ ArgType_General, {ArgType_General, ArgType_General, ArgType_General});
+ MOZ_ASSERT(*abiType == ToABIType(SASigStructNarrow));
+ return FuncCast(Instance::structNarrow, *abiType);
+
+#if defined(JS_CODEGEN_MIPS32)
+ case SymbolicAddress::js_jit_gAtomic64Lock:
+ return &js::jit::gAtomic64Lock;
+#endif
+#ifdef WASM_CODEGEN_DEBUG
+ case SymbolicAddress::PrintI32:
+ *abiType = Args_General1;
+ return FuncCast(PrintI32, *abiType);
+ case SymbolicAddress::PrintPtr:
+ *abiType = Args_General1;
+ return FuncCast(PrintPtr, *abiType);
+ case SymbolicAddress::PrintF32:
+ *abiType = Args_Int_Float32;
+ return FuncCast(PrintF32, *abiType);
+ case SymbolicAddress::PrintF64:
+ *abiType = Args_Int_Double;
+ return FuncCast(PrintF64, *abiType);
+ case SymbolicAddress::PrintText:
+ *abiType = Args_General1;
+ return FuncCast(PrintText, *abiType);
+#endif
+ case SymbolicAddress::Limit:
+ break;
+ }
+
+ MOZ_CRASH("Bad SymbolicAddress");
+}
+
+bool wasm::NeedsBuiltinThunk(SymbolicAddress sym) {
+  // Some functions don't need a thunk because they already have one or
+  // they don't have frame info.
+ switch (sym) {
+ case SymbolicAddress::HandleDebugTrap: // GenerateDebugTrapStub
+ case SymbolicAddress::HandleThrow: // GenerateThrowStub
+ case SymbolicAddress::HandleTrap: // GenerateTrapExit
+ case SymbolicAddress::CallImport_General: // GenerateImportInterpExit
+ case SymbolicAddress::CoerceInPlace_ToInt32: // GenerateImportJitExit
+ case SymbolicAddress::CoerceInPlace_ToNumber:
+ case SymbolicAddress::CoerceInPlace_ToBigInt:
+ case SymbolicAddress::BoxValue_Anyref:
+#if defined(JS_CODEGEN_MIPS32)
+ case SymbolicAddress::js_jit_gAtomic64Lock:
+#endif
+#ifdef WASM_CODEGEN_DEBUG
+ case SymbolicAddress::PrintI32:
+ case SymbolicAddress::PrintPtr:
+ case SymbolicAddress::PrintF32:
+ case SymbolicAddress::PrintF64:
+ case SymbolicAddress::PrintText: // Used only in stubs
+#endif
+ return false;
+ case SymbolicAddress::ToInt32:
+ case SymbolicAddress::DivI64:
+ case SymbolicAddress::UDivI64:
+ case SymbolicAddress::ModI64:
+ case SymbolicAddress::UModI64:
+ case SymbolicAddress::TruncateDoubleToUint64:
+ case SymbolicAddress::TruncateDoubleToInt64:
+ case SymbolicAddress::SaturatingTruncateDoubleToUint64:
+ case SymbolicAddress::SaturatingTruncateDoubleToInt64:
+ case SymbolicAddress::Uint64ToDouble:
+ case SymbolicAddress::Uint64ToFloat32:
+ case SymbolicAddress::Int64ToDouble:
+ case SymbolicAddress::Int64ToFloat32:
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ case SymbolicAddress::aeabi_uidivmod:
+#endif
+ case SymbolicAddress::AllocateBigInt:
+ case SymbolicAddress::ModD:
+ case SymbolicAddress::SinD:
+ case SymbolicAddress::CosD:
+ case SymbolicAddress::TanD:
+ case SymbolicAddress::ASinD:
+ case SymbolicAddress::ACosD:
+ case SymbolicAddress::ATanD:
+ case SymbolicAddress::CeilD:
+ case SymbolicAddress::CeilF:
+ case SymbolicAddress::FloorD:
+ case SymbolicAddress::FloorF:
+ case SymbolicAddress::TruncD:
+ case SymbolicAddress::TruncF:
+ case SymbolicAddress::NearbyIntD:
+ case SymbolicAddress::NearbyIntF:
+ case SymbolicAddress::ExpD:
+ case SymbolicAddress::LogD:
+ case SymbolicAddress::PowD:
+ case SymbolicAddress::ATan2D:
+ case SymbolicAddress::MemoryGrow:
+ case SymbolicAddress::MemorySize:
+ case SymbolicAddress::WaitI32:
+ case SymbolicAddress::WaitI64:
+ case SymbolicAddress::Wake:
+ case SymbolicAddress::CoerceInPlace_JitEntry:
+ case SymbolicAddress::ReportV128JSCall:
+ case SymbolicAddress::MemCopy:
+ case SymbolicAddress::MemCopyShared:
+ case SymbolicAddress::DataDrop:
+ case SymbolicAddress::MemFill:
+ case SymbolicAddress::MemFillShared:
+ case SymbolicAddress::MemInit:
+ case SymbolicAddress::TableCopy:
+ case SymbolicAddress::ElemDrop:
+ case SymbolicAddress::TableFill:
+ case SymbolicAddress::TableGet:
+ case SymbolicAddress::TableGrow:
+ case SymbolicAddress::TableInit:
+ case SymbolicAddress::TableSet:
+ case SymbolicAddress::TableSize:
+ case SymbolicAddress::RefFunc:
+ case SymbolicAddress::PreBarrierFiltering:
+ case SymbolicAddress::PostBarrier:
+ case SymbolicAddress::PostBarrierFiltering:
+ case SymbolicAddress::StructNew:
+ case SymbolicAddress::StructNarrow:
+ return true;
+ case SymbolicAddress::Limit:
+ break;
+ }
+
+ MOZ_CRASH("unexpected symbolic address");
+}
+
+// ============================================================================
+// JS builtins that can be imported by wasm modules and called efficiently
+// through thunks. These thunks conform to the internal wasm ABI and thus can be
+// patched in for import calls. Calling a JS builtin through a thunk is much
+// faster than calling out through the generic import call trampoline which will
+// end up in the slowest C++ Instance::callImport path.
+//
+// Each JS builtin can have several overloads. These must all be enumerated in
+// PopulateTypedNatives() so they can be included in the process-wide thunk set.
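+//
+// For example (illustrative): a module importing Math.sin with type
+// (f64) -> f64 maps, via MaybeGetBuiltinThunk() and ToBuiltinABIFunctionType()
+// below, to the (InlinableNative::MathSin, Args_Double_Double) overload
+// registered by PopulateTypedNatives(), so the import call can be patched to
+// the math_sin_impl thunk instead of taking the generic import path.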
+
+#define FOR_EACH_UNARY_NATIVE(_) \
+ _(math_sin, MathSin) \
+ _(math_tan, MathTan) \
+ _(math_cos, MathCos) \
+ _(math_exp, MathExp) \
+ _(math_log, MathLog) \
+ _(math_asin, MathASin) \
+ _(math_atan, MathATan) \
+ _(math_acos, MathACos) \
+ _(math_log10, MathLog10) \
+ _(math_log2, MathLog2) \
+ _(math_log1p, MathLog1P) \
+ _(math_expm1, MathExpM1) \
+ _(math_sinh, MathSinH) \
+ _(math_tanh, MathTanH) \
+ _(math_cosh, MathCosH) \
+ _(math_asinh, MathASinH) \
+ _(math_atanh, MathATanH) \
+ _(math_acosh, MathACosH) \
+ _(math_sign, MathSign) \
+ _(math_trunc, MathTrunc) \
+ _(math_cbrt, MathCbrt)
+
+#define FOR_EACH_BINARY_NATIVE(_) \
+ _(ecmaAtan2, MathATan2) \
+ _(ecmaHypot, MathHypot) \
+ _(ecmaPow, MathPow)
+
+#define DEFINE_UNARY_FLOAT_WRAPPER(func, _) \
+ static float func##_impl_f32(float x) { \
+ return float(func##_impl(double(x))); \
+ }
+
+#define DEFINE_BINARY_FLOAT_WRAPPER(func, _) \
+ static float func##_f32(float x, float y) { \
+ return float(func(double(x), double(y))); \
+ }
+
+FOR_EACH_UNARY_NATIVE(DEFINE_UNARY_FLOAT_WRAPPER)
+FOR_EACH_BINARY_NATIVE(DEFINE_BINARY_FLOAT_WRAPPER)
+
+#undef DEFINE_UNARY_FLOAT_WRAPPER
+#undef DEFINE_BINARY_FLOAT_WRAPPER
+
+struct TypedNative {
+ InlinableNative native;
+ ABIFunctionType abiType;
+
+ TypedNative(InlinableNative native, ABIFunctionType abiType)
+ : native(native), abiType(abiType) {}
+
+ using Lookup = TypedNative;
+ static HashNumber hash(const Lookup& l) {
+ return HashGeneric(uint32_t(l.native), uint32_t(l.abiType));
+ }
+ static bool match(const TypedNative& lhs, const Lookup& rhs) {
+ return lhs.native == rhs.native && lhs.abiType == rhs.abiType;
+ }
+};
+
+using TypedNativeToFuncPtrMap =
+ HashMap<TypedNative, void*, TypedNative, SystemAllocPolicy>;
+
+static bool PopulateTypedNatives(TypedNativeToFuncPtrMap* typedNatives) {
+#define ADD_OVERLOAD(funcName, native, abiType) \
+ if (!typedNatives->putNew(TypedNative(InlinableNative::native, abiType), \
+ FuncCast(funcName, abiType))) \
+ return false;
+
+#define ADD_UNARY_OVERLOADS(funcName, native) \
+ ADD_OVERLOAD(funcName##_impl, native, Args_Double_Double) \
+ ADD_OVERLOAD(funcName##_impl_f32, native, Args_Float32_Float32)
+
+#define ADD_BINARY_OVERLOADS(funcName, native) \
+ ADD_OVERLOAD(funcName, native, Args_Double_DoubleDouble) \
+ ADD_OVERLOAD(funcName##_f32, native, Args_Float32_Float32Float32)
+
+ FOR_EACH_UNARY_NATIVE(ADD_UNARY_OVERLOADS)
+ FOR_EACH_BINARY_NATIVE(ADD_BINARY_OVERLOADS)
+
+#undef ADD_UNARY_OVERLOADS
+#undef ADD_BINARY_OVERLOADS
+
+ return true;
+}
+
+#undef FOR_EACH_UNARY_NATIVE
+#undef FOR_EACH_BINARY_NATIVE
+
+// ============================================================================
+// Process-wide builtin thunk set
+//
+// Thunks are inserted between wasm calls and the C++ callee and achieve two
+// things:
+// - bridging the few differences between the internal wasm ABI and the
+// external native ABI (viz. float returns on x86 and soft-fp ARM)
+// - executing an exit prologue/epilogue which in turn allows any profiling
+// iterator to see the full stack up to the wasm operation that called out
+//
+// Thunks are created for two kinds of C++ callees, enumerated above:
+// - SymbolicAddress: for statically compiled calls in the wasm module
+// - Imported JS builtins: optimized calls to imports
+//
+// All thunks are created up front, lazily, when the first wasm module is
+// compiled in the process. Thunks are kept alive until the JS engine shuts down
+// in the process. No thunks are created at runtime after initialization. This
+// simple scheme allows several simplifications:
+// - no reference counting to keep thunks alive
+// - no problems toggling W^X permissions which, because of multiple executing
+// threads, would require each thunk allocation to be on its own page
+// The cost for creating all thunks at once is relatively low since all thunks
+// fit within the smallest executable quantum (64k).
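+//
+// Illustrative call sequence (simplified, error handling elided):
+//
+//   if (!EnsureBuiltinThunksInitialized()) {
+//     return false;  // OOM while generating thunk code
+//   }
+//   void* thunk = SymbolicAddressTarget(SymbolicAddress::MemoryGrow);
+//   // `thunk` is a stable, process-lifetime code address that wasm call
+//   // sites can target directly.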
+
+using TypedNativeToCodeRangeMap =
+ HashMap<TypedNative, uint32_t, TypedNative, SystemAllocPolicy>;
+
+using SymbolicAddressToCodeRangeArray =
+ EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, uint32_t>;
+
+struct BuiltinThunks {
+ uint8_t* codeBase;
+ size_t codeSize;
+ CodeRangeVector codeRanges;
+ TypedNativeToCodeRangeMap typedNativeToCodeRange;
+ SymbolicAddressToCodeRangeArray symbolicAddressToCodeRange;
+ uint32_t provisionalJitEntryOffset;
+
+ BuiltinThunks() : codeBase(nullptr), codeSize(0) {}
+
+ ~BuiltinThunks() {
+ if (codeBase) {
+ DeallocateExecutableMemory(codeBase, codeSize);
+ }
+ }
+};
+
+Mutex initBuiltinThunks(mutexid::WasmInitBuiltinThunks);
+Atomic<const BuiltinThunks*> builtinThunks;
+
+bool wasm::EnsureBuiltinThunksInitialized() {
+ LockGuard<Mutex> guard(initBuiltinThunks);
+ if (builtinThunks) {
+ return true;
+ }
+
+ auto thunks = MakeUnique<BuiltinThunks>();
+ if (!thunks) {
+ return false;
+ }
+
+ LifoAlloc lifo(BUILTIN_THUNK_LIFO_SIZE);
+ TempAllocator tempAlloc(&lifo);
+ WasmMacroAssembler masm(tempAlloc);
+
+ for (auto sym : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ if (!NeedsBuiltinThunk(sym)) {
+ thunks->symbolicAddressToCodeRange[sym] = UINT32_MAX;
+ continue;
+ }
+
+ uint32_t codeRangeIndex = thunks->codeRanges.length();
+ thunks->symbolicAddressToCodeRange[sym] = codeRangeIndex;
+
+ ABIFunctionType abiType;
+ void* funcPtr = AddressOf(sym, &abiType);
+
+ ExitReason exitReason(sym);
+
+ CallableOffsets offsets;
+ if (!GenerateBuiltinThunk(masm, abiType, exitReason, funcPtr, &offsets)) {
+ return false;
+ }
+ if (!thunks->codeRanges.emplaceBack(CodeRange::BuiltinThunk, offsets)) {
+ return false;
+ }
+ }
+
+ TypedNativeToFuncPtrMap typedNatives;
+ if (!PopulateTypedNatives(&typedNatives)) {
+ return false;
+ }
+
+ for (TypedNativeToFuncPtrMap::Range r = typedNatives.all(); !r.empty();
+ r.popFront()) {
+ TypedNative typedNative = r.front().key();
+
+ uint32_t codeRangeIndex = thunks->codeRanges.length();
+ if (!thunks->typedNativeToCodeRange.putNew(typedNative, codeRangeIndex)) {
+ return false;
+ }
+
+ ABIFunctionType abiType = typedNative.abiType;
+ void* funcPtr = r.front().value();
+
+ ExitReason exitReason = ExitReason::Fixed::BuiltinNative;
+
+ CallableOffsets offsets;
+ if (!GenerateBuiltinThunk(masm, abiType, exitReason, funcPtr, &offsets)) {
+ return false;
+ }
+ if (!thunks->codeRanges.emplaceBack(CodeRange::BuiltinThunk, offsets)) {
+ return false;
+ }
+ }
+
+ // Provisional JitEntry stub: This is a shared stub that can be installed in
+ // the jit-entry jump table. It uses the JIT ABI and when invoked will
+ // retrieve (via TlsContext()) and invoke the context-appropriate
+ // invoke-from-interpreter jit stub, thus serving as the initial, unoptimized
+ // jit-entry stub for any exported wasm function that has a jit-entry.
+
+#ifdef DEBUG
+ // We need to allow this machine code to bake in a C++ code pointer, so we
+ // disable the wasm restrictions while generating this stub.
+ JitContext jitContext(&tempAlloc);
+ bool oldFlag = jitContext.setIsCompilingWasm(false);
+#endif
+
+ Offsets provisionalJitEntryOffsets;
+ if (!GenerateProvisionalJitEntryStub(masm, &provisionalJitEntryOffsets)) {
+ return false;
+ }
+ thunks->provisionalJitEntryOffset = provisionalJitEntryOffsets.begin;
+
+#ifdef DEBUG
+ jitContext.setIsCompilingWasm(oldFlag);
+#endif
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ size_t allocSize = AlignBytes(masm.bytesNeeded(), ExecutableCodePageSize);
+
+ thunks->codeSize = allocSize;
+ thunks->codeBase = (uint8_t*)AllocateExecutableMemory(
+ allocSize, ProtectionSetting::Writable, MemCheckKind::MakeUndefined);
+ if (!thunks->codeBase) {
+ return false;
+ }
+
+ masm.executableCopy(thunks->codeBase);
+ memset(thunks->codeBase + masm.bytesNeeded(), 0,
+ allocSize - masm.bytesNeeded());
+
+ masm.processCodeLabels(thunks->codeBase);
+ PatchDebugSymbolicAccesses(thunks->codeBase, masm);
+
+ MOZ_ASSERT(masm.callSites().empty());
+ MOZ_ASSERT(masm.callSiteTargets().empty());
+ MOZ_ASSERT(masm.trapSites().empty());
+
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(
+ FlushICacheSpec::LocalThreadOnly, thunks->codeBase,
+ thunks->codeSize)) {
+ return false;
+ }
+
+ builtinThunks = thunks.release();
+ return true;
+}
+
+void wasm::ReleaseBuiltinThunks() {
+ if (builtinThunks) {
+ const BuiltinThunks* ptr = builtinThunks;
+ js_delete(const_cast<BuiltinThunks*>(ptr));
+ builtinThunks = nullptr;
+ }
+}
+
+void* wasm::SymbolicAddressTarget(SymbolicAddress sym) {
+ MOZ_ASSERT(builtinThunks);
+
+ ABIFunctionType abiType;
+ void* funcPtr = AddressOf(sym, &abiType);
+
+ if (!NeedsBuiltinThunk(sym)) {
+ return funcPtr;
+ }
+
+ const BuiltinThunks& thunks = *builtinThunks;
+ uint32_t codeRangeIndex = thunks.symbolicAddressToCodeRange[sym];
+ return thunks.codeBase + thunks.codeRanges[codeRangeIndex].begin();
+}
+
+void* wasm::ProvisionalJitEntryStub() {
+ MOZ_ASSERT(builtinThunks);
+
+ const BuiltinThunks& thunks = *builtinThunks;
+ return thunks.codeBase + thunks.provisionalJitEntryOffset;
+}
+
+static Maybe<ABIFunctionType> ToBuiltinABIFunctionType(
+ const FuncType& funcType) {
+ const ValTypeVector& args = funcType.args();
+ const ValTypeVector& results = funcType.results();
+
+ if (results.length() != 1) {
+ return Nothing();
+ }
+
+ uint32_t abiType;
+ switch (results[0].kind()) {
+ case ValType::F32:
+ abiType = ArgType_Float32 << RetType_Shift;
+ break;
+ case ValType::F64:
+ abiType = ArgType_Float64 << RetType_Shift;
+ break;
+ default:
+ return Nothing();
+ }
+
+ if ((args.length() + 1) > (sizeof(uint32_t) * 8 / ArgType_Shift)) {
+ return Nothing();
+ }
+
+ for (size_t i = 0; i < args.length(); i++) {
+ switch (args[i].kind()) {
+ case ValType::F32:
+ abiType |= (ArgType_Float32 << (ArgType_Shift * (i + 1)));
+ break;
+ case ValType::F64:
+ abiType |= (ArgType_Float64 << (ArgType_Shift * (i + 1)));
+ break;
+ default:
+ return Nothing();
+ }
+ }
+
+ return Some(ABIFunctionType(abiType));
+}
+
+void* wasm::MaybeGetBuiltinThunk(JSFunction* f, const FuncType& funcType) {
+ MOZ_ASSERT(builtinThunks);
+
+ if (!f->isNative() || !f->hasJitInfo() ||
+ f->jitInfo()->type() != JSJitInfo::InlinableNative) {
+ return nullptr;
+ }
+
+ Maybe<ABIFunctionType> abiType = ToBuiltinABIFunctionType(funcType);
+ if (!abiType) {
+ return nullptr;
+ }
+
+ TypedNative typedNative(f->jitInfo()->inlinableNative, *abiType);
+
+ const BuiltinThunks& thunks = *builtinThunks;
+ auto p = thunks.typedNativeToCodeRange.readonlyThreadsafeLookup(typedNative);
+ if (!p) {
+ return nullptr;
+ }
+
+ return thunks.codeBase + thunks.codeRanges[p->value()].begin();
+}
+
+bool wasm::LookupBuiltinThunk(void* pc, const CodeRange** codeRange,
+ uint8_t** codeBase) {
+ if (!builtinThunks) {
+ return false;
+ }
+
+ const BuiltinThunks& thunks = *builtinThunks;
+ if (pc < thunks.codeBase || pc >= thunks.codeBase + thunks.codeSize) {
+ return false;
+ }
+
+ *codeBase = thunks.codeBase;
+
+ CodeRange::OffsetInCode target((uint8_t*)pc - thunks.codeBase);
+ *codeRange = LookupInSorted(thunks.codeRanges, target);
+
+ return !!*codeRange;
+}
diff --git a/js/src/wasm/WasmBuiltins.h b/js/src/wasm/WasmBuiltins.h
new file mode 100644
index 0000000000..5c364863a5
--- /dev/null
+++ b/js/src/wasm/WasmBuiltins.h
@@ -0,0 +1,120 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_builtins_h
+#define wasm_builtins_h
+
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+class WasmFrameIter;
+
+// These provide argument type information for a subset of the SymbolicAddress
+// targets, for which type info is needed to generate correct stackmaps.
+
+extern const SymbolicAddressSignature SASigSinD;
+extern const SymbolicAddressSignature SASigCosD;
+extern const SymbolicAddressSignature SASigTanD;
+extern const SymbolicAddressSignature SASigASinD;
+extern const SymbolicAddressSignature SASigACosD;
+extern const SymbolicAddressSignature SASigATanD;
+extern const SymbolicAddressSignature SASigCeilD;
+extern const SymbolicAddressSignature SASigCeilF;
+extern const SymbolicAddressSignature SASigFloorD;
+extern const SymbolicAddressSignature SASigFloorF;
+extern const SymbolicAddressSignature SASigTruncD;
+extern const SymbolicAddressSignature SASigTruncF;
+extern const SymbolicAddressSignature SASigNearbyIntD;
+extern const SymbolicAddressSignature SASigNearbyIntF;
+extern const SymbolicAddressSignature SASigExpD;
+extern const SymbolicAddressSignature SASigLogD;
+extern const SymbolicAddressSignature SASigPowD;
+extern const SymbolicAddressSignature SASigATan2D;
+extern const SymbolicAddressSignature SASigMemoryGrow;
+extern const SymbolicAddressSignature SASigMemorySize;
+extern const SymbolicAddressSignature SASigWaitI32;
+extern const SymbolicAddressSignature SASigWaitI64;
+extern const SymbolicAddressSignature SASigWake;
+extern const SymbolicAddressSignature SASigMemCopy;
+extern const SymbolicAddressSignature SASigMemCopyShared;
+extern const SymbolicAddressSignature SASigDataDrop;
+extern const SymbolicAddressSignature SASigMemFill;
+extern const SymbolicAddressSignature SASigMemFillShared;
+extern const SymbolicAddressSignature SASigMemInit;
+extern const SymbolicAddressSignature SASigTableCopy;
+extern const SymbolicAddressSignature SASigElemDrop;
+extern const SymbolicAddressSignature SASigTableFill;
+extern const SymbolicAddressSignature SASigTableGet;
+extern const SymbolicAddressSignature SASigTableGrow;
+extern const SymbolicAddressSignature SASigTableInit;
+extern const SymbolicAddressSignature SASigTableSet;
+extern const SymbolicAddressSignature SASigTableSize;
+extern const SymbolicAddressSignature SASigRefFunc;
+extern const SymbolicAddressSignature SASigPreBarrierFiltering;
+extern const SymbolicAddressSignature SASigPostBarrier;
+extern const SymbolicAddressSignature SASigPostBarrierFiltering;
+extern const SymbolicAddressSignature SASigStructNew;
+extern const SymbolicAddressSignature SASigStructNarrow;
+
+// A SymbolicAddress that NeedsBuiltinThunk() will call through a thunk to the
+// C++ function. This will be true for all normal calls from normal wasm
+// function code. Only calls to C++ from other exits/thunks do not need a thunk.
+
+bool NeedsBuiltinThunk(SymbolicAddress sym);
+
+// This function queries whether pc is in one of the process's builtin thunks
+// and, if so, returns the CodeRange and pointer to the code segment that the
+// CodeRange is relative to.
+
+bool LookupBuiltinThunk(void* pc, const CodeRange** codeRange,
+ uint8_t** codeBase);
+
+// EnsureBuiltinThunksInitialized() must be called, and must succeed, before
+// SymbolicAddressTarget() or MaybeGetBuiltinThunk(). This function creates all
+// thunks for the process. ReleaseBuiltinThunks() should be called before
+// ReleaseProcessExecutableMemory() so that the latter can assert that all
+// executable code has been released.
+
+bool EnsureBuiltinThunksInitialized();
+
+void* HandleThrow(JSContext* cx, WasmFrameIter& iter);
+
+void* SymbolicAddressTarget(SymbolicAddress sym);
+
+void* ProvisionalJitEntryStub();
+
+void* MaybeGetBuiltinThunk(JSFunction* f, const FuncType& funcType);
+
+void ReleaseBuiltinThunks();
+
+void* AddressOf(SymbolicAddress imm, jit::ABIFunctionType* abiType);
+
+#ifdef WASM_CODEGEN_DEBUG
+void PrintI32(int32_t val);
+void PrintF32(float val);
+void PrintF64(double val);
+void PrintPtr(uint8_t* val);
+void PrintText(const char* out);
+#endif
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_builtins_h
diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp
new file mode 100644
index 0000000000..50a6f20aab
--- /dev/null
+++ b/js/src/wasm/WasmCode.cpp
@@ -0,0 +1,1510 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCode.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/EnumeratedRange.h"
+
+#include <algorithm>
+
+#include "jsnum.h"
+
+#include "jit/ExecutableAllocator.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "util/Poison.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+#include "wasm/WasmModule.h"
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmStubs.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::BinarySearch;
+using mozilla::MakeEnumeratedRange;
+using mozilla::PodAssign;
+
+size_t LinkData::SymbolicLinkArray::serializedSize() const {
+ size_t size = 0;
+ for (const Uint32Vector& offsets : *this) {
+ size += SerializedPodVectorSize(offsets);
+ }
+ return size;
+}
+
+uint8_t* LinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const {
+ for (const Uint32Vector& offsets : *this) {
+ cursor = SerializePodVector(cursor, offsets);
+ }
+ return cursor;
+}
+
+const uint8_t* LinkData::SymbolicLinkArray::deserialize(const uint8_t* cursor) {
+ for (Uint32Vector& offsets : *this) {
+ cursor = DeserializePodVector(cursor, &offsets);
+ if (!cursor) {
+ return nullptr;
+ }
+ }
+ return cursor;
+}
+
+size_t LinkData::SymbolicLinkArray::sizeOfExcludingThis(
+ MallocSizeOf mallocSizeOf) const {
+ size_t size = 0;
+ for (const Uint32Vector& offsets : *this) {
+ size += offsets.sizeOfExcludingThis(mallocSizeOf);
+ }
+ return size;
+}
+
+size_t LinkData::serializedSize() const {
+ return sizeof(pod()) + SerializedPodVectorSize(internalLinks) +
+ symbolicLinks.serializedSize();
+}
+
+uint8_t* LinkData::serialize(uint8_t* cursor) const {
+ MOZ_ASSERT(tier == Tier::Serialized);
+
+ cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
+ cursor = SerializePodVector(cursor, internalLinks);
+ cursor = symbolicLinks.serialize(cursor);
+ return cursor;
+}
+
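+// The reads below are chained with && so that the first failing step leaves
+// |cursor| null and the remaining steps are skipped; callers only need to
+// null-check the returned cursor.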
+const uint8_t* LinkData::deserialize(const uint8_t* cursor) {
+ MOZ_ASSERT(tier == Tier::Serialized);
+
+ (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
+ (cursor = DeserializePodVector(cursor, &internalLinks)) &&
+ (cursor = symbolicLinks.deserialize(cursor));
+ return cursor;
+}
+
+CodeSegment::~CodeSegment() {
+ if (unregisterOnDestroy_) {
+ UnregisterCodeSegment(this);
+ }
+}
+
+static uint32_t RoundupCodeLength(uint32_t codeLength) {
+ // AllocateExecutableMemory() requires a multiple of ExecutableCodePageSize.
+ return RoundUp(codeLength, ExecutableCodePageSize);
+}
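+
+// For example, assuming an executable code page size of 64 KiB (the actual
+// value is platform-dependent), a 5000-byte code length rounds up to 65536
+// bytes here; AllocateCodeBytes() below zeroes the tail padding.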
+
+/* static */
+UniqueCodeBytes CodeSegment::AllocateCodeBytes(uint32_t codeLength) {
+ if (codeLength > MaxCodeBytesPerProcess) {
+ return nullptr;
+ }
+
+ static_assert(MaxCodeBytesPerProcess <= INT32_MAX, "rounding won't overflow");
+ uint32_t roundedCodeLength = RoundupCodeLength(codeLength);
+
+ void* p =
+ AllocateExecutableMemory(roundedCodeLength, ProtectionSetting::Writable,
+ MemCheckKind::MakeUndefined);
+
+ // If the allocation failed and the embedding gives us a last-ditch attempt
+ // to purge all memory (which, in gecko, does a purging GC/CC/GC), do that
+  // and then retry the allocation.
+ if (!p) {
+ if (OnLargeAllocationFailure) {
+ OnLargeAllocationFailure();
+ p = AllocateExecutableMemory(roundedCodeLength,
+ ProtectionSetting::Writable,
+ MemCheckKind::MakeUndefined);
+ }
+ }
+
+ if (!p) {
+ return nullptr;
+ }
+
+ // Zero the padding.
+ memset(((uint8_t*)p) + codeLength, 0, roundedCodeLength - codeLength);
+
+ // We account for the bytes allocated in WasmModuleObject::create, where we
+ // have the necessary JSContext.
+
+ return UniqueCodeBytes((uint8_t*)p, FreeCode(roundedCodeLength));
+}
+
+bool CodeSegment::initialize(const CodeTier& codeTier) {
+ MOZ_ASSERT(!initialized());
+ codeTier_ = &codeTier;
+ MOZ_ASSERT(initialized());
+
+  // In the case of tiering, RegisterCodeSegment() immediately makes this code
+  // segment visible to other threads executing the containing module, so only
+  // call it once the CodeSegment is fully initialized.
+ if (!RegisterCodeSegment(this)) {
+ return false;
+ }
+
+  // This bool is only used by the destructor, which cannot be called racily,
+  // so it is not a problem to mutate it after RegisterCodeSegment().
+ MOZ_ASSERT(!unregisterOnDestroy_);
+ unregisterOnDestroy_ = true;
+ return true;
+}
+
+const Code& CodeSegment::code() const {
+ MOZ_ASSERT(codeTier_);
+ return codeTier_->code();
+}
+
+void CodeSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const {
+ *code += RoundupCodeLength(length());
+}
+
+void FreeCode::operator()(uint8_t* bytes) {
+ MOZ_ASSERT(codeLength);
+ MOZ_ASSERT(codeLength == RoundupCodeLength(codeLength));
+
+#ifdef MOZ_VTUNE
+ vtune::UnmarkBytes(bytes, codeLength);
+#endif
+ DeallocateExecutableMemory(bytes, codeLength);
+}
+
+static bool StaticallyLink(const ModuleSegment& ms, const LinkData& linkData) {
+ for (LinkData::InternalLink link : linkData.internalLinks) {
+ CodeLabel label;
+ label.patchAt()->bind(link.patchAtOffset);
+ label.target()->bind(link.targetOffset);
+#ifdef JS_CODELABEL_LINKMODE
+ label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
+#endif
+ Assembler::Bind(ms.base(), label);
+ }
+
+ if (!EnsureBuiltinThunksInitialized()) {
+ return false;
+ }
+
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ const Uint32Vector& offsets = linkData.symbolicLinks[imm];
+ if (offsets.empty()) {
+ continue;
+ }
+
+ void* target = SymbolicAddressTarget(imm);
+ for (uint32_t offset : offsets) {
+ uint8_t* patchAt = ms.base() + offset;
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr(target),
+ PatchedImmPtr((void*)-1));
+ }
+ }
+
+ return true;
+}
+
+static void StaticallyUnlink(uint8_t* base, const LinkData& linkData) {
+ for (LinkData::InternalLink link : linkData.internalLinks) {
+ CodeLabel label;
+ label.patchAt()->bind(link.patchAtOffset);
+ label.target()->bind(-size_t(base)); // to reset immediate to null
+#ifdef JS_CODELABEL_LINKMODE
+ label.setLinkMode(static_cast<CodeLabel::LinkMode>(link.mode));
+#endif
+ Assembler::Bind(base, label);
+ }
+
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ const Uint32Vector& offsets = linkData.symbolicLinks[imm];
+ if (offsets.empty()) {
+ continue;
+ }
+
+ void* target = SymbolicAddressTarget(imm);
+ for (uint32_t offset : offsets) {
+ uint8_t* patchAt = base + offset;
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr((void*)-1),
+ PatchedImmPtr(target));
+ }
+ }
+}
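+
+// Note the symmetry between StaticallyLink() and StaticallyUnlink(): linking
+// patches the (void*)-1 placeholders with the resolved symbolic-address
+// targets, and unlinking patches the targets back to (void*)-1 so that
+// serialized code bytes contain no absolute addresses and can be relinked
+// after deserialization.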
+
+#ifdef JS_ION_PERF
+static bool AppendToString(const char* str, UTF8Bytes* bytes) {
+ return bytes->append(str, strlen(str)) && bytes->append('\0');
+}
+#endif
+
+static void SendCodeRangesToProfiler(const ModuleSegment& ms,
+ const Metadata& metadata,
+ const CodeRangeVector& codeRanges) {
+ bool enabled = false;
+#ifdef JS_ION_PERF
+ enabled |= PerfFuncEnabled();
+#endif
+#ifdef MOZ_VTUNE
+ enabled |= vtune::IsProfilingActive();
+#endif
+ if (!enabled) {
+ return;
+ }
+
+ for (const CodeRange& codeRange : codeRanges) {
+ if (!codeRange.hasFuncIndex()) {
+ continue;
+ }
+
+ uintptr_t start = uintptr_t(ms.base() + codeRange.begin());
+ uintptr_t size = codeRange.end() - codeRange.begin();
+
+ UTF8Bytes name;
+ if (!metadata.getFuncNameStandalone(codeRange.funcIndex(), &name)) {
+ return;
+ }
+
+ // Avoid "unused" warnings
+ (void)start;
+ (void)size;
+
+#ifdef JS_ION_PERF
+ if (PerfFuncEnabled()) {
+ const char* file = metadata.filename.get();
+ if (codeRange.isFunction()) {
+ if (!name.append('\0')) {
+ return;
+ }
+ unsigned line = codeRange.funcLineOrBytecode();
+ writePerfSpewerWasmFunctionMap(start, size, file, line, name.begin());
+ } else if (codeRange.isInterpEntry()) {
+ if (!AppendToString(" slow entry", &name)) {
+ return;
+ }
+ writePerfSpewerWasmMap(start, size, file, name.begin());
+ } else if (codeRange.isJitEntry()) {
+ if (!AppendToString(" fast entry", &name)) {
+ return;
+ }
+ writePerfSpewerWasmMap(start, size, file, name.begin());
+ } else if (codeRange.isImportInterpExit()) {
+ if (!AppendToString(" slow exit", &name)) {
+ return;
+ }
+ writePerfSpewerWasmMap(start, size, file, name.begin());
+ } else if (codeRange.isImportJitExit()) {
+ if (!AppendToString(" fast exit", &name)) {
+ return;
+ }
+ writePerfSpewerWasmMap(start, size, file, name.begin());
+ } else {
+ MOZ_CRASH("unhandled perf hasFuncIndex type");
+ }
+ }
+#endif
+#ifdef MOZ_VTUNE
+ if (!vtune::IsProfilingActive()) {
+ continue;
+ }
+ if (!codeRange.isFunction()) {
+ continue;
+ }
+ if (!name.append('\0')) {
+ return;
+ }
+ vtune::MarkWasm(vtune::GenerateUniqueMethodID(), name.begin(), (void*)start,
+ size);
+#endif
+ }
+}
+
+ModuleSegment::ModuleSegment(Tier tier, UniqueCodeBytes codeBytes,
+ uint32_t codeLength, const LinkData& linkData)
+ : CodeSegment(std::move(codeBytes), codeLength, CodeSegment::Kind::Module),
+ tier_(tier),
+ trapCode_(base() + linkData.trapOffset) {}
+
+/* static */
+UniqueModuleSegment ModuleSegment::create(Tier tier, MacroAssembler& masm,
+ const LinkData& linkData) {
+ uint32_t codeLength = masm.bytesNeeded();
+
+ UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
+ if (!codeBytes) {
+ return nullptr;
+ }
+
+ masm.executableCopy(codeBytes.get());
+
+ return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
+ linkData);
+}
+
+/* static */
+UniqueModuleSegment ModuleSegment::create(Tier tier, const Bytes& unlinkedBytes,
+ const LinkData& linkData) {
+ uint32_t codeLength = unlinkedBytes.length();
+
+ UniqueCodeBytes codeBytes = AllocateCodeBytes(codeLength);
+ if (!codeBytes) {
+ return nullptr;
+ }
+
+ memcpy(codeBytes.get(), unlinkedBytes.begin(), codeLength);
+
+ return js::MakeUnique<ModuleSegment>(tier, std::move(codeBytes), codeLength,
+ linkData);
+}
+
+bool ModuleSegment::initialize(IsTier2 isTier2, const CodeTier& codeTier,
+ const LinkData& linkData,
+ const Metadata& metadata,
+ const MetadataTier& metadataTier) {
+ if (!StaticallyLink(*this, linkData)) {
+ return false;
+ }
+
+ // Optimized compilation finishes on a background thread, so we must make sure
+ // to flush the icaches of all the executing threads.
+ FlushICacheSpec flushIcacheSpec = isTier2 == IsTier2::Tier2
+ ? FlushICacheSpec::AllThreads
+ : FlushICacheSpec::LocalThreadOnly;
+
+ // Reprotect the whole region to avoid having separate RW and RX mappings.
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(
+ flushIcacheSpec, base(), RoundupCodeLength(length()))) {
+ return false;
+ }
+
+ SendCodeRangesToProfiler(*this, metadata, metadataTier.codeRanges);
+
+ // See comments in CodeSegment::initialize() for why this must be last.
+ return CodeSegment::initialize(codeTier);
+}
+
+size_t ModuleSegment::serializedSize() const {
+ return sizeof(uint32_t) + length();
+}
+
+void ModuleSegment::addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* code, size_t* data) const {
+ CodeSegment::addSizeOfMisc(mallocSizeOf, code);
+ *data += mallocSizeOf(this);
+}
+
+uint8_t* ModuleSegment::serialize(uint8_t* cursor,
+ const LinkData& linkData) const {
+ MOZ_ASSERT(tier() == Tier::Serialized);
+
+ cursor = WriteScalar<uint32_t>(cursor, length());
+ uint8_t* serializedBase = cursor;
+ cursor = WriteBytes(cursor, base(), length());
+ StaticallyUnlink(serializedBase, linkData);
+ return cursor;
+}
+
+/* static */ const uint8_t* ModuleSegment::deserialize(
+ const uint8_t* cursor, const LinkData& linkData,
+ UniqueModuleSegment* segment) {
+ uint32_t length;
+ cursor = ReadScalar<uint32_t>(cursor, &length);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ UniqueCodeBytes bytes = AllocateCodeBytes(length);
+ if (!bytes) {
+ return nullptr;
+ }
+
+ cursor = ReadBytes(cursor, bytes.get(), length);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ *segment = js::MakeUnique<ModuleSegment>(Tier::Serialized, std::move(bytes),
+ length, linkData);
+ if (!*segment) {
+ return nullptr;
+ }
+
+ return cursor;
+}
+
+const CodeRange* ModuleSegment::lookupRange(const void* pc) const {
+ return codeTier().lookupRange(pc);
+}
+
+size_t FuncExport::serializedSize() const {
+ return funcType_.serializedSize() + sizeof(pod);
+}
+
+uint8_t* FuncExport::serialize(uint8_t* cursor) const {
+ cursor = funcType_.serialize(cursor);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t* FuncExport::deserialize(const uint8_t* cursor) {
+ (cursor = funcType_.deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+size_t FuncExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return funcType_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t FuncImport::serializedSize() const {
+ return funcType_.serializedSize() + sizeof(pod);
+}
+
+uint8_t* FuncImport::serialize(uint8_t* cursor) const {
+ cursor = funcType_.serialize(cursor);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t* FuncImport::deserialize(const uint8_t* cursor) {
+ (cursor = funcType_.deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+size_t FuncImport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return funcType_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+static size_t StringLengthWithNullChar(const char* chars) {
+ return chars ? strlen(chars) + 1 : 0;
+}
+
+size_t CacheableChars::serializedSize() const {
+ return sizeof(uint32_t) + StringLengthWithNullChar(get());
+}
+
+uint8_t* CacheableChars::serialize(uint8_t* cursor) const {
+ uint32_t lengthWithNullChar = StringLengthWithNullChar(get());
+ cursor = WriteScalar<uint32_t>(cursor, lengthWithNullChar);
+ cursor = WriteBytes(cursor, get(), lengthWithNullChar);
+ return cursor;
+}
+
+const uint8_t* CacheableChars::deserialize(const uint8_t* cursor) {
+ uint32_t lengthWithNullChar;
+ cursor = ReadBytes(cursor, &lengthWithNullChar, sizeof(uint32_t));
+
+ if (lengthWithNullChar) {
+ reset(js_pod_malloc<char>(lengthWithNullChar));
+ if (!get()) {
+ return nullptr;
+ }
+
+ cursor = ReadBytes(cursor, get(), lengthWithNullChar);
+ } else {
+ MOZ_ASSERT(!get());
+ }
+
+ return cursor;
+}
+
+size_t CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return mallocSizeOf(get());
+}
+
+size_t MetadataTier::serializedSize() const {
+ return SerializedPodVectorSize(funcToCodeRange) +
+ SerializedPodVectorSize(codeRanges) +
+ SerializedPodVectorSize(callSites) + trapSites.serializedSize() +
+ SerializedVectorSize(funcImports) + SerializedVectorSize(funcExports);
+}
+
+size_t MetadataTier::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return funcToCodeRange.sizeOfExcludingThis(mallocSizeOf) +
+ codeRanges.sizeOfExcludingThis(mallocSizeOf) +
+ callSites.sizeOfExcludingThis(mallocSizeOf) +
+ trapSites.sizeOfExcludingThis(mallocSizeOf) +
+ SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
+ SizeOfVectorExcludingThis(funcExports, mallocSizeOf);
+}
+
+uint8_t* MetadataTier::serialize(uint8_t* cursor) const {
+ cursor = SerializePodVector(cursor, funcToCodeRange);
+ cursor = SerializePodVector(cursor, codeRanges);
+ cursor = SerializePodVector(cursor, callSites);
+ cursor = trapSites.serialize(cursor);
+ cursor = SerializeVector(cursor, funcImports);
+ cursor = SerializeVector(cursor, funcExports);
+ MOZ_ASSERT(debugTrapFarJumpOffsets.empty());
+ return cursor;
+}
+
+/* static */ const uint8_t* MetadataTier::deserialize(const uint8_t* cursor) {
+ (cursor = DeserializePodVector(cursor, &funcToCodeRange)) &&
+ (cursor = DeserializePodVector(cursor, &codeRanges)) &&
+ (cursor = DeserializePodVector(cursor, &callSites)) &&
+ (cursor = trapSites.deserialize(cursor)) &&
+ (cursor = DeserializeVector(cursor, &funcImports)) &&
+ (cursor = DeserializeVector(cursor, &funcExports));
+ MOZ_ASSERT(debugTrapFarJumpOffsets.empty());
+ return cursor;
+}
+
+UniqueLazyStubSegment LazyStubSegment::create(const CodeTier& codeTier,
+ size_t length) {
+ UniqueCodeBytes codeBytes = AllocateCodeBytes(length);
+ if (!codeBytes) {
+ return nullptr;
+ }
+
+ auto segment = js::MakeUnique<LazyStubSegment>(std::move(codeBytes), length);
+ if (!segment || !segment->initialize(codeTier)) {
+ return nullptr;
+ }
+
+ return segment;
+}
+
+bool LazyStubSegment::hasSpace(size_t bytes) const {
+ MOZ_ASSERT(AlignBytesNeeded(bytes) == bytes);
+ return bytes <= length() && usedBytes_ <= length() - bytes;
+}
+
+bool LazyStubSegment::addStubs(size_t codeLength,
+ const Uint32Vector& funcExportIndices,
+ const FuncExportVector& funcExports,
+ const CodeRangeVector& codeRanges,
+ uint8_t** codePtr,
+ size_t* indexFirstInsertedCodeRange) {
+ MOZ_ASSERT(hasSpace(codeLength));
+
+ size_t offsetInSegment = usedBytes_;
+ *codePtr = base() + usedBytes_;
+ usedBytes_ += codeLength;
+
+ *indexFirstInsertedCodeRange = codeRanges_.length();
+
+ if (!codeRanges_.reserve(codeRanges_.length() + 2 * codeRanges.length())) {
+ return false;
+ }
+
+ size_t i = 0;
+ for (uint32_t funcExportIndex : funcExportIndices) {
+ const CodeRange& interpRange = codeRanges[i];
+ MOZ_ASSERT(interpRange.isInterpEntry());
+ MOZ_ASSERT(interpRange.funcIndex() ==
+ funcExports[funcExportIndex].funcIndex());
+
+ codeRanges_.infallibleAppend(interpRange);
+ codeRanges_.back().offsetBy(offsetInSegment);
+ i++;
+
+ if (funcExports[funcExportIndex].funcType().hasUnexposableArgOrRet()) {
+ continue;
+ }
+ if (funcExports[funcExportIndex]
+ .funcType()
+ .temporarilyUnsupportedReftypeForEntry()) {
+ continue;
+ }
+
+ const CodeRange& jitRange = codeRanges[i];
+ MOZ_ASSERT(jitRange.isJitEntry());
+ MOZ_ASSERT(jitRange.funcIndex() == interpRange.funcIndex());
+
+ codeRanges_.infallibleAppend(jitRange);
+ codeRanges_.back().offsetBy(offsetInSegment);
+ i++;
+ }
+
+ return true;
+}
+
+const CodeRange* LazyStubSegment::lookupRange(const void* pc) const {
+ return LookupInSorted(codeRanges_,
+ CodeRange::OffsetInCode((uint8_t*)pc - base()));
+}
+
+void LazyStubSegment::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const {
+ CodeSegment::addSizeOfMisc(mallocSizeOf, code);
+ *data += codeRanges_.sizeOfExcludingThis(mallocSizeOf);
+ *data += mallocSizeOf(this);
+}
+
+struct ProjectLazyFuncIndex {
+ const LazyFuncExportVector& funcExports;
+ explicit ProjectLazyFuncIndex(const LazyFuncExportVector& funcExports)
+ : funcExports(funcExports) {}
+ uint32_t operator[](size_t index) const {
+ return funcExports[index].funcIndex;
+ }
+};
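+
+// ProjectLazyFuncIndex adapts the funcIndex-sorted |exports_| vector so that
+// mozilla::BinarySearch sees only the funcIndex keys, as in:
+//
+//   size_t match;
+//   BinarySearch(ProjectLazyFuncIndex(exports_), 0, exports_.length(),
+//                funcIndex, &match);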
+
+static constexpr unsigned LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE = 8 * 1024;
+
+bool LazyStubTier::createMany(const Uint32Vector& funcExportIndices,
+ const CodeTier& codeTier,
+ bool flushAllThreadsIcaches,
+ size_t* stubSegmentIndex) {
+ MOZ_ASSERT(funcExportIndices.length());
+
+ LifoAlloc lifo(LAZY_STUB_LIFO_DEFAULT_CHUNK_SIZE);
+ TempAllocator alloc(&lifo);
+ JitContext jitContext(&alloc);
+ WasmMacroAssembler masm(alloc);
+
+ const MetadataTier& metadata = codeTier.metadata();
+ const FuncExportVector& funcExports = metadata.funcExports;
+ uint8_t* moduleSegmentBase = codeTier.segment().base();
+
+ CodeRangeVector codeRanges;
+ DebugOnly<uint32_t> numExpectedRanges = 0;
+ for (uint32_t funcExportIndex : funcExportIndices) {
+ const FuncExport& fe = funcExports[funcExportIndex];
+    // Entries with unsupported types get only the interp entry.
+ bool unsupportedType =
+ fe.funcType().hasUnexposableArgOrRet() ||
+ fe.funcType().temporarilyUnsupportedReftypeForEntry();
+ numExpectedRanges += (unsupportedType ? 1 : 2);
+ void* calleePtr =
+ moduleSegmentBase + metadata.codeRange(fe).funcUncheckedCallEntry();
+ Maybe<ImmPtr> callee;
+ callee.emplace(calleePtr, ImmPtr::NoCheckToken());
+ if (!GenerateEntryStubs(masm, funcExportIndex, fe, callee,
+ /* asmjs */ false, &codeRanges)) {
+ return false;
+ }
+ }
+ MOZ_ASSERT(codeRanges.length() == numExpectedRanges,
+ "incorrect number of entries per function");
+
+ masm.finish();
+
+ MOZ_ASSERT(masm.callSites().empty());
+ MOZ_ASSERT(masm.callSiteTargets().empty());
+ MOZ_ASSERT(masm.trapSites().empty());
+
+ if (masm.oom()) {
+ return false;
+ }
+
+ size_t codeLength = LazyStubSegment::AlignBytesNeeded(masm.bytesNeeded());
+
+ if (!stubSegments_.length() ||
+ !stubSegments_[lastStubSegmentIndex_]->hasSpace(codeLength)) {
+ size_t newSegmentSize = std::max(codeLength, ExecutableCodePageSize);
+ UniqueLazyStubSegment newSegment =
+ LazyStubSegment::create(codeTier, newSegmentSize);
+ if (!newSegment) {
+ return false;
+ }
+ lastStubSegmentIndex_ = stubSegments_.length();
+ if (!stubSegments_.emplaceBack(std::move(newSegment))) {
+ return false;
+ }
+ }
+
+ LazyStubSegment* segment = stubSegments_[lastStubSegmentIndex_].get();
+ *stubSegmentIndex = lastStubSegmentIndex_;
+
+ size_t interpRangeIndex;
+ uint8_t* codePtr = nullptr;
+  if (!segment->addStubs(codeLength, funcExportIndices, funcExports,
+                         codeRanges, &codePtr, &interpRangeIndex)) {
+    return false;
+  }
+
+ masm.executableCopy(codePtr);
+ PatchDebugSymbolicAccesses(codePtr, masm);
+ memset(codePtr + masm.bytesNeeded(), 0, codeLength - masm.bytesNeeded());
+
+ for (const CodeLabel& label : masm.codeLabels()) {
+ Assembler::Bind(codePtr, label);
+ }
+
+ // Optimized compilation finishes on a background thread, so we must make sure
+ // to flush the icaches of all the executing threads.
+ FlushICacheSpec flushIcacheSpec = flushAllThreadsIcaches
+ ? FlushICacheSpec::AllThreads
+ : FlushICacheSpec::LocalThreadOnly;
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(flushIcacheSpec,
+ codePtr, codeLength)) {
+ return false;
+ }
+
+ // Create lazy function exports for funcIndex -> entry lookup.
+ if (!exports_.reserve(exports_.length() + funcExportIndices.length())) {
+ return false;
+ }
+
+ for (uint32_t funcExportIndex : funcExportIndices) {
+ const FuncExport& fe = funcExports[funcExportIndex];
+
+ DebugOnly<CodeRange> cr = segment->codeRanges()[interpRangeIndex];
+ MOZ_ASSERT(cr.value.isInterpEntry());
+ MOZ_ASSERT(cr.value.funcIndex() == fe.funcIndex());
+
+ LazyFuncExport lazyExport(fe.funcIndex(), *stubSegmentIndex,
+ interpRangeIndex);
+
+ size_t exportIndex;
+ MOZ_ALWAYS_FALSE(BinarySearch(ProjectLazyFuncIndex(exports_), 0,
+ exports_.length(), fe.funcIndex(),
+ &exportIndex));
+ MOZ_ALWAYS_TRUE(
+ exports_.insert(exports_.begin() + exportIndex, std::move(lazyExport)));
+
+ // Functions with unsupported types in their sig have only one entry
+ // (interp). All other functions get an extra jit entry.
+ bool unsupportedType =
+ fe.funcType().hasUnexposableArgOrRet() ||
+ fe.funcType().temporarilyUnsupportedReftypeForEntry();
+ interpRangeIndex += (unsupportedType ? 1 : 2);
+ }
+
+ return true;
+}
+
+bool LazyStubTier::createOne(uint32_t funcExportIndex,
+ const CodeTier& codeTier) {
+ Uint32Vector funcExportIndexes;
+ if (!funcExportIndexes.append(funcExportIndex)) {
+ return false;
+ }
+
+ // This happens on the executing thread (called via GetInterpEntry), so no
+ // need to flush the icaches on all the threads.
+ bool flushAllThreadIcaches = false;
+
+ size_t stubSegmentIndex;
+ if (!createMany(funcExportIndexes, codeTier, flushAllThreadIcaches,
+ &stubSegmentIndex)) {
+ return false;
+ }
+
+ const UniqueLazyStubSegment& segment = stubSegments_[stubSegmentIndex];
+ const CodeRangeVector& codeRanges = segment->codeRanges();
+
+ // Functions that have unsupported types in their sig don't get a jit
+ // entry.
+ if (codeTier.metadata()
+ .funcExports[funcExportIndex]
+ .funcType()
+ .temporarilyUnsupportedReftypeForEntry() ||
+ codeTier.metadata()
+ .funcExports[funcExportIndex]
+ .funcType()
+ .hasUnexposableArgOrRet()) {
+ MOZ_ASSERT(codeRanges.length() >= 1);
+ MOZ_ASSERT(codeRanges.back().isInterpEntry());
+ return true;
+ }
+
+ MOZ_ASSERT(codeRanges.length() >= 2);
+ MOZ_ASSERT(codeRanges[codeRanges.length() - 2].isInterpEntry());
+
+ const CodeRange& cr = codeRanges[codeRanges.length() - 1];
+ MOZ_ASSERT(cr.isJitEntry());
+
+ codeTier.code().setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
+ return true;
+}
+
+bool LazyStubTier::createTier2(const Uint32Vector& funcExportIndices,
+ const CodeTier& codeTier,
+ Maybe<size_t>* outStubSegmentIndex) {
+ if (!funcExportIndices.length()) {
+ return true;
+ }
+
+ // This compilation happens on a background compiler thread, so the icache may
+ // need to be flushed on all the threads.
+ bool flushAllThreadIcaches = true;
+
+ size_t stubSegmentIndex;
+ if (!createMany(funcExportIndices, codeTier, flushAllThreadIcaches,
+ &stubSegmentIndex)) {
+ return false;
+ }
+
+ outStubSegmentIndex->emplace(stubSegmentIndex);
+ return true;
+}
+
+void LazyStubTier::setJitEntries(const Maybe<size_t>& stubSegmentIndex,
+ const Code& code) {
+ if (!stubSegmentIndex) {
+ return;
+ }
+ const UniqueLazyStubSegment& segment = stubSegments_[*stubSegmentIndex];
+ for (const CodeRange& cr : segment->codeRanges()) {
+ if (!cr.isJitEntry()) {
+ continue;
+ }
+ code.setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
+ }
+}
+
+bool LazyStubTier::hasStub(uint32_t funcIndex) const {
+ size_t match;
+ return BinarySearch(ProjectLazyFuncIndex(exports_), 0, exports_.length(),
+ funcIndex, &match);
+}
+
+void* LazyStubTier::lookupInterpEntry(uint32_t funcIndex) const {
+ size_t match;
+ if (!BinarySearch(ProjectLazyFuncIndex(exports_), 0, exports_.length(),
+ funcIndex, &match)) {
+ return nullptr;
+ }
+ const LazyFuncExport& fe = exports_[match];
+ const LazyStubSegment& stub = *stubSegments_[fe.lazyStubSegmentIndex];
+ return stub.base() + stub.codeRanges()[fe.funcCodeRangeIndex].begin();
+}
+
+void LazyStubTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const {
+ *data += sizeof(*this);
+ *data += exports_.sizeOfExcludingThis(mallocSizeOf);
+ for (const UniqueLazyStubSegment& stub : stubSegments_) {
+ stub->addSizeOfMisc(mallocSizeOf, code, data);
+ }
+}
+
+bool MetadataTier::clone(const MetadataTier& src) {
+ if (!funcToCodeRange.appendAll(src.funcToCodeRange)) {
+ return false;
+ }
+ if (!codeRanges.appendAll(src.codeRanges)) {
+ return false;
+ }
+ if (!callSites.appendAll(src.callSites)) {
+ return false;
+ }
+ if (!debugTrapFarJumpOffsets.appendAll(src.debugTrapFarJumpOffsets)) {
+ return false;
+ }
+
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ if (!trapSites[trap].appendAll(src.trapSites[trap])) {
+ return false;
+ }
+ }
+
+ if (!funcImports.resize(src.funcImports.length())) {
+ return false;
+ }
+ for (size_t i = 0; i < src.funcImports.length(); i++) {
+ funcImports[i].clone(src.funcImports[i]);
+ }
+
+ if (!funcExports.resize(src.funcExports.length())) {
+ return false;
+ }
+ for (size_t i = 0; i < src.funcExports.length(); i++) {
+ funcExports[i].clone(src.funcExports[i]);
+ }
+
+ return true;
+}
+
+size_t Metadata::serializedSize() const {
+ return sizeof(pod()) + SerializedVectorSize(types) +
+ SerializedPodVectorSize(globals) + SerializedPodVectorSize(tables) +
+#ifdef ENABLE_WASM_EXCEPTIONS
+ SerializedPodVectorSize(events) +
+#endif
+ sizeof(moduleName) + SerializedPodVectorSize(funcNames) +
+ filename.serializedSize() + sourceMapURL.serializedSize();
+}
+
+uint8_t* Metadata::serialize(uint8_t* cursor) const {
+ MOZ_ASSERT(!debugEnabled && debugFuncArgTypes.empty() &&
+ debugFuncReturnTypes.empty());
+ cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
+ cursor = SerializeVector(cursor, types);
+ cursor = SerializePodVector(cursor, globals);
+ cursor = SerializePodVector(cursor, tables);
+#ifdef ENABLE_WASM_EXCEPTIONS
+ cursor = SerializePodVector(cursor, events);
+#endif
+ cursor = WriteBytes(cursor, &moduleName, sizeof(moduleName));
+ cursor = SerializePodVector(cursor, funcNames);
+ cursor = filename.serialize(cursor);
+ cursor = sourceMapURL.serialize(cursor);
+ return cursor;
+}
+
+/* static */ const uint8_t* Metadata::deserialize(const uint8_t* cursor) {
+ (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
+ (cursor = DeserializeVector(cursor, &types)) &&
+ (cursor = DeserializePodVector(cursor, &globals)) &&
+ (cursor = DeserializePodVector(cursor, &tables)) &&
+#ifdef ENABLE_WASM_EXCEPTIONS
+ (cursor = DeserializePodVector(cursor, &events)) &&
+#endif
+ (cursor = ReadBytes(cursor, &moduleName, sizeof(moduleName))) &&
+ (cursor = DeserializePodVector(cursor, &funcNames)) &&
+ (cursor = filename.deserialize(cursor)) &&
+ (cursor = sourceMapURL.deserialize(cursor));
+ debugEnabled = false;
+ debugFuncArgTypes.clear();
+ debugFuncReturnTypes.clear();
+ return cursor;
+}
+
+size_t Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return SizeOfVectorExcludingThis(types, mallocSizeOf) +
+ globals.sizeOfExcludingThis(mallocSizeOf) +
+ tables.sizeOfExcludingThis(mallocSizeOf) +
+#ifdef ENABLE_WASM_EXCEPTIONS
+ events.sizeOfExcludingThis(mallocSizeOf) +
+#endif
+ funcNames.sizeOfExcludingThis(mallocSizeOf) +
+ filename.sizeOfExcludingThis(mallocSizeOf) +
+ sourceMapURL.sizeOfExcludingThis(mallocSizeOf);
+}
+
+struct ProjectFuncIndex {
+ const FuncExportVector& funcExports;
+ explicit ProjectFuncIndex(const FuncExportVector& funcExports)
+ : funcExports(funcExports) {}
+ uint32_t operator[](size_t index) const {
+ return funcExports[index].funcIndex();
+ }
+};
+
+FuncExport& MetadataTier::lookupFuncExport(
+ uint32_t funcIndex, size_t* funcExportIndex /* = nullptr */) {
+ size_t match;
+ if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(),
+ funcIndex, &match)) {
+ MOZ_CRASH("missing function export");
+ }
+ if (funcExportIndex) {
+ *funcExportIndex = match;
+ }
+ return funcExports[match];
+}
+
+const FuncExport& MetadataTier::lookupFuncExport(
+ uint32_t funcIndex, size_t* funcExportIndex) const {
+ return const_cast<MetadataTier*>(this)->lookupFuncExport(funcIndex,
+ funcExportIndex);
+}
+
+static bool AppendName(const Bytes& namePayload, const Name& name,
+ UTF8Bytes* bytes) {
+ MOZ_RELEASE_ASSERT(name.offsetInNamePayload <= namePayload.length());
+ MOZ_RELEASE_ASSERT(name.length <=
+ namePayload.length() - name.offsetInNamePayload);
+ return bytes->append(
+ (const char*)namePayload.begin() + name.offsetInNamePayload, name.length);
+}
+
+static bool AppendFunctionIndexName(uint32_t funcIndex, UTF8Bytes* bytes) {
+ const char beforeFuncIndex[] = "wasm-function[";
+ const char afterFuncIndex[] = "]";
+
+ ToCStringBuf cbuf;
+ const char* funcIndexStr = NumberToCString(nullptr, &cbuf, funcIndex);
+ MOZ_ASSERT(funcIndexStr);
+
+ return bytes->append(beforeFuncIndex, strlen(beforeFuncIndex)) &&
+ bytes->append(funcIndexStr, strlen(funcIndexStr)) &&
+ bytes->append(afterFuncIndex, strlen(afterFuncIndex));
+}
+
+bool Metadata::getFuncName(NameContext ctx, uint32_t funcIndex,
+ UTF8Bytes* name) const {
+ if (moduleName && moduleName->length != 0) {
+ if (!AppendName(namePayload->bytes, *moduleName, name)) {
+ return false;
+ }
+ if (!name->append('.')) {
+ return false;
+ }
+ }
+
+ if (funcIndex < funcNames.length() && funcNames[funcIndex].length != 0) {
+ return AppendName(namePayload->bytes, funcNames[funcIndex], name);
+ }
+
+ if (ctx == NameContext::BeforeLocation) {
+ return true;
+ }
+
+ return AppendFunctionIndexName(funcIndex, name);
+}
+
+bool CodeTier::initialize(IsTier2 isTier2, const Code& code,
+ const LinkData& linkData, const Metadata& metadata) {
+ MOZ_ASSERT(!initialized());
+ code_ = &code;
+
+ MOZ_ASSERT(lazyStubs_.lock()->empty());
+
+ // See comments in CodeSegment::initialize() for why this must be last.
+ if (!segment_->initialize(isTier2, *this, linkData, metadata, *metadata_)) {
+ return false;
+ }
+
+ MOZ_ASSERT(initialized());
+ return true;
+}
+
+size_t CodeTier::serializedSize() const {
+ return segment_->serializedSize() + metadata_->serializedSize();
+}
+
+uint8_t* CodeTier::serialize(uint8_t* cursor, const LinkData& linkData) const {
+ cursor = metadata_->serialize(cursor);
+ cursor = segment_->serialize(cursor, linkData);
+ return cursor;
+}
+
+/* static */ const uint8_t* CodeTier::deserialize(const uint8_t* cursor,
+ const LinkData& linkData,
+ UniqueCodeTier* codeTier) {
+ auto metadata = js::MakeUnique<MetadataTier>(Tier::Serialized);
+ if (!metadata) {
+ return nullptr;
+ }
+ cursor = metadata->deserialize(cursor);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ UniqueModuleSegment segment;
+ cursor = ModuleSegment::deserialize(cursor, linkData, &segment);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ *codeTier = js::MakeUnique<CodeTier>(std::move(metadata), std::move(segment));
+ if (!*codeTier) {
+ return nullptr;
+ }
+
+ return cursor;
+}
+
+void CodeTier::addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const {
+ segment_->addSizeOfMisc(mallocSizeOf, code, data);
+ lazyStubs_.lock()->addSizeOfMisc(mallocSizeOf, code, data);
+ *data += metadata_->sizeOfExcludingThis(mallocSizeOf);
+}
+
+const CodeRange* CodeTier::lookupRange(const void* pc) const {
+ CodeRange::OffsetInCode target((uint8_t*)pc - segment_->base());
+ return LookupInSorted(metadata_->codeRanges, target);
+}
+
+bool JumpTables::init(CompileMode mode, const ModuleSegment& ms,
+ const CodeRangeVector& codeRanges) {
+ static_assert(JSScript::offsetOfJitCodeRaw() == 0,
+ "wasm fast jit entry is at (void*) jit[funcIndex]");
+
+ mode_ = mode;
+
+ size_t numFuncs = 0;
+ for (const CodeRange& cr : codeRanges) {
+ if (cr.isFunction()) {
+ numFuncs++;
+ }
+ }
+
+ numFuncs_ = numFuncs;
+
+ if (mode_ == CompileMode::Tier1) {
+ tiering_ = TablePointer(js_pod_calloc<void*>(numFuncs));
+ if (!tiering_) {
+ return false;
+ }
+ }
+
+  // The number of jit entries is overestimated, but this keeps filling and
+  // looking up the jit entries simple, and it is safe (worst case we'll crash
+  // because of a null deref when trying to call the jit entry of an
+  // unexported function).
+ jit_ = TablePointer(js_pod_calloc<void*>(numFuncs));
+ if (!jit_) {
+ return false;
+ }
+
+ uint8_t* codeBase = ms.base();
+ for (const CodeRange& cr : codeRanges) {
+ if (cr.isFunction()) {
+ setTieringEntry(cr.funcIndex(), codeBase + cr.funcTierEntry());
+ } else if (cr.isJitEntry()) {
+ setJitEntry(cr.funcIndex(), codeBase + cr.begin());
+ }
+ }
+ return true;
+}
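+
+// After init(), jit_[funcIndex] holds the address of the function's jit entry
+// stub when the code ranges provided one and remains null otherwise (see the
+// overestimation comment above), while the Tier1-only tiering_ table records
+// each function's tier entry taken from its code range.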
+
+Code::Code(UniqueCodeTier tier1, const Metadata& metadata,
+ JumpTables&& maybeJumpTables)
+ : tier1_(std::move(tier1)),
+ metadata_(&metadata),
+ profilingLabels_(mutexid::WasmCodeProfilingLabels,
+ CacheableCharsVector()),
+ jumpTables_(std::move(maybeJumpTables)) {}
+
+bool Code::initialize(const LinkData& linkData) {
+ MOZ_ASSERT(!initialized());
+
+ if (!tier1_->initialize(IsTier2::NotTier2, *this, linkData, *metadata_)) {
+ return false;
+ }
+
+ MOZ_ASSERT(initialized());
+ return true;
+}
+
+bool Code::setTier2(UniqueCodeTier tier2, const LinkData& linkData) const {
+ MOZ_RELEASE_ASSERT(!hasTier2());
+ MOZ_RELEASE_ASSERT(tier2->tier() == Tier::Optimized &&
+ tier1_->tier() == Tier::Baseline);
+
+ if (!tier2->initialize(IsTier2::Tier2, *this, linkData, *metadata_)) {
+ return false;
+ }
+
+ tier2_ = std::move(tier2);
+
+ return true;
+}
+
+void Code::commitTier2() const {
+ MOZ_RELEASE_ASSERT(!hasTier2());
+ MOZ_RELEASE_ASSERT(tier2_.get());
+ hasTier2_ = true;
+ MOZ_ASSERT(hasTier2());
+}
+
+uint32_t Code::getFuncIndex(JSFunction* fun) const {
+ MOZ_ASSERT(fun->isWasm() || fun->isAsmJSNative());
+ if (!fun->isWasmWithJitEntry()) {
+ return fun->wasmFuncIndex();
+ }
+ return jumpTables_.funcIndexFromJitEntry(fun->wasmJitEntry());
+}
+
+Tiers Code::tiers() const {
+ if (hasTier2()) {
+ return Tiers(tier1_->tier(), tier2_->tier());
+ }
+ return Tiers(tier1_->tier());
+}
+
+bool Code::hasTier(Tier t) const {
+ if (hasTier2() && tier2_->tier() == t) {
+ return true;
+ }
+ return tier1_->tier() == t;
+}
+
+Tier Code::stableTier() const { return tier1_->tier(); }
+
+Tier Code::bestTier() const {
+ if (hasTier2()) {
+ return tier2_->tier();
+ }
+ return tier1_->tier();
+}
+
+const CodeTier& Code::codeTier(Tier tier) const {
+ switch (tier) {
+ case Tier::Baseline:
+ if (tier1_->tier() == Tier::Baseline) {
+ MOZ_ASSERT(tier1_->initialized());
+ return *tier1_;
+ }
+ MOZ_CRASH("No code segment at this tier");
+ case Tier::Optimized:
+ if (tier1_->tier() == Tier::Optimized) {
+ MOZ_ASSERT(tier1_->initialized());
+ return *tier1_;
+ }
+ if (tier2_) {
+ MOZ_ASSERT(tier2_->initialized());
+ return *tier2_;
+ }
+ MOZ_CRASH("No code segment at this tier");
+ }
+ MOZ_CRASH();
+}
+
+bool Code::containsCodePC(const void* pc) const {
+ for (Tier t : tiers()) {
+ const ModuleSegment& ms = segment(t);
+ if (ms.containsCodePC(pc)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+struct CallSiteRetAddrOffset {
+ const CallSiteVector& callSites;
+ explicit CallSiteRetAddrOffset(const CallSiteVector& callSites)
+ : callSites(callSites) {}
+ uint32_t operator[](size_t index) const {
+ return callSites[index].returnAddressOffset();
+ }
+};
+
+const CallSite* Code::lookupCallSite(void* returnAddress) const {
+ for (Tier t : tiers()) {
+ uint32_t target = ((uint8_t*)returnAddress) - segment(t).base();
+ size_t lowerBound = 0;
+ size_t upperBound = metadata(t).callSites.length();
+
+ size_t match;
+    if (BinarySearch(CallSiteRetAddrOffset(metadata(t).callSites), lowerBound,
+                     upperBound, target, &match)) {
+      return &metadata(t).callSites[match];
+    }
+ }
+
+ return nullptr;
+}
+
+const CodeRange* Code::lookupFuncRange(void* pc) const {
+ for (Tier t : tiers()) {
+ const CodeRange* result = codeTier(t).lookupRange(pc);
+ if (result && result->isFunction()) {
+ return result;
+ }
+ }
+ return nullptr;
+}
+
+const StackMap* Code::lookupStackMap(uint8_t* nextPC) const {
+ for (Tier t : tiers()) {
+ const StackMap* result = metadata(t).stackMaps.findMap(nextPC);
+ if (result) {
+ return result;
+ }
+ }
+ return nullptr;
+}
+
+struct TrapSitePCOffset {
+ const TrapSiteVector& trapSites;
+ explicit TrapSitePCOffset(const TrapSiteVector& trapSites)
+ : trapSites(trapSites) {}
+ uint32_t operator[](size_t index) const { return trapSites[index].pcOffset; }
+};
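+
+// Like CallSiteRetAddrOffset and ProjectLazyFuncIndex above, this adapter
+// projects the sorted trap sites onto their pcOffset keys for BinarySearch.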
+
+bool Code::lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const {
+ for (Tier t : tiers()) {
+ const TrapSiteVectorArray& trapSitesArray = metadata(t).trapSites;
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ const TrapSiteVector& trapSites = trapSitesArray[trap];
+
+ uint32_t target = ((uint8_t*)pc) - segment(t).base();
+ size_t lowerBound = 0;
+ size_t upperBound = trapSites.length();
+
+ size_t match;
+ if (BinarySearch(TrapSitePCOffset(trapSites), lowerBound, upperBound,
+ target, &match)) {
+ MOZ_ASSERT(segment(t).containsCodePC(pc));
+ *trapOut = trap;
+ *bytecode = trapSites[match].bytecode;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+// When enabled, generate profiling labels for every name in funcNames_ that is
+// the name of some Function CodeRange. This involves malloc() so do it now
+// since, once we start sampling, we'll be in a signal-handling context where
+// we cannot malloc.
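+// The generated labels have the form "funcName (filename:lineOrBytecode)".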
+void Code::ensureProfilingLabels(bool profilingEnabled) const {
+ auto labels = profilingLabels_.lock();
+
+ if (!profilingEnabled) {
+ labels->clear();
+ return;
+ }
+
+ if (!labels->empty()) {
+ return;
+ }
+
+  // Any tier will do; we only need tier-invariant data that is incidentally
+  // stored with the code ranges.
+
+ for (const CodeRange& codeRange : metadata(stableTier()).codeRanges) {
+ if (!codeRange.isFunction()) {
+ continue;
+ }
+
+ ToCStringBuf cbuf;
+ const char* bytecodeStr =
+ NumberToCString(nullptr, &cbuf, codeRange.funcLineOrBytecode());
+ MOZ_ASSERT(bytecodeStr);
+
+ UTF8Bytes name;
+ if (!metadata().getFuncNameStandalone(codeRange.funcIndex(), &name)) {
+ return;
+ }
+ if (!name.append(" (", 2)) {
+ return;
+ }
+
+ if (const char* filename = metadata().filename.get()) {
+ if (!name.append(filename, strlen(filename))) {
+ return;
+ }
+ } else {
+ if (!name.append('?')) {
+ return;
+ }
+ }
+
+ if (!name.append(':') || !name.append(bytecodeStr, strlen(bytecodeStr)) ||
+ !name.append(")\0", 2)) {
+ return;
+ }
+
+ UniqueChars label(name.extractOrCopyRawBuffer());
+ if (!label) {
+ return;
+ }
+
+ if (codeRange.funcIndex() >= labels->length()) {
+ if (!labels->resize(codeRange.funcIndex() + 1)) {
+ return;
+ }
+ }
+
+ ((CacheableCharsVector&)labels)[codeRange.funcIndex()] = std::move(label);
+ }
+}
+
+const char* Code::profilingLabel(uint32_t funcIndex) const {
+ auto labels = profilingLabels_.lock();
+
+ if (funcIndex >= labels->length() ||
+ !((CacheableCharsVector&)labels)[funcIndex]) {
+ return "?";
+ }
+ return ((CacheableCharsVector&)labels)[funcIndex].get();
+}
+
+void Code::addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const {
+ auto p = seenCode->lookupForAdd(this);
+ if (p) {
+ return;
+ }
+ bool ok = seenCode->add(p, this);
+ (void)ok; // oh well
+
+ *data += mallocSizeOf(this) +
+ metadata().sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata) +
+ profilingLabels_.lock()->sizeOfExcludingThis(mallocSizeOf) +
+ jumpTables_.sizeOfMiscExcludingThis();
+
+ for (auto t : tiers()) {
+ codeTier(t).addSizeOfMisc(mallocSizeOf, code, data);
+ }
+}
+
+size_t Code::serializedSize() const {
+ return metadata().serializedSize() +
+ codeTier(Tier::Serialized).serializedSize();
+}
+
+uint8_t* Code::serialize(uint8_t* cursor, const LinkData& linkData) const {
+ MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
+
+ cursor = metadata().serialize(cursor);
+ cursor = codeTier(Tier::Serialized).serialize(cursor, linkData);
+ return cursor;
+}
+
+/* static */ const uint8_t* Code::deserialize(const uint8_t* cursor,
+ const LinkData& linkData,
+ Metadata& metadata,
+ SharedCode* out) {
+ cursor = metadata.deserialize(cursor);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ UniqueCodeTier codeTier;
+ cursor = CodeTier::deserialize(cursor, linkData, &codeTier);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ JumpTables jumpTables;
+ if (!jumpTables.init(CompileMode::Once, codeTier->segment(),
+ codeTier->metadata().codeRanges)) {
+ return nullptr;
+ }
+
+ MutableCode code =
+ js_new<Code>(std::move(codeTier), metadata, std::move(jumpTables));
+ if (!code || !code->initialize(linkData)) {
+ return nullptr;
+ }
+
+ *out = code;
+ return cursor;
+}
+
+void wasm::PatchDebugSymbolicAccesses(uint8_t* codeBase, MacroAssembler& masm) {
+#ifdef WASM_CODEGEN_DEBUG
+ for (auto& access : masm.symbolicAccesses()) {
+ switch (access.target) {
+ case SymbolicAddress::PrintI32:
+ case SymbolicAddress::PrintPtr:
+ case SymbolicAddress::PrintF32:
+ case SymbolicAddress::PrintF64:
+ case SymbolicAddress::PrintText:
+ break;
+ default:
+ MOZ_CRASH("unexpected symbol in PatchDebugSymbolicAccesses");
+ }
+ ABIFunctionType abiType;
+ void* target = AddressOf(access.target, &abiType);
+ uint8_t* patchAt = codeBase + access.patchAt.offset();
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr(target),
+ PatchedImmPtr((void*)-1));
+ }
+#else
+ MOZ_ASSERT(masm.symbolicAccesses().empty());
+#endif
+}
diff --git a/js/src/wasm/WasmCode.h b/js/src/wasm/WasmCode.h
new file mode 100644
index 0000000000..29315e9ae3
--- /dev/null
+++ b/js/src/wasm/WasmCode.h
@@ -0,0 +1,767 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_code_h
+#define wasm_code_h
+
+#include "gc/Memory.h"
+#include "jit/JitOptions.h"
+#include "jit/shared/Assembler-shared.h"
+#include "js/HashTable.h"
+#include "threading/ExclusiveData.h"
+#include "util/Memory.h"
+#include "vm/MutexIDs.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+
+struct AsmJSMetadata;
+
+namespace wasm {
+
+struct MetadataTier;
+struct Metadata;
+
+// LinkData contains all the metadata necessary to patch all the locations
+// that depend on the absolute address of a ModuleSegment. This happens in a
+// "linking" step after compilation and after the module's code is serialized.
+// The LinkData is serialized along with the Module but does not (normally, see
+// Module::debugLinkData_ comment) persist after (de)serialization, which
+// distinguishes it from Metadata, which is stored in the Code object.
+
+struct LinkDataCacheablePod {
+ uint32_t trapOffset = 0;
+
+ LinkDataCacheablePod() = default;
+};
+
+struct LinkData : LinkDataCacheablePod {
+ const Tier tier;
+
+ explicit LinkData(Tier tier) : tier(tier) {}
+
+ LinkDataCacheablePod& pod() { return *this; }
+ const LinkDataCacheablePod& pod() const { return *this; }
+
+ struct InternalLink {
+ uint32_t patchAtOffset;
+ uint32_t targetOffset;
+#ifdef JS_CODELABEL_LINKMODE
+ uint32_t mode;
+#endif
+ };
+ typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
+
+ struct SymbolicLinkArray
+ : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
+ WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
+ };
+
+ InternalLinkVector internalLinks;
+ SymbolicLinkArray symbolicLinks;
+
+ WASM_DECLARE_SERIALIZABLE(LinkData)
+};
+
+using UniqueLinkData = UniquePtr<LinkData>;
+
+// Executable code must be deallocated specially.
+
+struct FreeCode {
+ uint32_t codeLength;
+ FreeCode() : codeLength(0) {}
+ explicit FreeCode(uint32_t codeLength) : codeLength(codeLength) {}
+ void operator()(uint8_t* codeBytes);
+};
+
+using UniqueCodeBytes = UniquePtr<uint8_t, FreeCode>;
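+
+// Note that the FreeCode deleter stores the page-rounded allocation length
+// (set in CodeSegment::AllocateCodeBytes()), so a UniqueCodeBytes knows how
+// much executable memory to deallocate without consulting its CodeSegment.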
+
+class Code;
+class CodeTier;
+class ModuleSegment;
+class LazyStubSegment;
+
+// CodeSegment contains common helpers for determining the base and length of a
+// code segment and whether a pc belongs to this segment. It is inherited by:
+// - ModuleSegment, i.e. the code segment of a Module, generated
+// eagerly when a Module is instantiated.
+// - LazyStubSegment, i.e. the code segment of entry stubs that are lazily
+// generated.
+
+class CodeSegment {
+ protected:
+ static UniqueCodeBytes AllocateCodeBytes(uint32_t codeLength);
+
+ enum class Kind { LazyStubs, Module };
+
+ CodeSegment(UniqueCodeBytes bytes, uint32_t length, Kind kind)
+ : bytes_(std::move(bytes)),
+ length_(length),
+ kind_(kind),
+ codeTier_(nullptr),
+ unregisterOnDestroy_(false) {}
+
+ bool initialize(const CodeTier& codeTier);
+
+ private:
+ const UniqueCodeBytes bytes_;
+ const uint32_t length_;
+ const Kind kind_;
+ const CodeTier* codeTier_;
+ bool unregisterOnDestroy_;
+
+ public:
+ bool initialized() const { return !!codeTier_; }
+ ~CodeSegment();
+
+ bool isLazyStubs() const { return kind_ == Kind::LazyStubs; }
+ bool isModule() const { return kind_ == Kind::Module; }
+ const ModuleSegment* asModule() const {
+ MOZ_ASSERT(isModule());
+ return (ModuleSegment*)this;
+ }
+ const LazyStubSegment* asLazyStub() const {
+ MOZ_ASSERT(isLazyStubs());
+ return (LazyStubSegment*)this;
+ }
+
+ uint8_t* base() const { return bytes_.get(); }
+ uint32_t length() const {
+ MOZ_ASSERT(length_ != UINT32_MAX);
+ return length_;
+ }
+
+ bool containsCodePC(const void* pc) const {
+ return pc >= base() && pc < (base() + length_);
+ }
+
+ const CodeTier& codeTier() const {
+ MOZ_ASSERT(initialized());
+ return *codeTier_;
+ }
+ const Code& code() const;
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code) const;
+};
+
+// A wasm ModuleSegment owns the allocated executable code for a wasm module.
+
+using UniqueModuleSegment = UniquePtr<ModuleSegment>;
+
+enum IsTier2 { Tier2, NotTier2 };
+
+class ModuleSegment : public CodeSegment {
+ const Tier tier_;
+ uint8_t* const trapCode_;
+
+ public:
+ ModuleSegment(Tier tier, UniqueCodeBytes codeBytes, uint32_t codeLength,
+ const LinkData& linkData);
+
+ static UniqueModuleSegment create(Tier tier, jit::MacroAssembler& masm,
+ const LinkData& linkData);
+ static UniqueModuleSegment create(Tier tier, const Bytes& unlinkedBytes,
+ const LinkData& linkData);
+
+ bool initialize(IsTier2 compileMode, const CodeTier& codeTier,
+ const LinkData& linkData, const Metadata& metadata,
+ const MetadataTier& metadataTier);
+
+ Tier tier() const { return tier_; }
+
+ // Pointers to stubs to which PC is redirected from the signal-handler.
+
+ uint8_t* trapCode() const { return trapCode_; }
+
+ // Structured clone support:
+
+ size_t serializedSize() const;
+ uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
+ static const uint8_t* deserialize(const uint8_t* cursor,
+ const LinkData& linkData,
+ UniqueModuleSegment* segment);
+
+ const CodeRange* lookupRange(const void* pc) const;
+
+ void addSizeOfMisc(mozilla::MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+};
+
+// A FuncExport represents a single function definition inside a wasm Module
+// that has been exported one or more times. A FuncExport represents an
+// internal entry point that can be called via function definition index by
+// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
+// function definition index, the FuncExportVector is stored sorted by
+// function definition index.
+
+class FuncExport {
+ FuncType funcType_;
+ MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
+ uint32_t funcIndex_;
+ uint32_t eagerInterpEntryOffset_; // Machine code offset
+ bool hasEagerStubs_;
+ } pod;
+
+ public:
+ FuncExport() = default;
+ explicit FuncExport(FuncType&& funcType, uint32_t funcIndex,
+ bool hasEagerStubs)
+ : funcType_(std::move(funcType)) {
+ pod.funcIndex_ = funcIndex;
+ pod.eagerInterpEntryOffset_ = UINT32_MAX;
+ pod.hasEagerStubs_ = hasEagerStubs;
+ }
+ void initEagerInterpEntryOffset(uint32_t entryOffset) {
+ MOZ_ASSERT(pod.eagerInterpEntryOffset_ == UINT32_MAX);
+ MOZ_ASSERT(hasEagerStubs());
+ pod.eagerInterpEntryOffset_ = entryOffset;
+ }
+
+ bool hasEagerStubs() const { return pod.hasEagerStubs_; }
+ const FuncType& funcType() const { return funcType_; }
+ uint32_t funcIndex() const { return pod.funcIndex_; }
+ uint32_t eagerInterpEntryOffset() const {
+ MOZ_ASSERT(pod.eagerInterpEntryOffset_ != UINT32_MAX);
+ MOZ_ASSERT(hasEagerStubs());
+ return pod.eagerInterpEntryOffset_;
+ }
+
+ bool canHaveJitEntry() const {
+ return !funcType_.hasUnexposableArgOrRet() &&
+ !funcType_.temporarilyUnsupportedReftypeForEntry() &&
+ !funcType_.temporarilyUnsupportedResultCountForJitEntry() &&
+ JitOptions.enableWasmJitEntry;
+ }
+
+ bool clone(const FuncExport& src) {
+ mozilla::PodAssign(&pod, &src.pod);
+ return funcType_.clone(src.funcType_);
+ }
+
+ WASM_DECLARE_SERIALIZABLE(FuncExport)
+};
+
+typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
+
+// A FuncImport contains the runtime metadata needed to implement a call to an
+// imported function. Each function import has two call stubs: an optimized
+// path into JIT code and a slow path into the generic C++ js::Invoke. The
+// offsets of these stubs are stored so that function-import callsites can be
+// dynamically patched at runtime.
+
+class FuncImport {
+ FuncType funcType_;
+ struct CacheablePod {
+ uint32_t tlsDataOffset_;
+ uint32_t interpExitCodeOffset_; // Machine code offset
+ uint32_t jitExitCodeOffset_; // Machine code offset
+ } pod;
+
+ public:
+ FuncImport() { memset(&pod, 0, sizeof(CacheablePod)); }
+
+ FuncImport(FuncType&& funcType, uint32_t tlsDataOffset)
+ : funcType_(std::move(funcType)) {
+ pod.tlsDataOffset_ = tlsDataOffset;
+ pod.interpExitCodeOffset_ = 0;
+ pod.jitExitCodeOffset_ = 0;
+ }
+
+ void initInterpExitOffset(uint32_t off) {
+ MOZ_ASSERT(!pod.interpExitCodeOffset_);
+ pod.interpExitCodeOffset_ = off;
+ }
+ void initJitExitOffset(uint32_t off) {
+ MOZ_ASSERT(!pod.jitExitCodeOffset_);
+ pod.jitExitCodeOffset_ = off;
+ }
+
+ const FuncType& funcType() const { return funcType_; }
+ uint32_t tlsDataOffset() const { return pod.tlsDataOffset_; }
+ uint32_t interpExitCodeOffset() const { return pod.interpExitCodeOffset_; }
+ uint32_t jitExitCodeOffset() const { return pod.jitExitCodeOffset_; }
+
+ bool clone(const FuncImport& src) {
+ mozilla::PodAssign(&pod, &src.pod);
+ return funcType_.clone(src.funcType_);
+ }
+
+ WASM_DECLARE_SERIALIZABLE(FuncImport)
+};
+
+typedef Vector<FuncImport, 0, SystemAllocPolicy> FuncImportVector;
+
+// Metadata holds all the data that is needed to describe compiled wasm code
+// at runtime (as opposed to data that is only used to statically link or
+// instantiate a module).
+//
+// Metadata is built incrementally by ModuleGenerator and then shared immutably
+// between modules.
+//
+// The Metadata structure is split into tier-invariant and tier-variant parts;
+// the former points to instances of the latter. Additionally, the asm.js
+// subsystem subclasses the Metadata, adding more tier-invariant data, some of
+// which is serialized. See AsmJS.cpp.
+
+struct MetadataCacheablePod {
+ ModuleKind kind;
+ MemoryUsage memoryUsage;
+ uint64_t minMemoryLength;
+ uint32_t globalDataLength;
+ Maybe<uint64_t> maxMemoryLength;
+ Maybe<uint32_t> startFuncIndex;
+ Maybe<uint32_t> nameCustomSectionIndex;
+ bool filenameIsURL;
+ bool v128Enabled;
+ bool omitsBoundsChecks;
+ bool usesDuplicateImports;
+
+ explicit MetadataCacheablePod(ModuleKind kind)
+ : kind(kind),
+ memoryUsage(MemoryUsage::None),
+ minMemoryLength(0),
+ globalDataLength(0),
+ filenameIsURL(false),
+ v128Enabled(false),
+ omitsBoundsChecks(false),
+ usesDuplicateImports(false) {}
+};
+
+typedef uint8_t ModuleHash[8];
+typedef Vector<ValTypeVector, 0, SystemAllocPolicy> FuncArgTypesVector;
+typedef Vector<ValTypeVector, 0, SystemAllocPolicy> FuncReturnTypesVector;
+
+struct Metadata : public ShareableBase<Metadata>, public MetadataCacheablePod {
+ TypeDefWithIdVector types;
+ GlobalDescVector globals;
+ TableDescVector tables;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ EventDescVector events;
+#endif
+ CacheableChars filename;
+ CacheableChars sourceMapURL;
+
+ // namePayload points at the name section's CustomSection::payload so that
+  // the Names (which use payload-relative offsets) can be used
+ // independently of the Module without duplicating the name section.
+ SharedBytes namePayload;
+ Maybe<Name> moduleName;
+ NameVector funcNames;
+
+ // Debug-enabled code is not serialized.
+ bool debugEnabled;
+ FuncArgTypesVector debugFuncArgTypes;
+ FuncReturnTypesVector debugFuncReturnTypes;
+ ModuleHash debugHash;
+
+ explicit Metadata(ModuleKind kind = ModuleKind::Wasm)
+ : MetadataCacheablePod(kind), debugEnabled(false), debugHash() {}
+ virtual ~Metadata() = default;
+
+ MetadataCacheablePod& pod() { return *this; }
+ const MetadataCacheablePod& pod() const { return *this; }
+
+ bool usesMemory() const { return memoryUsage != MemoryUsage::None; }
+ bool usesSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
+
+ // Invariant: The result of getFuncResultType can only be used as long as
+  // Metadata is live, because the returned ResultType may encode a pointer to
+ // debugFuncReturnTypes.
+ ResultType getFuncResultType(uint32_t funcIndex) const {
+ return ResultType::Vector(debugFuncReturnTypes[funcIndex]);
+ };
+
+ // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
+ // encapsulated within AsmJS.cpp, but the additional virtual functions allow
+ // asm.js to override wasm behavior in the handful of cases that can't be
+ // easily encapsulated by AsmJS.cpp.
+
+ bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
+ const AsmJSMetadata& asAsmJS() const {
+ MOZ_ASSERT(isAsmJS());
+ return *(const AsmJSMetadata*)this;
+ }
+ virtual bool mutedErrors() const { return false; }
+ virtual const char16_t* displayURL() const { return nullptr; }
+ virtual ScriptSource* maybeScriptSource() const { return nullptr; }
+
+ // The Developer-Facing Display Conventions section of the WebAssembly Web
+ // API spec defines two cases for displaying a wasm function name:
+ // 1. the function name stands alone
+ // 2. the function name precedes the location
+
+ enum NameContext { Standalone, BeforeLocation };
+
+ virtual bool getFuncName(NameContext ctx, uint32_t funcIndex,
+ UTF8Bytes* name) const;
+
+ bool getFuncNameStandalone(uint32_t funcIndex, UTF8Bytes* name) const {
+ return getFuncName(NameContext::Standalone, funcIndex, name);
+ }
+ bool getFuncNameBeforeLocation(uint32_t funcIndex, UTF8Bytes* name) const {
+ return getFuncName(NameContext::BeforeLocation, funcIndex, name);
+ }
+
+ WASM_DECLARE_SERIALIZABLE(Metadata);
+};
+
+using MutableMetadata = RefPtr<Metadata>;
+using SharedMetadata = RefPtr<const Metadata>;
+
+struct MetadataTier {
+ explicit MetadataTier(Tier tier) : tier(tier) {}
+
+ const Tier tier;
+
+ Uint32Vector funcToCodeRange;
+ CodeRangeVector codeRanges;
+ CallSiteVector callSites;
+ TrapSiteVectorArray trapSites;
+ FuncImportVector funcImports;
+ FuncExportVector funcExports;
+ StackMaps stackMaps;
+
+ // Debug information, not serialized.
+ Uint32Vector debugTrapFarJumpOffsets;
+
+ FuncExport& lookupFuncExport(uint32_t funcIndex,
+ size_t* funcExportIndex = nullptr);
+ const FuncExport& lookupFuncExport(uint32_t funcIndex,
+ size_t* funcExportIndex = nullptr) const;
+
+ const CodeRange& codeRange(const FuncExport& funcExport) const {
+ return codeRanges[funcToCodeRange[funcExport.funcIndex()]];
+ }
+
+ bool clone(const MetadataTier& src);
+
+ WASM_DECLARE_SERIALIZABLE(MetadataTier);
+};
+
+using UniqueMetadataTier = UniquePtr<MetadataTier>;
+
+// LazyStubSegment is a code segment lazily generated for function entry stubs
+// (both interpreter and jit ones).
+//
+// Because a stub is usually small (a few KiB) and an executable code segment
+// isn't (64KiB), a given stub segment can contain entry stubs of many
+// functions.
+
+using UniqueLazyStubSegment = UniquePtr<LazyStubSegment>;
+using LazyStubSegmentVector =
+ Vector<UniqueLazyStubSegment, 0, SystemAllocPolicy>;
+
+class LazyStubSegment : public CodeSegment {
+ CodeRangeVector codeRanges_;
+ size_t usedBytes_;
+
+ public:
+ LazyStubSegment(UniqueCodeBytes bytes, size_t length)
+ : CodeSegment(std::move(bytes), length, CodeSegment::Kind::LazyStubs),
+ usedBytes_(0) {}
+
+ static UniqueLazyStubSegment create(const CodeTier& codeTier,
+ size_t codeLength);
+
+ static size_t AlignBytesNeeded(size_t bytes) {
+ return AlignBytes(bytes, gc::SystemPageSize());
+ }
+
+ bool hasSpace(size_t bytes) const;
+ bool addStubs(size_t codeLength, const Uint32Vector& funcExportIndices,
+ const FuncExportVector& funcExports,
+ const CodeRangeVector& codeRanges, uint8_t** codePtr,
+ size_t* indexFirstInsertedCodeRange);
+
+ const CodeRangeVector& codeRanges() const { return codeRanges_; }
+ const CodeRange* lookupRange(const void* pc) const;
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+};
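+
+// Worked example (illustrative, assuming AlignBytes rounds its argument up to
+// the next multiple of the alignment): with 4 KiB pages, AlignBytesNeeded(100)
+// yields 4096, so even a tiny stub consumes a full executable page unless it
+// can share an existing segment.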
+
+// LazyFuncExport supports efficient lookup of a CodeRange from a given
+// function index. Entries are inserted into a vector sorted by function
+// index, so that the vector can be binary searched later.
+
+struct LazyFuncExport {
+ size_t funcIndex;
+ size_t lazyStubSegmentIndex;
+ size_t funcCodeRangeIndex;
+ LazyFuncExport(size_t funcIndex, size_t lazyStubSegmentIndex,
+ size_t funcCodeRangeIndex)
+ : funcIndex(funcIndex),
+ lazyStubSegmentIndex(lazyStubSegmentIndex),
+ funcCodeRangeIndex(funcCodeRangeIndex) {}
+};
+
+using LazyFuncExportVector = Vector<LazyFuncExport, 0, SystemAllocPolicy>;
+
+// LazyStubTier contains all the necessary information for lazy function entry
+// stubs that are generated at runtime. None of its data is ever serialized.
+//
+// It must be protected by a lock, because the main thread can both read and
+// write lazy stubs at any time while a background thread can regenerate lazy
+// stubs for tier2 at any time.
+
+class LazyStubTier {
+ LazyStubSegmentVector stubSegments_;
+ LazyFuncExportVector exports_;
+ size_t lastStubSegmentIndex_;
+
+ bool createMany(const Uint32Vector& funcExportIndices,
+ const CodeTier& codeTier, bool flushAllThreadsIcaches,
+ size_t* stubSegmentIndex);
+
+ public:
+ LazyStubTier() : lastStubSegmentIndex_(0) {}
+
+ bool empty() const { return stubSegments_.empty(); }
+ bool hasStub(uint32_t funcIndex) const;
+
+  // Returns a pointer to the raw interpreter entry of a given function for
+  // which stubs have been lazily generated.
+ void* lookupInterpEntry(uint32_t funcIndex) const;
+
+ // Creates one lazy stub for the exported function, for which the jit entry
+ // will be set to the lazily-generated one.
+ bool createOne(uint32_t funcExportIndex, const CodeTier& codeTier);
+
+ // Create one lazy stub for all the functions in funcExportIndices, putting
+ // them in a single stub. Jit entries won't be used until
+ // setJitEntries() is actually called, after the Code owner has committed
+ // tier2.
+ bool createTier2(const Uint32Vector& funcExportIndices,
+ const CodeTier& codeTier, Maybe<size_t>* stubSegmentIndex);
+ void setJitEntries(const Maybe<size_t>& stubSegmentIndex, const Code& code);
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+};
+
+// CodeTier contains all the data related to a given compilation tier. It is
+// built during module generation and then immutably stored in a Code.
+
+using UniqueCodeTier = UniquePtr<CodeTier>;
+using UniqueConstCodeTier = UniquePtr<const CodeTier>;
+
+class CodeTier {
+ const Code* code_;
+
+ // Serialized information.
+ const UniqueMetadataTier metadata_;
+ const UniqueModuleSegment segment_;
+
+ // Lazy stubs, not serialized.
+ ExclusiveData<LazyStubTier> lazyStubs_;
+
+ static const MutexId& mutexForTier(Tier tier) {
+ if (tier == Tier::Baseline) {
+ return mutexid::WasmLazyStubsTier1;
+ }
+ MOZ_ASSERT(tier == Tier::Optimized);
+ return mutexid::WasmLazyStubsTier2;
+ }
+
+ public:
+ CodeTier(UniqueMetadataTier metadata, UniqueModuleSegment segment)
+ : code_(nullptr),
+ metadata_(std::move(metadata)),
+ segment_(std::move(segment)),
+ lazyStubs_(mutexForTier(segment_->tier())) {}
+
+ bool initialized() const { return !!code_ && segment_->initialized(); }
+ bool initialize(IsTier2 isTier2, const Code& code, const LinkData& linkData,
+ const Metadata& metadata);
+
+ Tier tier() const { return segment_->tier(); }
+ const ExclusiveData<LazyStubTier>& lazyStubs() const { return lazyStubs_; }
+ const MetadataTier& metadata() const { return *metadata_.get(); }
+ const ModuleSegment& segment() const { return *segment_.get(); }
+ const Code& code() const {
+ MOZ_ASSERT(initialized());
+ return *code_;
+ }
+
+ const CodeRange* lookupRange(const void* pc) const;
+
+ size_t serializedSize() const;
+ uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
+ static const uint8_t* deserialize(const uint8_t* cursor,
+ const LinkData& linkData,
+ UniqueCodeTier* codeTier);
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, size_t* code,
+ size_t* data) const;
+};
+
+// Jump tables that take tiering into account, when calling either from wasm
+// to wasm (through rabaldr) or from jit to wasm (jit entry).
+
+class JumpTables {
+ using TablePointer = mozilla::UniquePtr<void*[], JS::FreePolicy>;
+
+ CompileMode mode_;
+ TablePointer tiering_;
+ TablePointer jit_;
+ size_t numFuncs_;
+
+ public:
+ bool init(CompileMode mode, const ModuleSegment& ms,
+ const CodeRangeVector& codeRanges);
+
+ void setJitEntry(size_t i, void* target) const {
+ // Make sure that write is atomic; see comment in wasm::Module::finishTier2
+ // to that effect.
+ MOZ_ASSERT(i < numFuncs_);
+ jit_.get()[i] = target;
+ }
+ void setJitEntryIfNull(size_t i, void* target) const {
+ // Make sure that compare-and-write is atomic; see comment in
+ // wasm::Module::finishTier2 to that effect.
+ MOZ_ASSERT(i < numFuncs_);
+ void* expected = nullptr;
+ (void)__atomic_compare_exchange_n(&jit_.get()[i], &expected, target,
+ /*weak=*/false, __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+ }
+ void** getAddressOfJitEntry(size_t i) const {
+ MOZ_ASSERT(i < numFuncs_);
+ MOZ_ASSERT(jit_.get()[i]);
+ return &jit_.get()[i];
+ }
+ size_t funcIndexFromJitEntry(void** target) const {
+ MOZ_ASSERT(target >= &jit_.get()[0]);
+ MOZ_ASSERT(target <= &(jit_.get()[numFuncs_ - 1]));
+ return (intptr_t*)target - (intptr_t*)&jit_.get()[0];
+ }
+
+ void setTieringEntry(size_t i, void* target) const {
+ MOZ_ASSERT(i < numFuncs_);
+ // See comment in wasm::Module::finishTier2.
+ if (mode_ == CompileMode::Tier1) {
+ tiering_.get()[i] = target;
+ }
+ }
+ void** tiering() const { return tiering_.get(); }
+
+ size_t sizeOfMiscExcludingThis() const {
+ // 2 words per function for the jit entry table, plus maybe 1 per
+ // function if we're tiering.
+ return sizeof(void*) * (2 + (tiering_ ? 1 : 0)) * numFuncs_;
+ }
+};
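+
+// Illustrative sketch (not from the original source): setJitEntryIfNull above
+// performs the moral equivalent of a relaxed compare-exchange on a
+// hypothetical std::atomic<void*> slot:
+//
+//  void* expected = nullptr;
+//  slot.compare_exchange_strong(expected, target, std::memory_order_relaxed);
+//
+// The table itself stays a plain void*[] (see getAddressOfJitEntry), so the
+// code uses the __atomic_compare_exchange_n builtin on the raw slot instead.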
+
+// Code objects own executable code and the metadata that describe it. A single
+// Code object is normally shared between a module and all its instances.
+//
+// profilingLabels_ is lazily initialized, but behind a lock.
+
+using SharedCode = RefPtr<const Code>;
+using MutableCode = RefPtr<Code>;
+
+class Code : public ShareableBase<Code> {
+ UniqueCodeTier tier1_;
+ mutable UniqueConstCodeTier tier2_; // Access only when hasTier2() is true
+ mutable Atomic<bool> hasTier2_;
+ SharedMetadata metadata_;
+ ExclusiveData<CacheableCharsVector> profilingLabels_;
+ JumpTables jumpTables_;
+
+ public:
+ Code(UniqueCodeTier tier1, const Metadata& metadata,
+ JumpTables&& maybeJumpTables);
+ bool initialized() const { return tier1_->initialized(); }
+
+ bool initialize(const LinkData& linkData);
+
+ void setTieringEntry(size_t i, void* target) const {
+ jumpTables_.setTieringEntry(i, target);
+ }
+ void** tieringJumpTable() const { return jumpTables_.tiering(); }
+
+ void setJitEntry(size_t i, void* target) const {
+ jumpTables_.setJitEntry(i, target);
+ }
+ void setJitEntryIfNull(size_t i, void* target) const {
+ jumpTables_.setJitEntryIfNull(i, target);
+ }
+ void** getAddressOfJitEntry(size_t i) const {
+ return jumpTables_.getAddressOfJitEntry(i);
+ }
+ uint32_t getFuncIndex(JSFunction* fun) const;
+
+ bool setTier2(UniqueCodeTier tier2, const LinkData& linkData) const;
+ void commitTier2() const;
+
+ bool hasTier2() const { return hasTier2_; }
+ Tiers tiers() const;
+ bool hasTier(Tier t) const;
+
+ Tier stableTier() const; // This is stable during a run
+ Tier bestTier()
+ const; // This may transition from Baseline -> Ion at any time
+
+ const CodeTier& codeTier(Tier tier) const;
+ const Metadata& metadata() const { return *metadata_; }
+
+ const ModuleSegment& segment(Tier iter) const {
+ return codeTier(iter).segment();
+ }
+ const MetadataTier& metadata(Tier iter) const {
+ return codeTier(iter).metadata();
+ }
+
+ // Metadata lookup functions:
+
+ const CallSite* lookupCallSite(void* returnAddress) const;
+ const CodeRange* lookupFuncRange(void* pc) const;
+ const StackMap* lookupStackMap(uint8_t* nextPC) const;
+ bool containsCodePC(const void* pc) const;
+ bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;
+
+ // To save memory, profilingLabels_ are generated lazily when profiling mode
+ // is enabled.
+
+ void ensureProfilingLabels(bool profilingEnabled) const;
+ const char* profilingLabel(uint32_t funcIndex) const;
+
+ // about:memory reporting:
+
+ void addSizeOfMiscIfNotSeen(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const;
+
+ // A Code object is serialized as the length and bytes of the machine code
+ // after statically unlinking it; the Code is then later recreated from the
+ // machine code and other parts.
+
+ size_t serializedSize() const;
+ uint8_t* serialize(uint8_t* cursor, const LinkData& linkData) const;
+ static const uint8_t* deserialize(const uint8_t* cursor,
+ const LinkData& linkData,
+ Metadata& metadata, SharedCode* code);
+};
+
+void PatchDebugSymbolicAccesses(uint8_t* codeBase, jit::MacroAssembler& masm);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_code_h
diff --git a/js/src/wasm/WasmCompile.cpp b/js/src/wasm/WasmCompile.cpp
new file mode 100644
index 0000000000..f1f4061e74
--- /dev/null
+++ b/js/src/wasm/WasmCompile.cpp
@@ -0,0 +1,790 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCompile.h"
+
+#include "mozilla/Maybe.h"
+#include "mozilla/Unused.h"
+
+#include <algorithm>
+
+#include "jit/ProcessExecutableMemory.h"
+#include "util/Text.h"
+#include "vm/HelperThreadState.h"
+#include "vm/Realm.h"
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmCraneliftCompile.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmValidate.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+uint32_t wasm::ObservedCPUFeatures() {
+ enum Arch {
+ X86 = 0x1,
+ X64 = 0x2,
+ ARM = 0x3,
+ MIPS = 0x4,
+ MIPS64 = 0x5,
+ ARM64 = 0x6,
+ ARCH_BITS = 3
+ };
+
+#if defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <=
+ (UINT32_MAX >> ARCH_BITS));
+ return X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+#elif defined(JS_CODEGEN_X64)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <=
+ (UINT32_MAX >> ARCH_BITS));
+ return X64 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+#elif defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return ARM | (jit::GetARMFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_ASSERT(jit::GetARM64Flags() <= (UINT32_MAX >> ARCH_BITS));
+ return ARM64 | (jit::GetARM64Flags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_MIPS32)
+ MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_NONE)
+ return 0;
+#else
+# error "unknown architecture"
+#endif
+}
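+
+// Illustrative decoding (not from the original source), assuming the packing
+// shown above (arch tag in the low ARCH_BITS bits, CPU flags above them):
+//
+//   uint32_t features = ObservedCPUFeatures();
+//   uint32_t archTag = features & 0x7;   // one of the Arch values (3 bits)
+//   uint32_t cpuFlags = features >> 3;   // SSE / ARM / MIPS flag bits
+//
+// Serialized code should only be reused when both parts match at load time.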
+
+FeatureArgs FeatureArgs::build(JSContext* cx) {
+ FeatureArgs features;
+ features.sharedMemory =
+ wasm::ThreadsAvailable(cx) ? Shareable::True : Shareable::False;
+ features.refTypes = wasm::ReftypesAvailable(cx);
+ features.functionReferences = wasm::FunctionReferencesAvailable(cx);
+ features.gcTypes = wasm::GcTypesAvailable(cx);
+ features.multiValue = wasm::MultiValuesAvailable(cx);
+ features.v128 = wasm::SimdAvailable(cx);
+ features.hugeMemory = wasm::IsHugeMemoryEnabled();
+ features.simdWormhole = wasm::SimdWormholeAvailable(cx);
+ features.exceptions = wasm::ExceptionsAvailable(cx);
+ return features;
+}
+
+SharedCompileArgs CompileArgs::build(JSContext* cx,
+ ScriptedCaller&& scriptedCaller) {
+ bool baseline = BaselineAvailable(cx);
+ bool ion = IonAvailable(cx);
+ bool cranelift = CraneliftAvailable(cx);
+
+ // At most one optimizing compiler.
+ MOZ_RELEASE_ASSERT(!(ion && cranelift));
+
+ // Debug information such as source view or debug traps will require
+ // additional memory and permanently stay in baseline code, so we try to
+ // only enable it when a developer actually cares: when the debugger tab
+ // is open.
+ bool debug = cx->realm() && cx->realm()->debuggerObservesAsmJS();
+
+ bool forceTiering =
+ cx->options().testWasmAwaitTier2() || JitOptions.wasmDelayTier2;
+
+ // The <Compiler>Available() predicates should ensure no failure here, but
+ // when we're fuzzing we allow inconsistent switches and the check may thus
+ // fail. Let it go to a run-time error instead of crashing.
+ if (debug && (ion || cranelift)) {
+ JS_ReportErrorASCII(cx, "no WebAssembly compiler available");
+ return nullptr;
+ }
+
+ if (forceTiering && !(baseline && (cranelift || ion))) {
+ // This can happen only in testing, and in this case we don't have a
+ // proper way to signal the error, so just silently override the default,
+ // instead of adding a skip-if directive to every test using debug/gc.
+ forceTiering = false;
+ }
+
+ if (!(baseline || ion || cranelift)) {
+ JS_ReportErrorASCII(cx, "no WebAssembly compiler available");
+ return nullptr;
+ }
+
+ CompileArgs* target = cx->new_<CompileArgs>(std::move(scriptedCaller));
+ if (!target) {
+ return nullptr;
+ }
+
+ target->baselineEnabled = baseline;
+ target->ionEnabled = ion;
+ target->craneliftEnabled = cranelift;
+ target->debugEnabled = debug;
+ target->forceTiering = forceTiering;
+ target->features = FeatureArgs::build(cx);
+
+ Log(cx, "available wasm compilers: tier1=%s tier2=%s",
+ baseline ? "baseline" : "none",
+ ion ? "ion" : (cranelift ? "cranelift" : "none"));
+
+ return target;
+}
+
+// Classify the current system as one of a set of recognizable classes. This
+// really needs to get our tier-1 systems right.
+//
+// TODO: We don't yet have a good measure of how fast a system is. We
+// distinguish between mobile and desktop because these are very different kinds
+// of systems, but we could further distinguish between low / medium / high end
+// within those major classes. If we do so, then constants below would be
+// provided for each (class, architecture, system-tier) combination, not just
+// (class, architecture) as now.
+//
+// CPU clock speed is not by itself a good predictor of system performance, as
+// there are high-performance systems with slow clocks (recent Intel) and
+// low-performance systems with fast clocks (older AMD). We can also use
+// physical memory, core configuration, OS details, CPU class and family, and
+// CPU manufacturer to disambiguate.
+
+enum class SystemClass {
+ DesktopX86,
+ DesktopX64,
+ DesktopUnknown32,
+ DesktopUnknown64,
+ MobileX86,
+ MobileArm32,
+ MobileArm64,
+ MobileUnknown32,
+ MobileUnknown64
+};
+
+static SystemClass ClassifySystem() {
+ bool isDesktop;
+
+#if defined(ANDROID) || defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ isDesktop = false;
+#else
+ isDesktop = true;
+#endif
+
+ if (isDesktop) {
+#if defined(JS_CODEGEN_X64)
+ return SystemClass::DesktopX64;
+#elif defined(JS_CODEGEN_X86)
+ return SystemClass::DesktopX86;
+#elif defined(JS_64BIT)
+ return SystemClass::DesktopUnknown64;
+#else
+ return SystemClass::DesktopUnknown32;
+#endif
+ } else {
+#if defined(JS_CODEGEN_X86)
+ return SystemClass::MobileX86;
+#elif defined(JS_CODEGEN_ARM)
+ return SystemClass::MobileArm32;
+#elif defined(JS_CODEGEN_ARM64)
+ return SystemClass::MobileArm64;
+#elif defined(JS_64BIT)
+ return SystemClass::MobileUnknown64;
+#else
+ return SystemClass::MobileUnknown32;
+#endif
+ }
+}
+
+// Code sizes in machine code bytes per bytecode byte, again empirical except
+// where marked.
+//
+// The Ion estimate for ARM64 is the measured Baseline value scaled by a
+// plausible factor for optimized code.
+
+static const double x64Tox86Inflation = 1.25;
+
+static const double x64IonBytesPerBytecode = 2.45;
+static const double x86IonBytesPerBytecode =
+ x64IonBytesPerBytecode * x64Tox86Inflation;
+static const double arm32IonBytesPerBytecode = 3.3;
+static const double arm64IonBytesPerBytecode = 3.0 / 1.4; // Estimate
+
+static const double x64BaselineBytesPerBytecode = x64IonBytesPerBytecode * 1.43;
+static const double x86BaselineBytesPerBytecode =
+ x64BaselineBytesPerBytecode * x64Tox86Inflation;
+static const double arm32BaselineBytesPerBytecode =
+ arm32IonBytesPerBytecode * 1.39;
+static const double arm64BaselineBytesPerBytecode = 3.0;
+
+static double OptimizedBytesPerBytecode(SystemClass cls) {
+ switch (cls) {
+ case SystemClass::DesktopX86:
+ case SystemClass::MobileX86:
+ case SystemClass::DesktopUnknown32:
+ return x86IonBytesPerBytecode;
+ case SystemClass::DesktopX64:
+ case SystemClass::DesktopUnknown64:
+ return x64IonBytesPerBytecode;
+ case SystemClass::MobileArm32:
+ case SystemClass::MobileUnknown32:
+ return arm32IonBytesPerBytecode;
+ case SystemClass::MobileArm64:
+ case SystemClass::MobileUnknown64:
+ return arm64IonBytesPerBytecode;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+static double BaselineBytesPerBytecode(SystemClass cls) {
+ switch (cls) {
+ case SystemClass::DesktopX86:
+ case SystemClass::MobileX86:
+ case SystemClass::DesktopUnknown32:
+ return x86BaselineBytesPerBytecode;
+ case SystemClass::DesktopX64:
+ case SystemClass::DesktopUnknown64:
+ return x64BaselineBytesPerBytecode;
+ case SystemClass::MobileArm32:
+ case SystemClass::MobileUnknown32:
+ return arm32BaselineBytesPerBytecode;
+ case SystemClass::MobileArm64:
+ case SystemClass::MobileUnknown64:
+ return arm64BaselineBytesPerBytecode;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+double wasm::EstimateCompiledCodeSize(Tier tier, size_t bytecodeSize) {
+ SystemClass cls = ClassifySystem();
+ switch (tier) {
+ case Tier::Baseline:
+ return double(bytecodeSize) * BaselineBytesPerBytecode(cls);
+ case Tier::Optimized:
+ return double(bytecodeSize) * OptimizedBytesPerBytecode(cls);
+ }
+ MOZ_CRASH("bad tier");
+}
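+
+// Worked example (illustrative, using the constants above): on a DesktopX64
+// system, a module with 1 MB of bytecode is estimated at about
+// 1 MB * 2.45 = 2.45 MB of Ion code and 1 MB * 2.45 * 1.43 = ~3.5 MB of
+// Baseline code.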
+
+// If parallel Ion compilation is going to take longer than this, we should
+// tier.
+
+static const double tierCutoffMs = 10;
+
+// Compilation rate values are empirical except when noted, the reference
+// systems are:
+//
+// Late-2013 MacBook Pro (2.6GHz 4 x hyperthreaded Haswell, Mac OS X)
+// Late-2015 Nexus 5X (1.4GHz 4 x Cortex-A53 + 1.8GHz 2 x Cortex-A57, Android)
+// Ca-2016 SoftIron Overdrive 1000 (1.7GHz 4 x Cortex-A57, Fedora)
+//
+// The rates are always per core.
+//
+// The estimate for ARM64 is the Baseline compilation rate on the SoftIron
+// (because we have no Ion yet), divided by 5 to estimate Ion compile rate and
+// then divided by 2 to make it more reasonable for consumer ARM64 systems.
+
+static const double x64IonBytecodesPerMs = 2100;
+static const double x86IonBytecodesPerMs = 1500;
+static const double arm32IonBytecodesPerMs = 450;
+static const double arm64IonBytecodesPerMs = 750; // Estimate
+
+// Tiering cutoff values: if code section sizes are below these values (when
+// divided by the effective number of cores) we do not tier, because we guess
+// that parallel Ion compilation will be fast enough.
+
+static const double x64DesktopTierCutoff = x64IonBytecodesPerMs * tierCutoffMs;
+static const double x86DesktopTierCutoff = x86IonBytecodesPerMs * tierCutoffMs;
+static const double x86MobileTierCutoff = x86DesktopTierCutoff / 2; // Guess
+static const double arm32MobileTierCutoff =
+ arm32IonBytecodesPerMs * tierCutoffMs;
+static const double arm64MobileTierCutoff =
+ arm64IonBytecodesPerMs * tierCutoffMs;
+
+static double CodesizeCutoff(SystemClass cls) {
+ switch (cls) {
+ case SystemClass::DesktopX86:
+ case SystemClass::DesktopUnknown32:
+ return x86DesktopTierCutoff;
+ case SystemClass::DesktopX64:
+ case SystemClass::DesktopUnknown64:
+ return x64DesktopTierCutoff;
+ case SystemClass::MobileX86:
+ return x86MobileTierCutoff;
+ case SystemClass::MobileArm32:
+ case SystemClass::MobileUnknown32:
+ return arm32MobileTierCutoff;
+ case SystemClass::MobileArm64:
+ case SystemClass::MobileUnknown64:
+ return arm64MobileTierCutoff;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// As the number of cores grows the effectiveness of each core dwindles (on the
+// systems we care about for SpiderMonkey).
+//
+// The data are empirical, computed from the observed compilation time of the
+// Tanks demo code on a variable number of cores.
+//
+// The heuristic may fail on NUMA systems where the core count is high but the
+// performance increase is nil or negative once the program moves beyond one
+// socket. However, few browser users have such systems.
+
+static double EffectiveCores(uint32_t cores) {
+ if (cores <= 3) {
+ return pow(cores, 0.9);
+ }
+ return pow(cores, 0.75);
+}
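+
+// Worked example (illustrative): EffectiveCores(2) = 2^0.9 = ~1.87,
+// EffectiveCores(4) = 4^0.75 = ~2.83, and EffectiveCores(8) = 8^0.75 = ~4.76,
+// so doubling the core count less than doubles the assumed throughput.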
+
+#ifndef JS_64BIT
+// Don't tier if tiering will fill code memory to more than this fraction.
+
+static const double spaceCutoffPct = 0.9;
+#endif
+
+// Figure out whether we should use tiered compilation or not.
+static bool TieringBeneficial(uint32_t codeSize) {
+ uint32_t cpuCount = HelperThreadState().cpuCount;
+ MOZ_ASSERT(cpuCount > 0);
+
+ // It's mostly sensible not to background compile when there's only one
+ // hardware thread as we want foreground computation to have access to that.
+ // However, if wasm background compilation helper threads can be given lower
+ // priority then background compilation on single-core systems still makes
+ // some kind of sense. That said, this is a non-issue: as of September 2017
+ // 1-core was down to 3.5% of our population and falling.
+
+ if (cpuCount == 1) {
+ return false;
+ }
+
+ MOZ_ASSERT(HelperThreadState().threadCount >= cpuCount);
+
+ // Compute the max number of threads available to do actual background
+ // compilation work.
+
+ uint32_t workers = HelperThreadState().maxWasmCompilationThreads();
+
+ // The number of cores we will use is bounded both by the CPU count and the
+ // worker count.
+
+ uint32_t cores = std::min(cpuCount, workers);
+
+ SystemClass cls = ClassifySystem();
+
+ // Ion compilation on available cores must take long enough to be worth the
+ // bother.
+
+ double cutoffSize = CodesizeCutoff(cls);
+ double effectiveCores = EffectiveCores(cores);
+
+ if ((codeSize / effectiveCores) < cutoffSize) {
+ return false;
+ }
+
+ // Do not implement a size cutoff for 64-bit systems since the code size
+ // budget for 64 bit is so large that it will hardly ever be an issue.
+ // (Also the cutoff percentage might be different on 64-bit.)
+
+#ifndef JS_64BIT
+ // If the amount of executable code for baseline compilation jeopardizes the
+ // availability of executable memory for ion code then do not tier, for now.
+ //
+ // TODO: For now we consider this module in isolation. We should really
+ // worry about what else is going on in this process and might be filling up
+ // the code memory. It's like we need some kind of code memory reservation
+ // system or JIT compilation for large modules.
+
+ double ionRatio = OptimizedBytesPerBytecode(cls);
+ double baselineRatio = BaselineBytesPerBytecode(cls);
+ double needMemory = codeSize * (ionRatio + baselineRatio);
+ double availMemory = LikelyAvailableExecutableMemory();
+ double cutoff = spaceCutoffPct * MaxCodeBytesPerProcess;
+
+  // If the sum of baseline and ion code makes us exceed some set percentage
+ // of the executable memory then disable tiering.
+
+ if ((MaxCodeBytesPerProcess - availMemory) + needMemory > cutoff) {
+ return false;
+ }
+#endif
+
+ return true;
+}
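+
+// Worked example (illustrative, using the constants above): on DesktopX64 the
+// cutoff is x64IonBytecodesPerMs * tierCutoffMs = 2100 * 10 = 21000 bytecode
+// bytes per effective core, so with 4 usable cores (EffectiveCores(4) = ~2.83)
+// tiering kicks in only for code sections larger than about
+// 2.83 * 21000 = ~59 KB, subject on 32-bit systems to the executable-memory
+// check above.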
+
+CompilerEnvironment::CompilerEnvironment(const CompileArgs& args)
+ : state_(InitialWithArgs), args_(&args) {}
+
+CompilerEnvironment::CompilerEnvironment(CompileMode mode, Tier tier,
+ OptimizedBackend optimizedBackend,
+ DebugEnabled debugEnabled)
+ : state_(InitialWithModeTierDebug),
+ mode_(mode),
+ tier_(tier),
+ optimizedBackend_(optimizedBackend),
+ debug_(debugEnabled) {}
+
+void CompilerEnvironment::computeParameters() {
+ MOZ_ASSERT(state_ == InitialWithModeTierDebug);
+
+ state_ = Computed;
+}
+
+// Check that this architecture either:
+// - is cache-coherent, which is the case for most tier-1 architectures we care
+// about.
+// - or has the ability to invalidate the instruction cache of all threads, so
+// background compilation in tiered compilation can be synchronized across all
+// threads.
+static bool IsICacheSafe() {
+#ifdef JS_CODEGEN_ARM64
+ return jit::CanFlushICacheFromBackgroundThreads();
+#else
+ return true;
+#endif
+}
+
+void CompilerEnvironment::computeParameters(Decoder& d) {
+ MOZ_ASSERT(!isComputed());
+
+ if (state_ == InitialWithModeTierDebug) {
+ computeParameters();
+ return;
+ }
+
+ bool baselineEnabled = args_->baselineEnabled;
+ bool ionEnabled = args_->ionEnabled;
+ bool debugEnabled = args_->debugEnabled;
+ bool craneliftEnabled = args_->craneliftEnabled;
+ bool forceTiering = args_->forceTiering;
+
+ bool hasSecondTier = ionEnabled || craneliftEnabled;
+ MOZ_ASSERT_IF(debugEnabled, baselineEnabled);
+ MOZ_ASSERT_IF(forceTiering, baselineEnabled && hasSecondTier);
+
+ // Various constraints in various places should prevent failure here.
+ MOZ_RELEASE_ASSERT(baselineEnabled || ionEnabled || craneliftEnabled);
+ MOZ_RELEASE_ASSERT(!(ionEnabled && craneliftEnabled));
+
+ uint32_t codeSectionSize = 0;
+
+ SectionRange range;
+ if (StartsCodeSection(d.begin(), d.end(), &range)) {
+ codeSectionSize = range.size;
+ }
+
+ if (baselineEnabled && hasSecondTier && CanUseExtraThreads() &&
+ (TieringBeneficial(codeSectionSize) || forceTiering) && IsICacheSafe()) {
+ mode_ = CompileMode::Tier1;
+ tier_ = Tier::Baseline;
+ } else {
+ mode_ = CompileMode::Once;
+ tier_ = hasSecondTier ? Tier::Optimized : Tier::Baseline;
+ }
+
+ optimizedBackend_ =
+ craneliftEnabled ? OptimizedBackend::Cranelift : OptimizedBackend::Ion;
+
+ debug_ = debugEnabled ? DebugEnabled::True : DebugEnabled::False;
+
+ state_ = Computed;
+}
+
+template <class DecoderT>
+static bool DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg,
+ uint32_t funcIndex) {
+ uint32_t bodySize;
+ if (!d.readVarU32(&bodySize)) {
+ return d.fail("expected number of function body bytes");
+ }
+
+ if (bodySize > MaxFunctionBytes) {
+ return d.fail("function body too big");
+ }
+
+ const size_t offsetInModule = d.currentOffset();
+
+ // Skip over the function body; it will be validated by the compilation
+ // thread.
+ const uint8_t* bodyBegin;
+ if (!d.readBytes(bodySize, &bodyBegin)) {
+ return d.fail("function body length too big");
+ }
+
+ return mg.compileFuncDef(funcIndex, offsetInModule, bodyBegin,
+ bodyBegin + bodySize);
+}
+
+template <class DecoderT>
+static bool DecodeCodeSection(const ModuleEnvironment& env, DecoderT& d,
+ ModuleGenerator& mg) {
+ if (!env.codeSection) {
+ if (env.numFuncDefs() != 0) {
+ return d.fail("expected code section");
+ }
+
+ return mg.finishFuncDefs();
+ }
+
+ uint32_t numFuncDefs;
+ if (!d.readVarU32(&numFuncDefs)) {
+ return d.fail("expected function body count");
+ }
+
+ if (numFuncDefs != env.numFuncDefs()) {
+ return d.fail(
+ "function body count does not match function signature count");
+ }
+
+ for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
+ if (!DecodeFunctionBody(d, mg, env.numFuncImports() + funcDefIndex)) {
+ return false;
+ }
+ }
+
+ if (!d.finishSection(*env.codeSection, "code")) {
+ return false;
+ }
+
+ return mg.finishFuncDefs();
+}
+
+SharedModule wasm::CompileBuffer(const CompileArgs& args,
+ const ShareableBytes& bytecode,
+ UniqueChars* error,
+ UniqueCharsVector* warnings,
+ JS::OptimizedEncodingListener* listener,
+ JSTelemetrySender telemetrySender) {
+ Decoder d(bytecode.bytes, 0, error, warnings);
+
+ ModuleEnvironment moduleEnv(args.features);
+ if (!DecodeModuleEnvironment(d, &moduleEnv)) {
+ return nullptr;
+ }
+ CompilerEnvironment compilerEnv(args);
+ compilerEnv.computeParameters(d);
+
+ ModuleGenerator mg(args, &moduleEnv, &compilerEnv, nullptr, error);
+ if (!mg.init(nullptr, telemetrySender)) {
+ return nullptr;
+ }
+
+ if (!DecodeCodeSection(moduleEnv, d, mg)) {
+ return nullptr;
+ }
+
+ if (!DecodeModuleTail(d, &moduleEnv)) {
+ return nullptr;
+ }
+
+ return mg.finishModule(bytecode, listener);
+}
+
+void wasm::CompileTier2(const CompileArgs& args, const Bytes& bytecode,
+ const Module& module, Atomic<bool>* cancelled,
+ JSTelemetrySender telemetrySender) {
+ UniqueChars error;
+ Decoder d(bytecode, 0, &error);
+
+ OptimizedBackend optimizedBackend = args.craneliftEnabled
+ ? OptimizedBackend::Cranelift
+ : OptimizedBackend::Ion;
+
+ ModuleEnvironment moduleEnv(args.features);
+ if (!DecodeModuleEnvironment(d, &moduleEnv)) {
+ return;
+ }
+ CompilerEnvironment compilerEnv(CompileMode::Tier2, Tier::Optimized,
+ optimizedBackend, DebugEnabled::False);
+ compilerEnv.computeParameters(d);
+
+ ModuleGenerator mg(args, &moduleEnv, &compilerEnv, cancelled, &error);
+ if (!mg.init(nullptr, telemetrySender)) {
+ return;
+ }
+
+ if (!DecodeCodeSection(moduleEnv, d, mg)) {
+ return;
+ }
+
+ if (!DecodeModuleTail(d, &moduleEnv)) {
+ return;
+ }
+
+ if (!mg.finishTier2(module)) {
+ return;
+ }
+
+ // The caller doesn't care about success or failure; only that compilation
+ // is inactive, so there is no success to return here.
+}
+
+class StreamingDecoder {
+ Decoder d_;
+ const ExclusiveBytesPtr& codeBytesEnd_;
+ const Atomic<bool>& cancelled_;
+
+ public:
+ StreamingDecoder(const ModuleEnvironment& env, const Bytes& begin,
+ const ExclusiveBytesPtr& codeBytesEnd,
+ const Atomic<bool>& cancelled, UniqueChars* error,
+ UniqueCharsVector* warnings)
+ : d_(begin, env.codeSection->start, error, warnings),
+ codeBytesEnd_(codeBytesEnd),
+ cancelled_(cancelled) {}
+
+ bool fail(const char* msg) { return d_.fail(msg); }
+
+ bool done() const { return d_.done(); }
+
+ size_t currentOffset() const { return d_.currentOffset(); }
+
+ bool waitForBytes(size_t numBytes) {
+ numBytes = std::min(numBytes, d_.bytesRemain());
+ const uint8_t* requiredEnd = d_.currentPosition() + numBytes;
+ auto codeBytesEnd = codeBytesEnd_.lock();
+ while (codeBytesEnd < requiredEnd) {
+ if (cancelled_) {
+ return false;
+ }
+ codeBytesEnd.wait();
+ }
+ return true;
+ }
+
+ bool readVarU32(uint32_t* u32) {
+ return waitForBytes(MaxVarU32DecodedBytes) && d_.readVarU32(u32);
+ }
+
+ bool readBytes(size_t size, const uint8_t** begin) {
+ return waitForBytes(size) && d_.readBytes(size, begin);
+ }
+
+ bool finishSection(const SectionRange& range, const char* name) {
+ return d_.finishSection(range, name);
+ }
+};
+
+static SharedBytes CreateBytecode(const Bytes& env, const Bytes& code,
+ const Bytes& tail, UniqueChars* error) {
+ size_t size = env.length() + code.length() + tail.length();
+ if (size > MaxModuleBytes) {
+ *error = DuplicateString("module too big");
+ return nullptr;
+ }
+
+ MutableBytes bytecode = js_new<ShareableBytes>();
+ if (!bytecode || !bytecode->bytes.resize(size)) {
+ return nullptr;
+ }
+
+ uint8_t* p = bytecode->bytes.begin();
+
+ memcpy(p, env.begin(), env.length());
+ p += env.length();
+
+ memcpy(p, code.begin(), code.length());
+ p += code.length();
+
+ memcpy(p, tail.begin(), tail.length());
+ p += tail.length();
+
+ MOZ_ASSERT(p == bytecode->end());
+
+ return bytecode;
+}
+
+SharedModule wasm::CompileStreaming(
+ const CompileArgs& args, const Bytes& envBytes, const Bytes& codeBytes,
+ const ExclusiveBytesPtr& codeBytesEnd,
+ const ExclusiveStreamEndData& exclusiveStreamEnd,
+ const Atomic<bool>& cancelled, UniqueChars* error,
+ UniqueCharsVector* warnings, JSTelemetrySender telemetrySender) {
+ CompilerEnvironment compilerEnv(args);
+ ModuleEnvironment moduleEnv(args.features);
+
+ {
+ Decoder d(envBytes, 0, error, warnings);
+
+ if (!DecodeModuleEnvironment(d, &moduleEnv)) {
+ return nullptr;
+ }
+ compilerEnv.computeParameters(d);
+
+ if (!moduleEnv.codeSection) {
+ d.fail("unknown section before code section");
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(moduleEnv.codeSection->size == codeBytes.length());
+ MOZ_RELEASE_ASSERT(d.done());
+ }
+
+ ModuleGenerator mg(args, &moduleEnv, &compilerEnv, &cancelled, error);
+ if (!mg.init(nullptr, telemetrySender)) {
+ return nullptr;
+ }
+
+ {
+ StreamingDecoder d(moduleEnv, codeBytes, codeBytesEnd, cancelled, error,
+ warnings);
+
+ if (!DecodeCodeSection(moduleEnv, d, mg)) {
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(d.done());
+ }
+
+ {
+ auto streamEnd = exclusiveStreamEnd.lock();
+ while (!streamEnd->reached) {
+ if (cancelled) {
+ return nullptr;
+ }
+ streamEnd.wait();
+ }
+ }
+
+ const StreamEndData& streamEnd = exclusiveStreamEnd.lock();
+ const Bytes& tailBytes = *streamEnd.tailBytes;
+
+ {
+ Decoder d(tailBytes, moduleEnv.codeSection->end(), error, warnings);
+
+ if (!DecodeModuleTail(d, &moduleEnv)) {
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(d.done());
+ }
+
+ SharedBytes bytecode = CreateBytecode(envBytes, codeBytes, tailBytes, error);
+ if (!bytecode) {
+ return nullptr;
+ }
+
+ return mg.finishModule(*bytecode, streamEnd.tier2Listener);
+}
diff --git a/js/src/wasm/WasmCompile.h b/js/src/wasm/WasmCompile.h
new file mode 100644
index 0000000000..c0f6023392
--- /dev/null
+++ b/js/src/wasm/WasmCompile.h
@@ -0,0 +1,145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_compile_h
+#define wasm_compile_h
+
+#include "vm/Runtime.h"
+#include "wasm/WasmModule.h"
+
+namespace js {
+namespace wasm {
+
+// Return a uint32_t which captures the observed properties of the CPU that
+// affect compilation. If code compiled now is to be serialized and executed
+// later, ObservedCPUFeatures() must return the same value at load time.
+
+uint32_t ObservedCPUFeatures();
+
+// Describes the JS scripted caller of a request to compile a wasm module.
+
+struct ScriptedCaller {
+ UniqueChars filename;
+ bool filenameIsURL;
+ unsigned line;
+
+ ScriptedCaller() : filenameIsURL(false), line(0) {}
+};
+
+// Describes all the parameters that control wasm compilation.
+
+struct CompileArgs;
+using MutableCompileArgs = RefPtr<CompileArgs>;
+using SharedCompileArgs = RefPtr<const CompileArgs>;
+
+struct CompileArgs : ShareableBase<CompileArgs> {
+ ScriptedCaller scriptedCaller;
+ UniqueChars sourceMapURL;
+
+ bool baselineEnabled;
+ bool ionEnabled;
+ bool craneliftEnabled;
+ bool debugEnabled;
+ bool forceTiering;
+
+ FeatureArgs features;
+
+ // CompileArgs has two constructors:
+ //
+ // - one through a factory function `build`, which checks that flags are
+ // consistent with each other.
+ // - one that gives complete access to underlying fields.
+ //
+ // You should use the first one in general, unless you have a very good
+ // reason (i.e. no JSContext around and you know which flags have been used).
+
+ static SharedCompileArgs build(JSContext* cx,
+ ScriptedCaller&& scriptedCaller);
+
+ explicit CompileArgs(ScriptedCaller&& scriptedCaller)
+ : scriptedCaller(std::move(scriptedCaller)),
+ baselineEnabled(false),
+ ionEnabled(false),
+ craneliftEnabled(false),
+ debugEnabled(false),
+ forceTiering(false) {}
+};
+
+// Return the estimated compiled (machine) code size for the given bytecode size
+// compiled at the given tier.
+
+double EstimateCompiledCodeSize(Tier tier, size_t bytecodeSize);
+
+// Compile the given WebAssembly bytecode with the given arguments into a
+// wasm::Module. On success, the Module is returned. On failure, the returned
+// SharedModule pointer is null and either:
+// - *error points to a string description of the error
+// - *error is null and the caller should report out-of-memory.
+
+SharedModule CompileBuffer(
+ const CompileArgs& args, const ShareableBytes& bytecode, UniqueChars* error,
+ UniqueCharsVector* warnings,
+ JS::OptimizedEncodingListener* listener = nullptr,
+ JSTelemetrySender telemetrySender = JSTelemetrySender());
+
+// Attempt to compile the second tier of the given wasm::Module.
+
+void CompileTier2(const CompileArgs& args, const Bytes& bytecode,
+ const Module& module, Atomic<bool>* cancelled,
+ JSTelemetrySender telemetrySender = JSTelemetrySender());
+
+// Compile the given WebAssembly module which has been broken into three
+// partitions:
+// - envBytes contains a complete ModuleEnvironment that has already been
+// copied in from the stream.
+// - codeBytes is pre-sized to hold the complete code section when the stream
+// completes.
+// - The range [codeBytes.begin(), codeBytesEnd) contains the bytes currently
+// read from the stream and codeBytesEnd will advance until either
+// the stream is cancelled or codeBytesEnd == codeBytes.end().
+// - streamEnd contains the final information received after the code section:
+// the remaining module bytecodes and maybe a JS::OptimizedEncodingListener.
+// When the stream is successfully closed, streamEnd.reached is set.
+// The ExclusiveWaitableData are notified when CompileStreaming() can make
+// progress (i.e., codeBytesEnd advances or streamEnd.reached is set).
+// If cancelled is set to true, compilation aborts and returns null. After
+// cancellation is set, both ExclusiveWaitableData will be notified and so every
+// wait() loop must check cancelled.
+
+using ExclusiveBytesPtr = ExclusiveWaitableData<const uint8_t*>;
+
+struct StreamEndData {
+ bool reached;
+ const Bytes* tailBytes;
+ Tier2Listener tier2Listener;
+
+ StreamEndData() : reached(false) {}
+};
+using ExclusiveStreamEndData = ExclusiveWaitableData<StreamEndData>;
+
+SharedModule CompileStreaming(
+ const CompileArgs& args, const Bytes& envBytes, const Bytes& codeBytes,
+ const ExclusiveBytesPtr& codeBytesEnd,
+ const ExclusiveStreamEndData& streamEnd, const Atomic<bool>& cancelled,
+ UniqueChars* error, UniqueCharsVector* warnings,
+ JSTelemetrySender telemetrySender = JSTelemetrySender());
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_compile_h
diff --git a/js/src/wasm/WasmConstants.h b/js/src/wasm/WasmConstants.h
new file mode 100644
index 0000000000..f7964858e3
--- /dev/null
+++ b/js/src/wasm/WasmConstants.h
@@ -0,0 +1,1008 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_binary_h
+#define wasm_binary_h
+
+namespace js {
+namespace wasm {
+
+static const uint32_t MagicNumber = 0x6d736100; // "\0asm"
+static const uint32_t EncodingVersion = 0x01;
+
+enum class SectionId {
+ Custom = 0,
+ Type = 1,
+ Import = 2,
+ Function = 3,
+ Table = 4,
+ Memory = 5,
+ Global = 6,
+ Export = 7,
+ Start = 8,
+ Elem = 9,
+ Code = 10,
+ Data = 11,
+ DataCount = 12,
+#ifdef ENABLE_WASM_EXCEPTIONS
+ Event = 13,
+#endif
+ GcFeatureOptIn = 42 // Arbitrary, but fits in 7 bits
+};
+
+// WebAssembly type encodings are all single-byte negative SLEB128s, hence:
+// forall tc:TypeCode. ((tc & SLEB128SignMask) == SLEB128SignBit)
+static const uint8_t SLEB128SignMask = 0xc0;
+static const uint8_t SLEB128SignBit = 0x40;
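+
+// Illustrative check (not from the original source): the single-byte SLEB128
+// encoding of a value c in [-0x40, 0) is (c & 0x7f), which leaves bit 7 clear
+// and bit 6 set, so for example:
+//
+//   static_assert((0x7f & SLEB128SignMask) == SLEB128SignBit,
+//                 "I32 (0x7f) is SLEB128(-0x01)");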
+
+enum class TypeCode {
+
+ // If more "simple primitive" (non-reference, non-constructor,
+ // non-special-purpose) types are added here then you MUST update
+ // LowestPrimitiveTypeCode, below.
+
+ I32 = 0x7f, // SLEB128(-0x01)
+ I64 = 0x7e, // SLEB128(-0x02)
+ F32 = 0x7d, // SLEB128(-0x03)
+ F64 = 0x7c, // SLEB128(-0x04)
+ V128 = 0x7b, // SLEB128(-0x05)
+
+ // A function pointer with any signature
+ FuncRef = 0x70, // SLEB128(-0x10)
+
+ // A reference to any host value.
+ ExternRef = 0x6f, // SLEB128(-0x11)
+
+ // A reference to a struct/array value.
+  EqRef = 0x6d, // SLEB128(-0x13)
+
+ // Type constructor for nullable reference types.
+ NullableRef = 0x6c, // SLEB128(-0x14)
+
+ // Type constructor for non-nullable reference types.
+ Ref = 0x6b, // SLEB128(-0x15)
+
+ // Type constructor for function types
+ Func = 0x60, // SLEB128(-0x20)
+
+ // Type constructor for structure types - unofficial
+ Struct = 0x5f, // SLEB128(-0x21)
+
+ // The 'empty' case of blocktype.
+ BlockVoid = 0x40, // SLEB128(-0x40)
+
+ Limit = 0x80
+};
+
+// This is the lowest-valued TypeCode that is a primitive type, used in
+// UnpackTypeCodeTypeAbstracted(). If primitive typecodes are added below any
+// reference typecode then the logic in that function MUST change.
+
+static constexpr TypeCode LowestPrimitiveTypeCode = TypeCode::V128;
+
+// An arbitrary reference type used as the result of
+// UnpackTypeCodeTypeAbstracted() when a value type is a reference.
+
+static constexpr TypeCode AbstractReferenceTypeCode = TypeCode::ExternRef;
+
+// A type code used to represent (ref null? typeindex) whether or not the type
+// is encoded with 'Ref' or 'NullableRef'.
+
+static constexpr TypeCode AbstractReferenceTypeIndexCode = TypeCode::Ref;
+
+enum class TypeIdDescKind { None, Immediate, Global };
+
+// A wasm::Trap represents a wasm-defined trap that can occur during execution
+// which triggers a WebAssembly.RuntimeError. Generated code may jump to a Trap
+// symbolically, passing the bytecode offset to report as the trap offset. The
+// generated jump will be bound to a tiny stub which fills the offset and
+// then jumps to a per-Trap shared stub at the end of the module.
+
+enum class Trap {
+ // The Unreachable opcode has been executed.
+ Unreachable,
+ // An integer arithmetic operation led to an overflow.
+ IntegerOverflow,
+ // Trying to coerce NaN to an integer.
+ InvalidConversionToInteger,
+ // Integer division by zero.
+ IntegerDivideByZero,
+ // Out of bounds on wasm memory accesses.
+ OutOfBounds,
+ // Unaligned on wasm atomic accesses; also used for non-standard ARM
+ // unaligned access faults.
+ UnalignedAccess,
+ // call_indirect to null.
+ IndirectCallToNull,
+ // call_indirect signature mismatch.
+ IndirectCallBadSig,
+ // Dereference null pointer in operation on (Ref T)
+ NullPointerDereference,
+
+ // The internal stack space was exhausted. For compatibility, this throws
+ // the same over-recursed error as JS.
+ StackOverflow,
+
+ // The wasm execution has potentially run too long and the engine must call
+ // CheckForInterrupt(). This trap is resumable.
+ CheckInterrupt,
+
+ // Signal an error that was reported in C++ code.
+ ThrowReported,
+
+ Limit
+};
+
+// The representation of a null reference value throughout the compiler.
+
+static const intptr_t NULLREF_VALUE = intptr_t((void*)nullptr);
+
+enum class DefinitionKind {
+ Function = 0x00,
+ Table = 0x01,
+ Memory = 0x02,
+ Global = 0x03,
+#ifdef ENABLE_WASM_EXCEPTIONS
+ Event = 0x04,
+#endif
+};
+
+enum class GlobalTypeImmediate { IsMutable = 0x1, AllowedMask = 0x1 };
+
+enum class MemoryTableFlags {
+ Default = 0x0,
+ HasMaximum = 0x1,
+ IsShared = 0x2,
+};
+
+enum class MemoryMasks { AllowUnshared = 0x1, AllowShared = 0x3 };
+
+enum class DataSegmentKind {
+ Active = 0x00,
+ Passive = 0x01,
+ ActiveWithMemoryIndex = 0x02
+};
+
+enum class ElemSegmentKind : uint32_t {
+ Active = 0x0,
+ Passive = 0x1,
+ ActiveWithTableIndex = 0x2,
+ Declared = 0x3,
+};
+
+enum class ElemSegmentPayload : uint32_t {
+ ExternIndex = 0x0,
+ ElemExpression = 0x4,
+};
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+enum class EventKind {
+ Exception = 0x0,
+};
+#endif
+
+enum class Op {
+ // Control flow operators
+ Unreachable = 0x00,
+ Nop = 0x01,
+ Block = 0x02,
+ Loop = 0x03,
+ If = 0x04,
+ Else = 0x05,
+#ifdef ENABLE_WASM_EXCEPTIONS
+ Try = 0x06,
+ Catch = 0x07,
+ Throw = 0x08,
+#endif
+ End = 0x0b,
+ Br = 0x0c,
+ BrIf = 0x0d,
+ BrTable = 0x0e,
+ Return = 0x0f,
+
+ // Call operators
+ Call = 0x10,
+ CallIndirect = 0x11,
+
+ // Parametric operators
+ Drop = 0x1a,
+ SelectNumeric = 0x1b,
+ SelectTyped = 0x1c,
+
+ // Variable access
+ GetLocal = 0x20,
+ SetLocal = 0x21,
+ TeeLocal = 0x22,
+ GetGlobal = 0x23,
+ SetGlobal = 0x24,
+ TableGet = 0x25, // Reftypes,
+ TableSet = 0x26, // per proposal as of February 2019
+
+ // Memory-related operators
+ I32Load = 0x28,
+ I64Load = 0x29,
+ F32Load = 0x2a,
+ F64Load = 0x2b,
+ I32Load8S = 0x2c,
+ I32Load8U = 0x2d,
+ I32Load16S = 0x2e,
+ I32Load16U = 0x2f,
+ I64Load8S = 0x30,
+ I64Load8U = 0x31,
+ I64Load16S = 0x32,
+ I64Load16U = 0x33,
+ I64Load32S = 0x34,
+ I64Load32U = 0x35,
+ I32Store = 0x36,
+ I64Store = 0x37,
+ F32Store = 0x38,
+ F64Store = 0x39,
+ I32Store8 = 0x3a,
+ I32Store16 = 0x3b,
+ I64Store8 = 0x3c,
+ I64Store16 = 0x3d,
+ I64Store32 = 0x3e,
+ MemorySize = 0x3f,
+ MemoryGrow = 0x40,
+
+ // Constants
+ I32Const = 0x41,
+ I64Const = 0x42,
+ F32Const = 0x43,
+ F64Const = 0x44,
+
+ // Comparison operators
+ I32Eqz = 0x45,
+ I32Eq = 0x46,
+ I32Ne = 0x47,
+ I32LtS = 0x48,
+ I32LtU = 0x49,
+ I32GtS = 0x4a,
+ I32GtU = 0x4b,
+ I32LeS = 0x4c,
+ I32LeU = 0x4d,
+ I32GeS = 0x4e,
+ I32GeU = 0x4f,
+ I64Eqz = 0x50,
+ I64Eq = 0x51,
+ I64Ne = 0x52,
+ I64LtS = 0x53,
+ I64LtU = 0x54,
+ I64GtS = 0x55,
+ I64GtU = 0x56,
+ I64LeS = 0x57,
+ I64LeU = 0x58,
+ I64GeS = 0x59,
+ I64GeU = 0x5a,
+ F32Eq = 0x5b,
+ F32Ne = 0x5c,
+ F32Lt = 0x5d,
+ F32Gt = 0x5e,
+ F32Le = 0x5f,
+ F32Ge = 0x60,
+ F64Eq = 0x61,
+ F64Ne = 0x62,
+ F64Lt = 0x63,
+ F64Gt = 0x64,
+ F64Le = 0x65,
+ F64Ge = 0x66,
+
+ // Numeric operators
+ I32Clz = 0x67,
+ I32Ctz = 0x68,
+ I32Popcnt = 0x69,
+ I32Add = 0x6a,
+ I32Sub = 0x6b,
+ I32Mul = 0x6c,
+ I32DivS = 0x6d,
+ I32DivU = 0x6e,
+ I32RemS = 0x6f,
+ I32RemU = 0x70,
+ I32And = 0x71,
+ I32Or = 0x72,
+ I32Xor = 0x73,
+ I32Shl = 0x74,
+ I32ShrS = 0x75,
+ I32ShrU = 0x76,
+ I32Rotl = 0x77,
+ I32Rotr = 0x78,
+ I64Clz = 0x79,
+ I64Ctz = 0x7a,
+ I64Popcnt = 0x7b,
+ I64Add = 0x7c,
+ I64Sub = 0x7d,
+ I64Mul = 0x7e,
+ I64DivS = 0x7f,
+ I64DivU = 0x80,
+ I64RemS = 0x81,
+ I64RemU = 0x82,
+ I64And = 0x83,
+ I64Or = 0x84,
+ I64Xor = 0x85,
+ I64Shl = 0x86,
+ I64ShrS = 0x87,
+ I64ShrU = 0x88,
+ I64Rotl = 0x89,
+ I64Rotr = 0x8a,
+ F32Abs = 0x8b,
+ F32Neg = 0x8c,
+ F32Ceil = 0x8d,
+ F32Floor = 0x8e,
+ F32Trunc = 0x8f,
+ F32Nearest = 0x90,
+ F32Sqrt = 0x91,
+ F32Add = 0x92,
+ F32Sub = 0x93,
+ F32Mul = 0x94,
+ F32Div = 0x95,
+ F32Min = 0x96,
+ F32Max = 0x97,
+ F32CopySign = 0x98,
+ F64Abs = 0x99,
+ F64Neg = 0x9a,
+ F64Ceil = 0x9b,
+ F64Floor = 0x9c,
+ F64Trunc = 0x9d,
+ F64Nearest = 0x9e,
+ F64Sqrt = 0x9f,
+ F64Add = 0xa0,
+ F64Sub = 0xa1,
+ F64Mul = 0xa2,
+ F64Div = 0xa3,
+ F64Min = 0xa4,
+ F64Max = 0xa5,
+ F64CopySign = 0xa6,
+
+ // Conversions
+ I32WrapI64 = 0xa7,
+ I32TruncSF32 = 0xa8,
+ I32TruncUF32 = 0xa9,
+ I32TruncSF64 = 0xaa,
+ I32TruncUF64 = 0xab,
+ I64ExtendSI32 = 0xac,
+ I64ExtendUI32 = 0xad,
+ I64TruncSF32 = 0xae,
+ I64TruncUF32 = 0xaf,
+ I64TruncSF64 = 0xb0,
+ I64TruncUF64 = 0xb1,
+ F32ConvertSI32 = 0xb2,
+ F32ConvertUI32 = 0xb3,
+ F32ConvertSI64 = 0xb4,
+ F32ConvertUI64 = 0xb5,
+ F32DemoteF64 = 0xb6,
+ F64ConvertSI32 = 0xb7,
+ F64ConvertUI32 = 0xb8,
+ F64ConvertSI64 = 0xb9,
+ F64ConvertUI64 = 0xba,
+ F64PromoteF32 = 0xbb,
+
+ // Reinterpretations
+ I32ReinterpretF32 = 0xbc,
+ I64ReinterpretF64 = 0xbd,
+ F32ReinterpretI32 = 0xbe,
+ F64ReinterpretI64 = 0xbf,
+
+ // Sign extension
+ I32Extend8S = 0xc0,
+ I32Extend16S = 0xc1,
+ I64Extend8S = 0xc2,
+ I64Extend16S = 0xc3,
+ I64Extend32S = 0xc4,
+
+ // Reference types
+ RefNull = 0xd0,
+ RefIsNull = 0xd1,
+ RefFunc = 0xd2,
+
+ // Function references
+ RefAsNonNull = 0xd3,
+ BrOnNull = 0xd4,
+
+ // GC (experimental)
+ RefEq = 0xd5,
+
+ FirstPrefix = 0xfb,
+ GcPrefix = 0xfb,
+ MiscPrefix = 0xfc,
+ SimdPrefix = 0xfd,
+ ThreadPrefix = 0xfe,
+ MozPrefix = 0xff,
+
+ Limit = 0x100
+};
+
+inline bool IsPrefixByte(uint8_t b) { return b >= uint8_t(Op::FirstPrefix); }
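+
+// Note (illustrative, per the wasm binary format): a prefix byte never stands
+// alone; the concrete operation is selected by a sub-opcode immediate that
+// follows it, e.g. MiscPrefix (0xfc) followed by a MiscOp value such as
+// MemCopy (0x0a).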
+
+// Opcodes in the GC opcode space.
+enum class GcOp {
+ // Structure operations
+ StructNew = 0x00,
+ StructGet = 0x03,
+ StructSet = 0x06,
+ StructNarrow = 0x07,
+
+ Limit
+};
+
+// Opcode list from the SIMD proposal post-renumbering in May, 2020.
+
+// Opcodes with suffix 'Experimental' are proposed but not standardized, and are
+// compatible with those same opcodes in V8. No opcode labeled 'Experimental'
+// will ship in a Release build where SIMD is enabled by default.
+
+enum class SimdOp {
+ V128Load = 0x00,
+ I16x8LoadS8x8 = 0x01,
+ I16x8LoadU8x8 = 0x02,
+ I32x4LoadS16x4 = 0x03,
+ I32x4LoadU16x4 = 0x04,
+ I64x2LoadS32x2 = 0x05,
+ I64x2LoadU32x2 = 0x06,
+ V8x16LoadSplat = 0x07,
+ V16x8LoadSplat = 0x08,
+ V32x4LoadSplat = 0x09,
+ V64x2LoadSplat = 0x0a,
+ V128Store = 0x0b,
+ V128Const = 0x0c,
+ V8x16Shuffle = 0x0d,
+ V8x16Swizzle = 0x0e,
+ I8x16Splat = 0x0f,
+ I16x8Splat = 0x10,
+ I32x4Splat = 0x11,
+ I64x2Splat = 0x12,
+ F32x4Splat = 0x13,
+ F64x2Splat = 0x14,
+ I8x16ExtractLaneS = 0x15,
+ I8x16ExtractLaneU = 0x16,
+ I8x16ReplaceLane = 0x17,
+ I16x8ExtractLaneS = 0x18,
+ I16x8ExtractLaneU = 0x19,
+ I16x8ReplaceLane = 0x1a,
+ I32x4ExtractLane = 0x1b,
+ I32x4ReplaceLane = 0x1c,
+ I64x2ExtractLane = 0x1d,
+ I64x2ReplaceLane = 0x1e,
+ F32x4ExtractLane = 0x1f,
+ F32x4ReplaceLane = 0x20,
+ F64x2ExtractLane = 0x21,
+ F64x2ReplaceLane = 0x22,
+ I8x16Eq = 0x23,
+ I8x16Ne = 0x24,
+ I8x16LtS = 0x25,
+ I8x16LtU = 0x26,
+ I8x16GtS = 0x27,
+ I8x16GtU = 0x28,
+ I8x16LeS = 0x29,
+ I8x16LeU = 0x2a,
+ I8x16GeS = 0x2b,
+ I8x16GeU = 0x2c,
+ I16x8Eq = 0x2d,
+ I16x8Ne = 0x2e,
+ I16x8LtS = 0x2f,
+ I16x8LtU = 0x30,
+ I16x8GtS = 0x31,
+ I16x8GtU = 0x32,
+ I16x8LeS = 0x33,
+ I16x8LeU = 0x34,
+ I16x8GeS = 0x35,
+ I16x8GeU = 0x36,
+ I32x4Eq = 0x37,
+ I32x4Ne = 0x38,
+ I32x4LtS = 0x39,
+ I32x4LtU = 0x3a,
+ I32x4GtS = 0x3b,
+ I32x4GtU = 0x3c,
+ I32x4LeS = 0x3d,
+ I32x4LeU = 0x3e,
+ I32x4GeS = 0x3f,
+ I32x4GeU = 0x40,
+ F32x4Eq = 0x41,
+ F32x4Ne = 0x42,
+ F32x4Lt = 0x43,
+ F32x4Gt = 0x44,
+ F32x4Le = 0x45,
+ F32x4Ge = 0x46,
+ F64x2Eq = 0x47,
+ F64x2Ne = 0x48,
+ F64x2Lt = 0x49,
+ F64x2Gt = 0x4a,
+ F64x2Le = 0x4b,
+ F64x2Ge = 0x4c,
+ V128Not = 0x4d,
+ V128And = 0x4e,
+ V128AndNot = 0x4f,
+ V128Or = 0x50,
+ V128Xor = 0x51,
+ V128Bitselect = 0x52,
+ // Unused = 0x53
+ // Unused = 0x54
+ // Unused = 0x55
+ // Unused = 0x56
+ // Unused = 0x57
+ // Unused = 0x58
+ // Unused = 0x59
+ // Unused = 0x5a
+ // Unused = 0x5b
+ // Unused = 0x5c
+ // Unused = 0x5d
+ // Unused = 0x5e
+ // Unused = 0x5f
+ I8x16Abs = 0x60,
+ I8x16Neg = 0x61,
+ I8x16AnyTrue = 0x62,
+ I8x16AllTrue = 0x63,
+ I8x16Bitmask = 0x64,
+ I8x16NarrowSI16x8 = 0x65,
+ I8x16NarrowUI16x8 = 0x66,
+ // Widen = 0x67
+ // Widen = 0x68
+ // Widen = 0x69
+ // Widen = 0x6a
+ I8x16Shl = 0x6b,
+ I8x16ShrS = 0x6c,
+ I8x16ShrU = 0x6d,
+ I8x16Add = 0x6e,
+ I8x16AddSaturateS = 0x6f,
+ I8x16AddSaturateU = 0x70,
+ I8x16Sub = 0x71,
+ I8x16SubSaturateS = 0x72,
+ I8x16SubSaturateU = 0x73,
+ // Dot = 0x74
+ // Mul = 0x75
+ I8x16MinS = 0x76,
+ I8x16MinU = 0x77,
+ I8x16MaxS = 0x78,
+ I8x16MaxU = 0x79,
+ // AvgrS = 0x7a
+ I8x16AvgrU = 0x7b,
+ // Unused = 0x7c
+ // Unused = 0x7d
+ // Unused = 0x7e
+ // Unused = 0x7f
+ I16x8Abs = 0x80,
+ I16x8Neg = 0x81,
+ I16x8AnyTrue = 0x82,
+ I16x8AllTrue = 0x83,
+ I16x8Bitmask = 0x84,
+ I16x8NarrowSI32x4 = 0x85,
+ I16x8NarrowUI32x4 = 0x86,
+ I16x8WidenLowSI8x16 = 0x87,
+ I16x8WidenHighSI8x16 = 0x88,
+ I16x8WidenLowUI8x16 = 0x89,
+ I16x8WidenHighUI8x16 = 0x8a,
+ I16x8Shl = 0x8b,
+ I16x8ShrS = 0x8c,
+ I16x8ShrU = 0x8d,
+ I16x8Add = 0x8e,
+ I16x8AddSaturateS = 0x8f,
+ I16x8AddSaturateU = 0x90,
+ I16x8Sub = 0x91,
+ I16x8SubSaturateS = 0x92,
+ I16x8SubSaturateU = 0x93,
+ // Dot = 0x94
+ I16x8Mul = 0x95,
+ I16x8MinS = 0x96,
+ I16x8MinU = 0x97,
+ I16x8MaxS = 0x98,
+ I16x8MaxU = 0x99,
+ // AvgrS = 0x9a
+ I16x8AvgrU = 0x9b,
+ // Unused = 0x9c
+ // Unused = 0x9d
+ // Unused = 0x9e
+ // Unused = 0x9f
+ I32x4Abs = 0xa0,
+ I32x4Neg = 0xa1,
+ I32x4AnyTrue = 0xa2,
+ I32x4AllTrue = 0xa3,
+ I32x4Bitmask = 0xa4,
+ // Narrow = 0xa5
+ // Narrow = 0xa6
+ I32x4WidenLowSI16x8 = 0xa7,
+ I32x4WidenHighSI16x8 = 0xa8,
+ I32x4WidenLowUI16x8 = 0xa9,
+ I32x4WidenHighUI16x8 = 0xaa,
+ I32x4Shl = 0xab,
+ I32x4ShrS = 0xac,
+ I32x4ShrU = 0xad,
+ I32x4Add = 0xae,
+ // AddSatS = 0xaf
+ // AddSatU = 0xb0
+ I32x4Sub = 0xb1,
+ // SubSatS = 0xb2
+ // SubSatU = 0xb3
+ // Dot = 0xb4
+ I32x4Mul = 0xb5,
+ I32x4MinS = 0xb6,
+ I32x4MinU = 0xb7,
+ I32x4MaxS = 0xb8,
+ I32x4MaxU = 0xb9,
+ I32x4DotSI16x8 = 0xba,
+ // AvgrU = 0xbb
+ // Unused = 0xbc
+ // Unused = 0xbd
+ // Unused = 0xbe
+ // Unused = 0xbf
+ // Abs = 0xc0
+ I64x2Neg = 0xc1,
+ // AnyTrue = 0xc2
+ // AllTrue = 0xc3
+ // Bitmask = 0xc4
+ // Narrow = 0xc5
+ // Narrow = 0xc6
+ // Widen = 0xc7
+ // Widen = 0xc8
+ // Widen = 0xc9
+ // Widen = 0xca
+ I64x2Shl = 0xcb,
+ I64x2ShrS = 0xcc,
+ I64x2ShrU = 0xcd,
+ I64x2Add = 0xce,
+ // AddSatS = 0xcf
+ // AddSatU = 0xd0
+ I64x2Sub = 0xd1,
+ // SubSatS = 0xd2
+ // SubSatU = 0xd3
+ // Dot = 0xd4
+ I64x2Mul = 0xd5,
+ // MinS = 0xd6
+ // MinU = 0xd7
+ F32x4Ceil = 0xd8,
+ F32x4Floor = 0xd9,
+ F32x4Trunc = 0xda,
+ F32x4Nearest = 0xdb,
+ F64x2Ceil = 0xdc,
+ F64x2Floor = 0xdd,
+ F64x2Trunc = 0xde,
+ F64x2Nearest = 0xdf,
+ F32x4Abs = 0xe0,
+ F32x4Neg = 0xe1,
+ // Round = 0xe2
+ F32x4Sqrt = 0xe3,
+ F32x4Add = 0xe4,
+ F32x4Sub = 0xe5,
+ F32x4Mul = 0xe6,
+ F32x4Div = 0xe7,
+ F32x4Min = 0xe8,
+ F32x4Max = 0xe9,
+ F32x4PMin = 0xea,
+ F32x4PMax = 0xeb,
+ F64x2Abs = 0xec,
+ F64x2Neg = 0xed,
+ // Round = 0xee
+ F64x2Sqrt = 0xef,
+ F64x2Add = 0xf0,
+ F64x2Sub = 0xf1,
+ F64x2Mul = 0xf2,
+ F64x2Div = 0xf3,
+ F64x2Min = 0xf4,
+ F64x2Max = 0xf5,
+ F64x2PMin = 0xf6,
+ F64x2PMax = 0xf7,
+ I32x4TruncSSatF32x4 = 0xf8,
+ I32x4TruncUSatF32x4 = 0xf9,
+ F32x4ConvertSI32x4 = 0xfa,
+ F32x4ConvertUI32x4 = 0xfb,
+ V128Load32Zero = 0xfc,
+ V128Load64Zero = 0xfd,
+  // Unused = 0xfe and up
+
+// Mozilla extensions, highly experimental and platform-specific
+#ifdef ENABLE_WASM_SIMD_WORMHOLE
+ // The wormhole is a mechanism for injecting experimental, possibly
+ // platform-dependent, opcodes into the generated code. A wormhole op is
+ // expressed as a two-operation SIMD shuffle op with the pattern <31, 0, 30,
+ // 2, 29, 4, 28, 6, 27, 8, 26, 10, 25, 12, 24, X> where X is the opcode,
+ // 0..31, from the set below. If an operation uses no operands, the operands
+ // to the shuffle opcode should be const 0. If an operation uses one operand,
+ // the operands to the shuffle opcode should both be that operand.
+ //
+  // The wormhole must be enabled by a flag and is only supported by Ion on
+  // x64; baseline must be disabled.
+ //
+ // The benefit of this mechanism is that it allows experimental opcodes to be
+ // used without updating other tools (compilers, linkers, optimizers).
+ //
+ // These opcodes can be rearranged but the X values associated with them must
+ // remain fixed.
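+  //
+  // As an illustration (derived from the pattern above, not normative): the
+  // selftest operation below (X=0) would be expressed as an i8x16 shuffle
+  // with the mask <31, 0, 30, 2, 29, 4, 28, 6, 27, 8, 26, 10, 25, 12, 24, 0>
+  // applied to two const-0 operands, since it takes no operands.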
+
+ // X=0, selftest opcode. No operands. The result is an 8x16 hex value:
+ // DEADD00DCAFEBABE.
+ MozWHSELFTEST = 0x200,
+
+ // X=1, Intel SSE3 PMADDUBSW instruction. Two operands.
+ MozWHPMADDUBSW = 0x201,
+
+ // X=2, Intel SSE2 PMADDWD instruction. Two operands.
+ MozWHPMADDWD = 0x202,
+#endif
+
+ Limit
+};
+
+// Opcodes in the "miscellaneous" opcode space.
+enum class MiscOp {
+ // Saturating float-to-int conversions
+ I32TruncSSatF32 = 0x00,
+ I32TruncUSatF32 = 0x01,
+ I32TruncSSatF64 = 0x02,
+ I32TruncUSatF64 = 0x03,
+ I64TruncSSatF32 = 0x04,
+ I64TruncUSatF32 = 0x05,
+ I64TruncSSatF64 = 0x06,
+ I64TruncUSatF64 = 0x07,
+
+ // Bulk memory operations, per proposal as of February 2019.
+ MemInit = 0x08,
+ DataDrop = 0x09,
+ MemCopy = 0x0a,
+ MemFill = 0x0b,
+ TableInit = 0x0c,
+ ElemDrop = 0x0d,
+ TableCopy = 0x0e,
+
+ // Reftypes, per proposal as of February 2019.
+ TableGrow = 0x0f,
+ TableSize = 0x10,
+ TableFill = 0x11,
+
+ Limit
+};
+
+// Opcodes from threads proposal as of June 30, 2017
+enum class ThreadOp {
+ // Wait and wake
+ Wake = 0x00,
+ I32Wait = 0x01,
+ I64Wait = 0x02,
+ Fence = 0x03,
+
+ // Load and store
+ I32AtomicLoad = 0x10,
+ I64AtomicLoad = 0x11,
+ I32AtomicLoad8U = 0x12,
+ I32AtomicLoad16U = 0x13,
+ I64AtomicLoad8U = 0x14,
+ I64AtomicLoad16U = 0x15,
+ I64AtomicLoad32U = 0x16,
+ I32AtomicStore = 0x17,
+ I64AtomicStore = 0x18,
+ I32AtomicStore8U = 0x19,
+ I32AtomicStore16U = 0x1a,
+ I64AtomicStore8U = 0x1b,
+ I64AtomicStore16U = 0x1c,
+ I64AtomicStore32U = 0x1d,
+
+ // Read-modify-write operations
+ I32AtomicAdd = 0x1e,
+ I64AtomicAdd = 0x1f,
+ I32AtomicAdd8U = 0x20,
+ I32AtomicAdd16U = 0x21,
+ I64AtomicAdd8U = 0x22,
+ I64AtomicAdd16U = 0x23,
+ I64AtomicAdd32U = 0x24,
+
+ I32AtomicSub = 0x25,
+ I64AtomicSub = 0x26,
+ I32AtomicSub8U = 0x27,
+ I32AtomicSub16U = 0x28,
+ I64AtomicSub8U = 0x29,
+ I64AtomicSub16U = 0x2a,
+ I64AtomicSub32U = 0x2b,
+
+ I32AtomicAnd = 0x2c,
+ I64AtomicAnd = 0x2d,
+ I32AtomicAnd8U = 0x2e,
+ I32AtomicAnd16U = 0x2f,
+ I64AtomicAnd8U = 0x30,
+ I64AtomicAnd16U = 0x31,
+ I64AtomicAnd32U = 0x32,
+
+ I32AtomicOr = 0x33,
+ I64AtomicOr = 0x34,
+ I32AtomicOr8U = 0x35,
+ I32AtomicOr16U = 0x36,
+ I64AtomicOr8U = 0x37,
+ I64AtomicOr16U = 0x38,
+ I64AtomicOr32U = 0x39,
+
+ I32AtomicXor = 0x3a,
+ I64AtomicXor = 0x3b,
+ I32AtomicXor8U = 0x3c,
+ I32AtomicXor16U = 0x3d,
+ I64AtomicXor8U = 0x3e,
+ I64AtomicXor16U = 0x3f,
+ I64AtomicXor32U = 0x40,
+
+ I32AtomicXchg = 0x41,
+ I64AtomicXchg = 0x42,
+ I32AtomicXchg8U = 0x43,
+ I32AtomicXchg16U = 0x44,
+ I64AtomicXchg8U = 0x45,
+ I64AtomicXchg16U = 0x46,
+ I64AtomicXchg32U = 0x47,
+
+ // CompareExchange
+ I32AtomicCmpXchg = 0x48,
+ I64AtomicCmpXchg = 0x49,
+ I32AtomicCmpXchg8U = 0x4a,
+ I32AtomicCmpXchg16U = 0x4b,
+ I64AtomicCmpXchg8U = 0x4c,
+ I64AtomicCmpXchg16U = 0x4d,
+ I64AtomicCmpXchg32U = 0x4e,
+
+ Limit
+};
+
+enum class MozOp {
+ // ------------------------------------------------------------------------
+ // These operators are emitted internally when compiling asm.js and are
+ // rejected by wasm validation. They are prefixed by MozPrefix.
+
+ // asm.js-specific operators. They start at 1 so as to check for
+ // uninitialized (zeroed) storage.
+ TeeGlobal = 0x01,
+ I32Min,
+ I32Max,
+ I32Neg,
+ I32BitNot,
+ I32Abs,
+ F32TeeStoreF64,
+ F64TeeStoreF32,
+ I32TeeStore8,
+ I32TeeStore16,
+ I64TeeStore8,
+ I64TeeStore16,
+ I64TeeStore32,
+ I32TeeStore,
+ I64TeeStore,
+ F32TeeStore,
+ F64TeeStore,
+ F64Mod,
+ F64Sin,
+ F64Cos,
+ F64Tan,
+ F64Asin,
+ F64Acos,
+ F64Atan,
+ F64Exp,
+ F64Log,
+ F64Pow,
+ F64Atan2,
+
+ // asm.js-style call_indirect with the callee evaluated first.
+ OldCallDirect,
+ OldCallIndirect,
+
+ Limit
+};
+
+struct OpBytes {
+ // b0 is a byte value but has a 16-bit representation to allow for a full
+ // 256-value range plus a sentinel Limit value.
+ uint16_t b0;
+ // b1 is a LEB128 value but 32 bits is enough for now.
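+  // For plain one-byte operators b1 is unused and left as 0; for prefixed
+  // operators (the Misc/Thread/Moz/SIMD opcode spaces above) b0 presumably
+  // holds the prefix byte and b1 the LEB128-encoded sub-opcode.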
+ uint32_t b1;
+
+ explicit OpBytes(Op x) {
+ b0 = uint16_t(x);
+ b1 = 0;
+ }
+ OpBytes() = default;
+};
+
+static const char NameSectionName[] = "name";
+static const char SourceMappingURLSectionName[] = "sourceMappingURL";
+
+enum class NameType { Module = 0, Function = 1, Local = 2 };
+
+enum class FieldFlags { Mutable = 0x01, AllowedMask = 0x01 };
+
+// The WebAssembly spec hard-codes the virtual page size to be 64KiB and
+// requires the size of linear memory to always be a multiple of 64KiB.
+
+static const unsigned PageSize = 64 * 1024;
+static const unsigned PageBits = 16;
+static_assert(PageSize == (1u << PageBits));
+
+static const unsigned PageMask = ((1u << PageBits) - 1);
+
+// These limits are agreed upon with other engines for consistency.
+
+static const unsigned MaxTypes = 1000000;
+static const unsigned MaxFuncs = 1000000;
+static const unsigned MaxTables = 100000;
+static const unsigned MaxImports = 100000;
+static const unsigned MaxExports = 100000;
+static const unsigned MaxGlobals = 1000000;
+#ifdef ENABLE_WASM_EXCEPTIONS
+static const unsigned MaxEvents =
+ 1000000; // TODO: get this into the shared limits spec
+#endif
+static const unsigned MaxDataSegments = 100000;
+static const unsigned MaxDataSegmentLengthPages = 16384;
+static const unsigned MaxElemSegments = 10000000;
+static const unsigned MaxElemSegmentLength = 10000000;
+static const unsigned MaxTableLimitField = UINT32_MAX;
+static const unsigned MaxTableLength = 10000000;
+static const unsigned MaxLocals = 50000;
+static const unsigned MaxParams = 1000;
+// The actual maximum number of results may be `1` if multi-value is not
+// enabled. Check `env->funcMaxResults()` to get the correct value for a
+// module.
+static const unsigned MaxResults = 1000;
+static const unsigned MaxStructFields = 1000;
+static const unsigned MaxMemory32LimitField = 65536;
+#ifdef JS_64BIT
+// FIXME (large ArrayBuffer): This should be upped to UINT32_MAX / PageSize
+// initially, then to (size_t(UINT32_MAX) + 1) / PageSize subsequently, see the
+// companion FIXME in WasmMemoryObject::grow() for additional information.
+static const unsigned MaxMemory32Pages = INT32_MAX / PageSize;
+#else
+static const unsigned MaxMemory32Pages = INT32_MAX / PageSize;
+#endif
+static const size_t MaxMemory32Bytes = size_t(MaxMemory32Pages) * PageSize;
+static const unsigned MaxStringBytes = 100000;
+static const unsigned MaxModuleBytes = 1024 * 1024 * 1024;
+static const unsigned MaxFunctionBytes = 7654321;
+
+// These limits pertain to our WebAssembly implementation only.
+
+static const unsigned MaxBrTableElems = 1000000;
+static const unsigned MaxCodeSectionBytes = MaxModuleBytes;
+static const unsigned MaxArgsForJitInlineCall = 8;
+static const unsigned MaxResultsForJitEntry = 1;
+static const unsigned MaxResultsForJitExit = 1;
+static const unsigned MaxResultsForJitInlineCall = MaxResultsForJitEntry;
+// The maximum number of results of a function call or block that may be
+// returned in registers.
+static const unsigned MaxRegisterResults = 1;
+
+// A magic value of the FramePointer to indicate, after a return to the entry
+// stub, that an exception has been caught and that we should throw.
+
+static const unsigned FailFP = 0xbad;
+
+// Asserted by Decoder::readVarU32.
+
+static const unsigned MaxVarU32DecodedBytes = 5;
+
+// Which backend to use in the case of the optimized tier.
+
+enum class OptimizedBackend {
+ Ion,
+ Cranelift,
+};
+
+// The CompileMode controls how compilation of a module is performed (notably,
+// how many times we compile it).
+
+enum class CompileMode { Once, Tier1, Tier2 };
+
+// Typed enum for whether debugging is enabled.
+
+enum class DebugEnabled { False, True };
+
+// A wasm module can use no memory, an unshared memory (ArrayBuffer), or a
+// shared memory (SharedArrayBuffer).
+
+enum class MemoryUsage { None = false, Unshared = 1, Shared = 2 };
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_binary_h
diff --git a/js/src/wasm/WasmContext.cpp b/js/src/wasm/WasmContext.cpp
new file mode 100644
index 0000000000..5c632a5f0c
--- /dev/null
+++ b/js/src/wasm/WasmContext.cpp
@@ -0,0 +1,38 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2020 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmContext.h"
+
+#include "wasm/WasmTypes.h"
+
+using namespace js;
+using namespace wasm;
+
+bool wasm::Context::ensureTypeContext(JSContext* cx) {
+ if (typeContext) {
+ return true;
+ }
+ typeContext =
+ js::MakeUnique<TypeContext>(FeatureArgs::build(cx), TypeDefVector());
+ return !!typeContext;
+}
+
+size_t wasm::Context::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return typeContext ? typeContext->sizeOfExcludingThis(mallocSizeOf) : 0;
+}
diff --git a/js/src/wasm/WasmContext.h b/js/src/wasm/WasmContext.h
new file mode 100644
index 0000000000..b52c0750c8
--- /dev/null
+++ b/js/src/wasm/WasmContext.h
@@ -0,0 +1,62 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2020 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_context_h
+#define wasm_context_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MemoryReporting.h"
+#include "jstypes.h"
+#include "js/UniquePtr.h"
+
+struct JS_PUBLIC_API JSContext;
+
+namespace js {
+namespace wasm {
+
+class TypeContext;
+
+// wasm::Context lives in JSContext and contains the wasm-related per-context
+// state.
+
+class Context {
+ public:
+ Context()
+ : triedToInstallSignalHandlers(false),
+ haveSignalHandlers(false),
+ typeContext(nullptr) {}
+
+ // Used by wasm::EnsureThreadSignalHandlers(cx) to install thread signal
+ // handlers once per JSContext/thread.
+ bool triedToInstallSignalHandlers;
+ bool haveSignalHandlers;
+
+ [[nodiscard]] bool ensureTypeContext(JSContext* cx);
+
+ // The global type context.
+ UniquePtr<TypeContext> typeContext;
+
+ // about:memory reporting
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_context_h
diff --git a/js/src/wasm/WasmCraneliftCompile.cpp b/js/src/wasm/WasmCraneliftCompile.cpp
new file mode 100644
index 0000000000..561d675503
--- /dev/null
+++ b/js/src/wasm/WasmCraneliftCompile.cpp
@@ -0,0 +1,768 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCraneliftCompile.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/ScopeExit.h"
+
+#include "jit/Disassemble.h"
+#include "js/Printf.h"
+#include "vm/JSContext.h"
+
+#include "wasm/cranelift/baldrapi.h"
+#include "wasm/cranelift/clifapi.h"
+#include "wasm/WasmFrameIter.h" // js::wasm::GenerateFunction{Pro,Epi}logue
+#include "wasm/WasmGC.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmStubs.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+
+bool wasm::CraneliftPlatformSupport() { return cranelift_supports_platform(); }
+
+static inline SymbolicAddress ToSymbolicAddress(BD_SymbolicAddress bd) {
+ switch (bd) {
+ case BD_SymbolicAddress::RefFunc:
+ return SymbolicAddress::RefFunc;
+ case BD_SymbolicAddress::MemoryGrow:
+ return SymbolicAddress::MemoryGrow;
+ case BD_SymbolicAddress::MemorySize:
+ return SymbolicAddress::MemorySize;
+ case BD_SymbolicAddress::MemoryCopy:
+ return SymbolicAddress::MemCopy;
+ case BD_SymbolicAddress::MemoryCopyShared:
+ return SymbolicAddress::MemCopyShared;
+ case BD_SymbolicAddress::DataDrop:
+ return SymbolicAddress::DataDrop;
+ case BD_SymbolicAddress::MemoryFill:
+ return SymbolicAddress::MemFill;
+ case BD_SymbolicAddress::MemoryFillShared:
+ return SymbolicAddress::MemFillShared;
+ case BD_SymbolicAddress::MemoryInit:
+ return SymbolicAddress::MemInit;
+ case BD_SymbolicAddress::TableCopy:
+ return SymbolicAddress::TableCopy;
+ case BD_SymbolicAddress::ElemDrop:
+ return SymbolicAddress::ElemDrop;
+ case BD_SymbolicAddress::TableFill:
+ return SymbolicAddress::TableFill;
+ case BD_SymbolicAddress::TableGet:
+ return SymbolicAddress::TableGet;
+ case BD_SymbolicAddress::TableGrow:
+ return SymbolicAddress::TableGrow;
+ case BD_SymbolicAddress::TableInit:
+ return SymbolicAddress::TableInit;
+ case BD_SymbolicAddress::TableSet:
+ return SymbolicAddress::TableSet;
+ case BD_SymbolicAddress::TableSize:
+ return SymbolicAddress::TableSize;
+ case BD_SymbolicAddress::FloorF32:
+ return SymbolicAddress::FloorF;
+ case BD_SymbolicAddress::FloorF64:
+ return SymbolicAddress::FloorD;
+ case BD_SymbolicAddress::CeilF32:
+ return SymbolicAddress::CeilF;
+ case BD_SymbolicAddress::CeilF64:
+ return SymbolicAddress::CeilD;
+ case BD_SymbolicAddress::NearestF32:
+ return SymbolicAddress::NearbyIntF;
+ case BD_SymbolicAddress::NearestF64:
+ return SymbolicAddress::NearbyIntD;
+ case BD_SymbolicAddress::TruncF32:
+ return SymbolicAddress::TruncF;
+ case BD_SymbolicAddress::TruncF64:
+ return SymbolicAddress::TruncD;
+ case BD_SymbolicAddress::PreBarrier:
+ return SymbolicAddress::PreBarrierFiltering;
+ case BD_SymbolicAddress::PostBarrier:
+ return SymbolicAddress::PostBarrierFiltering;
+ case BD_SymbolicAddress::WaitI32:
+ return SymbolicAddress::WaitI32;
+ case BD_SymbolicAddress::WaitI64:
+ return SymbolicAddress::WaitI64;
+ case BD_SymbolicAddress::Wake:
+ return SymbolicAddress::Wake;
+ case BD_SymbolicAddress::Limit:
+ break;
+ }
+ MOZ_CRASH("unknown baldrdash symbolic address");
+}
+
+static bool GenerateCraneliftCode(
+ WasmMacroAssembler& masm, const CraneliftCompiledFunc& func,
+ const FuncType& funcType, const TypeIdDesc& funcTypeId,
+ uint32_t lineOrBytecode, uint32_t funcBytecodeSize, StackMaps* stackMaps,
+ size_t stackMapsOffset, size_t stackMapsCount, FuncOffsets* offsets) {
+ wasm::GenerateFunctionPrologue(masm, funcTypeId, mozilla::Nothing(), offsets);
+
+ // Omit the check when framePushed is small and we know there's no
+ // recursion.
+ if (func.frame_pushed < MAX_UNCHECKED_LEAF_FRAME_SIZE &&
+ !func.contains_calls) {
+ masm.reserveStack(func.frame_pushed);
+ } else {
+ std::pair<CodeOffset, uint32_t> pair = masm.wasmReserveStackChecked(
+ func.frame_pushed, BytecodeOffset(lineOrBytecode));
+ CodeOffset trapInsnOffset = pair.first;
+ size_t nBytesReservedBeforeTrap = pair.second;
+
+ MachineState trapExitLayout;
+ size_t trapExitLayoutNumWords;
+ GenerateTrapExitMachineState(&trapExitLayout, &trapExitLayoutNumWords);
+
+ size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(funcType.args());
+
+ ArgTypeVector args(funcType);
+ wasm::StackMap* functionEntryStackMap = nullptr;
+ if (!CreateStackMapForFunctionEntryTrap(
+ args, trapExitLayout, trapExitLayoutNumWords,
+ nBytesReservedBeforeTrap, nInboundStackArgBytes,
+ &functionEntryStackMap)) {
+ return false;
+ }
+
+ // In debug builds, we'll always have a stack map, even if there are no
+ // refs to track.
+ MOZ_ASSERT(functionEntryStackMap);
+
+ if (functionEntryStackMap &&
+ !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
+ functionEntryStackMap)) {
+ functionEntryStackMap->destroy();
+ return false;
+ }
+ }
+ MOZ_ASSERT(masm.framePushed() == func.frame_pushed);
+
+ // Copy the machine code; handle jump tables and other read-only data below.
+ uint32_t funcBase = masm.currentOffset();
+ if (func.code_size && !masm.appendRawCode(func.code, func.code_size)) {
+ return false;
+ }
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ uint32_t codeEnd = masm.currentOffset();
+#endif
+
+ wasm::GenerateFunctionEpilogue(masm, func.frame_pushed, offsets);
+
+ if (func.num_rodata_relocs > 0) {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ constexpr size_t jumptableElementSize = 4;
+
+ MOZ_ASSERT(func.jumptables_size % jumptableElementSize == 0);
+
+ // Align the jump tables properly.
+ masm.haltingAlign(jumptableElementSize);
+
+ // Copy over the tables and read-only data.
+ uint32_t rodataBase = masm.currentOffset();
+ if (!masm.appendRawCode(func.code + func.code_size,
+ func.total_size - func.code_size)) {
+ return false;
+ }
+
+ uint32_t numElem = func.jumptables_size / jumptableElementSize;
+ uint32_t bias = rodataBase - codeEnd;
+
+ // Bias the jump table(s). The table values are negative values
+ // representing backward jumps. By shifting the table down we increase the
+ // distance and so we add a negative value to reflect the larger distance.
+ //
+ // Note addToPCRel4() works from the end of the instruction, hence the loop
+ // bounds.
+ for (uint32_t i = 1; i <= numElem; i++) {
+ masm.addToPCRel4(rodataBase + (i * jumptableElementSize), -bias);
+ }
+
+ // Patch up the code locations. These represent forward distances that also
+ // become greater, so we add a positive value.
+ for (uint32_t i = 0; i < func.num_rodata_relocs; i++) {
+ MOZ_ASSERT(func.rodata_relocs[i] < func.code_size);
+ masm.addToPCRel4(funcBase + func.rodata_relocs[i], bias);
+ }
+#else
+ MOZ_CRASH("No jump table support on this platform");
+#endif
+ }
+
+ masm.flush();
+ if (masm.oom()) {
+ return false;
+ }
+ offsets->end = masm.currentOffset();
+
+ for (size_t i = 0; i < stackMapsCount; i++) {
+ auto* maplet = stackMaps->getRef(stackMapsOffset + i);
+ maplet->offsetBy(funcBase);
+ }
+
+ for (size_t i = 0; i < func.num_metadata; i++) {
+ const CraneliftMetadataEntry& metadata = func.metadatas[i];
+
+ CheckedInt<size_t> offset = funcBase;
+ offset += metadata.code_offset;
+ if (!offset.isValid()) {
+ return false;
+ }
+
+#ifdef DEBUG
+ // Check code offsets.
+ MOZ_ASSERT(offset.value() >= offsets->uncheckedCallEntry);
+ MOZ_ASSERT(offset.value() < offsets->ret);
+ MOZ_ASSERT(metadata.module_bytecode_offset != 0);
+
+ // Check bytecode offsets.
+ if (lineOrBytecode > 0) {
+ MOZ_ASSERT(metadata.module_bytecode_offset >= lineOrBytecode);
+ MOZ_ASSERT(metadata.module_bytecode_offset <
+ lineOrBytecode + funcBytecodeSize);
+ }
+#endif
+ uint32_t bytecodeOffset = metadata.module_bytecode_offset;
+
+ switch (metadata.which) {
+ case CraneliftMetadataEntry::Which::DirectCall: {
+ CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Func);
+ masm.append(desc, CodeOffset(offset.value()), metadata.extra);
+ break;
+ }
+ case CraneliftMetadataEntry::Which::IndirectCall: {
+ CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Dynamic);
+ masm.append(desc, CodeOffset(offset.value()));
+ break;
+ }
+ case CraneliftMetadataEntry::Which::Trap: {
+ Trap trap = (Trap)metadata.extra;
+ BytecodeOffset trapOffset(bytecodeOffset);
+ masm.append(trap, wasm::TrapSite(offset.value(), trapOffset));
+ break;
+ }
+ case CraneliftMetadataEntry::Which::SymbolicAccess: {
+ CodeOffset raOffset(offset.value());
+ CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Symbolic);
+ masm.append(desc, raOffset);
+
+ SymbolicAddress sym =
+ ToSymbolicAddress(BD_SymbolicAddress(metadata.extra));
+ masm.append(SymbolicAccess(raOffset, sym));
+ break;
+ }
+ default: {
+ MOZ_CRASH("unknown cranelift metadata kind");
+ }
+ }
+ }
+
+ return true;
+}
+
+// In Rust, a BatchCompiler variable has a lifetime constrained by those of its
+// associated StaticEnvironment and ModuleEnvironment. This RAII class ties
+// them together and makes sure that the compiler is properly destroyed when
+// it goes out of scope.
+
+class CraneliftContext {
+ CraneliftStaticEnvironment staticEnv_;
+ CraneliftModuleEnvironment moduleEnv_;
+ CraneliftCompiler* compiler_;
+
+ public:
+ explicit CraneliftContext(const ModuleEnvironment& moduleEnv)
+ : moduleEnv_(moduleEnv), compiler_(nullptr) {
+ staticEnv_.ref_types_enabled = moduleEnv.refTypesEnabled();
+ staticEnv_.threads_enabled = true;
+ staticEnv_.v128_enabled = moduleEnv.v128Enabled();
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+ if (moduleEnv.hugeMemoryEnabled()) {
+ // In the huge memory configuration, we always reserve the full 4 GB
+ // index space for a heap.
+ staticEnv_.static_memory_bound = HugeIndexRange;
+ staticEnv_.memory_guard_size = HugeOffsetGuardLimit;
+ } else {
+ staticEnv_.memory_guard_size = OffsetGuardLimit;
+ }
+#endif
+ // Otherwise, heap bounds are stored in the `boundsCheckLimit32` field
+ // of TlsData.
+ }
+ bool init() {
+ compiler_ = cranelift_compiler_create(&staticEnv_, &moduleEnv_);
+ return !!compiler_;
+ }
+ ~CraneliftContext() {
+ if (compiler_) {
+ cranelift_compiler_destroy(compiler_);
+ }
+ }
+ operator CraneliftCompiler*() { return compiler_; }
+};
+
+CraneliftFuncCompileInput::CraneliftFuncCompileInput(
+ const FuncCompileInput& func)
+ : bytecode(func.begin),
+ bytecode_size(func.end - func.begin),
+ index(func.index),
+ offset_in_module(func.lineOrBytecode) {}
+
+static_assert(offsetof(TlsData, boundsCheckLimit32) == sizeof(void*),
+ "fix make_heap() in wasm2clif.rs");
+
+CraneliftStaticEnvironment::CraneliftStaticEnvironment()
+ :
+#ifdef JS_CODEGEN_X64
+ has_sse2(Assembler::HasSSE2()),
+ has_sse3(Assembler::HasSSE3()),
+ has_sse41(Assembler::HasSSE41()),
+ has_sse42(Assembler::HasSSE42()),
+ has_popcnt(Assembler::HasPOPCNT()),
+ has_avx(Assembler::HasAVX()),
+ has_bmi1(Assembler::HasBMI1()),
+ has_bmi2(Assembler::HasBMI2()),
+ has_lzcnt(Assembler::HasLZCNT()),
+#else
+ has_sse2(false),
+ has_sse3(false),
+ has_sse41(false),
+ has_sse42(false),
+ has_popcnt(false),
+ has_avx(false),
+ has_bmi1(false),
+ has_bmi2(false),
+ has_lzcnt(false),
+#endif
+#if defined(XP_WIN)
+ platform_is_windows(true),
+#else
+ platform_is_windows(false),
+#endif
+ ref_types_enabled(false),
+ threads_enabled(false),
+ v128_enabled(false),
+ static_memory_bound(0),
+ memory_guard_size(0),
+ memory_base_tls_offset(offsetof(TlsData, memoryBase)),
+ instance_tls_offset(offsetof(TlsData, instance)),
+ interrupt_tls_offset(offsetof(TlsData, interrupt)),
+ cx_tls_offset(offsetof(TlsData, cx)),
+ realm_cx_offset(JSContext::offsetOfRealm()),
+ realm_tls_offset(offsetof(TlsData, realm)),
+ realm_func_import_tls_offset(offsetof(FuncImportTls, realm)),
+ size_of_wasm_frame(sizeof(wasm::Frame)) {
+}
+
+// Most of BaldrMonkey's data structures refer to a "global offset" which is a
+// byte offset into the `globalArea` field of the `TlsData` struct.
+//
+// Cranelift represents global variables with their byte offset from the "VM
+// context pointer" which is the `WasmTlsReg` pointing to the `TlsData`
+// struct.
+//
+// This function translates between the two.
+
+static size_t globalToTlsOffset(size_t globalOffset) {
+ return offsetof(wasm::TlsData, globalArea) + globalOffset;
+}
+
+CraneliftModuleEnvironment::CraneliftModuleEnvironment(
+ const ModuleEnvironment& env)
+ : env(&env) {
+ // env.minMemoryLength is in bytes. Convert it to wasm pages.
+ static_assert(sizeof(env.minMemoryLength) == 8);
+ MOZ_RELEASE_ASSERT(env.minMemoryLength <= (((uint64_t)1) << 32));
+ MOZ_RELEASE_ASSERT((env.minMemoryLength & wasm::PageMask) == 0);
+ min_memory_length = (uint32_t)(env.minMemoryLength >> wasm::PageBits);
+}
+
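+// The env_* functions below are callbacks invoked from the Rust side of the
+// Cranelift integration to query the ModuleEnvironment (their declarations
+// presumably live in baldrapi.h).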
+TypeCode env_unpack(BD_ValType valType) {
+ return UnpackTypeCodeType(PackedTypeCode(valType.packed));
+}
+
+size_t env_num_datas(const CraneliftModuleEnvironment* env) {
+ return env->env->dataCount.valueOr(0);
+}
+
+size_t env_num_elems(const CraneliftModuleEnvironment* env) {
+ return env->env->elemSegments.length();
+}
+TypeCode env_elem_typecode(const CraneliftModuleEnvironment* env,
+ uint32_t index) {
+ return UnpackTypeCodeType(env->env->elemSegments[index]->elemType.packed());
+}
+
+uint32_t env_max_memory(const CraneliftModuleEnvironment* env) {
+ // env.maxMemoryLength is in bytes. Convert it to wasm pages.
+ if (env->env->maxMemoryLength.isSome()) {
+ // We use |auto| here rather than |uint64_t| so that the static_assert will
+ // fail if |maxMemoryLength| is changed to some other size.
+ auto inBytes = *(env->env->maxMemoryLength);
+ static_assert(sizeof(inBytes) == 8);
+ MOZ_RELEASE_ASSERT(inBytes <= (((uint64_t)1) << 32));
+ MOZ_RELEASE_ASSERT((inBytes & wasm::PageMask) == 0);
+ return (uint32_t)(inBytes >> wasm::PageBits);
+ } else {
+ return UINT32_MAX;
+ }
+}
+
+bool env_uses_shared_memory(const CraneliftModuleEnvironment* env) {
+ return env->env->usesSharedMemory();
+}
+
+bool env_has_memory(const CraneliftModuleEnvironment* env) {
+ return env->env->usesMemory();
+}
+
+size_t env_num_types(const CraneliftModuleEnvironment* env) {
+ return env->env->types.length();
+}
+const FuncType* env_type(const CraneliftModuleEnvironment* env,
+ size_t typeIndex) {
+ return &env->env->types[typeIndex].funcType();
+}
+
+size_t env_num_funcs(const CraneliftModuleEnvironment* env) {
+ return env->env->funcs.length();
+}
+const FuncType* env_func_sig(const CraneliftModuleEnvironment* env,
+ size_t funcIndex) {
+ return env->env->funcs[funcIndex].type;
+}
+const TypeIdDesc* env_func_sig_id(const CraneliftModuleEnvironment* env,
+ size_t funcIndex) {
+ return env->env->funcs[funcIndex].typeId;
+}
+size_t env_func_sig_index(const CraneliftModuleEnvironment* env,
+ size_t funcIndex) {
+ return env->env->funcs[funcIndex].typeIndex;
+}
+bool env_is_func_valid_for_ref(const CraneliftModuleEnvironment* env,
+ uint32_t index) {
+ return env->env->validForRefFunc.getBit(index);
+}
+
+size_t env_func_import_tls_offset(const CraneliftModuleEnvironment* env,
+ size_t funcIndex) {
+ return globalToTlsOffset(env->env->funcImportGlobalDataOffsets[funcIndex]);
+}
+
+bool env_func_is_import(const CraneliftModuleEnvironment* env,
+ size_t funcIndex) {
+ return env->env->funcIsImport(funcIndex);
+}
+
+const FuncType* env_signature(const CraneliftModuleEnvironment* env,
+ size_t funcTypeIndex) {
+ return &env->env->types[funcTypeIndex].funcType();
+}
+
+const TypeIdDesc* env_signature_id(const CraneliftModuleEnvironment* env,
+ size_t funcTypeIndex) {
+ return &env->env->typeIds[funcTypeIndex];
+}
+
+size_t env_num_tables(const CraneliftModuleEnvironment* env) {
+ return env->env->tables.length();
+}
+const TableDesc* env_table(const CraneliftModuleEnvironment* env,
+ size_t tableIndex) {
+ return &env->env->tables[tableIndex];
+}
+
+size_t env_num_globals(const CraneliftModuleEnvironment* env) {
+ return env->env->globals.length();
+}
+const GlobalDesc* env_global(const CraneliftModuleEnvironment* env,
+ size_t globalIndex) {
+ return &env->env->globals[globalIndex];
+}
+
+bool wasm::CraneliftCompileFunctions(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo,
+ const FuncCompileInputVector& inputs,
+ CompiledCode* code, UniqueChars* error) {
+ MOZ_RELEASE_ASSERT(CraneliftPlatformSupport());
+
+ MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
+ MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
+ MOZ_ASSERT(compilerEnv.optimizedBackend() == OptimizedBackend::Cranelift);
+ MOZ_ASSERT(!moduleEnv.isAsmJS());
+
+ TempAllocator alloc(&lifo);
+ JitContext jitContext(&alloc);
+ WasmMacroAssembler masm(alloc, moduleEnv);
+ MOZ_ASSERT(IsCompilingWasm());
+
+ // Swap in already-allocated empty vectors to avoid malloc/free.
+ MOZ_ASSERT(code->empty());
+
+ CraneliftReusableData reusableContext;
+ if (!code->swapCranelift(masm, reusableContext)) {
+ return false;
+ }
+
+ if (!reusableContext) {
+ auto context = MakeUnique<CraneliftContext>(moduleEnv);
+ if (!context || !context->init()) {
+ return false;
+ }
+ reusableContext.reset((void**)context.release());
+ }
+
+ CraneliftContext* compiler = (CraneliftContext*)reusableContext.get();
+
+ // Disable instruction spew if we're going to disassemble after code
+ // generation, or the output will be a mess.
+
+ bool jitSpew = JitSpewEnabled(js::jit::JitSpew_Codegen);
+ if (jitSpew) {
+ DisableChannel(js::jit::JitSpew_Codegen);
+ }
+ auto reenableSpew = mozilla::MakeScopeExit([&] {
+ if (jitSpew) {
+ EnableChannel(js::jit::JitSpew_Codegen);
+ }
+ });
+
+ for (const FuncCompileInput& func : inputs) {
+ Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+ size_t funcBytecodeSize = func.end - func.begin;
+
+ size_t previousStackmapCount = code->stackMaps.length();
+
+ CraneliftFuncCompileInput clifInput(func);
+ clifInput.stackmaps = (BD_Stackmaps*)&code->stackMaps;
+
+ CraneliftCompiledFunc clifFunc;
+
+ char* clifError = nullptr;
+ if (!cranelift_compile_function(*compiler, &clifInput, &clifFunc,
+ &clifError)) {
+ *error = JS_smprintf("%s", clifError);
+ cranelift_compiler_free_error(clifError);
+ return false;
+ }
+
+ uint32_t lineOrBytecode = func.lineOrBytecode;
+ const FuncType& funcType = *moduleEnv.funcs[clifInput.index].type;
+ const TypeIdDesc& funcTypeId = *moduleEnv.funcs[clifInput.index].typeId;
+
+ FuncOffsets offsets;
+ if (!GenerateCraneliftCode(
+ masm, clifFunc, funcType, funcTypeId, lineOrBytecode,
+ funcBytecodeSize, &code->stackMaps, previousStackmapCount,
+ code->stackMaps.length() - previousStackmapCount, &offsets)) {
+ return false;
+ }
+
+ if (!code->codeRanges.emplaceBack(func.index, lineOrBytecode, offsets)) {
+ return false;
+ }
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ if (jitSpew) {
+ // The disassembler uses the jitspew for output, so re-enable now.
+ EnableChannel(js::jit::JitSpew_Codegen);
+
+ uint32_t totalCodeSize = masm.currentOffset();
+ uint8_t* codeBuf = (uint8_t*)js_malloc(totalCodeSize);
+ if (codeBuf) {
+ masm.executableCopy(codeBuf);
+
+ const CodeRangeVector& codeRanges = code->codeRanges;
+ MOZ_ASSERT(codeRanges.length() >= inputs.length());
+
+ // Within the current batch, functions' code ranges have been added in
+ // the same order as the inputs.
+ size_t firstCodeRangeIndex = codeRanges.length() - inputs.length();
+
+ for (size_t i = 0; i < inputs.length(); i++) {
+ int funcIndex = inputs[i].index;
+ mozilla::Unused << funcIndex;
+
+ JitSpew(JitSpew_Codegen, "# ========================================");
+ JitSpew(JitSpew_Codegen, "# Start of wasm cranelift code for index %d",
+ funcIndex);
+
+ size_t codeRangeIndex = firstCodeRangeIndex + i;
+ uint32_t codeStart = codeRanges[codeRangeIndex].begin();
+ uint32_t codeEnd = codeRanges[codeRangeIndex].end();
+
+ jit::Disassemble(
+ codeBuf + codeStart, codeEnd - codeStart,
+ [](const char* text) { JitSpew(JitSpew_Codegen, "%s", text); });
+
+ JitSpew(JitSpew_Codegen, "# End of wasm cranelift code for index %d",
+ funcIndex);
+ }
+ js_free(codeBuf);
+ }
+ }
+
+ return code->swapCranelift(masm, reusableContext);
+}
+
+void wasm::CraneliftFreeReusableData(void* ptr) {
+ CraneliftContext* compiler = (CraneliftContext*)ptr;
+ if (compiler) {
+ js_delete(compiler);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Callbacks from Rust to C++.
+
+// Offsets assumed by the `make_heap()` function.
+static_assert(offsetof(wasm::TlsData, memoryBase) == 0, "memory base moved");
+
+// The translate_call() function in wasm2clif.rs depends on these offsets.
+static_assert(offsetof(wasm::FuncImportTls, code) == 0,
+ "Import code field moved");
+static_assert(offsetof(wasm::FuncImportTls, tls) == sizeof(void*),
+ "Import tls moved");
+
+// Global
+
+bool global_isConstant(const GlobalDesc* global) {
+ return global->isConstant();
+}
+
+bool global_isIndirect(const GlobalDesc* global) {
+ return global->isIndirect();
+}
+
+BD_ConstantValue global_constantValue(const GlobalDesc* global) {
+ Val value(global->constantValue());
+ BD_ConstantValue v;
+ v.t = TypeCode(value.type().kind());
+ switch (v.t) {
+ case TypeCode::I32:
+ v.u.i32 = value.i32();
+ break;
+ case TypeCode::I64:
+ v.u.i64 = value.i64();
+ break;
+ case TypeCode::F32:
+ v.u.f32 = value.f32();
+ break;
+ case TypeCode::F64:
+ v.u.f64 = value.f64();
+ break;
+ case TypeCode::V128:
+ memcpy(&v.u.v128, &value.v128(), sizeof(v.u.v128));
+ break;
+ case AbstractReferenceTypeCode:
+ v.u.r = value.ref().forCompiledCode();
+ break;
+ default:
+ MOZ_CRASH("Bad type");
+ }
+ return v;
+}
+
+TypeCode global_type(const GlobalDesc* global) {
+ return UnpackTypeCodeType(global->type().packed());
+}
+
+size_t global_tlsOffset(const GlobalDesc* global) {
+ return globalToTlsOffset(global->offset());
+}
+
+// TableDesc
+
+size_t table_tlsOffset(const TableDesc* table) {
+ return globalToTlsOffset(table->globalDataOffset);
+}
+
+uint32_t table_initialLimit(const TableDesc* table) {
+ return table->initialLength;
+}
+uint32_t table_maximumLimit(const TableDesc* table) {
+ return table->maximumLength.valueOr(UINT32_MAX);
+}
+TypeCode table_elementTypeCode(const TableDesc* table) {
+ return UnpackTypeCodeType(table->elemType.packed());
+}
+
+// Sig
+
+size_t funcType_numArgs(const FuncType* funcType) {
+ return funcType->args().length();
+}
+
+const BD_ValType* funcType_args(const FuncType* funcType) {
+ static_assert(sizeof(BD_ValType) == sizeof(ValType), "update BD_ValType");
+ return (const BD_ValType*)funcType->args().begin();
+}
+
+size_t funcType_numResults(const FuncType* funcType) {
+ return funcType->results().length();
+}
+
+const BD_ValType* funcType_results(const FuncType* funcType) {
+ static_assert(sizeof(BD_ValType) == sizeof(ValType), "update BD_ValType");
+ return (const BD_ValType*)funcType->results().begin();
+}
+
+TypeIdDescKind funcType_idKind(const TypeIdDesc* funcTypeId) {
+ return funcTypeId->kind();
+}
+
+size_t funcType_idImmediate(const TypeIdDesc* funcTypeId) {
+ return funcTypeId->immediate();
+}
+
+size_t funcType_idTlsOffset(const TypeIdDesc* funcTypeId) {
+ return globalToTlsOffset(funcTypeId->globalDataOffset());
+}
+
+void stackmaps_add(BD_Stackmaps* sink, const uint32_t* bitMap,
+ size_t mappedWords, size_t argsSize, size_t codeOffset) {
+ const uint32_t BitElemSize = sizeof(uint32_t) * 8;
+
+ StackMaps* maps = (StackMaps*)sink;
+ StackMap* map = StackMap::create(mappedWords);
+ MOZ_ALWAYS_TRUE(map);
+
+ // Copy the cranelift stackmap into our spidermonkey one
+ // TODO: Take ownership of the cranelift stackmap and avoid a copy
+ for (uint32_t i = 0; i < mappedWords; i++) {
+ uint32_t bit = (bitMap[i / BitElemSize] >> (i % BitElemSize)) & 0x1;
+ if (bit) {
+ map->setBit(i);
+ }
+ }
+
+ map->setFrameOffsetFromTop((argsSize + sizeof(wasm::Frame)) /
+ sizeof(uintptr_t));
+ MOZ_ALWAYS_TRUE(maps->add((uint8_t*)codeOffset, map));
+}
diff --git a/js/src/wasm/WasmCraneliftCompile.h b/js/src/wasm/WasmCraneliftCompile.h
new file mode 100644
index 0000000000..8c72aa132f
--- /dev/null
+++ b/js/src/wasm/WasmCraneliftCompile.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_cranelift_compile_h
+#define wasm_cranelift_compile_h
+
+#include "mozilla/Attributes.h"
+
+#include "wasm/WasmGenerator.h"
+
+namespace js {
+namespace wasm {
+
+#ifdef ENABLE_WASM_CRANELIFT
+// Return whether CraneliftCompileFunction() can generate code on the current
+// device. Usually you do *not* want this, you want CraneliftAvailable().
+[[nodiscard]] bool CraneliftPlatformSupport();
+
+// Generates code with Cranelift.
+[[nodiscard]] bool CraneliftCompileFunctions(
+ const ModuleEnvironment& moduleEnv, const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo, const FuncCompileInputVector& inputs, CompiledCode* code,
+ UniqueChars* error);
+
+void CraneliftFreeReusableData(void* data);
+#else
+[[nodiscard]] inline bool CraneliftPlatformSupport() { return false; }
+
+[[nodiscard]] inline bool CraneliftCompileFunctions(
+ const ModuleEnvironment& moduleEnv, const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo, const FuncCompileInputVector& inputs, CompiledCode* code,
+ UniqueChars* error) {
+ MOZ_CRASH("Should not happen");
+}
+
+inline void CraneliftFreeReusableData(void* data) {}
+#endif
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_cranelift_compile_h
diff --git a/js/src/wasm/WasmDebug.cpp b/js/src/wasm/WasmDebug.cpp
new file mode 100644
index 0000000000..81d10aec08
--- /dev/null
+++ b/js/src/wasm/WasmDebug.cpp
@@ -0,0 +1,496 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmDebug.h"
+
+#include "mozilla/BinarySearch.h"
+
+#include "debugger/Debugger.h"
+#include "ds/Sort.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "jit/MacroAssembler.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+#include "gc/FreeOp-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::BinarySearchIf;
+
+DebugState::DebugState(const Code& code, const Module& module)
+ : code_(&code),
+ module_(&module),
+ enterFrameTrapsEnabled_(false),
+ enterAndLeaveFrameTrapsCounter_(0) {
+ MOZ_ASSERT(code.metadata().debugEnabled);
+}
+
+void DebugState::trace(JSTracer* trc) {
+ for (auto iter = breakpointSites_.iter(); !iter.done(); iter.next()) {
+ WasmBreakpointSite* site = iter.get().value();
+ site->trace(trc);
+ }
+}
+
+void DebugState::finalize(JSFreeOp* fop) {
+ for (auto iter = breakpointSites_.iter(); !iter.done(); iter.next()) {
+ WasmBreakpointSite* site = iter.get().value();
+ site->delete_(fop);
+ }
+}
+
+static const uint32_t DefaultBinarySourceColumnNumber = 1;
+
+static const CallSite* SlowCallSiteSearchByOffset(const MetadataTier& metadata,
+ uint32_t offset) {
+ for (const CallSite& callSite : metadata.callSites) {
+ if (callSite.lineOrBytecode() == offset &&
+ callSite.kind() == CallSiteDesc::Breakpoint) {
+ return &callSite;
+ }
+ }
+ return nullptr;
+}
+
+bool DebugState::getLineOffsets(size_t lineno, Vector<uint32_t>* offsets) {
+ const CallSite* callsite =
+ SlowCallSiteSearchByOffset(metadata(Tier::Debug), lineno);
+ if (callsite && !offsets->append(lineno)) {
+ return false;
+ }
+ return true;
+}
+
+bool DebugState::getAllColumnOffsets(Vector<ExprLoc>* offsets) {
+ for (const CallSite& callSite : metadata(Tier::Debug).callSites) {
+ if (callSite.kind() != CallSite::Breakpoint) {
+ continue;
+ }
+ uint32_t offset = callSite.lineOrBytecode();
+ if (!offsets->emplaceBack(offset, DefaultBinarySourceColumnNumber,
+ offset)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool DebugState::getOffsetLocation(uint32_t offset, size_t* lineno,
+ size_t* column) {
+ if (!SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset)) {
+ return false;
+ }
+ *lineno = offset;
+ *column = DefaultBinarySourceColumnNumber;
+ return true;
+}
+
+bool DebugState::stepModeEnabled(uint32_t funcIndex) const {
+ return stepperCounters_.lookup(funcIndex).found();
+}
+
+bool DebugState::incrementStepperCount(JSContext* cx, uint32_t funcIndex) {
+ const CodeRange& codeRange =
+ codeRanges(Tier::Debug)[funcToCodeRangeIndex(funcIndex)];
+ MOZ_ASSERT(codeRange.isFunction());
+
+ StepperCounters::AddPtr p = stepperCounters_.lookupForAdd(funcIndex);
+ if (p) {
+ MOZ_ASSERT(p->value() > 0);
+ p->value()++;
+ return true;
+ }
+ if (!stepperCounters_.add(p, funcIndex, 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ AutoWritableJitCode awjc(
+ cx->runtime(), code_->segment(Tier::Debug).base() + codeRange.begin(),
+ codeRange.end() - codeRange.begin());
+
+ for (const CallSite& callSite : callSites(Tier::Debug)) {
+ if (callSite.kind() != CallSite::Breakpoint) {
+ continue;
+ }
+ uint32_t offset = callSite.returnAddressOffset();
+ if (codeRange.begin() <= offset && offset <= codeRange.end()) {
+ toggleDebugTrap(offset, true);
+ }
+ }
+ return true;
+}
+
+void DebugState::decrementStepperCount(JSFreeOp* fop, uint32_t funcIndex) {
+ const CodeRange& codeRange =
+ codeRanges(Tier::Debug)[funcToCodeRangeIndex(funcIndex)];
+ MOZ_ASSERT(codeRange.isFunction());
+
+ MOZ_ASSERT(!stepperCounters_.empty());
+ StepperCounters::Ptr p = stepperCounters_.lookup(funcIndex);
+ MOZ_ASSERT(p);
+ if (--p->value()) {
+ return;
+ }
+
+ stepperCounters_.remove(p);
+
+ AutoWritableJitCode awjc(
+ fop->runtime(), code_->segment(Tier::Debug).base() + codeRange.begin(),
+ codeRange.end() - codeRange.begin());
+
+ for (const CallSite& callSite : callSites(Tier::Debug)) {
+ if (callSite.kind() != CallSite::Breakpoint) {
+ continue;
+ }
+ uint32_t offset = callSite.returnAddressOffset();
+ if (codeRange.begin() <= offset && offset <= codeRange.end()) {
+ bool enabled = breakpointSites_.has(offset);
+ toggleDebugTrap(offset, enabled);
+ }
+ }
+}
+
+bool DebugState::hasBreakpointTrapAtOffset(uint32_t offset) {
+ return SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset);
+}
+
+void DebugState::toggleBreakpointTrap(JSRuntime* rt, uint32_t offset,
+ bool enabled) {
+ const CallSite* callSite =
+ SlowCallSiteSearchByOffset(metadata(Tier::Debug), offset);
+ if (!callSite) {
+ return;
+ }
+ size_t debugTrapOffset = callSite->returnAddressOffset();
+
+ const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
+ const CodeRange* codeRange =
+ code_->lookupFuncRange(codeSegment.base() + debugTrapOffset);
+ MOZ_ASSERT(codeRange);
+
+ if (stepperCounters_.lookup(codeRange->funcIndex())) {
+ return; // no need to toggle when step mode is enabled
+ }
+
+ AutoWritableJitCode awjc(rt, codeSegment.base(), codeSegment.length());
+ toggleDebugTrap(debugTrapOffset, enabled);
+}
+
+WasmBreakpointSite* DebugState::getBreakpointSite(uint32_t offset) const {
+ WasmBreakpointSiteMap::Ptr p = breakpointSites_.lookup(offset);
+ if (!p) {
+ return nullptr;
+ }
+
+ return p->value();
+}
+
+WasmBreakpointSite* DebugState::getOrCreateBreakpointSite(JSContext* cx,
+ Instance* instance,
+ uint32_t offset) {
+ WasmBreakpointSite* site;
+
+ WasmBreakpointSiteMap::AddPtr p = breakpointSites_.lookupForAdd(offset);
+ if (!p) {
+ site = cx->new_<WasmBreakpointSite>(instance->object(), offset);
+ if (!site) {
+ return nullptr;
+ }
+
+ if (!breakpointSites_.add(p, offset, site)) {
+ js_delete(site);
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ AddCellMemory(instance->object(), sizeof(WasmBreakpointSite),
+ MemoryUse::BreakpointSite);
+
+ toggleBreakpointTrap(cx->runtime(), offset, true);
+ } else {
+ site = p->value();
+ }
+ return site;
+}
+
+bool DebugState::hasBreakpointSite(uint32_t offset) {
+ return breakpointSites_.has(offset);
+}
+
+void DebugState::destroyBreakpointSite(JSFreeOp* fop, Instance* instance,
+ uint32_t offset) {
+ WasmBreakpointSiteMap::Ptr p = breakpointSites_.lookup(offset);
+ MOZ_ASSERT(p);
+ fop->delete_(instance->objectUnbarriered(), p->value(),
+ MemoryUse::BreakpointSite);
+ breakpointSites_.remove(p);
+ toggleBreakpointTrap(fop->runtime(), offset, false);
+}
+
+void DebugState::clearBreakpointsIn(JSFreeOp* fop, WasmInstanceObject* instance,
+ js::Debugger* dbg, JSObject* handler) {
+ MOZ_ASSERT(instance);
+
+ // Breakpoints hold wrappers in the instance's compartment for the handler.
+ // Make sure we don't try to search for the unwrapped handler.
+ MOZ_ASSERT_IF(handler, instance->compartment() == handler->compartment());
+
+ if (breakpointSites_.empty()) {
+ return;
+ }
+ for (WasmBreakpointSiteMap::Enum e(breakpointSites_); !e.empty();
+ e.popFront()) {
+ WasmBreakpointSite* site = e.front().value();
+ MOZ_ASSERT(site->instanceObject == instance);
+
+ Breakpoint* nextbp;
+ for (Breakpoint* bp = site->firstBreakpoint(); bp; bp = nextbp) {
+ nextbp = bp->nextInSite();
+ MOZ_ASSERT(bp->site == site);
+ if ((!dbg || bp->debugger == dbg) &&
+ (!handler || bp->getHandler() == handler)) {
+ bp->delete_(fop);
+ }
+ }
+ if (site->isEmpty()) {
+ fop->delete_(instance, site, MemoryUse::BreakpointSite);
+ e.removeFront();
+ }
+ }
+}
+
+void DebugState::toggleDebugTrap(uint32_t offset, bool enabled) {
+ MOZ_ASSERT(offset);
+ uint8_t* trap = code_->segment(Tier::Debug).base() + offset;
+ const Uint32Vector& farJumpOffsets =
+ metadata(Tier::Debug).debugTrapFarJumpOffsets;
+ if (enabled) {
+ MOZ_ASSERT(farJumpOffsets.length() > 0);
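+    // Patch the nop at `trap` into a call to the debug-trap far jump nearest
+    // to this offset (presumably so the call displacement stays in range);
+    // the search below picks that nearest far jump.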
+ size_t i = 0;
+ while (i < farJumpOffsets.length() && offset < farJumpOffsets[i]) {
+ i++;
+ }
+ if (i >= farJumpOffsets.length() ||
+ (i > 0 && offset - farJumpOffsets[i - 1] < farJumpOffsets[i] - offset))
+ i--;
+ uint8_t* farJump = code_->segment(Tier::Debug).base() + farJumpOffsets[i];
+ MacroAssembler::patchNopToCall(trap, farJump);
+ } else {
+ MacroAssembler::patchCallToNop(trap);
+ }
+}
+
+void DebugState::adjustEnterAndLeaveFrameTrapsState(JSContext* cx,
+ bool enabled) {
+ MOZ_ASSERT_IF(!enabled, enterAndLeaveFrameTrapsCounter_ > 0);
+
+ bool wasEnabled = enterAndLeaveFrameTrapsCounter_ > 0;
+ if (enabled) {
+ ++enterAndLeaveFrameTrapsCounter_;
+ } else {
+ --enterAndLeaveFrameTrapsCounter_;
+ }
+ bool stillEnabled = enterAndLeaveFrameTrapsCounter_ > 0;
+ if (wasEnabled == stillEnabled) {
+ return;
+ }
+
+ const ModuleSegment& codeSegment = code_->segment(Tier::Debug);
+ AutoWritableJitCode awjc(cx->runtime(), codeSegment.base(),
+ codeSegment.length());
+ for (const CallSite& callSite : callSites(Tier::Debug)) {
+ if (callSite.kind() != CallSite::EnterFrame &&
+ callSite.kind() != CallSite::LeaveFrame) {
+ continue;
+ }
+ toggleDebugTrap(callSite.returnAddressOffset(), stillEnabled);
+ }
+}
+
+void DebugState::ensureEnterFrameTrapsState(JSContext* cx, bool enabled) {
+ if (enterFrameTrapsEnabled_ == enabled) {
+ return;
+ }
+
+ adjustEnterAndLeaveFrameTrapsState(cx, enabled);
+
+ enterFrameTrapsEnabled_ = enabled;
+}
+
+bool DebugState::debugGetLocalTypes(uint32_t funcIndex, ValTypeVector* locals,
+ size_t* argsLength,
+ StackResults* stackResults) {
+ const ValTypeVector& args = metadata().debugFuncArgTypes[funcIndex];
+ const ValTypeVector& results = metadata().debugFuncReturnTypes[funcIndex];
+ ResultType resultType(ResultType::Vector(results));
+ *argsLength = args.length();
+ *stackResults = ABIResultIter::HasStackResults(resultType)
+ ? StackResults::HasStackResults
+ : StackResults::NoStackResults;
+ if (!locals->appendAll(args)) {
+ return false;
+ }
+
+ // Decode local var types from wasm binary function body.
+ const CodeRange& range =
+ codeRanges(Tier::Debug)[funcToCodeRangeIndex(funcIndex)];
+ // In wasm, the Code points to the function start via funcLineOrBytecode.
+ size_t offsetInModule = range.funcLineOrBytecode();
+ Decoder d(bytecode().begin() + offsetInModule, bytecode().end(),
+ offsetInModule,
+ /* error = */ nullptr);
+ return DecodeValidatedLocalEntries(d, locals);
+}
+
+bool DebugState::getGlobal(Instance& instance, uint32_t globalIndex,
+ MutableHandleValue vp) {
+ const GlobalDesc& global = metadata().globals[globalIndex];
+
+ if (global.isConstant()) {
+ LitVal value = global.constantValue();
+ switch (value.type().kind()) {
+ case ValType::I32:
+ vp.set(Int32Value(value.i32()));
+ break;
+ case ValType::I64:
+ // Just display as a Number; it's ok if we lose some precision
+ vp.set(NumberValue((double)value.i64()));
+ break;
+ case ValType::F32:
+ vp.set(NumberValue(JS::CanonicalizeNaN(value.f32())));
+ break;
+ case ValType::F64:
+ vp.set(NumberValue(JS::CanonicalizeNaN(value.f64())));
+ break;
+ case ValType::Ref:
+ // It's possible to do better. We could try some kind of hashing
+ // scheme, to make the pointer recognizable without revealing it.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ case ValType::V128:
+ // Debugger must be updated to handle this, and should be updated to
+ // handle i64 in any case.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ default:
+ MOZ_CRASH("Global constant type");
+ }
+ return true;
+ }
+
+ uint8_t* globalData = instance.globalData();
+ void* dataPtr = globalData + global.offset();
+ if (global.isIndirect()) {
+ dataPtr = *static_cast<void**>(dataPtr);
+ }
+ switch (global.type().kind()) {
+ case ValType::I32: {
+ vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
+ break;
+ }
+ case ValType::I64: {
+ // Just display as a Number; it's ok if we lose some precision
+ vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
+ break;
+ }
+ case ValType::F32: {
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
+ break;
+ }
+ case ValType::F64: {
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
+ break;
+ }
+ case ValType::Ref: {
+ // Just hide it. See above.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ }
+ case ValType::V128: {
+ // Just hide it. See above.
+ vp.set(MagicValue(JS_OPTIMIZED_OUT));
+ break;
+ }
+ default: {
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ }
+ return true;
+}
+
+bool DebugState::getSourceMappingURL(JSContext* cx,
+ MutableHandleString result) const {
+ result.set(nullptr);
+
+ for (const CustomSection& customSection : module_->customSections()) {
+ const Bytes& sectionName = customSection.name;
+ if (strlen(SourceMappingURLSectionName) != sectionName.length() ||
+ memcmp(SourceMappingURLSectionName, sectionName.begin(),
+ sectionName.length()) != 0) {
+ continue;
+ }
+
+ // Parse found "SourceMappingURL" custom section.
+ Decoder d(customSection.payload->begin(), customSection.payload->end(), 0,
+ /* error = */ nullptr);
+ uint32_t nchars;
+ if (!d.readVarU32(&nchars)) {
+ return true; // ignoring invalid section data
+ }
+ const uint8_t* chars;
+ if (!d.readBytes(nchars, &chars) || d.currentPosition() != d.end()) {
+ return true; // ignoring invalid section data
+ }
+
+ JS::UTF8Chars utf8Chars(reinterpret_cast<const char*>(chars), nchars);
+ JSString* str = JS_NewStringCopyUTF8N(cx, utf8Chars);
+ if (!str) {
+ return false;
+ }
+ result.set(str);
+ return true;
+ }
+
+ // Check presence of "SourceMap:" HTTP response header.
+ char* sourceMapURL = metadata().sourceMapURL.get();
+ if (sourceMapURL && strlen(sourceMapURL)) {
+ JS::UTF8Chars utf8Chars(sourceMapURL, strlen(sourceMapURL));
+ JSString* str = JS_NewStringCopyUTF8N(cx, utf8Chars);
+ if (!str) {
+ return false;
+ }
+ result.set(str);
+ }
+ return true;
+}
+
+void DebugState::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const {
+ code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+ module_->addSizeOfMisc(mallocSizeOf, seenMetadata, seenCode, code, data);
+}
diff --git a/js/src/wasm/WasmDebug.h b/js/src/wasm/WasmDebug.h
new file mode 100644
index 0000000000..21813ac1e6
--- /dev/null
+++ b/js/src/wasm/WasmDebug.h
@@ -0,0 +1,158 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_debug_h
+#define wasm_debug_h
+
+#include "js/HashTable.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+
+class Debugger;
+class WasmBreakpointSite;
+class WasmInstanceObject;
+
+namespace wasm {
+
+struct MetadataTier;
+
+// The generated source location for the AST node/expression. The offset field
+// refers to an offset in a binary format file.
+
+struct ExprLoc {
+ uint32_t lineno;
+ uint32_t column;
+ uint32_t offset;
+ ExprLoc() : lineno(0), column(0), offset(0) {}
+ ExprLoc(uint32_t lineno_, uint32_t column_, uint32_t offset_)
+ : lineno(lineno_), column(column_), offset(offset_) {}
+};
+
+typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>
+ StepperCounters;
+typedef HashMap<uint32_t, WasmBreakpointSite*, DefaultHasher<uint32_t>,
+ SystemAllocPolicy>
+ WasmBreakpointSiteMap;
+
+class DebugState {
+ const SharedCode code_;
+ const SharedModule module_;
+
+ // State maintained when debugging is enabled. In this case, the Code is
+ // not actually shared, but is referenced uniquely by the instance that is
+ // being debugged.
+
+ bool enterFrameTrapsEnabled_;
+ uint32_t enterAndLeaveFrameTrapsCounter_;
+ WasmBreakpointSiteMap breakpointSites_;
+ StepperCounters stepperCounters_;
+
+ void toggleDebugTrap(uint32_t offset, bool enabled);
+
+ public:
+ DebugState(const Code& code, const Module& module);
+
+ void trace(JSTracer* trc);
+ void finalize(JSFreeOp* fop);
+
+ const Bytes& bytecode() const { return module_->debugBytecode(); }
+
+ [[nodiscard]] bool getLineOffsets(size_t lineno, Vector<uint32_t>* offsets);
+ [[nodiscard]] bool getAllColumnOffsets(Vector<ExprLoc>* offsets);
+ [[nodiscard]] bool getOffsetLocation(uint32_t offset, size_t* lineno,
+ size_t* column);
+
+  // The Code can track enter/leave frame events. Any such event triggers a
+  // debug trap. The enter/leave frame events are enabled or disabled across
+  // all functions.
+
+ void adjustEnterAndLeaveFrameTrapsState(JSContext* cx, bool enabled);
+ void ensureEnterFrameTrapsState(JSContext* cx, bool enabled);
+ bool enterFrameTrapsEnabled() const { return enterFrameTrapsEnabled_; }
+
+ // When the Code is debugEnabled, individual breakpoints can be enabled or
+ // disabled at instruction offsets.
+
+ bool hasBreakpointTrapAtOffset(uint32_t offset);
+ void toggleBreakpointTrap(JSRuntime* rt, uint32_t offset, bool enabled);
+ WasmBreakpointSite* getBreakpointSite(uint32_t offset) const;
+ WasmBreakpointSite* getOrCreateBreakpointSite(JSContext* cx,
+ Instance* instance,
+ uint32_t offset);
+ bool hasBreakpointSite(uint32_t offset);
+ void destroyBreakpointSite(JSFreeOp* fop, Instance* instance,
+ uint32_t offset);
+ void clearBreakpointsIn(JSFreeOp* fp, WasmInstanceObject* instance,
+ js::Debugger* dbg, JSObject* handler);
+
+  // When the Code is debug-enabled, single-stepping mode can be toggled at
+  // the granularity of individual functions.
+
+ bool stepModeEnabled(uint32_t funcIndex) const;
+ [[nodiscard]] bool incrementStepperCount(JSContext* cx, uint32_t funcIndex);
+ void decrementStepperCount(JSFreeOp* fop, uint32_t funcIndex);
+
+ // Stack inspection helpers.
+
+ [[nodiscard]] bool debugGetLocalTypes(uint32_t funcIndex,
+ ValTypeVector* locals,
+ size_t* argsLength,
+ StackResults* stackResults);
+  // Invariant: the result of debugGetResultType can only be used as long as
+  // code_->metadata() is live. See Metadata::getFuncResultType for more
+  // information.
+ ResultType debugGetResultType(uint32_t funcIndex) const {
+ return metadata().getFuncResultType(funcIndex);
+ }
+ [[nodiscard]] bool getGlobal(Instance& instance, uint32_t globalIndex,
+ MutableHandleValue vp);
+
+ // Debug URL helpers.
+
+ [[nodiscard]] bool getSourceMappingURL(JSContext* cx,
+ MutableHandleString result) const;
+
+ // Accessors for commonly used elements of linked structures.
+
+ const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
+ const Metadata& metadata() const { return code_->metadata(); }
+ const CodeRangeVector& codeRanges(Tier t) const {
+ return metadata(t).codeRanges;
+ }
+ const CallSiteVector& callSites(Tier t) const {
+ return metadata(t).callSites;
+ }
+
+ uint32_t funcToCodeRangeIndex(uint32_t funcIndex) const {
+ return metadata(Tier::Debug).funcToCodeRange[funcIndex];
+ }
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code, size_t* data) const;
+};
+
+using UniqueDebugState = UniquePtr<DebugState>;
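+
+// Illustrative usage sketch (not part of this interface): a debugger-side
+// caller toggling a breakpoint at a given bytecode offset might look roughly
+// like this, where `debug`, `instance`, and `offset` are hypothetical:
+//
+//   if (!debug->hasBreakpointSite(offset)) {
+//     WasmBreakpointSite* site =
+//         debug->getOrCreateBreakpointSite(cx, instance, offset);
+//     if (!site) {
+//       return false;  // OOM was reported
+//     }
+//   }
+//   debug->toggleBreakpointTrap(cx->runtime(), offset, /* enabled = */ true);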
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_debug_h
diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp
new file mode 100644
index 0000000000..e7dd88ccad
--- /dev/null
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -0,0 +1,1539 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmFrameIter.h"
+
+#include "jit/JitFrames.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmStubs.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+using mozilla::Maybe;
+
+/*****************************************************************************/
+// WasmFrameIter implementation
+
+WasmFrameIter::WasmFrameIter(JitActivation* activation, wasm::Frame* fp)
+ : activation_(activation),
+ code_(nullptr),
+ codeRange_(nullptr),
+ lineOrBytecode_(0),
+ fp_(fp ? fp : activation->wasmExitFP()),
+ tls_(nullptr),
+ unwoundIonCallerFP_(nullptr),
+ unwoundIonFrameType_(jit::FrameType(-1)),
+ unwind_(Unwind::False),
+ unwoundAddressOfReturnAddress_(nullptr),
+ resumePCinCurrentFrame_(nullptr) {
+ MOZ_ASSERT(fp_);
+ tls_ = GetNearestEffectiveTls(fp_);
+
+ // When the stack is captured during a trap (viz., to create the .stack
+ // for an Error object), use the pc/bytecode information captured by the
+ // signal handler in the runtime. Take care not to use this trap unwind
+ // state for wasm frames in the middle of a JitActivation, i.e., wasm frames
+ // that called into JIT frames before the trap.
+
+ if (activation->isWasmTrapping() && fp_ == activation->wasmExitFP()) {
+ const TrapData& trapData = activation->wasmTrapData();
+ void* unwoundPC = trapData.unwoundPC;
+
+ code_ = &tls_->instance->code();
+ MOZ_ASSERT(code_ == LookupCode(unwoundPC));
+
+ codeRange_ = code_->lookupFuncRange(unwoundPC);
+ MOZ_ASSERT(codeRange_);
+
+ lineOrBytecode_ = trapData.bytecodeOffset;
+
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ // Otherwise, execution exits wasm code via an exit stub which sets exitFP
+ // to the exit stub's frame. Thus, in this case, we want to start iteration
+ // at the caller of the exit frame, whose Code, CodeRange and CallSite are
+ // indicated by the returnAddress of the exit stub's frame. If the caller
+ // was Ion, we can just skip the wasm frames.
+
+ popFrame();
+ MOZ_ASSERT(!done() || unwoundIonCallerFP_);
+}
+
+bool WasmFrameIter::done() const {
+ MOZ_ASSERT(!!fp_ == !!code_);
+ MOZ_ASSERT(!!fp_ == !!codeRange_);
+ return !fp_;
+}
+
+void WasmFrameIter::operator++() {
+ MOZ_ASSERT(!done());
+
+ // When the iterator is set to unwind, each time the iterator pops a frame,
+ // the JitActivation is updated so that the just-popped frame is no longer
+ // visible. This is necessary since Debugger::onLeaveFrame is called before
+ // popping each frame and, once onLeaveFrame is called for a given frame,
+ // that frame must not be visible to subsequent stack iteration (or it
+ // could be added as a "new" frame just as it becomes garbage). When the
+ // frame is trapping, then exitFP is included in the callstack (otherwise,
+ // it is skipped, as explained above). So to unwind the innermost frame, we
+ // just clear the trapping state.
+
+ if (unwind_ == Unwind::True) {
+ if (activation_->isWasmTrapping()) {
+ activation_->finishWasmTrap();
+ }
+ activation_->setWasmExitFP(fp_);
+ }
+
+ popFrame();
+}
+
+void WasmFrameIter::popFrame() {
+ if (fp_->callerIsExitOrJitEntryFP()) {
+    // We have run into a frame pointer with the low bit set, indicating a
+    // direct call from the JIT into the wasm function's body. The call stack
+    // resembles this at this point:
+ //
+ // |---------------------|
+ // | JIT FRAME |
+ // | JIT FAKE EXIT FRAME | <-- tagged fp_->callerFP_
+ // | WASM FRAME | <-- fp_
+ // |---------------------|
+ //
+ // fp_->callerFP_ points to the fake exit frame set up by the jit caller,
+ // and the return-address-to-fp is in JIT code, thus doesn't belong to any
+ // wasm instance's code (in particular, there's no associated CodeRange).
+ // Mark the frame as such and untag FP.
+ MOZ_ASSERT(!LookupCode(fp_->returnAddress()));
+
+ unwoundIonCallerFP_ = fp_->jitEntryCaller();
+ unwoundIonFrameType_ = FrameType::Exit;
+
+ if (unwind_ == Unwind::True) {
+ activation_->setJSExitFP(unwoundIonCallerFP());
+ unwoundAddressOfReturnAddress_ = fp_->addressOfReturnAddress();
+ }
+
+ fp_ = nullptr;
+ code_ = nullptr;
+ codeRange_ = nullptr;
+
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ Frame* prevFP = fp_;
+ fp_ = fp_->wasmCaller();
+ resumePCinCurrentFrame_ = prevFP->returnAddress();
+
+ if (!fp_) {
+ code_ = nullptr;
+ codeRange_ = nullptr;
+
+ if (unwind_ == Unwind::True) {
+ // We're exiting via the interpreter entry; we can safely reset
+ // exitFP.
+ activation_->setWasmExitFP(nullptr);
+ unwoundAddressOfReturnAddress_ = prevFP->addressOfReturnAddress();
+ }
+
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ void* returnAddress = prevFP->returnAddress();
+ code_ = LookupCode(returnAddress, &codeRange_);
+ MOZ_ASSERT(codeRange_);
+
+ if (codeRange_->isJitEntry()) {
+ // This wasm function has been called through the generic JIT entry by
+ // a JIT caller, so the call stack resembles this:
+ //
+ // |---------------------|
+ // | JIT FRAME |
+ // | JSJIT TO WASM EXIT | <-- fp_
+ // | WASM JIT ENTRY | <-- prevFP (already unwound)
+ // | WASM FRAME | (already unwound)
+ // |---------------------|
+ //
+ // The next value of FP is just a regular jit frame used as a marker to
+ // know that we should transition to a JSJit frame iterator.
+ unwoundIonCallerFP_ = reinterpret_cast<uint8_t*>(fp_);
+ unwoundIonFrameType_ = FrameType::JSJitToWasm;
+
+ fp_ = nullptr;
+ code_ = nullptr;
+ codeRange_ = nullptr;
+
+ if (unwind_ == Unwind::True) {
+ activation_->setJSExitFP(unwoundIonCallerFP());
+ unwoundAddressOfReturnAddress_ = prevFP->addressOfReturnAddress();
+ }
+
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(codeRange_->kind() == CodeRange::Function);
+
+ const CallSite* callsite = code_->lookupCallSite(returnAddress);
+ MOZ_ASSERT(callsite);
+
+ if (callsite->mightBeCrossInstance()) {
+ tls_ = ExtractCallerTlsFromFrameWithTls(prevFP);
+ }
+
+ MOZ_ASSERT(code_ == &tls()->instance->code());
+ lineOrBytecode_ = callsite->lineOrBytecode();
+
+ MOZ_ASSERT(!done());
+}
+
+const char* WasmFrameIter::filename() const {
+ MOZ_ASSERT(!done());
+ return code_->metadata().filename.get();
+}
+
+const char16_t* WasmFrameIter::displayURL() const {
+ MOZ_ASSERT(!done());
+ return code_->metadata().displayURL();
+}
+
+bool WasmFrameIter::mutedErrors() const {
+ MOZ_ASSERT(!done());
+ return code_->metadata().mutedErrors();
+}
+
+JSAtom* WasmFrameIter::functionDisplayAtom() const {
+ MOZ_ASSERT(!done());
+
+ JSContext* cx = activation_->cx();
+ JSAtom* atom = instance()->getFuncDisplayAtom(cx, codeRange_->funcIndex());
+ if (!atom) {
+ cx->clearPendingException();
+ return cx->names().empty;
+ }
+
+ return atom;
+}
+
+unsigned WasmFrameIter::lineOrBytecode() const {
+ MOZ_ASSERT(!done());
+ return lineOrBytecode_;
+}
+
+uint32_t WasmFrameIter::funcIndex() const {
+ MOZ_ASSERT(!done());
+ return codeRange_->funcIndex();
+}
+
+unsigned WasmFrameIter::computeLine(uint32_t* column) const {
+ if (instance()->isAsmJS()) {
+ if (column) {
+ *column = 1;
+ }
+ return lineOrBytecode_;
+ }
+
+ // As a terrible hack to avoid changing the tons of places that pass around
+ // (url, line, column) tuples to instead passing around a Variant that
+ // stores a (url, func-index, bytecode-offset) tuple for wasm frames,
+ // wasm stuffs its tuple into the existing (url, line, column) tuple,
+ // tagging the high bit of the column to indicate "this is a wasm frame".
+ // When knowing clients see this bit, they shall render the tuple
+ // (url, line, column|bit) as "url:wasm-function[column]:0xline" according
+ // to the WebAssembly Web API's Developer-Facing Display Conventions.
+ // https://webassembly.github.io/spec/web-api/index.html#conventions
+ // The wasm bytecode offset continues to be passed as the JS line to avoid
+ // breaking existing devtools code written when this used to be the case.
+
+ MOZ_ASSERT(!(codeRange_->funcIndex() & ColumnBit));
+ if (column) {
+ *column = codeRange_->funcIndex() | ColumnBit;
+ }
+ return lineOrBytecode_;
+}
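+
+// Illustrative sketch (hypothetical client-side code, not part of this patch)
+// of how a consumer of the convention above could decode the tuple:
+//
+//   bool isWasmFrame = column & WasmFrameIter::ColumnBit;
+//   uint32_t funcIndex = column & ~WasmFrameIter::ColumnBit;
+//   uint32_t bytecodeOffset = line;
+//   // Rendered as "url:wasm-function[funcIndex]:0x<bytecodeOffset in hex>".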
+
+Instance* WasmFrameIter::instance() const {
+ MOZ_ASSERT(!done());
+ return tls_->instance;
+}
+
+void** WasmFrameIter::unwoundAddressOfReturnAddress() const {
+ MOZ_ASSERT(done());
+ MOZ_ASSERT(unwind_ == Unwind::True);
+ MOZ_ASSERT(unwoundAddressOfReturnAddress_);
+ return unwoundAddressOfReturnAddress_;
+}
+
+bool WasmFrameIter::debugEnabled() const {
+ MOZ_ASSERT(!done());
+
+ // Only non-imported functions can have debug frames.
+ //
+ // Metadata::debugEnabled is only set if debugging is actually enabled (both
+ // requested, and available via baseline compilation), and Tier::Debug code
+ // will be available.
+ return code_->metadata().debugEnabled &&
+ codeRange_->funcIndex() >=
+ code_->metadata(Tier::Debug).funcImports.length();
+}
+
+DebugFrame* WasmFrameIter::debugFrame() const {
+ MOZ_ASSERT(!done());
+ return DebugFrame::from(fp_);
+}
+
+jit::FrameType WasmFrameIter::unwoundIonFrameType() const {
+ MOZ_ASSERT(unwoundIonCallerFP_);
+ MOZ_ASSERT(unwoundIonFrameType_ != jit::FrameType(-1));
+ return unwoundIonFrameType_;
+}
+
+uint8_t* WasmFrameIter::resumePCinCurrentFrame() const {
+ if (resumePCinCurrentFrame_) {
+ return resumePCinCurrentFrame_;
+ }
+ MOZ_ASSERT(activation_->isWasmTrapping());
+ // The next instruction is the instruction following the trap instruction.
+ return (uint8_t*)activation_->wasmTrapData().resumePC;
+}
+
+/*****************************************************************************/
+// Prologue/epilogue code generation
+
+// These constants reflect statically-determined offsets in the
+// prologue/epilogue. The offsets are dynamically asserted during code
+// generation.
+#if defined(JS_CODEGEN_X64)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 1;
+static const unsigned SetFP = 4;
+static const unsigned PoppedFP = 0;
+#elif defined(JS_CODEGEN_X86)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 1;
+static const unsigned SetFP = 3;
+static const unsigned PoppedFP = 0;
+#elif defined(JS_CODEGEN_ARM)
+static const unsigned BeforePushRetAddr = 0;
+static const unsigned PushedRetAddr = 4;
+static const unsigned PushedFP = 8;
+static const unsigned SetFP = 12;
+static const unsigned PoppedFP = 0;
+#elif defined(JS_CODEGEN_ARM64)
+// On ARM64 we do not use push or pop; the prologues and epilogues are
+// structured differently due to restrictions on SP alignment. Even so,
+// PushedRetAddr and PushedFP are used in some restricted contexts
+// and must be superficially meaningful.
+static const unsigned BeforePushRetAddr = 0;
+static const unsigned PushedRetAddr = 8;
+static const unsigned PushedFP = 12;
+static const unsigned SetFP = 16;
+static const unsigned PoppedFP = 4;
+static_assert(BeforePushRetAddr == 0, "Required by StartUnwinding");
+static_assert(PushedFP > PushedRetAddr, "Required by StartUnwinding");
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+static const unsigned PushedRetAddr = 8;
+static const unsigned PushedFP = 12;
+static const unsigned SetFP = 16;
+static const unsigned PoppedFP = 4;
+#elif defined(JS_CODEGEN_NONE)
+// Synthetic values to satisfy asserts and avoid compiler warnings.
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 1;
+static const unsigned SetFP = 2;
+static const unsigned PoppedFP = 3;
+#else
+# error "Unknown architecture!"
+#endif
+static constexpr unsigned SetJitEntryFP = PushedRetAddr + SetFP - PushedFP;
+
+static void LoadActivation(MacroAssembler& masm, const Register& dest) {
+ // WasmCall pushes a JitActivation.
+ masm.loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, cx)), dest);
+ masm.loadPtr(Address(dest, JSContext::offsetOfActivation()), dest);
+}
+
+void wasm::SetExitFP(MacroAssembler& masm, ExitReason reason,
+ Register scratch) {
+ MOZ_ASSERT(!reason.isNone());
+
+ LoadActivation(masm, scratch);
+
+ masm.store32(
+ Imm32(reason.encode()),
+ Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));
+
+ masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
+ masm.storePtr(FramePointer,
+ Address(scratch, JitActivation::offsetOfPackedExitFP()));
+ masm.andPtr(Imm32(int32_t(~ExitOrJitEntryFPTag)), FramePointer);
+}
+
+void wasm::ClearExitFP(MacroAssembler& masm, Register scratch) {
+ LoadActivation(masm, scratch);
+ masm.storePtr(ImmWord(0x0),
+ Address(scratch, JitActivation::offsetOfPackedExitFP()));
+ masm.store32(
+ Imm32(0x0),
+ Address(scratch, JitActivation::offsetOfEncodedWasmExitReason()));
+}
+
+static void GenerateCallablePrologue(MacroAssembler& masm, uint32_t* entry) {
+ masm.setFramePushed(0);
+
+ // ProfilingFrameIterator needs to know the offsets of several key
+ // instructions from entry. To save space, we make these offsets static
+ // constants and assert that they match the actual codegen below. On ARM,
+ // this requires AutoForbidPoolsAndNops to prevent a constant pool from being
+ // randomly inserted between two instructions.
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ {
+ *entry = masm.currentOffset();
+
+ masm.subFromStackPtr(Imm32(sizeof(Frame)));
+ masm.storePtr(ra, Address(StackPointer, Frame::returnAddressOffset()));
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.storePtr(FramePointer, Address(StackPointer, Frame::callerFPOffset()));
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+ }
+#elif defined(JS_CODEGEN_ARM64)
+ {
+ // We do not use the PseudoStackPointer.
+ MOZ_ASSERT(masm.GetStackPointer64().code() == sp.code());
+
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 4);
+
+ *entry = masm.currentOffset();
+
+ masm.Sub(sp, sp, sizeof(Frame));
+ masm.Str(ARMRegister(lr, 64), MemOperand(sp, Frame::returnAddressOffset()));
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.Str(ARMRegister(FramePointer, 64),
+ MemOperand(sp, Frame::callerFPOffset()));
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.Mov(ARMRegister(FramePointer, 64), sp);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+ }
+#else
+ {
+# if defined(JS_CODEGEN_ARM)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 6);
+
+ *entry = masm.currentOffset();
+
+ static_assert(BeforePushRetAddr == 0);
+ masm.push(lr);
+# else
+ *entry = masm.currentOffset();
+ // The x86/x64 call instruction pushes the return address.
+# endif
+
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.push(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+ }
+#endif
+}
+
+static void GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, uint32_t* ret) {
+ if (framePushed) {
+ masm.freeStack(framePushed);
+ }
+
+ if (!reason.isNone()) {
+ ClearExitFP(masm, ABINonArgReturnVolatileReg);
+ }
+
+ DebugOnly<uint32_t> poppedFP;
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+
+ masm.loadPtr(Address(StackPointer, Frame::callerFPOffset()), FramePointer);
+ poppedFP = masm.currentOffset();
+ masm.loadPtr(Address(StackPointer, Frame::returnAddressOffset()), ra);
+
+ *ret = masm.currentOffset();
+ masm.as_jr(ra);
+ masm.addToStackPtr(Imm32(sizeof(Frame)));
+
+#elif defined(JS_CODEGEN_ARM64)
+
+ // We do not use the PseudoStackPointer.
+ MOZ_ASSERT(masm.GetStackPointer64().code() == sp.code());
+
+ AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 4);
+
+ masm.Ldr(ARMRegister(FramePointer, 64),
+ MemOperand(sp, Frame::callerFPOffset()));
+ poppedFP = masm.currentOffset();
+
+ masm.Ldr(ARMRegister(lr, 64), MemOperand(sp, Frame::returnAddressOffset()));
+ *ret = masm.currentOffset();
+
+ masm.Add(sp, sp, sizeof(Frame));
+ masm.Ret(ARMRegister(lr, 64));
+
+#else
+ // Forbid pools for the same reason as described in GenerateCallablePrologue.
+# if defined(JS_CODEGEN_ARM)
+ AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 6);
+# endif
+
+ // There is an important ordering constraint here: fp must be repointed to
+ // the caller's frame before any field of the frame currently pointed to by
+ // fp is popped: asynchronous signal handlers (which use stack space
+ // starting at sp) could otherwise clobber these fields while they are still
+ // accessible via fp (fp fields are read during frame iteration which is
+ // *also* done asynchronously).
+
+ masm.pop(FramePointer);
+ poppedFP = masm.currentOffset();
+
+ *ret = masm.currentOffset();
+ masm.ret();
+
+#endif
+
+ MOZ_ASSERT_IF(!masm.oom(), PoppedFP == *ret - poppedFP);
+}
+
+static void EnsureOffset(MacroAssembler& masm, uint32_t base,
+ uint32_t targetOffset) {
+ MOZ_ASSERT(targetOffset % CodeAlignment == 0);
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() - base <= targetOffset);
+
+ while (masm.currentOffset() - base < targetOffset) {
+ masm.nopAlign(CodeAlignment);
+ if (masm.currentOffset() - base < targetOffset) {
+ masm.nop();
+ }
+ }
+
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() - base == targetOffset);
+}
+
+void wasm::GenerateFunctionPrologue(MacroAssembler& masm,
+ const TypeIdDesc& funcTypeId,
+ const Maybe<uint32_t>& tier1FuncIndex,
+ FuncOffsets* offsets) {
+  // These constants reflect statically-determined offsets of a function's
+  // checked call entry and checked tail entry.
+ static_assert(WasmCheckedCallEntryOffset % CodeAlignment == 0,
+ "code aligned");
+ static_assert(WasmCheckedTailEntryOffset % CodeAlignment == 0,
+ "code aligned");
+
+ // Flush pending pools so they do not get dumped between the 'begin' and
+ // 'uncheckedCallEntry' offsets since the difference must be less than
+ // UINT8_MAX to be stored in CodeRange::funcbeginToUncheckedCallEntry_.
+ masm.flushBuffer();
+ masm.haltingAlign(CodeAlignment);
+
+  // We are going to generate the following code layout:
+ // ---------------------------------------------
+ // checked call entry: callable prologue
+ // checked tail entry: check signature
+ // jump functionBody
+ // unchecked call entry: callable prologue
+ // functionBody
+ // -----------------------------------------------
+ // checked call entry - used for call_indirect when we have to check the
+ // signature.
+  // checked tail entry - used by trampolines which have already pushed a
+  //                      Frame on the callee’s behalf.
+ // unchecked call entry - used for regular direct same-instance calls.
+
+ Label functionBody;
+
+ // Generate checked call entry. The BytecodeOffset of the trap is fixed up to
+ // be the bytecode offset of the callsite by JitActivation::startWasmTrap.
+ offsets->begin = masm.currentOffset();
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() - offsets->begin ==
+ WasmCheckedCallEntryOffset);
+ uint32_t dummy;
+ GenerateCallablePrologue(masm, &dummy);
+
+ EnsureOffset(masm, offsets->begin, WasmCheckedTailEntryOffset);
+ switch (funcTypeId.kind()) {
+ case TypeIdDescKind::Global: {
+ Register scratch = WasmTableCallScratchReg0;
+ masm.loadWasmGlobalPtr(funcTypeId.globalDataOffset(), scratch);
+ masm.branchPtr(Assembler::Condition::Equal, WasmTableCallSigReg, scratch,
+ &functionBody);
+ masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
+ break;
+ }
+ case TypeIdDescKind::Immediate: {
+ masm.branch32(Assembler::Condition::Equal, WasmTableCallSigReg,
+ Imm32(funcTypeId.immediate()), &functionBody);
+ masm.wasmTrap(Trap::IndirectCallBadSig, BytecodeOffset(0));
+ break;
+ }
+ case TypeIdDescKind::None:
+ masm.jump(&functionBody);
+ break;
+ }
+
+  // The checked entries might have generated a small constant pool in the
+  // case of an immediate comparison.
+ masm.flushBuffer();
+
+ // Generate unchecked call entry:
+ masm.nopAlign(CodeAlignment);
+ GenerateCallablePrologue(masm, &offsets->uncheckedCallEntry);
+ masm.bind(&functionBody);
+
+ // Tiering works as follows. The Code owns a jumpTable, which has one
+ // pointer-sized element for each function up to the largest funcIndex in
+ // the module. Each table element is an address into the Tier-1 or the
+ // Tier-2 function at that index; the elements are updated when Tier-2 code
+ // becomes available. The Tier-1 function will unconditionally jump to this
+ // address. The table elements are written racily but without tearing when
+ // Tier-2 compilation is finished.
+ //
+ // The address in the table is either to the instruction following the jump
+ // in Tier-1 code, or into the function prologue after the standard setup in
+ // Tier-2 code. Effectively, Tier-1 code performs standard frame setup on
+ // behalf of whatever code it jumps to, and the target code allocates its
+ // own frame in whatever way it wants.
+ if (tier1FuncIndex) {
+ Register scratch = ABINonArgReg0;
+ masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, jumpTable)), scratch);
+ masm.jump(Address(scratch, *tier1FuncIndex * sizeof(uintptr_t)));
+ }
+
+ offsets->tierEntry = masm.currentOffset();
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
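+
+// Illustrative sketch of the tier-up step described above (the actual update
+// lives in the tiering machinery elsewhere; the function name here is
+// hypothetical):
+//
+//   void PatchJumpTableEntry(uintptr_t* jumpTable, uint32_t funcIndex,
+//                            const uint8_t* tier2Entry) {
+//     // A plain word-sized store: racy with respect to concurrently running
+//     // Tier-1 code, but word-sized stores do not tear, so readers see either
+//     // the old Tier-1 target or the new Tier-2 target.
+//     jumpTable[funcIndex] = uintptr_t(tier2Entry);
+//   }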
+
+void wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed,
+ FuncOffsets* offsets) {
+ // Inverse of GenerateFunctionPrologue:
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+ GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
+ &offsets->ret);
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
+
+void wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets) {
+ masm.haltingAlign(CodeAlignment);
+
+ GenerateCallablePrologue(masm, &offsets->begin);
+
+ // This frame will be exiting compiled code to C++ so record the fp and
+ // reason in the JitActivation so the frame iterators can unwind.
+ SetExitFP(masm, reason, ABINonArgReturnVolatileReg);
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ masm.reserveStack(framePushed);
+}
+
+void wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets) {
+ // Inverse of GenerateExitPrologue:
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+ GenerateCallableEpilogue(masm, framePushed, reason, &offsets->ret);
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
+
+static void AssertNoWasmExitFPInJitExit(MacroAssembler& masm) {
+ // As a general stack invariant, if Activation::packedExitFP is tagged as
+ // wasm, it must point to a valid wasm::Frame. The JIT exit stub calls into
+  // JIT code and thus does not really exit. Hence, when entering/leaving the
+  // JIT exit stub from/to normal wasm code, packedExitFP is not tagged as
+  // wasm.
+#ifdef DEBUG
+ Register scratch = ABINonArgReturnReg0;
+ LoadActivation(masm, scratch);
+
+ Label ok;
+ masm.branchTestPtr(Assembler::Zero,
+ Address(scratch, JitActivation::offsetOfPackedExitFP()),
+ Imm32(ExitOrJitEntryFPTag), &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+}
+
+void wasm::GenerateJitExitPrologue(MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets) {
+ masm.haltingAlign(CodeAlignment);
+
+ GenerateCallablePrologue(masm, &offsets->begin);
+ AssertNoWasmExitFPInJitExit(masm);
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ masm.reserveStack(framePushed);
+}
+
+void wasm::GenerateJitExitEpilogue(MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets) {
+ // Inverse of GenerateJitExitPrologue:
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+ AssertNoWasmExitFPInJitExit(masm);
+ GenerateCallableEpilogue(masm, framePushed, ExitReason::None(),
+ &offsets->ret);
+ MOZ_ASSERT(masm.framePushed() == 0);
+}
+
+void wasm::GenerateJitEntryPrologue(MacroAssembler& masm, Offsets* offsets) {
+ masm.haltingAlign(CodeAlignment);
+
+ {
+#if defined(JS_CODEGEN_ARM)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 2);
+ offsets->begin = masm.currentOffset();
+ static_assert(BeforePushRetAddr == 0);
+ masm.push(lr);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ offsets->begin = masm.currentOffset();
+ masm.push(ra);
+#elif defined(JS_CODEGEN_ARM64)
+ AutoForbidPoolsAndNops afp(&masm,
+ /* number of instructions in scope = */ 3);
+ offsets->begin = masm.currentOffset();
+ static_assert(BeforePushRetAddr == 0);
+ // Subtract from SP first as SP must be aligned before offsetting.
+ masm.Sub(sp, sp, 8);
+ masm.storePtr(lr, Address(masm.getStackPointer(), 0));
+ masm.adjustFrame(8);
+#else
+ // The x86/x64 call instruction pushes the return address.
+ offsets->begin = masm.currentOffset();
+#endif
+ MOZ_ASSERT_IF(!masm.oom(),
+ PushedRetAddr == masm.currentOffset() - offsets->begin);
+
+ // Save jit frame pointer, so unwinding from wasm to jit frames is trivial.
+ masm.moveStackPtrTo(FramePointer);
+ MOZ_ASSERT_IF(!masm.oom(),
+ SetJitEntryFP == masm.currentOffset() - offsets->begin);
+ }
+
+ masm.setFramePushed(0);
+}
+
+/*****************************************************************************/
+// ProfilingFrameIterator
+
+ProfilingFrameIterator::ProfilingFrameIterator()
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundIonCallerFP_(nullptr),
+ exitReason_(ExitReason::Fixed::None) {
+ MOZ_ASSERT(done());
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation)
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundIonCallerFP_(nullptr),
+ exitReason_(activation.wasmExitReason()) {
+ initFromExitFP(activation.wasmExitFP());
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const Frame* fp)
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundIonCallerFP_(nullptr),
+ exitReason_(ExitReason::Fixed::ImportJit) {
+ MOZ_ASSERT(fp);
+ initFromExitFP(fp);
+}
+
+static inline void AssertDirectJitCall(const void* fp) {
+ // Called via an inlined fast JIT to wasm call: in this case, FP is
+ // pointing in the middle of the exit frame, right before the exit
+ // footer; ensure the exit frame type is the expected one.
+#ifdef DEBUG
+ if (Frame::isExitOrJitEntryFP(fp)) {
+ fp = Frame::toJitEntryCaller(fp);
+ }
+ auto* jitCaller = (ExitFrameLayout*)fp;
+ MOZ_ASSERT(jitCaller->footer()->type() ==
+ jit::ExitFrameType::DirectWasmJitCall);
+#endif
+}
+
+static inline void AssertMatchesCallSite(void* callerPC, uint8_t* callerFP) {
+#ifdef DEBUG
+ const CodeRange* callerCodeRange;
+ const Code* code = LookupCode(callerPC, &callerCodeRange);
+
+ if (!code) {
+ AssertDirectJitCall(callerFP);
+ return;
+ }
+
+ MOZ_ASSERT(callerCodeRange);
+
+ if (callerCodeRange->isInterpEntry()) {
+ MOZ_ASSERT(callerFP == nullptr);
+ return;
+ }
+
+ if (callerCodeRange->isJitEntry()) {
+ MOZ_ASSERT(callerFP != nullptr);
+ return;
+ }
+
+ const CallSite* callsite = code->lookupCallSite(callerPC);
+ MOZ_ASSERT(callsite);
+#endif
+}
+
+void ProfilingFrameIterator::initFromExitFP(const Frame* fp) {
+ MOZ_ASSERT(fp);
+ stackAddress_ = (void*)fp;
+ code_ = LookupCode(fp->returnAddress(), &codeRange_);
+
+ if (!code_) {
+ // This is a direct call from the JIT, the caller FP is pointing to a
+ // tagged JIT caller's frame.
+ AssertDirectJitCall(fp->jitEntryCaller());
+
+ unwoundIonCallerFP_ = fp->jitEntryCaller();
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(codeRange_);
+
+ // Since we don't have the pc for fp, start unwinding at the caller of fp.
+ // This means that the innermost frame is skipped. This is fine because:
+ // - for import exit calls, the innermost frame is a thunk, so the first
+ // frame that shows up is the function calling the import;
+ // - for Math and other builtin calls, we note the absence of an exit
+  //   reason and inject a fake "builtin" frame.
+ switch (codeRange_->kind()) {
+ case CodeRange::InterpEntry:
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ codeRange_ = nullptr;
+ exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
+ break;
+ case CodeRange::JitEntry:
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ unwoundIonCallerFP_ = fp->rawCaller();
+ break;
+ case CodeRange::Function:
+ fp = fp->wasmCaller();
+ callerPC_ = fp->returnAddress();
+ callerFP_ = fp->rawCaller();
+ AssertMatchesCallSite(callerPC_, callerFP_);
+ break;
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::BuiltinThunk:
+ case CodeRange::TrapExit:
+ case CodeRange::DebugTrap:
+ case CodeRange::Throw:
+ case CodeRange::FarJumpIsland:
+ MOZ_CRASH("Unexpected CodeRange kind");
+ }
+
+ MOZ_ASSERT(!done());
+}
+
+static bool isSignatureCheckFail(uint32_t offsetInCode,
+ const CodeRange* codeRange) {
+ if (!codeRange->isFunction()) {
+ return false;
+ }
+ // checked call entry: 1. push Frame
+ // 2. set FP
+ // 3. signature check <--- check if we are here.
+ // 4. jump 7
+ // unchecked call entry: 5. push Frame
+ // 6. set FP
+ // 7. function's code
+ return offsetInCode < codeRange->funcUncheckedCallEntry() &&
+ (offsetInCode - codeRange->funcCheckedCallEntry()) > SetFP;
+}
+
+const TlsData* js::wasm::GetNearestEffectiveTls(const Frame* fp) {
+ while (true) {
+ if (fp->callerIsExitOrJitEntryFP()) {
+ // It is a direct call from JIT.
+ MOZ_ASSERT(!LookupCode(fp->returnAddress()));
+ return ExtractCalleeTlsFromFrameWithTls(fp);
+ }
+
+ uint8_t* returnAddress = fp->returnAddress();
+ const CodeRange* codeRange = nullptr;
+ const Code* code = LookupCode(returnAddress, &codeRange);
+ MOZ_ASSERT(codeRange);
+
+ if (codeRange->isEntry()) {
+ return ExtractCalleeTlsFromFrameWithTls(fp);
+ }
+
+ MOZ_ASSERT(codeRange->kind() == CodeRange::Function);
+ MOZ_ASSERT(code);
+ const CallSite* callsite = code->lookupCallSite(returnAddress);
+ if (callsite->mightBeCrossInstance()) {
+ return ExtractCalleeTlsFromFrameWithTls(fp);
+ }
+
+ fp = fp->wasmCaller();
+ }
+}
+
+TlsData* js::wasm::GetNearestEffectiveTls(Frame* fp) {
+ return const_cast<TlsData*>(
+ GetNearestEffectiveTls(const_cast<const Frame*>(fp)));
+}
+
+bool js::wasm::StartUnwinding(const RegisterState& registers,
+ UnwindState* unwindState, bool* unwoundCaller) {
+ // Shorthands.
+ uint8_t* const pc = (uint8_t*)registers.pc;
+ void** const sp = (void**)registers.sp;
+
+ // The frame pointer might be:
+ // - in the process of tagging/untagging when calling into the JITs;
+ // make sure it's untagged.
+  // - tagged by a direct JIT call.
+  // - unreliable if it has not been set yet, in prologues.
+ uint8_t* fp = Frame::isExitOrJitEntryFP(registers.fp)
+ ? Frame::toJitEntryCaller(registers.fp)
+ : reinterpret_cast<uint8_t*>(registers.fp);
+
+ // Get the CodeRange describing pc and the base address to which the
+ // CodeRange is relative. If the pc is not in a wasm module or a builtin
+ // thunk, then execution must be entering from or leaving to the C++ caller
+ // that pushed the JitActivation.
+ const CodeRange* codeRange;
+ uint8_t* codeBase;
+ const Code* code = nullptr;
+
+ const CodeSegment* codeSegment = LookupCodeSegment(pc, &codeRange);
+ if (codeSegment) {
+ code = &codeSegment->code();
+ codeBase = codeSegment->base();
+ MOZ_ASSERT(codeRange);
+ } else if (!LookupBuiltinThunk(pc, &codeRange, &codeBase)) {
+ return false;
+ }
+
+ // When the pc is inside the prologue/epilogue, the innermost call's Frame
+ // is not complete and thus fp points to the second-to-innermost call's
+ // Frame. Since fp can only tell you about its caller, naively unwinding
+ // while pc is in the prologue/epilogue would skip the second-to-innermost
+ // call. To avoid this problem, we use the static structure of the code in
+ // the prologue and epilogue to do the Right Thing.
+ uint32_t offsetInCode = pc - codeBase;
+ MOZ_ASSERT(offsetInCode >= codeRange->begin());
+ MOZ_ASSERT(offsetInCode < codeRange->end());
+
+ // Compute the offset of the pc from the (unchecked call) entry of the code
+  // range. The checked call entry and the unchecked call entry share a common
+  // prefix, so a pc before the signature check in the checked call entry is
+  // equivalent to the corresponding pc in the unchecked call entry. Thus, we
+  // can simplify the case analysis below by redirecting all
+  // pc-in-checked-call-entry-before-signature-check cases to the
+  // pc-at-unchecked-call-entry case.
+ uint32_t offsetFromEntry;
+ if (codeRange->isFunction()) {
+ if (offsetInCode < codeRange->funcUncheckedCallEntry()) {
+ offsetFromEntry = offsetInCode - codeRange->funcCheckedCallEntry();
+ } else {
+ offsetFromEntry = offsetInCode - codeRange->funcUncheckedCallEntry();
+ }
+ } else {
+ offsetFromEntry = offsetInCode - codeRange->begin();
+ }
+
+ // Most cases end up unwinding to the caller state; not unwinding is the
+ // exception here.
+ *unwoundCaller = true;
+
+ uint8_t* fixedFP = nullptr;
+ void* fixedPC = nullptr;
+ switch (codeRange->kind()) {
+ case CodeRange::Function:
+ case CodeRange::FarJumpIsland:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::BuiltinThunk:
+ case CodeRange::DebugTrap:
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ if (codeRange->isThunk()) {
+        // The FarJumpIsland sequence temporarily scrambles ra.
+        // Don't unwind to the caller.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(
+ Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ } else if (offsetFromEntry < PushedFP) {
+        // On MIPS we rely on the register state instead of state saved on
+        // the stack until the wasm::Frame is completely built.
+ // On entry the return address is in ra (registers.lr) and
+ // fp holds the caller's fp.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#elif defined(JS_CODEGEN_ARM64)
+ if (offsetFromEntry < PushedFP || codeRange->isThunk()) {
+ // Constraints above ensure that this covers BeforePushRetAddr and
+ // PushedRetAddr.
+ //
+ // On ARM64 we subtract the size of the Frame from SP and then store
+ // values into the stack. Execution can be interrupted at various
+ // places in that sequence. We rely on the register state for our
+ // values.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#elif defined(JS_CODEGEN_ARM)
+ if (offsetFromEntry == BeforePushRetAddr || codeRange->isThunk()) {
+ // The return address is still in lr and fp holds the caller's fp.
+ fixedPC = (uint8_t*)registers.lr;
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else
+#endif
+ if (offsetFromEntry == PushedRetAddr || codeRange->isThunk()) {
+ // The return address has been pushed on the stack but fp still
+ // points to the caller's fp.
+ fixedPC = sp[0];
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else if (offsetFromEntry == PushedFP) {
+ // The full Frame has been pushed; fp is still the caller's fp.
+ const auto* frame = Frame::fromUntaggedWasmExitFP(sp);
+ DebugOnly<const uint8_t*> caller = frame->callerIsExitOrJitEntryFP()
+ ? frame->jitEntryCaller()
+ : frame->rawCaller();
+ MOZ_ASSERT(caller == fp);
+ fixedPC = frame->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode <= codeRange->ret()) {
+ // The fixedFP field of the Frame has been loaded into fp.
+ // The ra and TLS might also be loaded, but the Frame structure is
+      // still on the stack, so we can access the ra from there.
+ MOZ_ASSERT(*sp == fp);
+ fixedPC = Frame::fromUntaggedWasmExitFP(sp)->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#elif defined(JS_CODEGEN_ARM64)
+ // The stack pointer does not move until all values have
+ // been restored so several cases can be coalesced here.
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode <= codeRange->ret()) {
+ fixedPC = Frame::fromUntaggedWasmExitFP(sp)->returnAddress();
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#else
+ } else if (offsetInCode >= codeRange->ret() - PoppedFP &&
+ offsetInCode < codeRange->ret()) {
+ // The fixedFP field of the Frame has been popped into fp.
+ fixedPC = sp[1];
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ } else if (offsetInCode == codeRange->ret()) {
+ // Both the TLS and fixedFP fields have been popped and fp now
+ // points to the caller's frame.
+ fixedPC = sp[0];
+ fixedFP = fp;
+ AssertMatchesCallSite(fixedPC, fixedFP);
+#endif
+ } else {
+ if (codeRange->kind() == CodeRange::ImportJitExit) {
+ // The jit exit contains a range where the value of FP can't be
+ // trusted. Technically, we could recover fp from sp, but since
+ // the range is so short, for now just drop the stack.
+ if (offsetInCode >= codeRange->jitExitUntrustedFPStart() &&
+ offsetInCode < codeRange->jitExitUntrustedFPEnd()) {
+ return false;
+ }
+ }
+
+ if (isSignatureCheckFail(offsetInCode, codeRange)) {
+        // The Frame has been pushed and FP has been set.
+ const auto* frame = Frame::fromUntaggedWasmExitFP(fp);
+ fixedFP = frame->rawCaller();
+ fixedPC = frame->returnAddress();
+ AssertMatchesCallSite(fixedPC, fixedFP);
+ break;
+ }
+
+ // Not in the prologue/epilogue.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(
+ Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ break;
+ }
+ break;
+ case CodeRange::TrapExit:
+ // These code stubs execute after the prologue/epilogue have completed
+ // so pc/fp contains the right values here.
+ fixedPC = pc;
+ fixedFP = fp;
+ *unwoundCaller = false;
+ AssertMatchesCallSite(Frame::fromUntaggedWasmExitFP(fp)->returnAddress(),
+ Frame::fromUntaggedWasmExitFP(fp)->rawCaller());
+ break;
+ case CodeRange::InterpEntry:
+      // The entry trampoline is the final frame in a wasm JitActivation. The
+      // entry trampoline also doesn't use GeneratePrologue/Epilogue, so we
+      // can't use the general unwinding logic above.
+ break;
+ case CodeRange::JitEntry:
+ // There's a jit frame above the current one; we don't care about pc
+ // since the Jit entry frame is a jit frame which can be considered as
+ // an exit frame.
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ if (offsetFromEntry < PushedRetAddr) {
+ // We haven't pushed the jit return address yet, thus the jit
+ // frame is incomplete. During profiling frame iteration, it means
+ // that the jit profiling frame iterator won't be able to unwind
+ // this frame; drop it.
+ return false;
+ }
+#endif
+ fixedFP =
+ offsetFromEntry < SetJitEntryFP ? reinterpret_cast<uint8_t*>(sp) : fp;
+ fixedPC = nullptr;
+
+ // On the error return path, FP might be set to FailFP. Ignore these
+ // transient frames.
+ if (intptr_t(fixedFP) == (FailFP & ~ExitOrJitEntryFPTag)) {
+ return false;
+ }
+ break;
+ case CodeRange::Throw:
+ // The throw stub executes a small number of instructions before popping
+ // the entire activation. To simplify testing, we simply pretend throw
+ // stubs have already popped the entire stack.
+ return false;
+ }
+
+ unwindState->code = code;
+ unwindState->codeRange = codeRange;
+ unwindState->fp = fixedFP;
+ unwindState->pc = fixedPC;
+ return true;
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const JitActivation& activation,
+ const RegisterState& state)
+ : code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ unwoundIonCallerFP_(nullptr),
+ exitReason_(ExitReason::Fixed::None) {
+  // Let wasmExitFP take precedence over StartUnwinding when it is set, since
+  // during the body of an exit stub the register state may not be valid,
+  // causing StartUnwinding() to abandon unwinding this activation.
+ if (activation.hasWasmExitFP()) {
+ exitReason_ = activation.wasmExitReason();
+ initFromExitFP(activation.wasmExitFP());
+ return;
+ }
+
+ bool unwoundCaller;
+ UnwindState unwindState;
+ if (!StartUnwinding(state, &unwindState, &unwoundCaller)) {
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(unwindState.codeRange);
+
+ if (unwoundCaller) {
+ callerFP_ = unwindState.fp;
+ callerPC_ = unwindState.pc;
+ // In the case of a function call, if the original FP value is tagged,
+ // then we're being called through a direct JIT call (the interpreter
+ // and the jit entry don't set FP's low bit). We can't observe
+ // transient tagged values of FP (during wasm::SetExitFP) here because
+ // StartUnwinding would not have unwound then.
+ if (unwindState.codeRange->isFunction() &&
+ Frame::isExitOrJitEntryFP(reinterpret_cast<uint8_t*>(state.fp))) {
+ unwoundIonCallerFP_ = callerFP_;
+ }
+ } else {
+ callerFP_ = Frame::fromUntaggedWasmExitFP(unwindState.fp)->rawCaller();
+ callerPC_ = Frame::fromUntaggedWasmExitFP(unwindState.fp)->returnAddress();
+ // See comment above. The only way to get a tagged FP here means that
+ // the caller is a fast JIT caller which called into a wasm function.
+ if (Frame::isExitOrJitEntryFP(callerFP_)) {
+ MOZ_ASSERT(unwindState.codeRange->isFunction());
+ unwoundIonCallerFP_ = Frame::toJitEntryCaller(callerFP_);
+ }
+ }
+
+ if (unwindState.codeRange->isJitEntry()) {
+ MOZ_ASSERT(!unwoundIonCallerFP_);
+ unwoundIonCallerFP_ = callerFP_;
+ }
+
+ if (unwindState.codeRange->isInterpEntry()) {
+ unwindState.codeRange = nullptr;
+ exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
+ }
+
+ code_ = unwindState.code;
+ codeRange_ = unwindState.codeRange;
+ stackAddress_ = state.sp;
+ MOZ_ASSERT(!done());
+}
+
+void ProfilingFrameIterator::operator++() {
+ if (!exitReason_.isNone()) {
+ DebugOnly<bool> wasInterpEntry = exitReason_.isInterpEntry();
+ exitReason_ = ExitReason::None();
+ MOZ_ASSERT((!codeRange_) == wasInterpEntry);
+ MOZ_ASSERT(done() == wasInterpEntry);
+ return;
+ }
+
+ if (unwoundIonCallerFP_) {
+ MOZ_ASSERT(codeRange_->isFunction() || codeRange_->isJitEntry());
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ codeRange_ = nullptr;
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ if (!callerPC_) {
+ MOZ_ASSERT(!callerFP_);
+ codeRange_ = nullptr;
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ if (!callerFP_) {
+ MOZ_ASSERT(LookupCode(callerPC_, &codeRange_) == code_);
+ MOZ_ASSERT(codeRange_->kind() == CodeRange::InterpEntry);
+ exitReason_ = ExitReason(ExitReason::Fixed::FakeInterpEntry);
+ codeRange_ = nullptr;
+ callerPC_ = nullptr;
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ code_ = LookupCode(callerPC_, &codeRange_);
+
+ if (!code_ && Frame::isExitOrJitEntryFP(callerFP_)) {
+    // The parent frame is an inlined wasm call; the tagged FP points to
+ // the fake exit frame.
+ MOZ_ASSERT(!codeRange_);
+ AssertDirectJitCall(callerFP_);
+ unwoundIonCallerFP_ = Frame::toJitEntryCaller(callerFP_);
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ MOZ_ASSERT(codeRange_);
+
+ if (codeRange_->isJitEntry()) {
+ unwoundIonCallerFP_ = callerFP_;
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ MOZ_ASSERT(code_ ==
+ &GetNearestEffectiveTls(Frame::fromUntaggedWasmExitFP(callerFP_))
+ ->instance->code());
+
+ switch (codeRange_->kind()) {
+ case CodeRange::Function:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::BuiltinThunk:
+ case CodeRange::TrapExit:
+ case CodeRange::DebugTrap:
+ case CodeRange::FarJumpIsland: {
+ stackAddress_ = callerFP_;
+ const auto* frame = Frame::fromUntaggedWasmExitFP(callerFP_);
+ callerPC_ = frame->returnAddress();
+ AssertMatchesCallSite(callerPC_, frame->rawCaller());
+ callerFP_ = frame->rawCaller();
+ break;
+ }
+ case CodeRange::InterpEntry:
+ MOZ_CRASH("should have had null caller fp");
+ case CodeRange::JitEntry:
+ MOZ_CRASH("should have been guarded above");
+ case CodeRange::Throw:
+ MOZ_CRASH("code range doesn't have frame");
+ }
+
+ MOZ_ASSERT(!done());
+}
+
+static const char* ThunkedNativeToDescription(SymbolicAddress func) {
+ MOZ_ASSERT(NeedsBuiltinThunk(func));
+ switch (func) {
+ case SymbolicAddress::HandleDebugTrap:
+ case SymbolicAddress::HandleThrow:
+ case SymbolicAddress::HandleTrap:
+ case SymbolicAddress::CallImport_General:
+ case SymbolicAddress::CoerceInPlace_ToInt32:
+ case SymbolicAddress::CoerceInPlace_ToNumber:
+ case SymbolicAddress::CoerceInPlace_ToBigInt:
+ case SymbolicAddress::BoxValue_Anyref:
+ MOZ_ASSERT(!NeedsBuiltinThunk(func),
+ "not in sync with NeedsBuiltinThunk");
+ break;
+ case SymbolicAddress::ToInt32:
+ return "call to asm.js native ToInt32 coercion (in wasm)";
+ case SymbolicAddress::DivI64:
+ return "call to native i64.div_s (in wasm)";
+ case SymbolicAddress::UDivI64:
+ return "call to native i64.div_u (in wasm)";
+ case SymbolicAddress::ModI64:
+ return "call to native i64.rem_s (in wasm)";
+ case SymbolicAddress::UModI64:
+ return "call to native i64.rem_u (in wasm)";
+ case SymbolicAddress::TruncateDoubleToUint64:
+ return "call to native i64.trunc_u/f64 (in wasm)";
+ case SymbolicAddress::TruncateDoubleToInt64:
+ return "call to native i64.trunc_s/f64 (in wasm)";
+ case SymbolicAddress::SaturatingTruncateDoubleToUint64:
+ return "call to native i64.trunc_u:sat/f64 (in wasm)";
+ case SymbolicAddress::SaturatingTruncateDoubleToInt64:
+ return "call to native i64.trunc_s:sat/f64 (in wasm)";
+ case SymbolicAddress::Uint64ToDouble:
+ return "call to native f64.convert_u/i64 (in wasm)";
+ case SymbolicAddress::Uint64ToFloat32:
+ return "call to native f32.convert_u/i64 (in wasm)";
+ case SymbolicAddress::Int64ToDouble:
+ return "call to native f64.convert_s/i64 (in wasm)";
+ case SymbolicAddress::Int64ToFloat32:
+ return "call to native f32.convert_s/i64 (in wasm)";
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ return "call to native i32.div_s (in wasm)";
+ case SymbolicAddress::aeabi_uidivmod:
+ return "call to native i32.div_u (in wasm)";
+#endif
+ case SymbolicAddress::AllocateBigInt:
+ return "call to native Allocate<BigInt, NoGC> (in wasm)";
+ case SymbolicAddress::ModD:
+ return "call to asm.js native f64 % (mod)";
+ case SymbolicAddress::SinD:
+ return "call to asm.js native f64 Math.sin";
+ case SymbolicAddress::CosD:
+ return "call to asm.js native f64 Math.cos";
+ case SymbolicAddress::TanD:
+ return "call to asm.js native f64 Math.tan";
+ case SymbolicAddress::ASinD:
+ return "call to asm.js native f64 Math.asin";
+ case SymbolicAddress::ACosD:
+ return "call to asm.js native f64 Math.acos";
+ case SymbolicAddress::ATanD:
+ return "call to asm.js native f64 Math.atan";
+ case SymbolicAddress::CeilD:
+ return "call to native f64.ceil (in wasm)";
+ case SymbolicAddress::CeilF:
+ return "call to native f32.ceil (in wasm)";
+ case SymbolicAddress::FloorD:
+ return "call to native f64.floor (in wasm)";
+ case SymbolicAddress::FloorF:
+ return "call to native f32.floor (in wasm)";
+ case SymbolicAddress::TruncD:
+ return "call to native f64.trunc (in wasm)";
+ case SymbolicAddress::TruncF:
+ return "call to native f32.trunc (in wasm)";
+ case SymbolicAddress::NearbyIntD:
+ return "call to native f64.nearest (in wasm)";
+ case SymbolicAddress::NearbyIntF:
+ return "call to native f32.nearest (in wasm)";
+ case SymbolicAddress::ExpD:
+ return "call to asm.js native f64 Math.exp";
+ case SymbolicAddress::LogD:
+ return "call to asm.js native f64 Math.log";
+ case SymbolicAddress::PowD:
+ return "call to asm.js native f64 Math.pow";
+ case SymbolicAddress::ATan2D:
+ return "call to asm.js native f64 Math.atan2";
+ case SymbolicAddress::MemoryGrow:
+ return "call to native memory.grow (in wasm)";
+ case SymbolicAddress::MemorySize:
+ return "call to native memory.size (in wasm)";
+ case SymbolicAddress::WaitI32:
+ return "call to native i32.wait (in wasm)";
+ case SymbolicAddress::WaitI64:
+ return "call to native i64.wait (in wasm)";
+ case SymbolicAddress::Wake:
+ return "call to native wake (in wasm)";
+ case SymbolicAddress::CoerceInPlace_JitEntry:
+ return "out-of-line coercion for jit entry arguments (in wasm)";
+ case SymbolicAddress::ReportV128JSCall:
+ return "jit call to v128 wasm function";
+ case SymbolicAddress::MemCopy:
+ case SymbolicAddress::MemCopyShared:
+ return "call to native memory.copy function";
+ case SymbolicAddress::DataDrop:
+ return "call to native data.drop function";
+ case SymbolicAddress::MemFill:
+ case SymbolicAddress::MemFillShared:
+ return "call to native memory.fill function";
+ case SymbolicAddress::MemInit:
+ return "call to native memory.init function";
+ case SymbolicAddress::TableCopy:
+ return "call to native table.copy function";
+ case SymbolicAddress::TableFill:
+ return "call to native table.fill function";
+ case SymbolicAddress::ElemDrop:
+ return "call to native elem.drop function";
+ case SymbolicAddress::TableGet:
+ return "call to native table.get function";
+ case SymbolicAddress::TableGrow:
+ return "call to native table.grow function";
+ case SymbolicAddress::TableInit:
+ return "call to native table.init function";
+ case SymbolicAddress::TableSet:
+ return "call to native table.set function";
+ case SymbolicAddress::TableSize:
+ return "call to native table.size function";
+ case SymbolicAddress::RefFunc:
+ return "call to native ref.func function";
+ case SymbolicAddress::PreBarrierFiltering:
+ return "call to native filtering GC prebarrier (in wasm)";
+ case SymbolicAddress::PostBarrier:
+ return "call to native GC postbarrier (in wasm)";
+ case SymbolicAddress::PostBarrierFiltering:
+ return "call to native filtering GC postbarrier (in wasm)";
+ case SymbolicAddress::StructNew:
+ return "call to native struct.new (in wasm)";
+ case SymbolicAddress::StructNarrow:
+ return "call to native struct.narrow (in wasm)";
+#if defined(JS_CODEGEN_MIPS32)
+ case SymbolicAddress::js_jit_gAtomic64Lock:
+ MOZ_CRASH();
+#endif
+#ifdef WASM_CODEGEN_DEBUG
+ case SymbolicAddress::PrintI32:
+ case SymbolicAddress::PrintPtr:
+ case SymbolicAddress::PrintF32:
+ case SymbolicAddress::PrintF64:
+ case SymbolicAddress::PrintText:
+#endif
+ case SymbolicAddress::Limit:
+ break;
+ }
+ return "?";
+}
+
+const char* ProfilingFrameIterator::label() const {
+ MOZ_ASSERT(!done());
+
+ // Use the same string for both time inside and under so that the two
+ // entries will be coalesced by the profiler.
+ // Must be kept in sync with /tools/profiler/tests/test_asm.js
+ static const char importJitDescription[] = "fast exit trampoline (in wasm)";
+ static const char importInterpDescription[] =
+ "slow exit trampoline (in wasm)";
+ static const char builtinNativeDescription[] =
+ "fast exit trampoline to native (in wasm)";
+ static const char trapDescription[] = "trap handling (in wasm)";
+ static const char debugTrapDescription[] = "debug trap handling (in wasm)";
+
+ if (!exitReason_.isFixed()) {
+ return ThunkedNativeToDescription(exitReason_.symbolic());
+ }
+
+ switch (exitReason_.fixed()) {
+ case ExitReason::Fixed::None:
+ break;
+ case ExitReason::Fixed::ImportJit:
+ return importJitDescription;
+ case ExitReason::Fixed::ImportInterp:
+ return importInterpDescription;
+ case ExitReason::Fixed::BuiltinNative:
+ return builtinNativeDescription;
+ case ExitReason::Fixed::Trap:
+ return trapDescription;
+ case ExitReason::Fixed::DebugTrap:
+ return debugTrapDescription;
+ case ExitReason::Fixed::FakeInterpEntry:
+ return "slow entry trampoline (in wasm)";
+ }
+
+ switch (codeRange_->kind()) {
+ case CodeRange::Function:
+ return code_->profilingLabel(codeRange_->funcIndex());
+ case CodeRange::InterpEntry:
+ MOZ_CRASH("should be an ExitReason");
+ case CodeRange::JitEntry:
+ return "fast entry trampoline (in wasm)";
+ case CodeRange::ImportJitExit:
+ return importJitDescription;
+ case CodeRange::BuiltinThunk:
+ return builtinNativeDescription;
+ case CodeRange::ImportInterpExit:
+ return importInterpDescription;
+ case CodeRange::TrapExit:
+ return trapDescription;
+ case CodeRange::DebugTrap:
+ return debugTrapDescription;
+ case CodeRange::FarJumpIsland:
+ return "interstitial (in wasm)";
+ case CodeRange::Throw:
+ MOZ_CRASH("does not have a frame");
+ }
+
+ MOZ_CRASH("bad code range kind");
+}
diff --git a/js/src/wasm/WasmFrameIter.h b/js/src/wasm/WasmFrameIter.h
new file mode 100644
index 0000000000..0760cecc67
--- /dev/null
+++ b/js/src/wasm/WasmFrameIter.h
@@ -0,0 +1,270 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_frame_iter_h
+#define wasm_frame_iter_h
+
+#include "js/ProfilingFrameIterator.h"
+#include "js/TypeDecls.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+
+namespace jit {
+class MacroAssembler;
+struct Register;
+class Label;
+enum class FrameType;
+} // namespace jit
+
+namespace wasm {
+
+class Code;
+class CodeRange;
+class DebugFrame;
+class TypeIdDesc;
+class Instance;
+class ModuleSegment;
+
+struct CallableOffsets;
+struct FuncOffsets;
+class Frame;
+
+using RegisterState = JS::ProfilingFrameIterator::RegisterState;
+
+// Iterates over a linear group of wasm frames of a single wasm JitActivation,
+// called synchronously from C++ in the wasm thread. It will stop at the first
+// frame that is not of the same kind, or at the end of an activation.
+//
+// If you want to handle every kind of frame (including JS jit frames), use
+// JitFrameIter.
+
+class WasmFrameIter {
+ public:
+ enum class Unwind { True, False };
+ static constexpr uint32_t ColumnBit = 1u << 31;
+
+ private:
+ jit::JitActivation* activation_;
+ const Code* code_;
+ const CodeRange* codeRange_;
+ unsigned lineOrBytecode_;
+ Frame* fp_;
+ const TlsData* tls_;
+ uint8_t* unwoundIonCallerFP_;
+ jit::FrameType unwoundIonFrameType_;
+ Unwind unwind_;
+ void** unwoundAddressOfReturnAddress_;
+ uint8_t* resumePCinCurrentFrame_;
+
+ void popFrame();
+
+ public:
+ // See comment above this class definition.
+ explicit WasmFrameIter(jit::JitActivation* activation, Frame* fp = nullptr);
+ const jit::JitActivation* activation() const { return activation_; }
+ void setUnwind(Unwind unwind) { unwind_ = unwind; }
+ void operator++();
+ bool done() const;
+ const char* filename() const;
+ const char16_t* displayURL() const;
+ bool mutedErrors() const;
+ JSAtom* functionDisplayAtom() const;
+ unsigned lineOrBytecode() const;
+ uint32_t funcIndex() const;
+ unsigned computeLine(uint32_t* column) const;
+ const CodeRange* codeRange() const { return codeRange_; }
+ Instance* instance() const;
+ void** unwoundAddressOfReturnAddress() const;
+ bool debugEnabled() const;
+ DebugFrame* debugFrame() const;
+ jit::FrameType unwoundIonFrameType() const;
+ uint8_t* unwoundIonCallerFP() const { return unwoundIonCallerFP_; }
+ Frame* frame() const { return fp_; }
+ const TlsData* tls() const { return tls_; }
+
+ // Returns the address of the next instruction that will execute in this
+ // frame, once control returns to this frame.
+ uint8_t* resumePCinCurrentFrame() const;
+};
+
+enum class SymbolicAddress;
+
+// An ExitReason describes the possible reasons for leaving compiled wasm
+// code or the state of not having left compiled wasm code
+// (ExitReason::None). It is either a known fixed reason, or a SymbolicAddress
+// identifying a native function, which allows better display in the profiler.
+class ExitReason {
+ public:
+ enum class Fixed : uint32_t {
+ None, // default state, the pc is in wasm code
+ FakeInterpEntry, // slow-path entry call from C++ WasmCall()
+ ImportJit, // fast-path call directly into JIT code
+ ImportInterp, // slow-path call into C++ Invoke()
+ BuiltinNative, // fast-path call directly into native C++ code
+ Trap, // call to trap handler
+ DebugTrap // call to debug trap handler
+ };
+
+ private:
+ uint32_t payload_;
+
+ ExitReason() : ExitReason(Fixed::None) {}
+
+ public:
+ MOZ_IMPLICIT ExitReason(Fixed exitReason)
+ : payload_(0x0 | (uint32_t(exitReason) << 1)) {
+ MOZ_ASSERT(isFixed());
+ MOZ_ASSERT_IF(isNone(), payload_ == 0);
+ }
+
+ explicit ExitReason(SymbolicAddress sym)
+ : payload_(0x1 | (uint32_t(sym) << 1)) {
+ MOZ_ASSERT(uint32_t(sym) <= (UINT32_MAX << 1), "packing constraints");
+ MOZ_ASSERT(!isFixed());
+ }
+
+ static ExitReason Decode(uint32_t payload) {
+ ExitReason reason;
+ reason.payload_ = payload;
+ return reason;
+ }
+
+ static ExitReason None() { return ExitReason(ExitReason::Fixed::None); }
+
+ bool isFixed() const { return (payload_ & 0x1) == 0; }
+ bool isNone() const { return isFixed() && fixed() == Fixed::None; }
+ bool isNative() const {
+ return !isFixed() || fixed() == Fixed::BuiltinNative;
+ }
+ bool isInterpEntry() const {
+ return isFixed() && fixed() == Fixed::FakeInterpEntry;
+ }
+
+ uint32_t encode() const { return payload_; }
+ Fixed fixed() const {
+ MOZ_ASSERT(isFixed());
+ return Fixed(payload_ >> 1);
+ }
+ SymbolicAddress symbolic() const {
+ MOZ_ASSERT(!isFixed());
+ return SymbolicAddress(payload_ >> 1);
+ }
+};
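+
+// A minimal sketch of how the packing above behaves: the low payload bit
+// selects between the Fixed and SymbolicAddress encodings, and
+// encode()/Decode() round-trip the payload unchanged.
+//
+//   ExitReason trap(ExitReason::Fixed::Trap);
+//   MOZ_ASSERT(trap.isFixed() && !trap.isNone());
+//   uint32_t bits = trap.encode();
+//   MOZ_ASSERT(ExitReason::Decode(bits).fixed() == ExitReason::Fixed::Trap);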
+
+// Iterates over the frames of a single wasm JitActivation, given an
+// asynchronously-profiled thread's state.
+class ProfilingFrameIterator {
+ const Code* code_;
+ const CodeRange* codeRange_;
+ uint8_t* callerFP_;
+ void* callerPC_;
+ void* stackAddress_;
+ uint8_t* unwoundIonCallerFP_;
+ ExitReason exitReason_;
+
+ void initFromExitFP(const Frame* fp);
+
+ public:
+ ProfilingFrameIterator();
+
+ // Start unwinding at a non-innermost activation that has necessarily been
+ // exited from wasm code (and thus activation.hasWasmExitFP).
+ explicit ProfilingFrameIterator(const jit::JitActivation& activation);
+
+ // Start unwinding at a group of wasm frames after unwinding an inner group
+ // of JSJit frames.
+ explicit ProfilingFrameIterator(const Frame* fp);
+
+ // Start unwinding at the innermost activation given the register state when
+ // the thread was suspended.
+ ProfilingFrameIterator(const jit::JitActivation& activation,
+ const RegisterState& state);
+
+ void operator++();
+ bool done() const { return !codeRange_ && exitReason_.isNone(); }
+
+ void* stackAddress() const {
+ MOZ_ASSERT(!done());
+ return stackAddress_;
+ }
+ uint8_t* unwoundIonCallerFP() const {
+ MOZ_ASSERT(done());
+ return unwoundIonCallerFP_;
+ }
+ const char* label() const;
+};
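+
+// A rough usage sketch, assuming `activation` and `registers` were captured
+// when the profiled thread was suspended, and `recordSample` stands in for
+// whatever the embedder does with each frame:
+//
+//   ProfilingFrameIterator iter(activation, registers);
+//   while (!iter.done()) {
+//     recordSample(iter.label(), iter.stackAddress());
+//     ++iter;
+//   }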
+
+// Prologue/epilogue code generation
+
+void SetExitFP(jit::MacroAssembler& masm, ExitReason reason,
+ jit::Register scratch);
+void ClearExitFP(jit::MacroAssembler& masm, jit::Register scratch);
+
+void GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets);
+void GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
+ ExitReason reason, CallableOffsets* offsets);
+
+void GenerateJitExitPrologue(jit::MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets);
+void GenerateJitExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
+ CallableOffsets* offsets);
+
+void GenerateJitEntryPrologue(jit::MacroAssembler& masm, Offsets* offsets);
+
+void GenerateFunctionPrologue(jit::MacroAssembler& masm,
+ const TypeIdDesc& funcTypeId,
+ const mozilla::Maybe<uint32_t>& tier1FuncIndex,
+ FuncOffsets* offsets);
+void GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed,
+ FuncOffsets* offsets);
+
+// Iterates through frames, across possible cross-instance calls and entry
+// stubs, to obtain the TlsData that corresponds to the passed fp.
+const TlsData* GetNearestEffectiveTls(const Frame* fp);
+TlsData* GetNearestEffectiveTls(Frame* fp);
+
+// Describes register state and associated code at a given call frame.
+
+struct UnwindState {
+ uint8_t* fp;
+ void* pc;
+ const Code* code;
+ const CodeRange* codeRange;
+ UnwindState() : fp(nullptr), pc(nullptr), code(nullptr), codeRange(nullptr) {}
+};
+
+// Ensures the register state at a call site is consistent: pc must be in the
+// code range of the code described by fp. This prevents issues when using
+// the values of pc/fp, especially at call site boundaries, where the state
+// hasn't fully transitioned from the caller's to the callee's.
+//
+// unwoundCaller is set to true if we were in a transitional state and had to
+// rewind to the caller's frame instead of the current frame.
+//
+// Returns true if it was possible to get to a clear state, or false if the
+// frame should be ignored.
+
+bool StartUnwinding(const RegisterState& registers, UnwindState* unwindState,
+ bool* unwoundCaller);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_frame_iter_h
diff --git a/js/src/wasm/WasmGC.cpp b/js/src/wasm/WasmGC.cpp
new file mode 100644
index 0000000000..48bfd256dd
--- /dev/null
+++ b/js/src/wasm/WasmGC.cpp
@@ -0,0 +1,261 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2019 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmGC.h"
+#include "wasm/WasmInstance.h"
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace wasm {
+
+wasm::StackMap* ConvertStackMapBoolVectorToStackMap(
+ const StackMapBoolVector& vec, bool hasRefs) {
+ wasm::StackMap* stackMap = wasm::StackMap::create(vec.length());
+ if (!stackMap) {
+ return nullptr;
+ }
+
+ bool hasRefsObserved = false;
+ size_t i = 0;
+ for (bool b : vec) {
+ if (b) {
+ stackMap->setBit(i);
+ hasRefsObserved = true;
+ }
+ i++;
+ }
+ MOZ_RELEASE_ASSERT(hasRefs == hasRefsObserved);
+
+ return stackMap;
+}
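+
+// A brief sketch, assuming a three-word frame whose middle word holds a ref:
+//
+//   StackMapBoolVector vec;
+//   if (vec.append(false) && vec.append(true) && vec.append(false)) {
+//     wasm::StackMap* map = ConvertStackMapBoolVectorToStackMap(vec, true);
+//     // On success, bit 1 is set; the caller owns and must destroy() map.
+//   }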
+
+// Generate a stackmap for a function's stack-overflow-at-entry trap, with
+// the structure:
+//
+// <reg dump area>
+// | ++ <space reserved before trap, if any>
+// | ++ <space for Frame>
+// | ++ <inbound arg area>
+// | |
+// Lowest Addr Highest Addr
+//
+// The caller owns the resulting stackmap. This assumes a grow-down stack.
+//
+// For non-debug builds, if the stackmap would contain no pointers, no
+// stackmap is created, and nullptr is returned. For a debug build, a
+// stackmap is always created and returned.
+//
+// The "space reserved before trap" is the space reserved by
+// MacroAssembler::wasmReserveStackChecked, in the case where the frame is
+// "small", as determined by that function.
+bool CreateStackMapForFunctionEntryTrap(const wasm::ArgTypeVector& argTypes,
+ const MachineState& trapExitLayout,
+ size_t trapExitLayoutWords,
+ size_t nBytesReservedBeforeTrap,
+ size_t nInboundStackArgBytes,
+ wasm::StackMap** result) {
+ // Ensure this is defined on all return paths.
+ *result = nullptr;
+
+ // The size of the wasm::Frame itself.
+ const size_t nFrameBytes = sizeof(wasm::Frame);
+
+ // The size of the register dump (trap) area.
+ const size_t trapExitLayoutBytes = trapExitLayoutWords * sizeof(void*);
+
+ // This is the total number of bytes covered by the map.
+ const DebugOnly<size_t> nTotalBytes = trapExitLayoutBytes +
+ nBytesReservedBeforeTrap + nFrameBytes +
+ nInboundStackArgBytes;
+
+ // Create the stackmap initially in this vector. Since most frames will
+ // contain 128 or fewer words, heap allocation is avoided in the majority of
+ // cases. vec[0] is for the lowest address in the map, vec[N-1] is for the
+ // highest address in the map.
+ StackMapBoolVector vec;
+
+ // Keep track of whether we've actually seen any refs.
+ bool hasRefs = false;
+
+ // REG DUMP AREA
+ wasm::ExitStubMapVector trapExitExtras;
+ if (!GenerateStackmapEntriesForTrapExit(
+ argTypes, trapExitLayout, trapExitLayoutWords, &trapExitExtras)) {
+ return false;
+ }
+ MOZ_ASSERT(trapExitExtras.length() == trapExitLayoutWords);
+
+ if (!vec.appendN(false, trapExitLayoutWords)) {
+ return false;
+ }
+ for (size_t i = 0; i < trapExitLayoutWords; i++) {
+ vec[i] = trapExitExtras[i];
+ hasRefs |= vec[i];
+ }
+
+ // SPACE RESERVED BEFORE TRAP
+ MOZ_ASSERT(nBytesReservedBeforeTrap % sizeof(void*) == 0);
+ if (!vec.appendN(false, nBytesReservedBeforeTrap / sizeof(void*))) {
+ return false;
+ }
+
+ // SPACE FOR FRAME
+ if (!vec.appendN(false, nFrameBytes / sizeof(void*))) {
+ return false;
+ }
+
+ // INBOUND ARG AREA
+ MOZ_ASSERT(nInboundStackArgBytes % sizeof(void*) == 0);
+ const size_t numStackArgWords = nInboundStackArgBytes / sizeof(void*);
+
+ const size_t wordsSoFar = vec.length();
+ if (!vec.appendN(false, numStackArgWords)) {
+ return false;
+ }
+
+ for (WasmABIArgIter i(argTypes); !i.done(); i++) {
+ ABIArg argLoc = *i;
+ if (argLoc.kind() == ABIArg::Stack &&
+ argTypes[i.index()] == MIRType::RefOrNull) {
+ uint32_t offset = argLoc.offsetFromArgBase();
+ MOZ_ASSERT(offset < nInboundStackArgBytes);
+ MOZ_ASSERT(offset % sizeof(void*) == 0);
+ vec[wordsSoFar + offset / sizeof(void*)] = true;
+ hasRefs = true;
+ }
+ }
+
+#ifndef DEBUG
+ // We saw no references, and this is a non-debug build, so don't bother
+ // building the stackmap.
+ if (!hasRefs) {
+ return true;
+ }
+#endif
+
+ // Convert vec into a wasm::StackMap.
+ MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
+ wasm::StackMap* stackMap = ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
+ if (!stackMap) {
+ return false;
+ }
+ stackMap->setExitStubWords(trapExitLayoutWords);
+
+ stackMap->setFrameOffsetFromTop(nFrameBytes / sizeof(void*) +
+ numStackArgWords);
+#ifdef DEBUG
+ for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
+ MOZ_ASSERT(stackMap->getBit(stackMap->numMappedWords -
+ stackMap->frameOffsetFromTop + i) == 0);
+ }
+#endif
+
+ *result = stackMap;
+ return true;
+}
+
+bool GenerateStackmapEntriesForTrapExit(const ArgTypeVector& args,
+ const MachineState& trapExitLayout,
+ const size_t trapExitLayoutNumWords,
+ ExitStubMapVector* extras) {
+ MOZ_ASSERT(extras->empty());
+
+ // If this doesn't hold, we can't distinguish saved and not-saved
+ // registers in the MachineState. See MachineState::MachineState().
+ MOZ_ASSERT(trapExitLayoutNumWords < 0x100);
+
+ if (!extras->appendN(false, trapExitLayoutNumWords)) {
+ return false;
+ }
+
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ if (!i->argInRegister() || i.mirType() != MIRType::RefOrNull) {
+ continue;
+ }
+
+ size_t offsetFromTop =
+ reinterpret_cast<size_t>(trapExitLayout.address(i->gpr()));
+
+ // If this doesn't hold, the associated register wasn't saved by
+ // the trap exit stub. Better to crash now than much later, in
+ // some obscure place, and possibly with security consequences.
+ MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
+
+ // offsetFromTop is an offset in words down from the highest
+ // address in the exit stub save area. Switch it around to be an
+ // offset up from the bottom of the (integer register) save area.
+ size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
+
+ (*extras)[offsetFromBottom] = true;
+ }
+
+ return true;
+}
+
+void EmitWasmPreBarrierGuard(MacroAssembler& masm, Register tls,
+ Register scratch, Register valueAddr,
+ Label* skipBarrier) {
+ // If no incremental GC has started, we don't need the barrier.
+ masm.loadPtr(
+ Address(tls, offsetof(TlsData, addressOfNeedsIncrementalBarrier)),
+ scratch);
+ masm.branchTest32(Assembler::Zero, Address(scratch, 0), Imm32(0x1),
+ skipBarrier);
+
+ // If the previous value is null, we don't need the barrier.
+ masm.loadPtr(Address(valueAddr, 0), scratch);
+ masm.branchTestPtr(Assembler::Zero, scratch, scratch, skipBarrier);
+}
+
+void EmitWasmPreBarrierCall(MacroAssembler& masm, Register tls,
+ Register scratch, Register valueAddr) {
+ MOZ_ASSERT(valueAddr == PreBarrierReg);
+
+ masm.loadPtr(Address(tls, offsetof(TlsData, instance)), scratch);
+ masm.loadPtr(Address(scratch, Instance::offsetOfPreBarrierCode()), scratch);
+#if defined(DEBUG) && defined(JS_CODEGEN_ARM64)
+ // The prebarrier assumes that x28 == sp.
+ Label ok;
+ masm.Cmp(sp, vixl::Operand(x28));
+ masm.B(&ok, Assembler::Equal);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+ masm.call(scratch);
+}
+
+void EmitWasmPostBarrierGuard(MacroAssembler& masm,
+ const Maybe<Register>& object,
+ Register otherScratch, Register setValue,
+ Label* skipBarrier) {
+ // If the pointer being stored is null, no barrier.
+ masm.branchTestPtr(Assembler::Zero, setValue, setValue, skipBarrier);
+
+ // If there is a containing object and it is in the nursery, no barrier.
+ if (object) {
+ masm.branchPtrInNurseryChunk(Assembler::Equal, *object, otherScratch,
+ skipBarrier);
+ }
+
+ // If the pointer being stored is to a tenured object, no barrier.
+ masm.branchPtrInNurseryChunk(Assembler::NotEqual, setValue, otherScratch,
+ skipBarrier);
+}
+
+} // namespace wasm
+} // namespace js
diff --git a/js/src/wasm/WasmGC.h b/js/src/wasm/WasmGC.h
new file mode 100644
index 0000000000..0e13c65854
--- /dev/null
+++ b/js/src/wasm/WasmGC.h
@@ -0,0 +1,406 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2019 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_gc_h
+#define wasm_gc_h
+
+#include "mozilla/BinarySearch.h"
+
+#include "jit/MacroAssembler.h" // For ABIArgIter
+#include "js/AllocPolicy.h"
+#include "js/Vector.h"
+#include "util/Memory.h"
+
+namespace js {
+
+namespace jit {
+class MacroAssembler;
+} // namespace jit
+
+namespace wasm {
+
+using namespace js::jit;
+
+// Definitions for stack maps.
+
+typedef Vector<bool, 32, SystemAllocPolicy> ExitStubMapVector;
+
+struct StackMap final {
+ // A StackMap is a bit-array containing numMappedWords bits, one bit per
+ // word of stack. Bit index zero is for the lowest addressed word in the
+ // range.
+ //
+ // This is a variable-length structure whose size must be known at creation
+ // time.
+ //
+ // Users of the map will know the address of the wasm::Frame that is covered
+ // by this map. In order that they can calculate the exact address range
+ // covered by the map, the map also stores the offset, from the highest
+ // addressed word of the map, of the embedded wasm::Frame. This is an
+ // offset down from the highest address, rather than up from the lowest, so
+ // as to limit its range to 11 bits, where
+ // 11 == ceil(log2(MaxParams * sizeof-biggest-param-type-in-words))
+ //
+  // The map may also cover a ref-typed DebugFrame.  If so, that can be noted,
+ // since users of the map need to trace pointers in such a DebugFrame.
+ //
+ // Finally, for sanity checking only, for stack maps associated with a wasm
+ // trap exit stub, the number of words used by the trap exit stub save area
+ // is also noted. This is used in Instance::traceFrame to check that the
+ // TrapExitDummyValue is in the expected place in the frame.
+
+ // The total number of stack words covered by the map ..
+ uint32_t numMappedWords : 30;
+
+ // .. of which this many are "exit stub" extras
+ uint32_t numExitStubWords : 6;
+
+ // Where is Frame* relative to the top? This is an offset in words.
+ uint32_t frameOffsetFromTop : 11;
+
+ // Notes the presence of a DebugFrame which may contain GC-managed data.
+ uint32_t hasDebugFrame : 1;
+
+ private:
+ static constexpr uint32_t maxMappedWords = (1 << 30) - 1;
+ static constexpr uint32_t maxExitStubWords = (1 << 6) - 1;
+ static constexpr uint32_t maxFrameOffsetFromTop = (1 << 11) - 1;
+
+ uint32_t bitmap[1];
+
+ explicit StackMap(uint32_t numMappedWords)
+ : numMappedWords(numMappedWords),
+ numExitStubWords(0),
+ frameOffsetFromTop(0),
+ hasDebugFrame(0) {
+ const uint32_t nBitmap = calcNBitmap(numMappedWords);
+ memset(bitmap, 0, nBitmap * sizeof(bitmap[0]));
+ }
+
+ public:
+ static StackMap* create(uint32_t numMappedWords) {
+ uint32_t nBitmap = calcNBitmap(numMappedWords);
+ char* buf =
+ (char*)js_malloc(sizeof(StackMap) + (nBitmap - 1) * sizeof(bitmap[0]));
+ if (!buf) {
+ return nullptr;
+ }
+ return ::new (buf) StackMap(numMappedWords);
+ }
+
+ void destroy() { js_free((char*)this); }
+
+ // Record the number of words in the map used as a wasm trap exit stub
+ // save area. See comment above.
+ void setExitStubWords(uint32_t nWords) {
+ MOZ_ASSERT(numExitStubWords == 0);
+ MOZ_RELEASE_ASSERT(nWords <= maxExitStubWords);
+ MOZ_ASSERT(nWords <= numMappedWords);
+ numExitStubWords = nWords;
+ }
+
+ // Record the offset from the highest-addressed word of the map, that the
+ // wasm::Frame lives at. See comment above.
+ void setFrameOffsetFromTop(uint32_t nWords) {
+ MOZ_ASSERT(frameOffsetFromTop == 0);
+ MOZ_RELEASE_ASSERT(nWords <= maxFrameOffsetFromTop);
+ MOZ_ASSERT(frameOffsetFromTop < numMappedWords);
+ frameOffsetFromTop = nWords;
+ }
+
+ // If the frame described by this StackMap includes a DebugFrame, call here to
+ // record that fact.
+ void setHasDebugFrame() {
+ MOZ_ASSERT(hasDebugFrame == 0);
+ hasDebugFrame = 1;
+ }
+
+ inline void setBit(uint32_t bitIndex) {
+ MOZ_ASSERT(bitIndex < numMappedWords);
+ uint32_t wordIndex = bitIndex / wordsPerBitmapElem;
+ uint32_t wordOffset = bitIndex % wordsPerBitmapElem;
+ bitmap[wordIndex] |= (1 << wordOffset);
+ }
+
+ inline uint32_t getBit(uint32_t bitIndex) const {
+ MOZ_ASSERT(bitIndex < numMappedWords);
+ uint32_t wordIndex = bitIndex / wordsPerBitmapElem;
+ uint32_t wordOffset = bitIndex % wordsPerBitmapElem;
+ return (bitmap[wordIndex] >> wordOffset) & 1;
+ }
+
+ private:
+ static constexpr uint32_t wordsPerBitmapElem = sizeof(bitmap[0]) * 8;
+
+ static uint32_t calcNBitmap(uint32_t numMappedWords) {
+ MOZ_RELEASE_ASSERT(numMappedWords <= maxMappedWords);
+ uint32_t nBitmap =
+ (numMappedWords + wordsPerBitmapElem - 1) / wordsPerBitmapElem;
+ return nBitmap == 0 ? 1 : nBitmap;
+ }
+};
+
+// This is the expected size for a map that covers 32 or fewer words.
+static_assert(sizeof(StackMap) == 12, "wasm::StackMap has unexpected size");
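+
+// A small usage sketch, assuming a frame of 8 mapped words in which only
+// word 3 holds a GC pointer:
+//
+//   StackMap* map = StackMap::create(8);
+//   if (map) {
+//     map->setBit(3);
+//     MOZ_ASSERT(map->getBit(3) == 1 && map->getBit(4) == 0);
+//     map->destroy();
+//   }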
+
+class StackMaps {
+ public:
+ // A Maplet holds a single code-address-to-map binding. Note that the
+ // code address is the lowest address of the instruction immediately
+ // following the instruction of interest, not of the instruction of
+ // interest itself. In practice (at least for the Wasm Baseline compiler)
+ // this means that |nextInsnAddr| points either immediately after a call
+ // instruction, after a trap instruction or after a no-op.
+ struct Maplet {
+ uint8_t* nextInsnAddr;
+ StackMap* map;
+ Maplet(uint8_t* nextInsnAddr, StackMap* map)
+ : nextInsnAddr(nextInsnAddr), map(map) {}
+ void offsetBy(uintptr_t delta) { nextInsnAddr += delta; }
+ bool operator<(const Maplet& other) const {
+ return uintptr_t(nextInsnAddr) < uintptr_t(other.nextInsnAddr);
+ }
+ };
+
+ private:
+ bool sorted_;
+ Vector<Maplet, 0, SystemAllocPolicy> mapping_;
+
+ public:
+ StackMaps() : sorted_(false) {}
+ ~StackMaps() {
+ for (size_t i = 0; i < mapping_.length(); i++) {
+ mapping_[i].map->destroy();
+ mapping_[i].map = nullptr;
+ }
+ }
+ [[nodiscard]] bool add(uint8_t* nextInsnAddr, StackMap* map) {
+ MOZ_ASSERT(!sorted_);
+ return mapping_.append(Maplet(nextInsnAddr, map));
+ }
+ [[nodiscard]] bool add(const Maplet& maplet) {
+ return add(maplet.nextInsnAddr, maplet.map);
+ }
+ void clear() {
+ for (size_t i = 0; i < mapping_.length(); i++) {
+ mapping_[i].nextInsnAddr = nullptr;
+ mapping_[i].map = nullptr;
+ }
+ mapping_.clear();
+ }
+ bool empty() const { return mapping_.empty(); }
+ size_t length() const { return mapping_.length(); }
+ Maplet* getRef(size_t i) { return &mapping_[i]; }
+ Maplet get(size_t i) const { return mapping_[i]; }
+ Maplet move(size_t i) {
+ Maplet m = mapping_[i];
+ mapping_[i].map = nullptr;
+ return m;
+ }
+ void offsetBy(uintptr_t delta) {
+ for (size_t i = 0; i < mapping_.length(); i++) mapping_[i].offsetBy(delta);
+ }
+ void sort() {
+ MOZ_ASSERT(!sorted_);
+ std::sort(mapping_.begin(), mapping_.end());
+ sorted_ = true;
+ }
+ const StackMap* findMap(uint8_t* nextInsnAddr) const {
+ struct Comparator {
+ int operator()(Maplet aVal) const {
+ if (uintptr_t(mTarget) < uintptr_t(aVal.nextInsnAddr)) {
+ return -1;
+ }
+ if (uintptr_t(mTarget) > uintptr_t(aVal.nextInsnAddr)) {
+ return 1;
+ }
+ return 0;
+ }
+ explicit Comparator(uint8_t* aTarget) : mTarget(aTarget) {}
+ const uint8_t* mTarget;
+ };
+
+ size_t result;
+ if (mozilla::BinarySearchIf(mapping_, 0, mapping_.length(),
+ Comparator(nextInsnAddr), &result)) {
+ return mapping_[result].map;
+ }
+
+ return nullptr;
+ }
+};
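+
+// A usage sketch for StackMaps, assuming `addr1 < addr2` are the addresses
+// immediately following two call instructions and `map1`/`map2` are
+// heap-allocated StackMap objects (the container takes ownership):
+//
+//   StackMaps maps;
+//   if (maps.add(addr1, map1) && maps.add(addr2, map2)) {
+//     maps.sort();                               // required before findMap()
+//     const StackMap* found = maps.findMap(addr2);  // == map2
+//   }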
+
+// Supporting code for creation of stackmaps.
+
+// StackArgAreaSizeUnaligned returns the size, in bytes, of the stack arg area
+// needed to pass |argTypes|, excluding any alignment padding beyond the size
+// of the area as a whole. The size is as determined by the platform's native
+// ABI.
+//
+// StackArgAreaSizeAligned returns the same, but rounded up to the nearest
+// 16-byte boundary.
+//
+// Note, StackArgAreaSize{Unaligned,Aligned}() must process all the arguments
+// in order to take into account all necessary alignment constraints. The
+// signature must include any receiver argument -- in other words, it must be
+// the complete native-ABI-level call signature.
+template <class T>
+static inline size_t StackArgAreaSizeUnaligned(const T& argTypes) {
+ WasmABIArgIter<const T> i(argTypes);
+ while (!i.done()) {
+ i++;
+ }
+ return i.stackBytesConsumedSoFar();
+}
+
+static inline size_t StackArgAreaSizeUnaligned(
+ const SymbolicAddressSignature& saSig) {
+  // WasmABIArgIter::ABIArgIter wants the items it iterates over to be
+  // presented in some type that has methods length() and operator[]. So we
+  // have to wrap up |saSig|'s array of types in this API-matching class.
+ class MOZ_STACK_CLASS ItemsAndLength {
+ const MIRType* items_;
+ size_t length_;
+
+ public:
+ ItemsAndLength(const MIRType* items, size_t length)
+ : items_(items), length_(length) {}
+ size_t length() const { return length_; }
+ MIRType operator[](size_t i) const { return items_[i]; }
+ };
+
+ // Assert, at least crudely, that we're not accidentally going to run off
+ // the end of the array of types, nor into undefined parts of it, while
+ // iterating.
+ MOZ_ASSERT(saSig.numArgs <
+ sizeof(saSig.argTypes) / sizeof(saSig.argTypes[0]));
+ MOZ_ASSERT(saSig.argTypes[saSig.numArgs] == MIRType::None /*the end marker*/);
+
+ ItemsAndLength itemsAndLength(saSig.argTypes, saSig.numArgs);
+ return StackArgAreaSizeUnaligned(itemsAndLength);
+}
+
+static inline size_t AlignStackArgAreaSize(size_t unalignedSize) {
+ return AlignBytes(unalignedSize, 16u);
+}
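+
+// For example, AlignStackArgAreaSize(20) == 32, while an already-aligned size
+// such as AlignStackArgAreaSize(16) stays 16.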
+
+template <class T>
+static inline size_t StackArgAreaSizeAligned(const T& argTypes) {
+ return AlignStackArgAreaSize(StackArgAreaSizeUnaligned(argTypes));
+}
+
+// A stackmap creation helper. Create a stackmap from a vector of booleans.
+// The caller owns the resulting stackmap.
+
+typedef Vector<bool, 128, SystemAllocPolicy> StackMapBoolVector;
+
+wasm::StackMap* ConvertStackMapBoolVectorToStackMap(
+ const StackMapBoolVector& vec, bool hasRefs);
+
+// Generate a stackmap for a function's stack-overflow-at-entry trap, with
+// the structure:
+//
+// <reg dump area>
+// | ++ <space reserved before trap, if any>
+// | ++ <space for Frame>
+// | ++ <inbound arg area>
+// | |
+// Lowest Addr Highest Addr
+//
+// The caller owns the resulting stackmap. This assumes a grow-down stack.
+//
+// For non-debug builds, if the stackmap would contain no pointers, no
+// stackmap is created, and nullptr is returned. For a debug build, a
+// stackmap is always created and returned.
+//
+// The "space reserved before trap" is the space reserved by
+// MacroAssembler::wasmReserveStackChecked, in the case where the frame is
+// "small", as determined by that function.
+[[nodiscard]] bool CreateStackMapForFunctionEntryTrap(
+ const ArgTypeVector& argTypes, const MachineState& trapExitLayout,
+ size_t trapExitLayoutWords, size_t nBytesReservedBeforeTrap,
+ size_t nInboundStackArgBytes, wasm::StackMap** result);
+
+// At a resumable wasm trap, the machine's registers are saved on the stack by
+// (code generated by) GenerateTrapExit(). This function writes into |args| a
+// vector of booleans describing the ref-ness of the saved integer registers.
+// |args[0]| corresponds to the low addressed end of the described section of
+// the save area.
+[[nodiscard]] bool GenerateStackmapEntriesForTrapExit(
+ const ArgTypeVector& args, const MachineState& trapExitLayout,
+ const size_t trapExitLayoutNumWords, ExitStubMapVector* extras);
+
+// Shared write barrier code.
+//
+// A barriered store looks like this:
+//
+// Label skipPreBarrier;
+// EmitWasmPreBarrierGuard(..., &skipPreBarrier);
+// <COMPILER-SPECIFIC ACTIONS HERE>
+// EmitWasmPreBarrierCall(...);
+// bind(&skipPreBarrier);
+//
+// <STORE THE VALUE IN MEMORY HERE>
+//
+// Label skipPostBarrier;
+// <COMPILER-SPECIFIC ACTIONS HERE>
+// EmitWasmPostBarrierGuard(..., &skipPostBarrier);
+// <CALL POST-BARRIER HERE IN A COMPILER-SPECIFIC WAY>
+// bind(&skipPostBarrier);
+//
+// The actions are divided up to allow other actions to be placed between them,
+// such as saving and restoring live registers. The postbarrier call invokes
+// C++ and will kill all live registers.
+
+// Before storing a GC pointer value in memory, skip to `skipBarrier` if the
+// prebarrier is not needed. Will clobber `scratch`.
+//
+// It is OK for `tls` and `scratch` to be the same register.
+
+void EmitWasmPreBarrierGuard(MacroAssembler& masm, Register tls,
+ Register scratch, Register valueAddr,
+ Label* skipBarrier);
+
+// Before storing a GC pointer value in memory, call out-of-line prebarrier
+// code. This assumes `PreBarrierReg` contains the address that will be updated.
+// On ARM64 it also assumes that x28 (the PseudoStackPointer) has the same
+// value as SP. `PreBarrierReg` is preserved by the barrier function. Will
+// clobber `scratch`.
+//
+// It is OK for `tls` and `scratch` to be the same register.
+
+void EmitWasmPreBarrierCall(MacroAssembler& masm, Register tls,
+ Register scratch, Register valueAddr);
+
+// After storing a GC pointer value in memory, skip to `skipBarrier` if a
+// postbarrier is not needed. If the location being set is in a heap-allocated
+// object then `object` must reference that object; otherwise it should be None.
+// The value that was stored is `setValue`. Will clobber `otherScratch` and
+// will use other available scratch registers.
+//
+// `otherScratch` cannot be a designated scratch register.
+
+void EmitWasmPostBarrierGuard(MacroAssembler& masm,
+ const Maybe<Register>& object,
+ Register otherScratch, Register setValue,
+ Label* skipBarrier);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_gc_h
diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp
new file mode 100644
index 0000000000..d477e001b7
--- /dev/null
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -0,0 +1,1362 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmGenerator.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/SHA1.h"
+#include "mozilla/Unused.h"
+
+#include <algorithm>
+#include <thread>
+
+#include "util/Memory.h"
+#include "util/Text.h"
+#include "vm/HelperThreadState.h"
+#include "vm/Time.h"
+#include "vm/TraceLogging.h"
+#include "vm/TraceLoggingTypes.h"
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmCraneliftCompile.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmStubs.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::MakeEnumeratedRange;
+using mozilla::Unused;
+
+bool CompiledCode::swap(MacroAssembler& masm) {
+ MOZ_ASSERT(bytes.empty());
+ if (!masm.swapBuffer(bytes)) {
+ return false;
+ }
+
+ callSites.swap(masm.callSites());
+ callSiteTargets.swap(masm.callSiteTargets());
+ trapSites.swap(masm.trapSites());
+ symbolicAccesses.swap(masm.symbolicAccesses());
+ codeLabels.swap(masm.codeLabels());
+ return true;
+}
+
+bool CompiledCode::swapCranelift(MacroAssembler& masm,
+ CraneliftReusableData& data) {
+ if (!swap(masm)) {
+ return false;
+ }
+ std::swap(data, craneliftReusableData);
+ return true;
+}
+
+// ****************************************************************************
+// ModuleGenerator
+
+static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
+static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
+
+ModuleGenerator::ModuleGenerator(const CompileArgs& args,
+ ModuleEnvironment* moduleEnv,
+ CompilerEnvironment* compilerEnv,
+ const Atomic<bool>* cancelled,
+ UniqueChars* error)
+ : compileArgs_(&args),
+ error_(error),
+ cancelled_(cancelled),
+ moduleEnv_(moduleEnv),
+ compilerEnv_(compilerEnv),
+ linkData_(nullptr),
+ metadataTier_(nullptr),
+ lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
+ masmAlloc_(&lifo_),
+ masm_(masmAlloc_, *moduleEnv, /* limitedSize= */ false),
+ debugTrapCodeOffset_(),
+ lastPatchedCallSite_(0),
+ startOfUnpatchedCallsites_(0),
+ parallel_(false),
+ outstanding_(0),
+ currentTask_(nullptr),
+ batchedBytecode_(0),
+ finishedFuncDefs_(false) {
+ MOZ_ASSERT(IsCompilingWasm());
+}
+
+ModuleGenerator::~ModuleGenerator() {
+ MOZ_ASSERT_IF(finishedFuncDefs_, !batchedBytecode_);
+ MOZ_ASSERT_IF(finishedFuncDefs_, !currentTask_);
+
+ if (parallel_) {
+ if (outstanding_) {
+ AutoLockHelperThreadState lock;
+
+ // Remove any pending compilation tasks from the worklist.
+ size_t removed = RemovePendingWasmCompileTasks(taskState_, mode(), lock);
+ MOZ_ASSERT(outstanding_ >= removed);
+ outstanding_ -= removed;
+
+ // Wait until all active compilation tasks have finished.
+ while (true) {
+ MOZ_ASSERT(outstanding_ >= taskState_.finished().length());
+ outstanding_ -= taskState_.finished().length();
+ taskState_.finished().clear();
+
+ MOZ_ASSERT(outstanding_ >= taskState_.numFailed());
+ outstanding_ -= taskState_.numFailed();
+ taskState_.numFailed() = 0;
+
+ if (!outstanding_) {
+ break;
+ }
+
+ taskState_.condVar().wait(lock); /* failed or finished */
+ }
+ }
+ } else {
+ MOZ_ASSERT(!outstanding_);
+ }
+
+ // Propagate error state.
+ if (error_ && !*error_) {
+ AutoLockHelperThreadState lock;
+ *error_ = std::move(taskState_.errorMessage());
+ }
+}
+
+bool ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align,
+ uint32_t* globalDataOffset) {
+ CheckedInt<uint32_t> newGlobalDataLength(metadata_->globalDataLength);
+
+ newGlobalDataLength +=
+ ComputeByteAlignment(newGlobalDataLength.value(), align);
+ if (!newGlobalDataLength.isValid()) {
+ return false;
+ }
+
+ *globalDataOffset = newGlobalDataLength.value();
+ newGlobalDataLength += bytes;
+
+ if (!newGlobalDataLength.isValid()) {
+ return false;
+ }
+
+ metadata_->globalDataLength = newGlobalDataLength.value();
+ return true;
+}
+
+bool ModuleGenerator::init(Metadata* maybeAsmJSMetadata,
+ JSTelemetrySender telemetrySender) {
+ // Perform fallible metadata, linkdata, assumption allocations.
+
+ telemetrySender_ = telemetrySender;
+
+ MOZ_ASSERT(isAsmJS() == !!maybeAsmJSMetadata);
+ if (maybeAsmJSMetadata) {
+ metadata_ = maybeAsmJSMetadata;
+ } else {
+ metadata_ = js_new<Metadata>();
+ if (!metadata_) {
+ return false;
+ }
+ }
+
+ if (compileArgs_->scriptedCaller.filename) {
+ metadata_->filename =
+ DuplicateString(compileArgs_->scriptedCaller.filename.get());
+ if (!metadata_->filename) {
+ return false;
+ }
+
+ metadata_->filenameIsURL = compileArgs_->scriptedCaller.filenameIsURL;
+ } else {
+ MOZ_ASSERT(!compileArgs_->scriptedCaller.filenameIsURL);
+ }
+
+ if (compileArgs_->sourceMapURL) {
+ metadata_->sourceMapURL = DuplicateString(compileArgs_->sourceMapURL.get());
+ if (!metadata_->sourceMapURL) {
+ return false;
+ }
+ }
+
+ linkData_ = js::MakeUnique<LinkData>(tier());
+ if (!linkData_) {
+ return false;
+ }
+
+ metadataTier_ = js::MakeUnique<MetadataTier>(tier());
+ if (!metadataTier_) {
+ return false;
+ }
+
+ // funcToCodeRange maps function indices to code-range indices and all
+ // elements will be initialized by the time module generation is finished.
+
+ if (!metadataTier_->funcToCodeRange.appendN(BAD_CODE_RANGE,
+ moduleEnv_->funcs.length())) {
+ return false;
+ }
+
+ // Pre-reserve space for large Vectors to avoid the significant cost of the
+ // final reallocs. In particular, the MacroAssembler can be enormous, so be
+ // extra conservative. Since large over-reservations may fail when the
+ // actual allocations will succeed, ignore OOM failures. Note,
+ // shrinkStorageToFit calls at the end will trim off unneeded capacity.
+
+ size_t codeSectionSize =
+ moduleEnv_->codeSection ? moduleEnv_->codeSection->size : 0;
+
+ size_t estimatedCodeSize =
+ 1.2 * EstimateCompiledCodeSize(tier(), codeSectionSize);
+ Unused << masm_.reserve(std::min(estimatedCodeSize, MaxCodeBytesPerProcess));
+
+ Unused << metadataTier_->codeRanges.reserve(2 * moduleEnv_->numFuncDefs());
+
+ const size_t ByteCodesPerCallSite = 50;
+ Unused << metadataTier_->callSites.reserve(codeSectionSize /
+ ByteCodesPerCallSite);
+
+ const size_t ByteCodesPerOOBTrap = 10;
+ Unused << metadataTier_->trapSites[Trap::OutOfBounds].reserve(
+ codeSectionSize / ByteCodesPerOOBTrap);
+
+ // Allocate space in TlsData for declarations that need it.
+
+ MOZ_ASSERT(metadata_->globalDataLength == 0);
+
+ for (size_t i = 0; i < moduleEnv_->funcImportGlobalDataOffsets.length();
+ i++) {
+ uint32_t globalDataOffset;
+ if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*),
+ &globalDataOffset)) {
+ return false;
+ }
+
+ moduleEnv_->funcImportGlobalDataOffsets[i] = globalDataOffset;
+
+ FuncType copy;
+ if (!copy.clone(*moduleEnv_->funcs[i].type)) {
+ return false;
+ }
+ if (!metadataTier_->funcImports.emplaceBack(std::move(copy),
+ globalDataOffset)) {
+ return false;
+ }
+ }
+
+ for (TableDesc& table : moduleEnv_->tables) {
+ if (!allocateGlobalBytes(sizeof(TableTls), sizeof(void*),
+ &table.globalDataOffset)) {
+ return false;
+ }
+ }
+
+ if (!isAsmJS()) {
+    // Copy the type definitions that are required at runtime into the
+    // metadata, allocating global data so that codegen can find the type ids
+    // at runtime.
+ for (uint32_t typeIndex = 0; typeIndex < moduleEnv_->types.length();
+ typeIndex++) {
+ const TypeDef& typeDef = moduleEnv_->types[typeIndex];
+ TypeIdDesc& typeId = moduleEnv_->typeIds[typeIndex];
+
+ if (TypeIdDesc::isGlobal(typeDef)) {
+ uint32_t globalDataOffset;
+ if (!allocateGlobalBytes(sizeof(void*), sizeof(void*),
+ &globalDataOffset)) {
+ return false;
+ }
+
+ typeId = TypeIdDesc::global(typeDef, globalDataOffset);
+
+ TypeDef copy;
+ if (!copy.clone(typeDef)) {
+ return false;
+ }
+
+ if (!metadata_->types.emplaceBack(std::move(copy), typeId)) {
+ return false;
+ }
+ } else {
+ typeId = TypeIdDesc::immediate(typeDef);
+ }
+ }
+
+ // If we allow type indices, then we need to rewrite the index space to
+ // account for types that are omitted from metadata, such as function
+ // types that fit in an immediate.
+ if (moduleEnv_->functionReferencesEnabled()) {
+ // Do a linear pass to create a map from src index to dest index.
+ RenumberMap map;
+ for (uint32_t srcIndex = 0, destIndex = 0;
+ srcIndex < moduleEnv_->types.length(); srcIndex++) {
+ const TypeDef& typeDef = moduleEnv_->types[srcIndex];
+ if (!TypeIdDesc::isGlobal(typeDef)) {
+ continue;
+ }
+ if (!map.put(srcIndex, destIndex++)) {
+ return false;
+ }
+ }
+
+ // Apply the map
+ for (TypeDefWithId& typeDef : metadata_->types) {
+ typeDef.renumber(map);
+ }
+ }
+ }
+
+ for (GlobalDesc& global : moduleEnv_->globals) {
+ if (global.isConstant()) {
+ continue;
+ }
+
+ uint32_t width =
+ global.isIndirect() ? sizeof(void*) : SizeOf(global.type());
+
+ uint32_t globalDataOffset;
+ if (!allocateGlobalBytes(width, width, &globalDataOffset)) {
+ return false;
+ }
+
+ global.setOffset(globalDataOffset);
+ }
+
+ // Accumulate all exported functions:
+ // - explicitly marked as such;
+ // - implicitly exported by being an element of function tables;
+ // - implicitly exported by being the start function;
+ // The FuncExportVector stored in Metadata needs to be sorted (to allow
+ // O(log(n)) lookup at runtime) and deduplicated. Use a vector with invalid
+  // entries for every single function, which we'll fill as we go through the
+  // exports and from which we'll remove the invalid entries afterwards.
+
+ static_assert(((uint64_t(MaxFuncs) << 1) | 1) < uint64_t(UINT32_MAX),
+ "bit packing won't work in ExportedFunc");
+
+ class ExportedFunc {
+ uint32_t value;
+
+ public:
+ ExportedFunc() : value(UINT32_MAX) {}
+ ExportedFunc(uint32_t index, bool isExplicit)
+ : value((index << 1) | (isExplicit ? 1 : 0)) {}
+ uint32_t index() const { return value >> 1; }
+ bool isExplicit() const { return value & 0x1; }
+ bool operator<(const ExportedFunc& other) const {
+ return index() < other.index();
+ }
+ bool operator==(const ExportedFunc& other) const {
+ return index() == other.index();
+ }
+ bool isInvalid() const { return value == UINT32_MAX; }
+ void mergeExplicit(bool explicitBit) {
+ if (!isExplicit() && explicitBit) {
+ value |= 0x1;
+ }
+ }
+ };
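+
+  // For example, ExportedFunc(5, /* isExplicit = */ true) stores
+  // (5 << 1) | 1 == 11, so index() == 5 and isExplicit() == true.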
+
+ Vector<ExportedFunc, 8, SystemAllocPolicy> exportedFuncs;
+ if (!exportedFuncs.resize(moduleEnv_->numFuncs())) {
+ return false;
+ }
+
+ auto addOrMerge = [&exportedFuncs](ExportedFunc newEntry) {
+ uint32_t index = newEntry.index();
+ if (exportedFuncs[index].isInvalid()) {
+ exportedFuncs[index] = newEntry;
+ } else {
+ exportedFuncs[index].mergeExplicit(newEntry.isExplicit());
+ }
+ };
+
+ for (const Export& exp : moduleEnv_->exports) {
+ if (exp.kind() == DefinitionKind::Function) {
+ addOrMerge(ExportedFunc(exp.funcIndex(), true));
+ }
+ }
+
+ if (moduleEnv_->startFuncIndex) {
+ addOrMerge(ExportedFunc(*moduleEnv_->startFuncIndex, true));
+ }
+
+ for (const ElemSegment* seg : moduleEnv_->elemSegments) {
+ // For now, the segments always carry function indices regardless of the
+ // segment's declared element type; this works because the only legal
+ // element types are funcref and externref and the only legal values are
+ // functions and null. We always add functions in segments as exported
+ // functions, regardless of the segment's type. In the future, if we make
+ // the representation of AnyRef segments different, we will have to consider
+ // function values in those segments specially.
+ bool isAsmJS = seg->active() && moduleEnv_->tables[seg->tableIndex].isAsmJS;
+ if (!isAsmJS) {
+ for (uint32_t funcIndex : seg->elemFuncIndices) {
+ if (funcIndex != NullFuncIndex) {
+ addOrMerge(ExportedFunc(funcIndex, false));
+ }
+ }
+ }
+ }
+
+ for (const GlobalDesc& global : moduleEnv_->globals) {
+ if (global.isVariable() &&
+ global.initExpr().kind() == InitExpr::Kind::RefFunc) {
+ addOrMerge(ExportedFunc(global.initExpr().refFuncIndex(), false));
+ }
+ }
+
+ auto* newEnd =
+ std::remove_if(exportedFuncs.begin(), exportedFuncs.end(),
+ [](const ExportedFunc& exp) { return exp.isInvalid(); });
+ exportedFuncs.erase(newEnd, exportedFuncs.end());
+
+ if (!metadataTier_->funcExports.reserve(exportedFuncs.length())) {
+ return false;
+ }
+
+ for (const ExportedFunc& funcIndex : exportedFuncs) {
+ FuncType funcType;
+ if (!funcType.clone(*moduleEnv_->funcs[funcIndex.index()].type)) {
+ return false;
+ }
+ metadataTier_->funcExports.infallibleEmplaceBack(
+ std::move(funcType), funcIndex.index(), funcIndex.isExplicit());
+ }
+
+ // Determine whether parallel or sequential compilation is to be used and
+ // initialize the CompileTasks that will be used in either mode.
+
+ GlobalHelperThreadState& threads = HelperThreadState();
+ MOZ_ASSERT(threads.threadCount > 1);
+
+ uint32_t numTasks;
+ if (CanUseExtraThreads() && threads.cpuCount > 1) {
+ parallel_ = true;
+ numTasks = 2 * threads.maxWasmCompilationThreads();
+ } else {
+ numTasks = 1;
+ }
+
+ if (!tasks_.initCapacity(numTasks)) {
+ return false;
+ }
+ for (size_t i = 0; i < numTasks; i++) {
+ tasks_.infallibleEmplaceBack(*moduleEnv_, *compilerEnv_, taskState_,
+ COMPILATION_LIFO_DEFAULT_CHUNK_SIZE,
+ telemetrySender);
+ }
+
+ if (!freeTasks_.reserve(numTasks)) {
+ return false;
+ }
+ for (size_t i = 0; i < numTasks; i++) {
+ freeTasks_.infallibleAppend(&tasks_[i]);
+ }
+
+ // Fill in function stubs for each import so that imported functions can be
+ // used in all the places that normal function definitions can (table
+ // elements, export calls, etc).
+
+ CompiledCode& importCode = tasks_[0].output;
+ MOZ_ASSERT(importCode.empty());
+
+ if (!GenerateImportFunctions(*moduleEnv_, metadataTier_->funcImports,
+ &importCode)) {
+ return false;
+ }
+
+ if (!linkCompiledCode(importCode)) {
+ return false;
+ }
+
+ importCode.clear();
+ return true;
+}
+
+bool ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const {
+ return metadataTier_->funcToCodeRange[funcIndex] != BAD_CODE_RANGE;
+}
+
+const CodeRange& ModuleGenerator::funcCodeRange(uint32_t funcIndex) const {
+ MOZ_ASSERT(funcIsCompiled(funcIndex));
+ const CodeRange& cr =
+ metadataTier_->codeRanges[metadataTier_->funcToCodeRange[funcIndex]];
+ MOZ_ASSERT(cr.isFunction());
+ return cr;
+}
+
+static bool InRange(uint32_t caller, uint32_t callee) {
+ // We assume JumpImmediateRange is defined conservatively enough that the
+ // slight difference between 'caller' (which is really the return address
+ // offset) and the actual base of the relative displacement computation
+ // isn't significant.
+ uint32_t range = std::min(JitOptions.jumpThreshold, JumpImmediateRange);
+ if (caller < callee) {
+ return callee - caller < range;
+ }
+ return caller - callee < range;
+}
+
+typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>
+ OffsetMap;
+typedef EnumeratedArray<Trap, Trap::Limit, Maybe<uint32_t>>
+ TrapMaybeOffsetArray;
+
+bool ModuleGenerator::linkCallSites() {
+ masm_.haltingAlign(CodeAlignment);
+
+ // Create far jumps for calls that have relative offsets that may otherwise
+ // go out of range. This method is called both between function bodies (at a
+ // frequency determined by the ISA's jump range) and once at the very end of
+ // a module's codegen after all possible calls/traps have been emitted.
+
+ OffsetMap existingCallFarJumps;
+ for (; lastPatchedCallSite_ < metadataTier_->callSites.length();
+ lastPatchedCallSite_++) {
+ const CallSite& callSite = metadataTier_->callSites[lastPatchedCallSite_];
+ const CallSiteTarget& target = callSiteTargets_[lastPatchedCallSite_];
+ uint32_t callerOffset = callSite.returnAddressOffset();
+ switch (callSite.kind()) {
+ case CallSiteDesc::Dynamic:
+ case CallSiteDesc::Symbolic:
+ break;
+ case CallSiteDesc::Func: {
+ if (funcIsCompiled(target.funcIndex())) {
+ uint32_t calleeOffset =
+ funcCodeRange(target.funcIndex()).funcUncheckedCallEntry();
+ if (InRange(callerOffset, calleeOffset)) {
+ masm_.patchCall(callerOffset, calleeOffset);
+ break;
+ }
+ }
+
+ OffsetMap::AddPtr p =
+ existingCallFarJumps.lookupForAdd(target.funcIndex());
+ if (!p) {
+ Offsets offsets;
+ offsets.begin = masm_.currentOffset();
+ if (!callFarJumps_.emplaceBack(target.funcIndex(),
+ masm_.farJumpWithPatch())) {
+ return false;
+ }
+ offsets.end = masm_.currentOffset();
+ if (masm_.oom()) {
+ return false;
+ }
+ if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
+ offsets)) {
+ return false;
+ }
+ if (!existingCallFarJumps.add(p, target.funcIndex(), offsets.begin)) {
+ return false;
+ }
+ }
+
+ masm_.patchCall(callerOffset, p->value());
+ break;
+ }
+ case CallSiteDesc::Breakpoint:
+ case CallSiteDesc::EnterFrame:
+ case CallSiteDesc::LeaveFrame: {
+ Uint32Vector& jumps = metadataTier_->debugTrapFarJumpOffsets;
+ if (jumps.empty() || !InRange(jumps.back(), callerOffset)) {
+ Offsets offsets;
+ offsets.begin = masm_.currentOffset();
+ CodeOffset jumpOffset = masm_.farJumpWithPatch();
+ offsets.end = masm_.currentOffset();
+ if (masm_.oom()) {
+ return false;
+ }
+ if (!metadataTier_->codeRanges.emplaceBack(CodeRange::FarJumpIsland,
+ offsets)) {
+ return false;
+ }
+ if (!debugTrapFarJumps_.emplaceBack(jumpOffset)) {
+ return false;
+ }
+ if (!jumps.emplaceBack(offsets.begin)) {
+ return false;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ masm_.flushBuffer();
+ return !masm_.oom();
+}
+
+void ModuleGenerator::noteCodeRange(uint32_t codeRangeIndex,
+ const CodeRange& codeRange) {
+ switch (codeRange.kind()) {
+ case CodeRange::Function:
+ MOZ_ASSERT(metadataTier_->funcToCodeRange[codeRange.funcIndex()] ==
+ BAD_CODE_RANGE);
+ metadataTier_->funcToCodeRange[codeRange.funcIndex()] = codeRangeIndex;
+ break;
+ case CodeRange::InterpEntry:
+ metadataTier_->lookupFuncExport(codeRange.funcIndex())
+ .initEagerInterpEntryOffset(codeRange.begin());
+ break;
+ case CodeRange::JitEntry:
+ // Nothing to do: jit entries are linked in the jump tables.
+ break;
+ case CodeRange::ImportJitExit:
+ metadataTier_->funcImports[codeRange.funcIndex()].initJitExitOffset(
+ codeRange.begin());
+ break;
+ case CodeRange::ImportInterpExit:
+ metadataTier_->funcImports[codeRange.funcIndex()].initInterpExitOffset(
+ codeRange.begin());
+ break;
+ case CodeRange::DebugTrap:
+ MOZ_ASSERT(!debugTrapCodeOffset_);
+ debugTrapCodeOffset_ = codeRange.begin();
+ break;
+ case CodeRange::TrapExit:
+ MOZ_ASSERT(!linkData_->trapOffset);
+ linkData_->trapOffset = codeRange.begin();
+ break;
+ case CodeRange::Throw:
+ // Jumped to by other stubs, so nothing to do.
+ break;
+ case CodeRange::FarJumpIsland:
+ case CodeRange::BuiltinThunk:
+ MOZ_CRASH("Unexpected CodeRange kind");
+ }
+}
+
+template <class Vec, class Op>
+static bool AppendForEach(Vec* dstVec, const Vec& srcVec, Op op) {
+ if (!dstVec->growByUninitialized(srcVec.length())) {
+ return false;
+ }
+
+ using T = typename Vec::ElementType;
+
+ const T* src = srcVec.begin();
+
+ T* dstBegin = dstVec->begin();
+ T* dstEnd = dstVec->end();
+ T* dstStart = dstEnd - srcVec.length();
+
+ for (T* dst = dstStart; dst != dstEnd; dst++, src++) {
+ new (dst) T(*src);
+ op(dst - dstBegin, dst);
+ }
+
+ return true;
+}
+
+bool ModuleGenerator::linkCompiledCode(CompiledCode& code) {
+ // Before merging in new code, if calls in a prior code range might go out of
+ // range, insert far jumps to extend the range.
+
+ if (!InRange(startOfUnpatchedCallsites_,
+ masm_.size() + code.bytes.length())) {
+ startOfUnpatchedCallsites_ = masm_.size();
+ if (!linkCallSites()) {
+ return false;
+ }
+ }
+
+ // All code offsets in 'code' must be incremented by their position in the
+ // overall module when the code was appended.
+
+ masm_.haltingAlign(CodeAlignment);
+ const size_t offsetInModule = masm_.size();
+ if (!masm_.appendRawCode(code.bytes.begin(), code.bytes.length())) {
+ return false;
+ }
+
+ auto codeRangeOp = [=](uint32_t codeRangeIndex, CodeRange* codeRange) {
+ codeRange->offsetBy(offsetInModule);
+ noteCodeRange(codeRangeIndex, *codeRange);
+ };
+ if (!AppendForEach(&metadataTier_->codeRanges, code.codeRanges,
+ codeRangeOp)) {
+ return false;
+ }
+
+ auto callSiteOp = [=](uint32_t, CallSite* cs) {
+ cs->offsetBy(offsetInModule);
+ };
+ if (!AppendForEach(&metadataTier_->callSites, code.callSites, callSiteOp)) {
+ return false;
+ }
+
+ if (!callSiteTargets_.appendAll(code.callSiteTargets)) {
+ return false;
+ }
+
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ auto trapSiteOp = [=](uint32_t, TrapSite* ts) {
+ ts->offsetBy(offsetInModule);
+ };
+ if (!AppendForEach(&metadataTier_->trapSites[trap], code.trapSites[trap],
+ trapSiteOp)) {
+ return false;
+ }
+ }
+
+ for (const SymbolicAccess& access : code.symbolicAccesses) {
+ uint32_t patchAt = offsetInModule + access.patchAt.offset();
+ if (!linkData_->symbolicLinks[access.target].append(patchAt)) {
+ return false;
+ }
+ }
+
+ for (const CodeLabel& codeLabel : code.codeLabels) {
+ LinkData::InternalLink link;
+ link.patchAtOffset = offsetInModule + codeLabel.patchAt().offset();
+ link.targetOffset = offsetInModule + codeLabel.target().offset();
+#ifdef JS_CODELABEL_LINKMODE
+ link.mode = codeLabel.linkMode();
+#endif
+ if (!linkData_->internalLinks.append(link)) {
+ return false;
+ }
+ }
+
+ for (size_t i = 0; i < code.stackMaps.length(); i++) {
+ StackMaps::Maplet maplet = code.stackMaps.move(i);
+ maplet.offsetBy(offsetInModule);
+ if (!metadataTier_->stackMaps.add(maplet)) {
+ // This function is now the only owner of maplet.map, so we'd better
+ // free it right now.
+ maplet.map->destroy();
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool ExecuteCompileTask(CompileTask* task, UniqueChars* error) {
+ MOZ_ASSERT(task->lifo.isEmpty());
+ MOZ_ASSERT(task->output.empty());
+
+#ifdef ENABLE_SPIDERMONKEY_TELEMETRY
+ int64_t startTime = PRMJ_Now();
+ int compileTimeTelemetryID;
+#endif
+
+ switch (task->compilerEnv.tier()) {
+ case Tier::Optimized:
+ switch (task->compilerEnv.optimizedBackend()) {
+ case OptimizedBackend::Cranelift:
+ if (!CraneliftCompileFunctions(task->moduleEnv, task->compilerEnv,
+ task->lifo, task->inputs,
+ &task->output, error)) {
+ return false;
+ }
+#ifdef ENABLE_SPIDERMONKEY_TELEMETRY
+ compileTimeTelemetryID = JS_TELEMETRY_WASM_COMPILE_TIME_CRANELIFT_US;
+#endif
+ break;
+ case OptimizedBackend::Ion:
+ if (!IonCompileFunctions(task->moduleEnv, task->compilerEnv,
+ task->lifo, task->inputs, &task->output,
+ error)) {
+ return false;
+ }
+#ifdef ENABLE_SPIDERMONKEY_TELEMETRY
+ compileTimeTelemetryID = JS_TELEMETRY_WASM_COMPILE_TIME_ION_US;
+#endif
+ break;
+ }
+ break;
+ case Tier::Baseline:
+ if (!BaselineCompileFunctions(task->moduleEnv, task->compilerEnv,
+ task->lifo, task->inputs, &task->output,
+ error)) {
+ return false;
+ }
+#ifdef ENABLE_SPIDERMONKEY_TELEMETRY
+ compileTimeTelemetryID = JS_TELEMETRY_WASM_COMPILE_TIME_BASELINE_US;
+#endif
+ break;
+ }
+
+#ifdef ENABLE_SPIDERMONKEY_TELEMETRY
+ int64_t endTime = PRMJ_Now();
+ int64_t compileTimeMicros = endTime - startTime;
+
+ task->telemetrySender.addTelemetry(compileTimeTelemetryID, compileTimeMicros);
+#endif
+
+ MOZ_ASSERT(task->lifo.isEmpty());
+ MOZ_ASSERT(task->inputs.length() == task->output.codeRanges.length());
+ task->inputs.clear();
+ return true;
+}
+
+void CompileTask::runHelperThreadTask(AutoLockHelperThreadState& lock) {
+ TraceLoggerThread* logger = TraceLoggerForCurrentThread();
+ AutoTraceLog logCompile(logger, TraceLogger_WasmCompilation);
+
+ UniqueChars error;
+ bool ok;
+
+ {
+ AutoUnlockHelperThreadState unlock(lock);
+ ok = ExecuteCompileTask(this, &error);
+ }
+
+ // Don't release the lock between updating our state and returning from this
+ // method.
+
+ if (!ok || !state.finished().append(this)) {
+ state.numFailed()++;
+ if (!state.errorMessage()) {
+ state.errorMessage() = std::move(error);
+ }
+ }
+
+ state.condVar().notify_one(); /* failed or finished */
+}
+
+bool ModuleGenerator::locallyCompileCurrentTask() {
+ if (!ExecuteCompileTask(currentTask_, error_)) {
+ return false;
+ }
+ if (!finishTask(currentTask_)) {
+ return false;
+ }
+ currentTask_ = nullptr;
+ batchedBytecode_ = 0;
+ return true;
+}
+
+bool ModuleGenerator::finishTask(CompileTask* task) {
+ masm_.haltingAlign(CodeAlignment);
+
+ if (!linkCompiledCode(task->output)) {
+ return false;
+ }
+
+ task->output.clear();
+
+ MOZ_ASSERT(task->inputs.empty());
+ MOZ_ASSERT(task->output.empty());
+ MOZ_ASSERT(task->lifo.isEmpty());
+ freeTasks_.infallibleAppend(task);
+ return true;
+}
+
+bool ModuleGenerator::launchBatchCompile() {
+ MOZ_ASSERT(currentTask_);
+
+ if (cancelled_ && *cancelled_) {
+ return false;
+ }
+
+ if (!parallel_) {
+ return locallyCompileCurrentTask();
+ }
+
+ if (!StartOffThreadWasmCompile(currentTask_, mode())) {
+ return false;
+ }
+ outstanding_++;
+ currentTask_ = nullptr;
+ batchedBytecode_ = 0;
+ return true;
+}
+
+bool ModuleGenerator::finishOutstandingTask() {
+ MOZ_ASSERT(parallel_);
+
+ CompileTask* task = nullptr;
+ {
+ AutoLockHelperThreadState lock;
+ while (true) {
+ MOZ_ASSERT(outstanding_ > 0);
+
+ if (taskState_.numFailed() > 0) {
+ return false;
+ }
+
+ if (!taskState_.finished().empty()) {
+ outstanding_--;
+ task = taskState_.finished().popCopy();
+ break;
+ }
+
+ taskState_.condVar().wait(lock); /* failed or finished */
+ }
+ }
+
+ // Call outside of the compilation lock.
+ return finishTask(task);
+}
+
+bool ModuleGenerator::compileFuncDef(uint32_t funcIndex,
+ uint32_t lineOrBytecode,
+ const uint8_t* begin, const uint8_t* end,
+ Uint32Vector&& lineNums) {
+ MOZ_ASSERT(!finishedFuncDefs_);
+ MOZ_ASSERT(funcIndex < moduleEnv_->numFuncs());
+
+ uint32_t threshold;
+ switch (tier()) {
+ case Tier::Baseline:
+ threshold = JitOptions.wasmBatchBaselineThreshold;
+ break;
+ case Tier::Optimized:
+ switch (compilerEnv_->optimizedBackend()) {
+ case OptimizedBackend::Ion:
+ threshold = JitOptions.wasmBatchIonThreshold;
+ break;
+ case OptimizedBackend::Cranelift:
+ threshold = JitOptions.wasmBatchCraneliftThreshold;
+ break;
+ default:
+ MOZ_CRASH("Invalid optimizedBackend value");
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid tier value");
+ break;
+ }
+
+ uint32_t funcBytecodeLength = end - begin;
+
+ // Do not go over the threshold if we can avoid it: spin off the compilation
+ // before appending the function if we would go over. (Very large single
+ // functions may still exceed the threshold but this is fine; it'll be very
+ // uncommon and is in any case safely handled by the MacroAssembler's buffer
+ // limit logic.)
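+  //
+  // For illustration (the threshold values are runtime-configurable JitOptions
+  // and the numbers here are made up): with a 100 KB threshold, a batch
+  // currently holding 90 KB of bytecode and an incoming 20 KB function,
+  //
+  //   batchedBytecode_ + funcBytecodeLength = 90 KB + 20 KB > 100 KB,
+  //
+  // so the 90 KB batch is launched first and the 20 KB function starts a new
+  // batch. A single 150 KB function would still form its own over-threshold
+  // batch, as noted above.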
+
+ if (currentTask_ && currentTask_->inputs.length() &&
+ batchedBytecode_ + funcBytecodeLength > threshold) {
+ if (!launchBatchCompile()) {
+ return false;
+ }
+ }
+
+ if (!currentTask_) {
+ if (freeTasks_.empty() && !finishOutstandingTask()) {
+ return false;
+ }
+ currentTask_ = freeTasks_.popCopy();
+ }
+
+ if (!currentTask_->inputs.emplaceBack(funcIndex, lineOrBytecode, begin, end,
+ std::move(lineNums))) {
+ return false;
+ }
+
+ batchedBytecode_ += funcBytecodeLength;
+ MOZ_ASSERT(batchedBytecode_ <= MaxCodeSectionBytes);
+ return true;
+}
+
+bool ModuleGenerator::finishFuncDefs() {
+ MOZ_ASSERT(!finishedFuncDefs_);
+
+ if (currentTask_ && !locallyCompileCurrentTask()) {
+ return false;
+ }
+
+ finishedFuncDefs_ = true;
+ return true;
+}
+
+bool ModuleGenerator::finishCodegen() {
+ // Now that all functions and stubs are generated and their CodeRanges
+ // known, patch all calls (which can emit far jumps) and far jumps. Linking
+ // can emit tiny far-jump stubs, so there is an ordering dependency here.
+
+ if (!linkCallSites()) {
+ return false;
+ }
+
+ for (CallFarJump far : callFarJumps_) {
+ masm_.patchFarJump(far.jump,
+ funcCodeRange(far.funcIndex).funcUncheckedCallEntry());
+ }
+
+ for (CodeOffset farJump : debugTrapFarJumps_) {
+ masm_.patchFarJump(farJump, debugTrapCodeOffset_);
+ }
+
+ // None of the linking or far-jump operations should emit masm metadata.
+
+ MOZ_ASSERT(masm_.callSites().empty());
+ MOZ_ASSERT(masm_.callSiteTargets().empty());
+ MOZ_ASSERT(masm_.trapSites().empty());
+ MOZ_ASSERT(masm_.symbolicAccesses().empty());
+ MOZ_ASSERT(masm_.codeLabels().empty());
+
+ masm_.finish();
+ return !masm_.oom();
+}
+
+bool ModuleGenerator::finishMetadataTier() {
+ // The stack maps aren't yet sorted. Do so now, since we'll need to
+ // binary-search them at GC time.
+ metadataTier_->stackMaps.sort();
+
+#ifdef DEBUG
+ // Check that the stack map contains no duplicates, since that could lead to
+ // ambiguities about stack slot pointerness.
+ uint8_t* previousNextInsnAddr = nullptr;
+ for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
+ const StackMaps::Maplet& maplet = metadataTier_->stackMaps.get(i);
+ MOZ_ASSERT_IF(i > 0, uintptr_t(maplet.nextInsnAddr) >
+ uintptr_t(previousNextInsnAddr));
+ previousNextInsnAddr = maplet.nextInsnAddr;
+ }
+
+  // Assert that the remaining metadata vectors are in fact sorted.
+ uint32_t last = 0;
+ for (const CodeRange& codeRange : metadataTier_->codeRanges) {
+ MOZ_ASSERT(codeRange.begin() >= last);
+ last = codeRange.end();
+ }
+
+ last = 0;
+ for (const CallSite& callSite : metadataTier_->callSites) {
+ MOZ_ASSERT(callSite.returnAddressOffset() >= last);
+ last = callSite.returnAddressOffset();
+ }
+
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ last = 0;
+ for (const TrapSite& trapSite : metadataTier_->trapSites[trap]) {
+ MOZ_ASSERT(trapSite.pcOffset >= last);
+ last = trapSite.pcOffset;
+ }
+ }
+
+ last = 0;
+ for (uint32_t debugTrapFarJumpOffset :
+ metadataTier_->debugTrapFarJumpOffsets) {
+ MOZ_ASSERT(debugTrapFarJumpOffset >= last);
+ last = debugTrapFarJumpOffset;
+ }
+#endif
+
+ // These Vectors can get large and the excess capacity can be significant,
+ // so realloc them down to size.
+
+ metadataTier_->funcToCodeRange.shrinkStorageToFit();
+ metadataTier_->codeRanges.shrinkStorageToFit();
+ metadataTier_->callSites.shrinkStorageToFit();
+ metadataTier_->trapSites.shrinkStorageToFit();
+ metadataTier_->debugTrapFarJumpOffsets.shrinkStorageToFit();
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ metadataTier_->trapSites[trap].shrinkStorageToFit();
+ }
+
+ return true;
+}
+
+UniqueCodeTier ModuleGenerator::finishCodeTier() {
+ MOZ_ASSERT(finishedFuncDefs_);
+
+ while (outstanding_ > 0) {
+ if (!finishOutstandingTask()) {
+ return nullptr;
+ }
+ }
+
+#ifdef DEBUG
+ for (uint32_t codeRangeIndex : metadataTier_->funcToCodeRange) {
+ MOZ_ASSERT(codeRangeIndex != BAD_CODE_RANGE);
+ }
+#endif
+
+ // Now that all imports/exports are known, we can generate a special
+ // CompiledCode containing stubs.
+
+ CompiledCode& stubCode = tasks_[0].output;
+ MOZ_ASSERT(stubCode.empty());
+
+ if (!GenerateStubs(*moduleEnv_, metadataTier_->funcImports,
+ metadataTier_->funcExports, &stubCode)) {
+ return nullptr;
+ }
+
+ if (!linkCompiledCode(stubCode)) {
+ return nullptr;
+ }
+
+ // Finish linking and metadata.
+
+ if (!finishCodegen()) {
+ return nullptr;
+ }
+
+ if (!finishMetadataTier()) {
+ return nullptr;
+ }
+
+ UniqueModuleSegment segment =
+ ModuleSegment::create(tier(), masm_, *linkData_);
+ if (!segment) {
+ return nullptr;
+ }
+
+ metadataTier_->stackMaps.offsetBy(uintptr_t(segment->base()));
+
+#ifdef DEBUG
+ // Check that each stack map is associated with a plausible instruction.
+ for (size_t i = 0; i < metadataTier_->stackMaps.length(); i++) {
+ MOZ_ASSERT(IsValidStackMapKey(compilerEnv_->debugEnabled(),
+ metadataTier_->stackMaps.get(i).nextInsnAddr),
+ "wasm stack map does not reference a valid insn");
+ }
+#endif
+
+ return js::MakeUnique<CodeTier>(std::move(metadataTier_), std::move(segment));
+}
+
+SharedMetadata ModuleGenerator::finishMetadata(const Bytes& bytecode) {
+ // Finish initialization of Metadata, which is only needed for constructing
+ // the initial Module, not for tier-2 compilation.
+ MOZ_ASSERT(mode() != CompileMode::Tier2);
+
+ // Copy over data from the ModuleEnvironment.
+
+ metadata_->memoryUsage = moduleEnv_->memoryUsage;
+ metadata_->minMemoryLength = moduleEnv_->minMemoryLength;
+ metadata_->maxMemoryLength = moduleEnv_->maxMemoryLength;
+ metadata_->startFuncIndex = moduleEnv_->startFuncIndex;
+ metadata_->tables = std::move(moduleEnv_->tables);
+ metadata_->globals = std::move(moduleEnv_->globals);
+#ifdef ENABLE_WASM_EXCEPTIONS
+ metadata_->events = std::move(moduleEnv_->events);
+#endif
+ metadata_->nameCustomSectionIndex = moduleEnv_->nameCustomSectionIndex;
+ metadata_->moduleName = moduleEnv_->moduleName;
+ metadata_->funcNames = std::move(moduleEnv_->funcNames);
+ metadata_->omitsBoundsChecks = moduleEnv_->hugeMemoryEnabled();
+ metadata_->v128Enabled = moduleEnv_->v128Enabled();
+ metadata_->usesDuplicateImports = moduleEnv_->usesDuplicateImports;
+
+ // Copy over additional debug information.
+
+ if (compilerEnv_->debugEnabled()) {
+ metadata_->debugEnabled = true;
+
+ const size_t numFuncs = moduleEnv_->funcs.length();
+ if (!metadata_->debugFuncArgTypes.resize(numFuncs)) {
+ return nullptr;
+ }
+ if (!metadata_->debugFuncReturnTypes.resize(numFuncs)) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < numFuncs; i++) {
+ if (!metadata_->debugFuncArgTypes[i].appendAll(
+ moduleEnv_->funcs[i].type->args())) {
+ return nullptr;
+ }
+ if (!metadata_->debugFuncReturnTypes[i].appendAll(
+ moduleEnv_->funcs[i].type->results())) {
+ return nullptr;
+ }
+ }
+
+ static_assert(sizeof(ModuleHash) <= sizeof(mozilla::SHA1Sum::Hash),
+ "The ModuleHash size shall not exceed the SHA1 hash size.");
+ mozilla::SHA1Sum::Hash hash;
+ mozilla::SHA1Sum sha1Sum;
+ sha1Sum.update(bytecode.begin(), bytecode.length());
+ sha1Sum.finish(hash);
+ memcpy(metadata_->debugHash, hash, sizeof(ModuleHash));
+ }
+
+ MOZ_ASSERT_IF(moduleEnv_->nameCustomSectionIndex, !!metadata_->namePayload);
+
+ // Metadata shouldn't be mutably modified after finishMetadata().
+ SharedMetadata metadata = metadata_;
+ metadata_ = nullptr;
+ return metadata;
+}
+
+SharedModule ModuleGenerator::finishModule(
+ const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* maybeTier2Listener) {
+ MOZ_ASSERT(mode() == CompileMode::Once || mode() == CompileMode::Tier1);
+
+ UniqueCodeTier codeTier = finishCodeTier();
+ if (!codeTier) {
+ return nullptr;
+ }
+
+ JumpTables jumpTables;
+ if (!jumpTables.init(mode(), codeTier->segment(),
+ codeTier->metadata().codeRanges)) {
+ return nullptr;
+ }
+
+ // Copy over data from the Bytecode, which is going away at the end of
+ // compilation.
+
+ DataSegmentVector dataSegments;
+ if (!dataSegments.reserve(moduleEnv_->dataSegments.length())) {
+ return nullptr;
+ }
+ for (const DataSegmentEnv& srcSeg : moduleEnv_->dataSegments) {
+ MutableDataSegment dstSeg = js_new<DataSegment>(srcSeg);
+ if (!dstSeg) {
+ return nullptr;
+ }
+ if (!dstSeg->bytes.append(bytecode.begin() + srcSeg.bytecodeOffset,
+ srcSeg.length)) {
+ return nullptr;
+ }
+ dataSegments.infallibleAppend(std::move(dstSeg));
+ }
+
+ CustomSectionVector customSections;
+ if (!customSections.reserve(moduleEnv_->customSections.length())) {
+ return nullptr;
+ }
+ for (const CustomSectionEnv& srcSec : moduleEnv_->customSections) {
+ CustomSection sec;
+ if (!sec.name.append(bytecode.begin() + srcSec.nameOffset,
+ srcSec.nameLength)) {
+ return nullptr;
+ }
+ MutableBytes payload = js_new<ShareableBytes>();
+ if (!payload) {
+ return nullptr;
+ }
+ if (!payload->append(bytecode.begin() + srcSec.payloadOffset,
+ srcSec.payloadLength)) {
+ return nullptr;
+ }
+ sec.payload = std::move(payload);
+ customSections.infallibleAppend(std::move(sec));
+ }
+
+ if (moduleEnv_->nameCustomSectionIndex) {
+ metadata_->namePayload =
+ customSections[*moduleEnv_->nameCustomSectionIndex].payload;
+ }
+
+ SharedMetadata metadata = finishMetadata(bytecode.bytes);
+ if (!metadata) {
+ return nullptr;
+ }
+
+ MutableCode code =
+ js_new<Code>(std::move(codeTier), *metadata, std::move(jumpTables));
+ if (!code || !code->initialize(*linkData_)) {
+ return nullptr;
+ }
+
+ // See Module debugCodeClaimed_ comments for why we need to make a separate
+ // debug copy.
+
+ UniqueBytes debugUnlinkedCode;
+ UniqueLinkData debugLinkData;
+ const ShareableBytes* debugBytecode = nullptr;
+ if (compilerEnv_->debugEnabled()) {
+ MOZ_ASSERT(mode() == CompileMode::Once);
+ MOZ_ASSERT(tier() == Tier::Debug);
+
+ debugUnlinkedCode = js::MakeUnique<Bytes>();
+ if (!debugUnlinkedCode || !debugUnlinkedCode->resize(masm_.bytesNeeded())) {
+ return nullptr;
+ }
+
+ masm_.executableCopy(debugUnlinkedCode->begin());
+
+ debugLinkData = std::move(linkData_);
+ debugBytecode = &bytecode;
+ }
+
+ // All the components are finished, so create the complete Module and start
+ // tier-2 compilation if requested.
+
+ MutableModule module = js_new<Module>(
+ *code, std::move(moduleEnv_->imports), std::move(moduleEnv_->exports),
+ std::move(dataSegments), std::move(moduleEnv_->elemSegments),
+ std::move(customSections), std::move(debugUnlinkedCode),
+ std::move(debugLinkData), debugBytecode);
+ if (!module) {
+ return nullptr;
+ }
+
+ if (mode() == CompileMode::Tier1) {
+ module->startTier2(*compileArgs_, bytecode, maybeTier2Listener,
+ telemetrySender_);
+ } else if (tier() == Tier::Serialized && maybeTier2Listener) {
+ module->serialize(*linkData_, *maybeTier2Listener);
+ }
+
+ return module;
+}
+
+bool ModuleGenerator::finishTier2(const Module& module) {
+ MOZ_ASSERT(mode() == CompileMode::Tier2);
+ MOZ_ASSERT(tier() == Tier::Optimized);
+ MOZ_ASSERT(!compilerEnv_->debugEnabled());
+
+ if (cancelled_ && *cancelled_) {
+ return false;
+ }
+
+ UniqueCodeTier codeTier = finishCodeTier();
+ if (!codeTier) {
+ return false;
+ }
+
+ if (MOZ_UNLIKELY(JitOptions.wasmDelayTier2)) {
+ // Introduce an artificial delay when testing wasmDelayTier2, since we
+ // want to exercise both tier1 and tier2 code in this case.
+ std::this_thread::sleep_for(std::chrono::milliseconds(500));
+ }
+
+ return module.finishTier2(*linkData_, std::move(codeTier));
+}
+
+size_t CompiledCode::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ size_t trapSitesSize = 0;
+ for (const TrapSiteVector& vec : trapSites) {
+ trapSitesSize += vec.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ return bytes.sizeOfExcludingThis(mallocSizeOf) +
+ codeRanges.sizeOfExcludingThis(mallocSizeOf) +
+ callSites.sizeOfExcludingThis(mallocSizeOf) +
+ callSiteTargets.sizeOfExcludingThis(mallocSizeOf) + trapSitesSize +
+ symbolicAccesses.sizeOfExcludingThis(mallocSizeOf) +
+ codeLabels.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t CompileTask::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return lifo.sizeOfExcludingThis(mallocSizeOf) +
+ inputs.sizeOfExcludingThis(mallocSizeOf) +
+ output.sizeOfExcludingThis(mallocSizeOf);
+}
diff --git a/js/src/wasm/WasmGenerator.h b/js/src/wasm/WasmGenerator.h
new file mode 100644
index 0000000000..2b041b7b1d
--- /dev/null
+++ b/js/src/wasm/WasmGenerator.h
@@ -0,0 +1,270 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_generator_h
+#define wasm_generator_h
+
+#include "mozilla/MemoryReporting.h"
+
+#include "jit/MacroAssembler.h"
+#include "threading/ProtectedData.h"
+#include "vm/HelperThreadTask.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmValidate.h"
+
+namespace js {
+namespace wasm {
+
+struct CompileTask;
+typedef Vector<CompileTask*, 0, SystemAllocPolicy> CompileTaskPtrVector;
+
+// FuncCompileInput contains the input for compiling a single function.
+
+struct FuncCompileInput {
+ const uint8_t* begin;
+ const uint8_t* end;
+ uint32_t index;
+ uint32_t lineOrBytecode;
+ Uint32Vector callSiteLineNums;
+
+ FuncCompileInput(uint32_t index, uint32_t lineOrBytecode,
+ const uint8_t* begin, const uint8_t* end,
+ Uint32Vector&& callSiteLineNums)
+ : begin(begin),
+ end(end),
+ index(index),
+ lineOrBytecode(lineOrBytecode),
+ callSiteLineNums(std::move(callSiteLineNums)) {}
+};
+
+typedef Vector<FuncCompileInput, 8, SystemAllocPolicy> FuncCompileInputVector;
+
+void CraneliftFreeReusableData(void* ptr);
+
+struct CraneliftReusableDataDtor {
+ void operator()(void* ptr) { CraneliftFreeReusableData(ptr); }
+};
+
+using CraneliftReusableData =
+ mozilla::UniquePtr<void*, CraneliftReusableDataDtor>;
+
+// CompiledCode contains the resulting code and metadata for a set of compiled
+// input functions or stubs.
+
+struct CompiledCode {
+ Bytes bytes;
+ CodeRangeVector codeRanges;
+ CallSiteVector callSites;
+ CallSiteTargetVector callSiteTargets;
+ TrapSiteVectorArray trapSites;
+ SymbolicAccessVector symbolicAccesses;
+ jit::CodeLabelVector codeLabels;
+ StackMaps stackMaps;
+ CraneliftReusableData craneliftReusableData;
+
+ [[nodiscard]] bool swap(jit::MacroAssembler& masm);
+ [[nodiscard]] bool swapCranelift(jit::MacroAssembler& masm,
+ CraneliftReusableData& craneliftData);
+
+ void clear() {
+ bytes.clear();
+ codeRanges.clear();
+ callSites.clear();
+ callSiteTargets.clear();
+ trapSites.clear();
+ symbolicAccesses.clear();
+ codeLabels.clear();
+ stackMaps.clear();
+ // The cranelift reusable data resets itself lazily.
+ MOZ_ASSERT(empty());
+ }
+
+ bool empty() {
+ return bytes.empty() && codeRanges.empty() && callSites.empty() &&
+ callSiteTargets.empty() && trapSites.empty() &&
+ symbolicAccesses.empty() && codeLabels.empty() && stackMaps.empty();
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+// The CompileTaskState of a ModuleGenerator contains the mutable state shared
+// between helper threads executing CompileTasks. Each CompileTask started on a
+// helper thread eventually either ends up in the 'finished' list or increments
+// 'numFailed'.
+
+struct CompileTaskState {
+ HelperThreadLockData<CompileTaskPtrVector> finished_;
+ HelperThreadLockData<uint32_t> numFailed_;
+ HelperThreadLockData<UniqueChars> errorMessage_;
+ HelperThreadLockData<ConditionVariable> condVar_;
+
+ CompileTaskState() : numFailed_(0) {}
+ ~CompileTaskState() {
+ MOZ_ASSERT(finished_.refNoCheck().empty());
+ MOZ_ASSERT(!numFailed_.refNoCheck());
+ }
+
+ CompileTaskPtrVector& finished() { return finished_.ref(); }
+ uint32_t& numFailed() { return numFailed_.ref(); }
+ UniqueChars& errorMessage() { return errorMessage_.ref(); }
+ ConditionVariable& condVar() { return condVar_.ref(); }
+};
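+
+// Sketch of the producer/consumer protocol built on this state (see
+// CompileTask::runHelperThreadTask and ModuleGenerator::finishOutstandingTask
+// in WasmGenerator.cpp): a helper thread compiles its batch and then, holding
+// the helper-thread lock, either appends itself to finished() or bumps
+// numFailed() (recording the first errorMessage()), and finally signals
+// condVar(). The generator thread, holding the same lock, waits on condVar()
+// until numFailed() is nonzero or finished() is non-empty, then pops a task.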
+
+// A CompileTask holds a batch of input functions that are to be compiled on a
+// helper thread as well as, eventually, the results of compilation.
+
+struct CompileTask : public HelperThreadTask {
+ const ModuleEnvironment& moduleEnv;
+ const CompilerEnvironment& compilerEnv;
+
+ CompileTaskState& state;
+ LifoAlloc lifo;
+ FuncCompileInputVector inputs;
+ CompiledCode output;
+ JSTelemetrySender telemetrySender;
+
+ CompileTask(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv, CompileTaskState& state,
+ size_t defaultChunkSize, JSTelemetrySender telemetrySender)
+ : moduleEnv(moduleEnv),
+ compilerEnv(compilerEnv),
+ state(state),
+ lifo(defaultChunkSize),
+ telemetrySender(telemetrySender) {}
+
+ virtual ~CompileTask() = default;
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override;
+ ThreadType threadType() override { return ThreadType::THREAD_TYPE_WASM; }
+};
+
+// A ModuleGenerator encapsulates the creation of a wasm module. During the
+// lifetime of a ModuleGenerator, compileFuncDef() is called once per function
+// definition and finishFuncDefs() once all bodies have been submitted. After
+// that, finishModule() or finishTier2() must be called to complete the
+// compilation and extract the resulting wasm module.
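+//
+// A typical single-tier generation sequence, sketched from the public
+// interface below (error handling abbreviated):
+//
+//   ModuleGenerator mg(args, &moduleEnv, &compilerEnv, &cancelled, &error);
+//   if (!mg.init()) return nullptr;
+//   for (/* each function definition */)
+//     if (!mg.compileFuncDef(funcIndex, lineOrBytecode, begin, end))
+//       return nullptr;
+//   if (!mg.finishFuncDefs()) return nullptr;
+//   return mg.finishModule(bytecode);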
+
+class MOZ_STACK_CLASS ModuleGenerator {
+ typedef Vector<CompileTask, 0, SystemAllocPolicy> CompileTaskVector;
+ typedef Vector<jit::CodeOffset, 0, SystemAllocPolicy> CodeOffsetVector;
+ struct CallFarJump {
+ uint32_t funcIndex;
+ jit::CodeOffset jump;
+ CallFarJump(uint32_t fi, jit::CodeOffset j) : funcIndex(fi), jump(j) {}
+ };
+ typedef Vector<CallFarJump, 0, SystemAllocPolicy> CallFarJumpVector;
+
+ // Constant parameters
+ SharedCompileArgs const compileArgs_;
+ UniqueChars* const error_;
+ const Atomic<bool>* const cancelled_;
+ ModuleEnvironment* const moduleEnv_;
+ CompilerEnvironment* const compilerEnv_;
+ JSTelemetrySender telemetrySender_;
+
+ // Data that is moved into the result of finish()
+ UniqueLinkData linkData_;
+ UniqueMetadataTier metadataTier_;
+ MutableMetadata metadata_;
+
+ // Data scoped to the ModuleGenerator's lifetime
+ CompileTaskState taskState_;
+ LifoAlloc lifo_;
+ jit::JitContext jcx_;
+ jit::TempAllocator masmAlloc_;
+ jit::WasmMacroAssembler masm_;
+ Uint32Vector funcToCodeRange_;
+ uint32_t debugTrapCodeOffset_;
+ CallFarJumpVector callFarJumps_;
+ CallSiteTargetVector callSiteTargets_;
+ uint32_t lastPatchedCallSite_;
+ uint32_t startOfUnpatchedCallsites_;
+ CodeOffsetVector debugTrapFarJumps_;
+
+ // Parallel compilation
+ bool parallel_;
+ uint32_t outstanding_;
+ CompileTaskVector tasks_;
+ CompileTaskPtrVector freeTasks_;
+ CompileTask* currentTask_;
+ uint32_t batchedBytecode_;
+
+ // Assertions
+ DebugOnly<bool> finishedFuncDefs_;
+
+ bool allocateGlobalBytes(uint32_t bytes, uint32_t align,
+ uint32_t* globalDataOff);
+
+ bool funcIsCompiled(uint32_t funcIndex) const;
+ const CodeRange& funcCodeRange(uint32_t funcIndex) const;
+ bool linkCallSites();
+ void noteCodeRange(uint32_t codeRangeIndex, const CodeRange& codeRange);
+ bool linkCompiledCode(CompiledCode& code);
+ bool locallyCompileCurrentTask();
+ bool finishTask(CompileTask* task);
+ bool launchBatchCompile();
+ bool finishOutstandingTask();
+ bool finishCodegen();
+ bool finishMetadataTier();
+ UniqueCodeTier finishCodeTier();
+ SharedMetadata finishMetadata(const Bytes& bytecode);
+
+ bool isAsmJS() const { return moduleEnv_->isAsmJS(); }
+ Tier tier() const { return compilerEnv_->tier(); }
+ CompileMode mode() const { return compilerEnv_->mode(); }
+ bool debugEnabled() const { return compilerEnv_->debugEnabled(); }
+
+ public:
+ ModuleGenerator(const CompileArgs& args, ModuleEnvironment* moduleEnv,
+ CompilerEnvironment* compilerEnv,
+ const Atomic<bool>* cancelled, UniqueChars* error);
+ ~ModuleGenerator();
+ [[nodiscard]] bool init(
+ Metadata* maybeAsmJSMetadata = nullptr,
+ JSTelemetrySender telemetrySender = JSTelemetrySender());
+
+ // Before finishFuncDefs() is called, compileFuncDef() must be called once
+ // for each funcIndex in the range [0, env->numFuncDefs()).
+
+ [[nodiscard]] bool compileFuncDef(
+ uint32_t funcIndex, uint32_t lineOrBytecode, const uint8_t* begin,
+ const uint8_t* end, Uint32Vector&& callSiteLineNums = Uint32Vector());
+
+ // Must be called after the last compileFuncDef() and before finishModule()
+ // or finishTier2().
+
+ [[nodiscard]] bool finishFuncDefs();
+
+ // If env->mode is Once or Tier1, finishModule() must be called to generate
+ // a new Module. Otherwise, if env->mode is Tier2, finishTier2() must be
+ // called to augment the given Module with tier 2 code.
+
+ SharedModule finishModule(
+ const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* maybeTier2Listener = nullptr);
+ [[nodiscard]] bool finishTier2(const Module& module);
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_generator_h
diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
new file mode 100644
index 0000000000..cdaf680670
--- /dev/null
+++ b/js/src/wasm/WasmInstance.cpp
@@ -0,0 +1,2099 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmInstance.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DebugOnly.h"
+
+#include <algorithm>
+
+#include "jsmath.h"
+
+#include "jit/AtomicOperations.h"
+#include "jit/Disassemble.h"
+#include "jit/InlinableNatives.h"
+#include "jit/JitCommon.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitScript.h"
+#include "js/ForOfIterator.h"
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/BigIntType.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmTypes.h"
+
+#include "gc/StoreBuffer-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::BitwiseCast;
+using mozilla::CheckedInt;
+using mozilla::DebugOnly;
+
+using CheckedU32 = CheckedInt<uint32_t>;
+
+class FuncTypeIdSet {
+ typedef HashMap<const FuncType*, uint32_t, FuncTypeHashPolicy,
+ SystemAllocPolicy>
+ Map;
+ Map map_;
+
+ public:
+ ~FuncTypeIdSet() {
+ MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), map_.empty());
+ }
+
+ bool allocateFuncTypeId(JSContext* cx, const FuncType& funcType,
+ const void** funcTypeId) {
+ Map::AddPtr p = map_.lookupForAdd(funcType);
+ if (p) {
+ MOZ_ASSERT(p->value() > 0);
+ p->value()++;
+ *funcTypeId = p->key();
+ return true;
+ }
+
+ UniquePtr<FuncType> clone = MakeUnique<FuncType>();
+ if (!clone || !clone->clone(funcType) || !map_.add(p, clone.get(), 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ *funcTypeId = clone.release();
+ MOZ_ASSERT(!(uintptr_t(*funcTypeId) & TypeIdDesc::ImmediateBit));
+ return true;
+ }
+
+ void deallocateFuncTypeId(const FuncType& funcType, const void* funcTypeId) {
+ Map::Ptr p = map_.lookup(funcType);
+ MOZ_RELEASE_ASSERT(p && p->key() == funcTypeId && p->value() > 0);
+
+ p->value()--;
+ if (!p->value()) {
+ js_delete(p->key());
+ map_.remove(p);
+ }
+ }
+};
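+
+// For illustration: two instances that both use the signature (i32) -> i32
+// share a single canonicalized FuncType in this set, with a reference count
+// of 2. The address of that shared FuncType serves as the type id stored in
+// instance global data (see Instance::addressOfTypeId below), so dynamic
+// signature checks can compare a single word instead of structural types.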
+
+ExclusiveData<FuncTypeIdSet> funcTypeIdSet(mutexid::WasmFuncTypeIdSet);
+
+const void** Instance::addressOfTypeId(const TypeIdDesc& typeId) const {
+ return (const void**)(globalData() + typeId.globalDataOffset());
+}
+
+FuncImportTls& Instance::funcImportTls(const FuncImport& fi) {
+ return *(FuncImportTls*)(globalData() + fi.tlsDataOffset());
+}
+
+TableTls& Instance::tableTls(const TableDesc& td) const {
+ return *(TableTls*)(globalData() + td.globalDataOffset);
+}
+
+// TODO(1626251): Consolidate definitions into Iterable.h
+static bool IterableToArray(JSContext* cx, HandleValue iterable,
+ MutableHandle<ArrayObject*> array) {
+ JS::ForOfIterator iterator(cx);
+ if (!iterator.init(iterable, JS::ForOfIterator::ThrowOnNonIterable)) {
+ return false;
+ }
+
+ array.set(NewDenseEmptyArray(cx));
+ if (!array) {
+ return false;
+ }
+
+ RootedValue nextValue(cx);
+ while (true) {
+ bool done;
+ if (!iterator.next(&nextValue, &done)) {
+ return false;
+ }
+ if (done) {
+ break;
+ }
+
+ if (!NewbornArrayPush(cx, array, nextValue)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool UnpackResults(JSContext* cx, const ValTypeVector& resultTypes,
+ const Maybe<char*> stackResultsArea, uint64_t* argv,
+ MutableHandleValue rval) {
+ if (!stackResultsArea) {
+ MOZ_ASSERT(resultTypes.length() <= 1);
+    // The result is either a single scalar value to unpack to a wasm value,
+    // or an ignored value for a function that returns no results.
+ if (resultTypes.length() == 1) {
+ return ToWebAssemblyValue(cx, rval, resultTypes[0], argv, true);
+ }
+ return true;
+ }
+
+ MOZ_ASSERT(stackResultsArea.isSome());
+ RootedArrayObject array(cx);
+ if (!IterableToArray(cx, rval, &array)) {
+ return false;
+ }
+
+ if (resultTypes.length() != array->length()) {
+ UniqueChars expected(JS_smprintf("%zu", resultTypes.length()));
+ UniqueChars got(JS_smprintf("%u", array->length()));
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_WRONG_NUMBER_OF_VALUES, expected.get(),
+ got.get());
+ return false;
+ }
+
+ DebugOnly<uint64_t> previousOffset = ~(uint64_t)0;
+
+ ABIResultIter iter(ResultType::Vector(resultTypes));
+ // The values are converted in the order they are pushed on the
+ // abstract WebAssembly stack; switch to iterate in push order.
+ while (!iter.done()) {
+ iter.next();
+ }
+ DebugOnly<bool> seenRegisterResult = false;
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ MOZ_ASSERT(!seenRegisterResult);
+ // Use rval as a scratch area to hold the extracted result.
+ rval.set(array->getDenseElement(iter.index()));
+ if (result.inRegister()) {
+ // Currently, if a function type has results, there can be only
+ // one register result. If there is only one result, it is
+ // returned as a scalar and not an iterable, so we don't get here.
+ // If there are multiple results, we extract the register result
+      // and set `argv[0]` to the extracted result, to be returned by
+ // register in the stub. The register result follows any stack
+ // results, so this preserves conversion order.
+ if (!ToWebAssemblyValue(cx, rval, result.type(), argv, true)) {
+ return false;
+ }
+ seenRegisterResult = true;
+ continue;
+ }
+ uint32_t result_size = result.size();
+ MOZ_ASSERT(result_size == 4 || result_size == 8);
+#ifdef DEBUG
+ if (previousOffset == ~(uint64_t)0) {
+ previousOffset = (uint64_t)result.stackOffset();
+ } else {
+ MOZ_ASSERT(previousOffset - (uint64_t)result_size ==
+ (uint64_t)result.stackOffset());
+ previousOffset -= (uint64_t)result_size;
+ }
+#endif
+ char* loc = stackResultsArea.value() + result.stackOffset();
+ if (!ToWebAssemblyValue(cx, rval, result.type(), loc, result_size == 8)) {
+ return false;
+ }
+ }
+
+ return true;
+}
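+
+// For illustration (a hypothetical import, not from this file): if a wasm
+// module imports a function whose type has the results (i32, i64), the JS
+// implementation must return an iterable of exactly two values, e.g.
+//
+//   imports.m.f = () => [42, 7n];
+//
+// and UnpackResults() converts them back to wasm values; an iterable of any
+// other length reports JSMSG_WASM_WRONG_NUMBER_OF_VALUES.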
+
+bool Instance::callImport(JSContext* cx, uint32_t funcImportIndex,
+ unsigned argc, uint64_t* argv) {
+ AssertRealmUnchanged aru(cx);
+
+ Tier tier = code().bestTier();
+
+ const FuncImport& fi = metadata(tier).funcImports[funcImportIndex];
+
+ ArgTypeVector argTypes(fi.funcType());
+ InvokeArgs args(cx);
+ if (!args.init(cx, argTypes.lengthWithoutStackResults())) {
+ return false;
+ }
+
+ if (fi.funcType().hasUnexposableArgOrRet()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+
+ MOZ_ASSERT(argTypes.lengthWithStackResults() == argc);
+ Maybe<char*> stackResultPointer;
+ for (size_t i = 0; i < argc; i++) {
+ const void* rawArgLoc = &argv[i];
+ if (argTypes.isSyntheticStackResultPointerArg(i)) {
+ stackResultPointer = Some(*(char**)rawArgLoc);
+ continue;
+ }
+ size_t naturalIndex = argTypes.naturalIndex(i);
+ ValType type = fi.funcType().args()[naturalIndex];
+ MutableHandleValue argValue = args[naturalIndex];
+ if (!ToJSValue(cx, rawArgLoc, type, argValue)) {
+ return false;
+ }
+ }
+
+ FuncImportTls& import = funcImportTls(fi);
+ RootedFunction importFun(cx, import.fun);
+ MOZ_ASSERT(cx->realm() == importFun->realm());
+
+ RootedValue fval(cx, ObjectValue(*importFun));
+ RootedValue thisv(cx, UndefinedValue());
+ RootedValue rval(cx);
+ if (!Call(cx, fval, thisv, args, &rval)) {
+ return false;
+ }
+
+ if (!UnpackResults(cx, fi.funcType().results(), stackResultPointer, argv,
+ &rval)) {
+ return false;
+ }
+
+ if (!JitOptions.enableWasmJitExit) {
+ return true;
+ }
+
+ // The import may already have become optimized.
+ for (auto t : code().tiers()) {
+ void* jitExitCode = codeBase(t) + fi.jitExitCodeOffset();
+ if (import.code == jitExitCode) {
+ return true;
+ }
+ }
+
+ void* jitExitCode = codeBase(tier) + fi.jitExitCodeOffset();
+
+ // Test if the function is JIT compiled.
+ if (!importFun->hasBytecode()) {
+ return true;
+ }
+
+ JSScript* script = importFun->nonLazyScript();
+ if (!script->hasJitScript()) {
+ return true;
+ }
+
+ // Should have been guarded earlier
+ MOZ_ASSERT(!fi.funcType().hasUnexposableArgOrRet());
+
+ // Functions with unsupported reference types in signature don't have a jit
+ // exit at the moment.
+ if (fi.funcType().temporarilyUnsupportedReftypeForExit()) {
+ return true;
+ }
+
+ // Functions that return multiple values don't have a jit exit at the moment.
+ if (fi.funcType().temporarilyUnsupportedResultCountForJitExit()) {
+ return true;
+ }
+
+ // Let's optimize it!
+
+ import.code = jitExitCode;
+ return true;
+}
+
+/* static */ int32_t /* 0 to signal trap; 1 to signal OK */
+Instance::callImport_general(Instance* instance, int32_t funcImportIndex,
+ int32_t argc, uint64_t* argv) {
+ JSContext* cx = TlsContext.get();
+ return instance->callImport(cx, funcImportIndex, argc, argv);
+}
+
+/* static */ uint32_t Instance::memoryGrow_i32(Instance* instance,
+ uint32_t delta) {
+ MOZ_ASSERT(SASigMemoryGrow.failureMode == FailureMode::Infallible);
+ MOZ_ASSERT(!instance->isAsmJS());
+
+ JSContext* cx = TlsContext.get();
+ RootedWasmMemoryObject memory(cx, instance->memory_);
+
+ uint32_t ret = WasmMemoryObject::grow(memory, delta, cx);
+
+ // If there has been a moving grow, this Instance should have been notified.
+ MOZ_RELEASE_ASSERT(instance->tlsData()->memoryBase ==
+ instance->memory_->buffer().dataPointerEither());
+
+ return ret;
+}
+
+/* static */ uint32_t Instance::memorySize_i32(Instance* instance) {
+ MOZ_ASSERT(SASigMemorySize.failureMode == FailureMode::Infallible);
+
+ // This invariant must hold when running Wasm code. Assert it here so we can
+ // write tests for cross-realm calls.
+ MOZ_ASSERT(TlsContext.get()->realm() == instance->realm());
+
+ uint32_t byteLength = instance->memory()->volatileMemoryLength32();
+ MOZ_ASSERT(byteLength % wasm::PageSize == 0);
+ return byteLength / wasm::PageSize;
+}
+
+template <typename T>
+static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
+ int64_t timeout_ns) {
+ JSContext* cx = TlsContext.get();
+
+ if (!instance->memory()->isShared()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NONSHARED_WAIT);
+ return -1;
+ }
+
+ if (byteOffset & (sizeof(T) - 1)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_UNALIGNED_ACCESS);
+ return -1;
+ }
+
+ if (byteOffset + sizeof(T) > instance->memory()->volatileMemoryLength32()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ mozilla::Maybe<mozilla::TimeDuration> timeout;
+ if (timeout_ns >= 0) {
+ timeout = mozilla::Some(
+ mozilla::TimeDuration::FromMicroseconds(timeout_ns / 1000));
+ }
+
+ switch (atomics_wait_impl(cx, instance->sharedMemoryBuffer(), byteOffset,
+ value, timeout)) {
+ case FutexThread::WaitResult::OK:
+ return 0;
+ case FutexThread::WaitResult::NotEqual:
+ return 1;
+ case FutexThread::WaitResult::TimedOut:
+ return 2;
+ case FutexThread::WaitResult::Error:
+ return -1;
+ default:
+ MOZ_CRASH();
+ }
+}
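+
+// The return values above follow the shared-memory proposal's encoding for
+// memory.atomic.wait32/wait64: 0 means "ok" (woken), 1 means "not-equal", and
+// 2 means "timed-out", while -1 signals a trap that has already been reported
+// on the context.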
+
+/* static */ int32_t Instance::wait_i32(Instance* instance, uint32_t byteOffset,
+ int32_t value, int64_t timeout_ns) {
+ MOZ_ASSERT(SASigWaitI32.failureMode == FailureMode::FailOnNegI32);
+ return PerformWait<int32_t>(instance, byteOffset, value, timeout_ns);
+}
+
+/* static */ int32_t Instance::wait_i64(Instance* instance, uint32_t byteOffset,
+ int64_t value, int64_t timeout_ns) {
+ MOZ_ASSERT(SASigWaitI64.failureMode == FailureMode::FailOnNegI32);
+ return PerformWait<int64_t>(instance, byteOffset, value, timeout_ns);
+}
+
+/* static */ int32_t Instance::wake(Instance* instance, uint32_t byteOffset,
+ int32_t count) {
+ MOZ_ASSERT(SASigWake.failureMode == FailureMode::FailOnNegI32);
+
+ JSContext* cx = TlsContext.get();
+
+ // The alignment guard is not in the wasm spec as of 2017-11-02, but is
+ // considered likely to appear, as 4-byte alignment is required for WAKE by
+ // the spec's validation algorithm.
+
+ if (byteOffset & 3) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_UNALIGNED_ACCESS);
+ return -1;
+ }
+
+ if (byteOffset >= instance->memory()->volatileMemoryLength32()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ if (!instance->memory()->isShared()) {
+ return 0;
+ }
+
+ int64_t woken = atomics_notify_impl(instance->sharedMemoryBuffer(),
+ byteOffset, int64_t(count));
+
+ if (woken > INT32_MAX) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_WAKE_OVERFLOW);
+ return -1;
+ }
+
+ return int32_t(woken);
+}
+
+template <typename T, typename F>
+inline int32_t WasmMemoryCopy(T memBase, uint32_t memLen,
+ uint32_t dstByteOffset, uint32_t srcByteOffset,
+ uint32_t len, F memMove) {
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t dstOffsetLimit = uint64_t(dstByteOffset) + uint64_t(len);
+ uint64_t srcOffsetLimit = uint64_t(srcByteOffset) + uint64_t(len);
+
+ if (dstOffsetLimit > memLen || srcOffsetLimit > memLen) {
+ JSContext* cx = TlsContext.get();
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ memMove(memBase + dstByteOffset, memBase + srcByteOffset, size_t(len));
+ return 0;
+}
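+
+// Worked example of why the limits above are computed in 64 bits: with
+// memLen = 0x10000 (64 KiB), dstByteOffset = 0xFFFFFFF0 and len = 0x20, a
+// 32-bit addition would wrap to 0x10 and incorrectly pass the bounds check,
+// whereas uint64_t(0xFFFFFFF0) + 0x20 = 0x100000010 > memLen correctly
+// reports JSMSG_WASM_OUT_OF_BOUNDS.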
+
+/* static */ int32_t Instance::memCopy(Instance* instance,
+ uint32_t dstByteOffset,
+ uint32_t srcByteOffset, uint32_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);
+
+ const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
+ uint32_t memLen = ByteLength32(rawBuf);
+
+ return WasmMemoryCopy(memBase, memLen, dstByteOffset, srcByteOffset, len,
+ memmove);
+}
+
+/* static */ int32_t Instance::memCopyShared(Instance* instance,
+ uint32_t dstByteOffset,
+ uint32_t srcByteOffset,
+ uint32_t len, uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);
+
+ using RacyMemMove =
+ void (*)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);
+
+ const SharedArrayRawBuffer* rawBuf =
+ SharedArrayRawBuffer::fromDataPtr(memBase);
+ uint32_t memLen = VolatileByteLength32(rawBuf);
+
+ return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
+ SharedMem<uint8_t*>::shared(memBase), memLen, dstByteOffset,
+ srcByteOffset, len, AtomicOperations::memmoveSafeWhenRacy);
+}
+
+/* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
+ MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
+
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
+ "ensured by validation");
+
+ if (!instance->passiveDataSegments_[segIndex]) {
+ return 0;
+ }
+
+ SharedDataSegment& segRefPtr = instance->passiveDataSegments_[segIndex];
+ MOZ_RELEASE_ASSERT(!segRefPtr->active());
+
+ // Drop this instance's reference to the DataSegment so it can be released.
+ segRefPtr = nullptr;
+ return 0;
+}
+
+template <typename T, typename F>
+inline int32_t WasmMemoryFill(T memBase, uint32_t memLen, uint32_t byteOffset,
+ uint32_t value, uint32_t len, F memSet) {
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t offsetLimit = uint64_t(byteOffset) + uint64_t(len);
+
+ if (offsetLimit > memLen) {
+ JSContext* cx = TlsContext.get();
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+  // The required write direction is upward, but that is not currently
+  // observable since there are no fences or read/write-protect operations.
+ memSet(memBase + byteOffset, int(value), size_t(len));
+ return 0;
+}
+
+/* static */ int32_t Instance::memFill(Instance* instance, uint32_t byteOffset,
+ uint32_t value, uint32_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemFill.failureMode == FailureMode::FailOnNegI32);
+
+ const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
+ uint32_t memLen = ByteLength32(rawBuf);
+
+ return WasmMemoryFill(memBase, memLen, byteOffset, value, len, memset);
+}
+
+/* static */ int32_t Instance::memFillShared(Instance* instance,
+ uint32_t byteOffset,
+ uint32_t value, uint32_t len,
+ uint8_t* memBase) {
+ MOZ_ASSERT(SASigMemFill.failureMode == FailureMode::FailOnNegI32);
+
+ const SharedArrayRawBuffer* rawBuf =
+ SharedArrayRawBuffer::fromDataPtr(memBase);
+ uint32_t memLen = VolatileByteLength32(rawBuf);
+
+ return WasmMemoryFill(SharedMem<uint8_t*>::shared(memBase), memLen,
+ byteOffset, value, len,
+ AtomicOperations::memsetSafeWhenRacy);
+}
+
+/* static */ int32_t Instance::memInit(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t segIndex) {
+ MOZ_ASSERT(SASigMemInit.failureMode == FailureMode::FailOnNegI32);
+
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveDataSegments_.length(),
+ "ensured by validation");
+
+ if (!instance->passiveDataSegments_[segIndex]) {
+ if (len == 0 && srcOffset == 0) {
+ return 0;
+ }
+
+ JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ const DataSegment& seg = *instance->passiveDataSegments_[segIndex];
+ MOZ_RELEASE_ASSERT(!seg.active());
+
+ const uint32_t segLen = seg.bytes.length();
+
+ WasmMemoryObject* mem = instance->memory();
+ const uint32_t memLen = mem->volatileMemoryLength32();
+
+ // We are proposing to copy
+ //
+ // seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
+ // to
+ // memoryBase[ dstOffset .. dstOffset + len - 1 ]
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
+ uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
+
+ if (dstOffsetLimit > memLen || srcOffsetLimit > segLen) {
+ JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+  // The required read/write direction is upward, but that is not currently
+  // observable since there are no fences or read/write-protect operations.
+ SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
+ if (mem->isShared()) {
+ AtomicOperations::memcpySafeWhenRacy(
+ dataPtr + dstOffset, (uint8_t*)seg.bytes.begin() + srcOffset, len);
+ } else {
+ uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
+ memcpy(rawBuf + dstOffset, (const char*)seg.bytes.begin() + srcOffset, len);
+ }
+ return 0;
+}
+
+/* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t dstTableIndex,
+ uint32_t srcTableIndex) {
+  MOZ_ASSERT(SASigTableCopy.failureMode == FailureMode::FailOnNegI32);
+
+ const SharedTable& srcTable = instance->tables()[srcTableIndex];
+ uint32_t srcTableLen = srcTable->length();
+
+ const SharedTable& dstTable = instance->tables()[dstTableIndex];
+ uint32_t dstTableLen = dstTable->length();
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
+ uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
+
+ if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
+ JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ bool isOOM = false;
+
+ if (&srcTable == &dstTable && dstOffset > srcOffset) {
+ for (uint32_t i = len; i > 0; i--) {
+ if (!dstTable->copy(*srcTable, dstOffset + (i - 1),
+ srcOffset + (i - 1))) {
+ isOOM = true;
+ break;
+ }
+ }
+ } else if (&srcTable == &dstTable && dstOffset == srcOffset) {
+ // No-op
+ } else {
+ for (uint32_t i = 0; i < len; i++) {
+ if (!dstTable->copy(*srcTable, dstOffset + i, srcOffset + i)) {
+ isOOM = true;
+ break;
+ }
+ }
+ }
+
+ if (isOOM) {
+ return -1;
+ }
+ return 0;
+}
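+
+// Worked example of the copy direction above: copying 3 elements within the
+// same table from srcOffset = 0 to dstOffset = 1 overlaps; a forward copy
+// would first overwrite slot 1 and then propagate slot 0's original value
+// into slots 2 and 3. Copying backwards writes slot 3, then 2, then 1, so
+// every source slot is read before it is clobbered.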
+
+/* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
+  MOZ_ASSERT(SASigElemDrop.failureMode == FailureMode::FailOnNegI32);
+
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
+ "ensured by validation");
+
+ if (!instance->passiveElemSegments_[segIndex]) {
+ return 0;
+ }
+
+ SharedElemSegment& segRefPtr = instance->passiveElemSegments_[segIndex];
+ MOZ_RELEASE_ASSERT(!segRefPtr->active());
+
+ // Drop this instance's reference to the ElemSegment so it can be released.
+ segRefPtr = nullptr;
+ return 0;
+}
+
+bool Instance::initElems(uint32_t tableIndex, const ElemSegment& seg,
+ uint32_t dstOffset, uint32_t srcOffset, uint32_t len) {
+ Table& table = *tables_[tableIndex];
+ MOZ_ASSERT(dstOffset <= table.length());
+ MOZ_ASSERT(len <= table.length() - dstOffset);
+
+ Tier tier = code().bestTier();
+ const MetadataTier& metadataTier = metadata(tier);
+ const FuncImportVector& funcImports = metadataTier.funcImports;
+ const CodeRangeVector& codeRanges = metadataTier.codeRanges;
+ const Uint32Vector& funcToCodeRange = metadataTier.funcToCodeRange;
+ const Uint32Vector& elemFuncIndices = seg.elemFuncIndices;
+ MOZ_ASSERT(srcOffset <= elemFuncIndices.length());
+ MOZ_ASSERT(len <= elemFuncIndices.length() - srcOffset);
+
+ uint8_t* codeBaseTier = codeBase(tier);
+ for (uint32_t i = 0; i < len; i++) {
+ uint32_t funcIndex = elemFuncIndices[srcOffset + i];
+ if (funcIndex == NullFuncIndex) {
+ table.setNull(dstOffset + i);
+ } else if (!table.isFunction()) {
+ // Note, fnref must be rooted if we do anything more than just store it.
+ void* fnref = Instance::refFunc(this, funcIndex);
+ if (fnref == AnyRef::invalid().forCompiledCode()) {
+ return false; // OOM, which has already been reported.
+ }
+ table.fillAnyRef(dstOffset + i, 1, AnyRef::fromCompiledCode(fnref));
+ } else {
+ if (funcIndex < funcImports.length()) {
+ FuncImportTls& import = funcImportTls(funcImports[funcIndex]);
+ JSFunction* fun = import.fun;
+ if (IsWasmExportedFunction(fun)) {
+ // This element is a wasm function imported from another
+ // instance. To preserve the === function identity required by
+ // the JS embedding spec, we must set the element to the
+ // imported function's underlying CodeRange.funcCheckedCallEntry and
+ // Instance so that future Table.get()s produce the same
+ // function object as was imported.
+ WasmInstanceObject* calleeInstanceObj =
+ ExportedFunctionToInstanceObject(fun);
+ Instance& calleeInstance = calleeInstanceObj->instance();
+ Tier calleeTier = calleeInstance.code().bestTier();
+ const CodeRange& calleeCodeRange =
+ calleeInstanceObj->getExportedFunctionCodeRange(fun, calleeTier);
+ void* code = calleeInstance.codeBase(calleeTier) +
+ calleeCodeRange.funcCheckedCallEntry();
+ table.setFuncRef(dstOffset + i, code, &calleeInstance);
+ continue;
+ }
+ }
+ void* code =
+ codeBaseTier +
+ codeRanges[funcToCodeRange[funcIndex]].funcCheckedCallEntry();
+ table.setFuncRef(dstOffset + i, code, this);
+ }
+ }
+ return true;
+}
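+
+// For illustration of the identity-preservation case above (module names are
+// hypothetical): if module B imports an exported wasm function f from
+// instance A and an element segment places that import into B's table, the
+// entry is initialized with A's code and instance, so a later Table.get()
+// yields the very same function object f (===) that was imported.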
+
+/* static */ int32_t Instance::tableInit(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t segIndex,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableInit.failureMode == FailureMode::FailOnNegI32);
+
+ MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
+ "ensured by validation");
+
+ if (!instance->passiveElemSegments_[segIndex]) {
+ if (len == 0 && srcOffset == 0) {
+ return 0;
+ }
+
+ JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ const ElemSegment& seg = *instance->passiveElemSegments_[segIndex];
+ MOZ_RELEASE_ASSERT(!seg.active());
+ const uint32_t segLen = seg.length();
+
+ const Table& table = *instance->tables()[tableIndex];
+ const uint32_t tableLen = table.length();
+
+ // We are proposing to copy
+ //
+ // seg[ srcOffset .. srcOffset + len - 1 ]
+ // to
+ // tableBase[ dstOffset .. dstOffset + len - 1 ]
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
+ uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
+
+ if (dstOffsetLimit > tableLen || srcOffsetLimit > segLen) {
+ JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ if (!instance->initElems(tableIndex, seg, dstOffset, srcOffset, len)) {
+ return -1; // OOM, which has already been reported.
+ }
+
+ return 0;
+}
+
+/* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
+ void* value, uint32_t len,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);
+
+ JSContext* cx = TlsContext.get();
+ Table& table = *instance->tables()[tableIndex];
+
+ // Bounds check and deal with arithmetic overflow.
+ uint64_t offsetLimit = uint64_t(start) + uint64_t(len);
+
+ if (offsetLimit > table.length()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ switch (table.repr()) {
+ case TableRepr::Ref:
+ table.fillAnyRef(start, len, AnyRef::fromCompiledCode(value));
+ break;
+ case TableRepr::Func:
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+ table.fillFuncRef(start, len, FuncRef::fromCompiledCode(value), cx);
+ break;
+ }
+
+ return 0;
+}
+
+/* static */ void* Instance::tableGet(Instance* instance, uint32_t index,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);
+
+ const Table& table = *instance->tables()[tableIndex];
+ if (index >= table.length()) {
+ JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+ JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
+ return AnyRef::invalid().forCompiledCode();
+ }
+
+ if (table.repr() == TableRepr::Ref) {
+ return table.getAnyRef(index).forCompiledCode();
+ }
+
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+
+ JSContext* cx = TlsContext.get();
+ RootedFunction fun(cx);
+ if (!table.getFuncRef(cx, index, &fun)) {
+ return AnyRef::invalid().forCompiledCode();
+ }
+
+ return FuncRef::fromJSFunction(fun).forCompiledCode();
+}
+
+/* static */ uint32_t Instance::tableGrow(Instance* instance, void* initValue,
+ uint32_t delta, uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableGrow.failureMode == FailureMode::Infallible);
+
+ RootedAnyRef ref(TlsContext.get(), AnyRef::fromCompiledCode(initValue));
+ Table& table = *instance->tables()[tableIndex];
+
+ uint32_t oldSize = table.grow(delta);
+
+ if (oldSize != uint32_t(-1) && initValue != nullptr) {
+ switch (table.repr()) {
+ case TableRepr::Ref:
+ table.fillAnyRef(oldSize, delta, ref);
+ break;
+ case TableRepr::Func:
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+ table.fillFuncRef(oldSize, delta, FuncRef::fromAnyRefUnchecked(ref),
+ TlsContext.get());
+ break;
+ }
+ }
+
+ return oldSize;
+}
+
+/* static */ int32_t Instance::tableSet(Instance* instance, uint32_t index,
+ void* value, uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableSet.failureMode == FailureMode::FailOnNegI32);
+
+ Table& table = *instance->tables()[tableIndex];
+ if (index >= table.length()) {
+ JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+ JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
+ return -1;
+ }
+
+ switch (table.repr()) {
+ case TableRepr::Ref:
+ table.fillAnyRef(index, 1, AnyRef::fromCompiledCode(value));
+ break;
+ case TableRepr::Func:
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+ table.fillFuncRef(index, 1, FuncRef::fromCompiledCode(value),
+ TlsContext.get());
+ break;
+ }
+
+ return 0;
+}
+
+/* static */ uint32_t Instance::tableSize(Instance* instance,
+ uint32_t tableIndex) {
+ MOZ_ASSERT(SASigTableSize.failureMode == FailureMode::Infallible);
+ Table& table = *instance->tables()[tableIndex];
+ return table.length();
+}
+
+/* static */ void* Instance::refFunc(Instance* instance, uint32_t funcIndex) {
+ MOZ_ASSERT(SASigRefFunc.failureMode == FailureMode::FailOnInvalidRef);
+ JSContext* cx = TlsContext.get();
+
+ Tier tier = instance->code().bestTier();
+ const MetadataTier& metadataTier = instance->metadata(tier);
+ const FuncImportVector& funcImports = metadataTier.funcImports;
+
+ // If this is an import, we need to recover the original function to maintain
+ // reference equality between a re-exported function and 'ref.func'. The
+ // identity of the imported function object is stable across tiers, which is
+ // what we want.
+ //
+ // Use the imported function only if it is an exported function, otherwise
+ // fall through to get a (possibly new) exported function.
+ if (funcIndex < funcImports.length()) {
+ FuncImportTls& import = instance->funcImportTls(funcImports[funcIndex]);
+ if (IsWasmExportedFunction(import.fun)) {
+ return FuncRef::fromJSFunction(import.fun).forCompiledCode();
+ }
+ }
+
+ RootedFunction fun(cx);
+ RootedWasmInstanceObject instanceObj(cx, instance->object());
+ if (!WasmInstanceObject::getExportedFunction(cx, instanceObj, funcIndex,
+ &fun)) {
+ // Validation ensures that we always have a valid funcIndex, so we must
+ // have OOM'ed
+ ReportOutOfMemory(cx);
+ return AnyRef::invalid().forCompiledCode();
+ }
+
+ return FuncRef::fromJSFunction(fun).forCompiledCode();
+}
+
+/* static */ void Instance::preBarrierFiltering(Instance* instance,
+ gc::Cell** location) {
+ MOZ_ASSERT(SASigPreBarrierFiltering.failureMode == FailureMode::Infallible);
+ MOZ_ASSERT(location);
+ gc::PreWriteBarrier(*reinterpret_cast<JSObject**>(location));
+}
+
+/* static */ void Instance::postBarrier(Instance* instance,
+ gc::Cell** location) {
+ MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
+ MOZ_ASSERT(location);
+ TlsContext.get()->runtime()->gc.storeBuffer().putCell(
+ reinterpret_cast<JSObject**>(location));
+}
+
+/* static */ void Instance::postBarrierFiltering(Instance* instance,
+ gc::Cell** location) {
+ MOZ_ASSERT(SASigPostBarrier.failureMode == FailureMode::Infallible);
+ MOZ_ASSERT(location);
+ if (*location == nullptr || !gc::IsInsideNursery(*location)) {
+ return;
+ }
+ TlsContext.get()->runtime()->gc.storeBuffer().putCell(
+ reinterpret_cast<JSObject**>(location));
+}
+
+// structNew() receives a pointer to the TypeDescr of the struct type to
+// allocate; such TypeDescr objects are held by the typeDescrs_ table in the
+// instance.
+//
+// When we fail to allocate we return a nullptr; the wasm side must check this
+// and propagate it as an error.
+
+/* static */ void* Instance::structNew(Instance* instance, void* structDescr) {
+ MOZ_ASSERT(SASigStructNew.failureMode == FailureMode::FailOnNullPtr);
+ JSContext* cx = TlsContext.get();
+ Rooted<TypeDescr*> typeDescr(cx, (TypeDescr*)structDescr);
+ MOZ_ASSERT(typeDescr);
+ return TypedObject::createZeroed(cx, typeDescr);
+}
+
+static const StructType* GetDescrStructType(JSContext* cx,
+ HandleTypeDescr typeDescr) {
+ const TypeDef& typeDef = typeDescr->getType(cx);
+ return typeDef.isStructType() ? &typeDef.structType() : nullptr;
+}
+
+/* static */ void* Instance::structNarrow(Instance* instance,
+ void* outputStructDescr,
+ void* maybeNullPtr) {
+ MOZ_ASSERT(SASigStructNarrow.failureMode == FailureMode::Infallible);
+
+ JSContext* cx = TlsContext.get();
+
+ Rooted<TypedObject*> obj(cx);
+ Rooted<TypeDescr*> typeDescr(cx);
+
+ if (maybeNullPtr == nullptr) {
+ return maybeNullPtr;
+ }
+
+ void* nonnullPtr = maybeNullPtr;
+ obj = static_cast<TypedObject*>(nonnullPtr);
+ typeDescr = &obj->typeDescr();
+
+ const StructType* inputStructType = GetDescrStructType(cx, typeDescr);
+ if (inputStructType == nullptr) {
+ return nullptr;
+ }
+ Rooted<TypeDescr*> outputTypeDescr(cx, (TypeDescr*)outputStructDescr);
+ const StructType* outputStructType = GetDescrStructType(cx, outputTypeDescr);
+ MOZ_ASSERT(outputStructType);
+
+ // Now we know that the object was created by the instance, and we know its
+  // concrete type. We need to check that its type is an extension of the
+  // output type described by outputTypeDescr.
+
+ if (!inputStructType->hasPrefix(*outputStructType)) {
+ return nullptr;
+ }
+ return nonnullPtr;
+}
+
+// Note, dst must point into nonmoveable storage that is not in the nursery,
+// this matters for the write barriers. Furthermore, for pointer types the
+// current value of *dst must be null so that only a post-barrier is required.
+//
+// Regarding the destination not being in the nursery, we have these cases.
+// Either the written location is in the global data section in the
+// WasmInstanceObject, or the Cell of a WasmGlobalObject:
+//
+// - WasmInstanceObjects are always tenured and u.ref_ may point to a
+// nursery object, so we need a post-barrier since the global data of an
+// instance is effectively a field of the WasmInstanceObject.
+//
+// - WasmGlobalObjects are always tenured, and they have a Cell field, so a
+// post-barrier may be needed for the same reason as above.
+
+void CopyValPostBarriered(uint8_t* dst, const Val& src) {
+ switch (src.type().kind()) {
+ case ValType::I32: {
+ int32_t x = src.i32();
+ memcpy(dst, &x, sizeof(x));
+ break;
+ }
+ case ValType::I64: {
+ int64_t x = src.i64();
+ memcpy(dst, &x, sizeof(x));
+ break;
+ }
+ case ValType::F32: {
+ float x = src.f32();
+ memcpy(dst, &x, sizeof(x));
+ break;
+ }
+ case ValType::F64: {
+ double x = src.f64();
+ memcpy(dst, &x, sizeof(x));
+ break;
+ }
+ case ValType::V128: {
+ V128 x = src.v128();
+ memcpy(dst, &x, sizeof(x));
+ break;
+ }
+ case ValType::Ref: {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ MOZ_ASSERT(*(void**)dst == nullptr,
+ "should be null so no need for a pre-barrier");
+ AnyRef x = src.ref();
+ memcpy(dst, x.asJSObjectAddress(), sizeof(*x.asJSObjectAddress()));
+ if (!x.isNull()) {
+ JSObject::postWriteBarrier((JSObject**)dst, nullptr, x.asJSObject());
+ }
+ break;
+ }
+ }
+}
+
+Instance::Instance(JSContext* cx, Handle<WasmInstanceObject*> object,
+ SharedCode code, UniqueTlsData tlsDataIn,
+ HandleWasmMemoryObject memory,
+ SharedExceptionTagVector&& exceptionTags,
+ SharedTableVector&& tables, UniqueDebugState maybeDebug)
+ : realm_(cx->realm()),
+ object_(object),
+ jsJitArgsRectifier_(
+ cx->runtime()->jitRuntime()->getArgumentsRectifier().value),
+ jsJitExceptionHandler_(
+ cx->runtime()->jitRuntime()->getExceptionTail().value),
+ preBarrierCode_(
+ cx->runtime()->jitRuntime()->preBarrier(MIRType::Object).value),
+ code_(code),
+ tlsData_(std::move(tlsDataIn)),
+ memory_(memory),
+ exceptionTags_(std::move(exceptionTags)),
+ tables_(std::move(tables)),
+ maybeDebug_(std::move(maybeDebug)),
+ hasGcTypes_(false) {}
+
+bool Instance::init(JSContext* cx, const JSFunctionVector& funcImports,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ const DataSegmentVector& dataSegments,
+ const ElemSegmentVector& elemSegments) {
+ MOZ_ASSERT(!!maybeDebug_ == metadata().debugEnabled);
+#ifdef ENABLE_WASM_EXCEPTIONS
+ // Currently the only events are exceptions.
+ MOZ_ASSERT(exceptionTags_.length() == metadata().events.length());
+#else
+ MOZ_ASSERT(exceptionTags_.length() == 0);
+#endif
+
+#ifdef DEBUG
+ for (auto t : code_->tiers()) {
+ MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
+ }
+#endif
+ MOZ_ASSERT(tables_.length() == metadata().tables.length());
+
+ tlsData()->memoryBase =
+ memory_ ? memory_->buffer().dataPointerEither().unwrap() : nullptr;
+ tlsData()->boundsCheckLimit32 = memory_ ? memory_->boundsCheckLimit32() : 0;
+ tlsData()->instance = this;
+ tlsData()->realm = realm_;
+ tlsData()->cx = cx;
+ tlsData()->valueBoxClass = &WasmValueBox::class_;
+ tlsData()->resetInterrupt(cx);
+ tlsData()->jumpTable = code_->tieringJumpTable();
+ tlsData()->addressOfNeedsIncrementalBarrier =
+ (uint8_t*)cx->compartment()->zone()->addressOfNeedsIncrementalBarrier();
+
+ // Initialize function imports in the tls data
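+ // There are three cases below: (1) the import is a function exported by
+ // another wasm instance, so we can call its unchecked entry point directly;
+ // (2) the import matches a known builtin (e.g., certain Math builtins) and
+ // we call its thunk; (3) otherwise we route the call through the generic
+ // interpreter exit stub.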
+ Tier callerTier = code_->bestTier();
+ for (size_t i = 0; i < metadata(callerTier).funcImports.length(); i++) {
+ JSFunction* f = funcImports[i];
+ const FuncImport& fi = metadata(callerTier).funcImports[i];
+ FuncImportTls& import = funcImportTls(fi);
+ import.fun = f;
+ if (!isAsmJS() && IsWasmExportedFunction(f)) {
+ WasmInstanceObject* calleeInstanceObj =
+ ExportedFunctionToInstanceObject(f);
+ Instance& calleeInstance = calleeInstanceObj->instance();
+ Tier calleeTier = calleeInstance.code().bestTier();
+ const CodeRange& codeRange =
+ calleeInstanceObj->getExportedFunctionCodeRange(f, calleeTier);
+ import.tls = calleeInstance.tlsData();
+ import.realm = f->realm();
+ import.code = calleeInstance.codeBase(calleeTier) +
+ codeRange.funcUncheckedCallEntry();
+ } else if (void* thunk = MaybeGetBuiltinThunk(f, fi.funcType())) {
+ import.tls = tlsData();
+ import.realm = f->realm();
+ import.code = thunk;
+ } else {
+ import.tls = tlsData();
+ import.realm = f->realm();
+ import.code = codeBase(callerTier) + fi.interpExitCodeOffset();
+ }
+ }
+
+ // Initialize tables in the tls data
+ for (size_t i = 0; i < tables_.length(); i++) {
+ const TableDesc& td = metadata().tables[i];
+ TableTls& table = tableTls(td);
+ table.length = tables_[i]->length();
+ table.functionBase = tables_[i]->functionBase();
+ }
+
+ // Initialize globals in the tls data
+ for (size_t i = 0; i < metadata().globals.length(); i++) {
+ const GlobalDesc& global = metadata().globals[i];
+
+ // Constants are baked into the code, never stored in the global area.
+ if (global.isConstant()) {
+ continue;
+ }
+
+ uint8_t* globalAddr = globalData() + global.offset();
+ switch (global.kind()) {
+ case GlobalKind::Import: {
+ size_t imported = global.importIndex();
+ if (global.isIndirect()) {
+ *(void**)globalAddr =
+ (void*)&globalObjs[imported]->val().get().cell();
+ } else {
+ CopyValPostBarriered(globalAddr, globalImportValues[imported]);
+ }
+ break;
+ }
+ case GlobalKind::Variable: {
+ const InitExpr& init = global.initExpr();
+
+ RootedVal val(cx);
+ switch (init.kind()) {
+ case InitExpr::Kind::Constant: {
+ val = Val(init.val());
+ break;
+ }
+ case InitExpr::Kind::GetGlobal: {
+ const GlobalDesc& imported = metadata().globals[init.globalIndex()];
+
+ // Global-ref initializers cannot reference mutable globals, so
+ // the source global should never be indirect.
+ MOZ_ASSERT(!imported.isIndirect());
+
+ val = globalImportValues[imported.importIndex()];
+ break;
+ }
+ case InitExpr::Kind::RefFunc: {
+ void* fnref = Instance::refFunc(this, init.refFuncIndex());
+ if (fnref == AnyRef::invalid().forCompiledCode()) {
+ return false; // OOM, which has already been reported.
+ }
+ val =
+ Val(ValType(RefType::func()), FuncRef::fromCompiledCode(fnref));
+ break;
+ }
+ }
+
+ if (global.isIndirect()) {
+ void* address = (void*)&globalObjs[i]->val().get().cell();
+ *(void**)globalAddr = address;
+ CopyValPostBarriered((uint8_t*)address, val.get());
+ } else {
+ CopyValPostBarriered(globalAddr, val.get());
+ }
+ break;
+ }
+ case GlobalKind::Constant: {
+ MOZ_CRASH("skipped at the top");
+ }
+ }
+ }
+
+ // Add observer if our memory base may grow
+ if (memory_ && memory_->movingGrowable() &&
+ !memory_->addMovingGrowObserver(cx, object_)) {
+ return false;
+ }
+
+ // Add observers if our tables may grow
+ for (const SharedTable& table : tables_) {
+ if (table->movingGrowable() && !table->addMovingGrowObserver(cx, object_)) {
+ return false;
+ }
+ }
+
+ // Allocate in the global type sets for structural type checks
+ if (!metadata().types.empty()) {
+ // Transfer and allocate type objects for the struct types in the module
+ if (GcTypesAvailable(cx)) {
+ uint32_t baseIndex = 0;
+ if (!cx->wasm().typeContext->transferTypes(metadata().types,
+ &baseIndex)) {
+ return false;
+ }
+
+ for (uint32_t typeIndex = 0; typeIndex < metadata().types.length();
+ typeIndex++) {
+ const TypeDefWithId& typeDef = metadata().types[typeIndex];
+ if (!typeDef.isStructType()) {
+ continue;
+ }
+#ifndef ENABLE_WASM_GC
+ MOZ_CRASH("Should not have seen any struct types");
+#else
+ uint32_t globalTypeIndex = baseIndex + typeIndex;
+ Rooted<TypeDescr*> typeDescr(
+ cx, TypeDescr::createFromHandle(cx, TypeHandle(globalTypeIndex)));
+
+ if (!typeDescr) {
+ return false;
+ }
+ *((GCPtrObject*)addressOfTypeId(typeDef.id)) = typeDescr;
+ hasGcTypes_ = true;
+#endif
+ }
+ }
+
+ // Handle functions specially (for now) as they're guaranteed to be
+ // acyclic and can use simpler hash-consing logic.
+ ExclusiveData<FuncTypeIdSet>::Guard lockedFuncTypeIdSet =
+ funcTypeIdSet.lock();
+
+ for (uint32_t typeIndex = 0; typeIndex < metadata().types.length();
+ typeIndex++) {
+ const TypeDefWithId& typeDef = metadata().types[typeIndex];
+ if (!typeDef.isFuncType()) {
+ continue;
+ }
+ const FuncType& funcType = typeDef.funcType();
+ const void* funcTypeId;
+ if (!lockedFuncTypeIdSet->allocateFuncTypeId(cx, funcType,
+ &funcTypeId)) {
+ return false;
+ }
+ *addressOfTypeId(typeDef.id) = funcTypeId;
+ }
+ }
+
+ // Take references to the passive data segments
+ if (!passiveDataSegments_.resize(dataSegments.length())) {
+ return false;
+ }
+ for (size_t i = 0; i < dataSegments.length(); i++) {
+ if (!dataSegments[i]->active()) {
+ passiveDataSegments_[i] = dataSegments[i];
+ }
+ }
+
+ // Take references to the passive element segments
+ if (!passiveElemSegments_.resize(elemSegments.length())) {
+ return false;
+ }
+ for (size_t i = 0; i < elemSegments.length(); i++) {
+ if (elemSegments[i]->kind != ElemSegment::Kind::Active) {
+ passiveElemSegments_[i] = elemSegments[i];
+ }
+ }
+
+ return true;
+}
+
+Instance::~Instance() {
+ realm_->wasm.unregisterInstance(*this);
+
+ if (!metadata().types.empty()) {
+ ExclusiveData<FuncTypeIdSet>::Guard lockedFuncTypeIdSet =
+ funcTypeIdSet.lock();
+
+ for (const TypeDefWithId& typeDef : metadata().types) {
+ if (!typeDef.isFuncType()) {
+ continue;
+ }
+ const FuncType& funcType = typeDef.funcType();
+ if (const void* funcTypeId = *addressOfTypeId(typeDef.id)) {
+ lockedFuncTypeIdSet->deallocateFuncTypeId(funcType, funcTypeId);
+ }
+ }
+ }
+}
+
+size_t Instance::memoryMappedSize() const {
+ return memory_->buffer().wasmMappedSize();
+}
+
+bool Instance::memoryAccessInGuardRegion(uint8_t* addr,
+ unsigned numBytes) const {
+ MOZ_ASSERT(numBytes > 0);
+
+ if (!metadata().usesMemory()) {
+ return false;
+ }
+
+ uint8_t* base = memoryBase().unwrap(/* comparison */);
+ if (addr < base) {
+ return false;
+ }
+
+ size_t lastByteOffset = addr - base + (numBytes - 1);
+ return lastByteOffset >= memory()->volatileMemoryLength32() &&
+ lastByteOffset < memoryMappedSize();
+}
+
+bool Instance::memoryAccessInBounds(uint8_t* addr, unsigned numBytes) const {
+ MOZ_ASSERT(numBytes > 0 && numBytes <= sizeof(double));
+
+ if (!metadata().usesMemory()) {
+ return false;
+ }
+
+ uint8_t* base = memoryBase().unwrap(/* comparison */);
+ if (addr < base) {
+ return false;
+ }
+
+ uint32_t length = memory()->volatileMemoryLength32();
+ if (addr >= base + length) {
+ return false;
+ }
+
+ // The pointer points into the memory. Now check for partial OOB.
+ //
+ // This calculation can't wrap around because the access is small and there
+ // always is a guard page following the memory.
+ size_t lastByteOffset = addr - base + (numBytes - 1);
+ if (lastByteOffset >= length) {
+ return false;
+ }
+
+ return true;
+}
+
+void Instance::tracePrivate(JSTracer* trc) {
+ // This method is only called from WasmInstanceObject, so the only reason
+ // TraceEdge is called here is so that the pointer can be updated during a
+ // moving GC.
+ MOZ_ASSERT_IF(trc->isMarkingTracer(), gc::IsMarked(trc->runtime(), &object_));
+ TraceEdge(trc, &object_, "wasm instance object");
+
+ // OK to just do one tier here; though the tiers have different funcImports
+ // tables, they share the tls object.
+ for (const FuncImport& fi : metadata(code().stableTier()).funcImports) {
+ TraceNullableEdge(trc, &funcImportTls(fi).fun, "wasm import");
+ }
+
+ for (const SharedTable& table : tables_) {
+ table->trace(trc);
+ }
+
+ for (const GlobalDesc& global : code().metadata().globals) {
+ // Indirect reference globals get traced by the owning WebAssembly.Global.
+ if (!global.type().isReference() || global.isConstant() ||
+ global.isIndirect()) {
+ continue;
+ }
+ GCPtrObject* obj = (GCPtrObject*)(globalData() + global.offset());
+ TraceNullableEdge(trc, obj, "wasm reference-typed global");
+ }
+
+ TraceNullableEdge(trc, &memory_, "wasm buffer");
+#ifdef ENABLE_WASM_GC
+ if (hasGcTypes_) {
+ for (const TypeDefWithId& typeDef : metadata().types) {
+ if (!typeDef.isStructType()) {
+ continue;
+ }
+ TraceNullableEdge(trc, ((GCPtrObject*)addressOfTypeId(typeDef.id)),
+ "wasm typedescr");
+ }
+ }
+#endif
+
+ if (maybeDebug_) {
+ maybeDebug_->trace(trc);
+ }
+}
+
+void Instance::trace(JSTracer* trc) {
+ // Technically, instead of having this method, the caller could use
+ // Instance::object() to get the owning WasmInstanceObject to mark,
+ // but this method is simpler and more efficient. The trace hook of
+ // WasmInstanceObject will call Instance::tracePrivate at which point we
+ // can mark the rest of the children.
+ TraceEdge(trc, &object_, "wasm instance object");
+}
+
+uintptr_t Instance::traceFrame(JSTracer* trc, const wasm::WasmFrameIter& wfi,
+ uint8_t* nextPC,
+ uintptr_t highestByteVisitedInPrevFrame) {
+ const StackMap* map = code().lookupStackMap(nextPC);
+ if (!map) {
+ return 0;
+ }
+
+ Frame* frame = wfi.frame();
+
+ // |frame| points somewhere in the middle of the area described by |map|.
+ // We have to calculate |scanStart|, the lowest address that is described by
+ // |map|, by consulting |map->frameOffsetFromTop|.
+
+ const size_t numMappedBytes = map->numMappedWords * sizeof(void*);
+ const uintptr_t scanStart = uintptr_t(frame) +
+ (map->frameOffsetFromTop * sizeof(void*)) -
+ numMappedBytes;
+ MOZ_ASSERT(0 == scanStart % sizeof(void*));
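+
+ // Worked example with hypothetical numbers (64-bit words): if
+ // frame == 0x7fff0000, map->frameOffsetFromTop == 2 and
+ // map->numMappedWords == 6, then numMappedBytes == 48 and
+ // scanStart == 0x7fff0000 + 16 - 48 == 0x7ffeffe0, so the map covers the
+ // six words 0x7ffeffe0 .. 0x7fff000f, ending frameOffsetFromTop words
+ // above |frame|.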
+
+ // Do what we can to assert that, for consecutive wasm frames, their stack
+ // maps also abut exactly. This is a useful sanity check on the sizing of
+ // stack maps.
+ //
+ // In debug builds, the stackmap construction machinery goes to considerable
+ // efforts to ensure that the stackmaps for consecutive frames abut exactly.
+ // This is so as to ensure there are no areas of stack inadvertently ignored
+ // by a stackmap, nor covered by two stackmaps. Hence any failure of this
+ // assertion is serious and should be investigated.
+
+ // This condition isn't maintained by Cranelift
+ // (https://github.com/bytecodealliance/wasmtime/issues/2281), but it is OK
+ // to disable this assertion in that case because when Cranelift compiles a
+ // function, the prologue it generates copies all of the in-memory arguments
+ // into registers, so none of the in-memory argument words are actually
+ // live.
+#ifndef JS_CODEGEN_ARM64
+ MOZ_ASSERT_IF(highestByteVisitedInPrevFrame != 0,
+ highestByteVisitedInPrevFrame + 1 == scanStart);
+#endif
+
+ uintptr_t* stackWords = (uintptr_t*)scanStart;
+
+ // If we have some exit stub words, this means the map also covers an area
+ // created by an exit stub, and so the highest word of that area should be
+ // the constant stored there by the code generated by GenerateTrapExit.
+ MOZ_ASSERT_IF(
+ map->numExitStubWords > 0,
+ stackWords[map->numExitStubWords - 1 - TrapExitDummyValueOffsetFromTop] ==
+ TrapExitDummyValue);
+
+ // And actually hand them off to the GC.
+ for (uint32_t i = 0; i < map->numMappedWords; i++) {
+ if (map->getBit(i) == 0) {
+ continue;
+ }
+
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
+ // not be a traceable JSObject*.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ // This assertion seems at least moderately effective in detecting
+ // discrepancies or misalignments between the map and reality.
+ MOZ_ASSERT(js::gc::IsCellPointerValidOrNull((const void*)stackWords[i]));
+
+ if (stackWords[i]) {
+ TraceRoot(trc, (JSObject**)&stackWords[i],
+ "Instance::traceWasmFrame: normal word");
+ }
+ }
+
+ // Finally, deal with any GC-managed fields in the DebugFrame, if it is
+ // present.
+ if (map->hasDebugFrame) {
+ DebugFrame* debugFrame = DebugFrame::from(frame);
+ char* debugFrameP = (char*)debugFrame;
+
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the value may
+ // not be a traceable JSObject*.
+ ASSERT_ANYREF_IS_JSOBJECT;
+
+ for (size_t i = 0; i < MaxRegisterResults; i++) {
+ if (debugFrame->hasSpilledRegisterRefResult(i)) {
+ char* resultRefP = debugFrameP + DebugFrame::offsetOfRegisterResult(i);
+ TraceNullableRoot(
+ trc, (JSObject**)resultRefP,
+ "Instance::traceWasmFrame: DebugFrame::resultResults_");
+ }
+ }
+
+ if (debugFrame->hasCachedReturnJSValue()) {
+ char* cachedReturnJSValueP =
+ debugFrameP + DebugFrame::offsetOfCachedReturnJSValue();
+ TraceRoot(trc, (js::Value*)cachedReturnJSValueP,
+ "Instance::traceWasmFrame: DebugFrame::cachedReturnJSValue_");
+ }
+ }
+
+ return scanStart + numMappedBytes - 1;
+}
+
+WasmMemoryObject* Instance::memory() const { return memory_; }
+
+SharedMem<uint8_t*> Instance::memoryBase() const {
+ MOZ_ASSERT(metadata().usesMemory());
+ MOZ_ASSERT(tlsData()->memoryBase == memory_->buffer().dataPointerEither());
+ return memory_->buffer().dataPointerEither();
+}
+
+SharedArrayRawBuffer* Instance::sharedMemoryBuffer() const {
+ MOZ_ASSERT(memory_->isShared());
+ return memory_->sharedArrayRawBuffer();
+}
+
+WasmInstanceObject* Instance::objectUnbarriered() const {
+ return object_.unbarrieredGet();
+}
+
+WasmInstanceObject* Instance::object() const { return object_; }
+
+static bool EnsureEntryStubs(const Instance& instance, uint32_t funcIndex,
+ const FuncExport** funcExport,
+ void** interpEntry) {
+ Tier tier = instance.code().bestTier();
+
+ size_t funcExportIndex;
+ *funcExport =
+ &instance.metadata(tier).lookupFuncExport(funcIndex, &funcExportIndex);
+
+ const FuncExport& fe = **funcExport;
+ if (fe.hasEagerStubs()) {
+ *interpEntry = instance.codeBase(tier) + fe.eagerInterpEntryOffset();
+ return true;
+ }
+
+ MOZ_ASSERT(!instance.isAsmJS(), "only wasm can lazily export functions");
+
+ // If the best tier is Ion, life is simple: background compilation has
+ // already completed and has been committed, so there's no risk of race
+ // conditions here.
+ //
+ // If the best tier is Baseline, there could be a background compilation
+ // happening at the same time. The background compilation will first lock
+ // the tier-1 lazy stubs, to stop new baseline stubs from being generated,
+ // and then lock the tier-2 stubs in order to generate them.
+ //
+ // - either we take the tier1 lazy stub lock before the background
+ // compilation gets it, then we generate the lazy stub for tier1. When the
+ // background thread gets the tier1 lazy stub lock, it will see it has a
+ // lazy stub and will recompile it for tier2.
+ // - or we don't take the lock here first. Background compilation won't
+ // find a lazy stub for this function, thus won't generate it. So we'll do
+ // it ourselves after taking the tier2 lock.
+
+ auto stubs = instance.code(tier).lazyStubs().lock();
+ *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
+ if (*interpEntry) {
+ return true;
+ }
+
+ // The best tier might have changed after we've taken the lock.
+ Tier prevTier = tier;
+ tier = instance.code().bestTier();
+ const CodeTier& codeTier = instance.code(tier);
+ if (tier == prevTier) {
+ if (!stubs->createOne(funcExportIndex, codeTier)) {
+ return false;
+ }
+
+ *interpEntry = stubs->lookupInterpEntry(fe.funcIndex());
+ MOZ_ASSERT(*interpEntry);
+ return true;
+ }
+
+ MOZ_RELEASE_ASSERT(prevTier == Tier::Baseline && tier == Tier::Optimized);
+ auto stubs2 = instance.code(tier).lazyStubs().lock();
+
+ // If it didn't have a stub in the first tier, background compilation
+ // shouldn't have made one in the second tier.
+ MOZ_ASSERT(!stubs2->hasStub(fe.funcIndex()));
+
+ if (!stubs2->createOne(funcExportIndex, codeTier)) {
+ return false;
+ }
+
+ *interpEntry = stubs2->lookupInterpEntry(fe.funcIndex());
+ MOZ_ASSERT(*interpEntry);
+ return true;
+}
+
+static bool GetInterpEntry(JSContext* cx, Instance& instance,
+ uint32_t funcIndex, CallArgs args,
+ void** interpEntry, const FuncType** funcType) {
+ const FuncExport* funcExport;
+ if (!EnsureEntryStubs(instance, funcIndex, &funcExport, interpEntry)) {
+ return false;
+ }
+
+#ifdef DEBUG
+ // EnsureEntryStubs() has ensured jit-entry stubs have been created and
+ // installed in funcIndex's JumpTable entry.
+ if (!funcExport->hasEagerStubs() && funcExport->canHaveJitEntry()) {
+ if (!EnsureBuiltinThunksInitialized()) {
+ return false;
+ }
+ JSFunction& callee = args.callee().as<JSFunction>();
+ void* provisionalJitEntryStub = ProvisionalJitEntryStub();
+ MOZ_ASSERT(provisionalJitEntryStub);
+ MOZ_ASSERT(callee.isWasmWithJitEntry());
+ MOZ_ASSERT(*callee.wasmJitEntry() != provisionalJitEntryStub);
+ }
+#endif
+
+ *funcType = &funcExport->funcType();
+ return true;
+}
+
+bool wasm::ResultsToJSValue(JSContext* cx, ResultType type,
+ void* registerResultLoc,
+ Maybe<char*> stackResultsLoc,
+ MutableHandleValue rval) {
+ if (type.empty()) {
+ // No results: set to undefined, and we're done.
+ rval.setUndefined();
+ return true;
+ }
+
+ // If we added support for multiple register results, we'd need to establish a
+ // convention for how to store them to memory in registerResultLoc. For now
+ // we can punt.
+ static_assert(MaxRegisterResults == 1);
+
+ // Stack results written to stackResultsLoc; register result written
+ // to registerResultLoc.
+
+ // First, convert the register return value, and prepare to iterate in
+ // push order. Note that if the register result is a reference type,
+ // it may be unrooted, so ToJSValue_anyref must not GC in that case.
+ ABIResultIter iter(type);
+ DebugOnly<bool> usedRegisterResult = false;
+ for (; !iter.done(); iter.next()) {
+ if (iter.cur().inRegister()) {
+ MOZ_ASSERT(!usedRegisterResult);
+ if (!ToJSValue<DebugCodegenVal>(cx, registerResultLoc, iter.cur().type(),
+ rval)) {
+ return false;
+ }
+ usedRegisterResult = true;
+ }
+ }
+ MOZ_ASSERT(usedRegisterResult);
+
+ MOZ_ASSERT((stackResultsLoc.isSome()) == (iter.count() > 1));
+ if (!stackResultsLoc) {
+ // A single result: we're done.
+ return true;
+ }
+
+ // Otherwise, collect results in an array, in push order.
+ Rooted<ArrayObject*> array(cx, NewDenseEmptyArray(cx));
+ if (!array) {
+ return false;
+ }
+ RootedValue tmp(cx);
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ const ABIResult& result = iter.cur();
+ if (result.onStack()) {
+ char* loc = stackResultsLoc.value() + result.stackOffset();
+ if (!ToJSValue<DebugCodegenVal>(cx, loc, result.type(), &tmp)) {
+ return false;
+ }
+ if (!NewbornArrayPush(cx, array, tmp)) {
+ return false;
+ }
+ } else {
+ if (!NewbornArrayPush(cx, array, rval)) {
+ return false;
+ }
+ }
+ }
+ rval.set(ObjectValue(*array));
+ return true;
+}
+
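+// ReturnToJSResultCollector owns the stack-results area for a call from JS
+// into wasm and keeps any reference-typed values written into that area
+// rooted (via a CustomAutoRooter) until collect() has converted them to JS
+// values.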
+class MOZ_RAII ReturnToJSResultCollector {
+ class MOZ_RAII StackResultsRooter : public JS::CustomAutoRooter {
+ ReturnToJSResultCollector& collector_;
+
+ public:
+ StackResultsRooter(JSContext* cx, ReturnToJSResultCollector& collector)
+ : JS::CustomAutoRooter(cx), collector_(collector) {}
+
+ void trace(JSTracer* trc) final {
+ for (ABIResultIter iter(collector_.type_); !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (result.onStack() && result.type().isReference()) {
+ char* loc = collector_.stackResultsArea_.get() + result.stackOffset();
+ JSObject** refLoc = reinterpret_cast<JSObject**>(loc);
+ TraceNullableRoot(trc, refLoc, "StackResultsRooter::trace");
+ }
+ }
+ }
+ };
+ friend class StackResultsRooter;
+
+ ResultType type_;
+ UniquePtr<char[], JS::FreePolicy> stackResultsArea_;
+ Maybe<StackResultsRooter> rooter_;
+
+ public:
+ explicit ReturnToJSResultCollector(const ResultType& type) : type_(type) {}
+ bool init(JSContext* cx) {
+ bool needRooter = false;
+ ABIResultIter iter(type_);
+ for (; !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (result.onStack() && result.type().isReference()) {
+ needRooter = true;
+ }
+ }
+ uint32_t areaBytes = iter.stackBytesConsumedSoFar();
+ MOZ_ASSERT_IF(needRooter, areaBytes > 0);
+ if (areaBytes > 0) {
+ // It is necessary to zero storage for ref results, and it doesn't
+ // hurt to do so for other POD results.
+ stackResultsArea_ = cx->make_zeroed_pod_array<char>(areaBytes);
+ if (!stackResultsArea_) {
+ return false;
+ }
+ if (needRooter) {
+ rooter_.emplace(cx, *this);
+ }
+ }
+ return true;
+ }
+
+ void* stackResultsArea() {
+ MOZ_ASSERT(stackResultsArea_);
+ return stackResultsArea_.get();
+ }
+
+ bool collect(JSContext* cx, void* registerResultLoc,
+ MutableHandleValue rval) {
+ Maybe<char*> stackResultsLoc =
+ stackResultsArea_ ? Some(stackResultsArea_.get()) : Nothing();
+ return ResultsToJSValue(cx, type_, registerResultLoc, stackResultsLoc,
+ rval);
+ }
+};
+
+bool Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args) {
+ if (memory_) {
+ // If there has been a moving grow, this Instance should have been notified.
+ MOZ_RELEASE_ASSERT(memory_->buffer().dataPointerEither() == memoryBase());
+ }
+
+ void* interpEntry;
+ const FuncType* funcType;
+ if (!GetInterpEntry(cx, *this, funcIndex, args, &interpEntry, &funcType)) {
+ return false;
+ }
+
+ if (funcType->hasUnexposableArgOrRet()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+
+ ArgTypeVector argTypes(*funcType);
+ ResultType resultType(ResultType::Vector(funcType->results()));
+ ReturnToJSResultCollector results(resultType);
+ if (!results.init(cx)) {
+ return false;
+ }
+
+ // The calling convention for an external call into wasm is to pass an
+ // array of 16-byte values where each value contains either a coerced int32
+ // (in the low word), or a double value (in the low dword) value, with the
+ // coercions specified by the wasm signature. The external entry point
+ // unpacks this array into the system-ABI-specified registers and stack
+ // memory and then calls into the internal entry point. The return value is
+ // stored in the first element of the array (which, therefore, must have
+ // length >= 1).
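+ //
+ // For a simple signature such as (i32, f64) -> f64 with no stack results
+ // (a sketch of the rule above): exportArgs[0] initially holds the coerced
+ // i32 argument and exportArgs[1] the f64 argument; after the call,
+ // exportArgs[0] has been overwritten with the f64 return value.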
+ Vector<ExportArg, 8> exportArgs(cx);
+ if (!exportArgs.resize(
+ std::max<size_t>(1, argTypes.lengthWithStackResults()))) {
+ return false;
+ }
+
+ ASSERT_ANYREF_IS_JSOBJECT;
+ Rooted<GCVector<JSObject*, 8, SystemAllocPolicy>> refs(cx);
+
+ DebugCodegen(DebugChannel::Function, "wasm-function[%d] arguments [",
+ funcIndex);
+ RootedValue v(cx);
+ for (size_t i = 0; i < argTypes.lengthWithStackResults(); ++i) {
+ void* rawArgLoc = &exportArgs[i];
+ if (argTypes.isSyntheticStackResultPointerArg(i)) {
+ *reinterpret_cast<void**>(rawArgLoc) = results.stackResultsArea();
+ continue;
+ }
+ size_t naturalIdx = argTypes.naturalIndex(i);
+ v = naturalIdx < args.length() ? args[naturalIdx] : UndefinedValue();
+ ValType type = funcType->arg(naturalIdx);
+ if (!ToWebAssemblyValue<DebugCodegenVal>(cx, v, type, rawArgLoc, true)) {
+ return false;
+ }
+ if (type.isReference()) {
+ void* ptr = *reinterpret_cast<void**>(rawArgLoc);
+ // Store in rooted array until no more GC is possible.
+ switch (type.refTypeKind()) {
+ case RefType::Func: {
+ RootedFunction ref(cx, FuncRef::fromCompiledCode(ptr).asJSFunction());
+ if (!refs.emplaceBack(ref)) {
+ return false;
+ }
+ break;
+ }
+ case RefType::Extern:
+ case RefType::Eq: {
+ RootedAnyRef ref(cx, AnyRef::fromCompiledCode(ptr));
+ ASSERT_ANYREF_IS_JSOBJECT;
+ if (!refs.emplaceBack(ref.get().asJSObject())) {
+ return false;
+ }
+ break;
+ }
+ case RefType::TypeIndex:
+ MOZ_CRASH("temporarily unsupported Ref type in callExport");
+ }
+ DebugCodegen(DebugChannel::Function, "/(#%d)", int(refs.length() - 1));
+ }
+ }
+
+ // Copy over reference values from the rooted array, if any.
+ if (refs.length() > 0) {
+ DebugCodegen(DebugChannel::Function, "; ");
+ size_t nextRef = 0;
+ for (size_t i = 0; i < argTypes.lengthWithStackResults(); ++i) {
+ if (argTypes.isSyntheticStackResultPointerArg(i)) {
+ continue;
+ }
+ size_t naturalIdx = argTypes.naturalIndex(i);
+ ValType type = funcType->arg(naturalIdx);
+ if (type.isReference()) {
+ void** rawArgLoc = (void**)&exportArgs[i];
+ *rawArgLoc = refs[nextRef++];
+ DebugCodegen(DebugChannel::Function, " ref(#%d) := %p ",
+ int(nextRef - 1), *rawArgLoc);
+ }
+ }
+ refs.clear();
+ }
+
+ DebugCodegen(DebugChannel::Function, "]\n");
+
+ {
+ JitActivation activation(cx);
+
+ // Call the per-exported-function trampoline created by GenerateEntry.
+ auto funcPtr = JS_DATA_TO_FUNC_PTR(ExportFuncPtr, interpEntry);
+ if (!CALL_GENERATED_2(funcPtr, exportArgs.begin(), tlsData())) {
+ return false;
+ }
+ }
+
+ if (isAsmJS() && args.isConstructing()) {
+ // By spec, when a JS function is called as a constructor and this
+ // function returns a primitive value, which is the case for all asm.js
+ // exported functions, the returned value is discarded and an empty
+ // object is returned instead.
+ PlainObject* obj = NewBuiltinClassInstance<PlainObject>(cx);
+ if (!obj) {
+ return false;
+ }
+ args.rval().set(ObjectValue(*obj));
+ return true;
+ }
+
+ // Note that we're not rooting the register result, if any; we depend
+ // on ResultsCollector::collect to root the value on our behalf,
+ // before causing any GC.
+ void* registerResultLoc = &exportArgs[0];
+ DebugCodegen(DebugChannel::Function, "wasm-function[%d]; results [",
+ funcIndex);
+ if (!results.collect(cx, registerResultLoc, args.rval())) {
+ return false;
+ }
+ DebugCodegen(DebugChannel::Function, "]\n");
+
+ return true;
+}
+
+JSAtom* Instance::getFuncDisplayAtom(JSContext* cx, uint32_t funcIndex) const {
+ // The "display name" of a function is primarily shown in Error.stack which
+ // also includes location, so use getFuncNameBeforeLocation.
+ UTF8Bytes name;
+ if (!metadata().getFuncNameBeforeLocation(funcIndex, &name)) {
+ return nullptr;
+ }
+
+ return AtomizeUTF8Chars(cx, name.begin(), name.length());
+}
+
+void Instance::ensureProfilingLabels(bool profilingEnabled) const {
+ return code_->ensureProfilingLabels(profilingEnabled);
+}
+
+void Instance::onMovingGrowMemory() {
+ MOZ_ASSERT(!isAsmJS());
+ MOZ_ASSERT(!memory_->isShared());
+
+ ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
+ tlsData()->memoryBase = buffer.dataPointer();
+ tlsData()->boundsCheckLimit32 = memory_->boundsCheckLimit32();
+}
+
+void Instance::onMovingGrowTable(const Table* theTable) {
+ MOZ_ASSERT(!isAsmJS());
+
+ // `theTable` has grown and we must update cached data for it. Importantly,
+ // we can have cached those data in more than one location: we'll have
+ // cached them once for each time the table was imported into this instance.
+ //
+ // When an instance is registered as an observer of a table it is only
+ // registered once, regardless of how many times the table was imported.
+ // Thus when a table is grown, onMovingGrowTable() is only invoked once for
+ // the table.
+ //
+ // Ergo we must go through the entire list of tables in the instance here
+ // and check for the table in all the cached-data slots; we can't exit after
+ // the first hit.
+
+ for (uint32_t i = 0; i < tables_.length(); i++) {
+ if (tables_[i] == theTable) {
+ TableTls& table = tableTls(metadata().tables[i]);
+ table.length = tables_[i]->length();
+ table.functionBase = tables_[i]->functionBase();
+ }
+ }
+}
+
+JSString* Instance::createDisplayURL(JSContext* cx) {
+ // In the best case, we simply have a URL, from a streaming compilation of a
+ // fetched Response.
+
+ if (metadata().filenameIsURL) {
+ return NewStringCopyZ<CanGC>(cx, metadata().filename.get());
+ }
+
+ // Otherwise, build wasm module URL from following parts:
+ // - "wasm:" as protocol;
+ // - URI-encoded filename from metadata (if it can be encoded), plus ":";
+ // - 64-bit hash of the module bytes (as hex dump).
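+ //
+ // A hypothetical example, with debugging enabled:
+ // "wasm:my%20module.js:0f1e2d3c4b5a6978"
+ // (without debug info, the ":" and hash suffix are omitted).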
+
+ JSStringBuilder result(cx);
+ if (!result.append("wasm:")) {
+ return nullptr;
+ }
+
+ if (const char* filename = metadata().filename.get()) {
+ // EncodeURI fails on invalid chars or on OOM -- only the OOM case leaves
+ // an exception pending for the caller.
+ JSString* filenamePrefix = EncodeURI(cx, filename, strlen(filename));
+ if (!filenamePrefix) {
+ if (cx->isThrowingOutOfMemory()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(!cx->isThrowingOverRecursed());
+ cx->clearPendingException();
+ return nullptr;
+ }
+
+ if (!result.append(filenamePrefix)) {
+ return nullptr;
+ }
+ }
+
+ if (metadata().debugEnabled) {
+ if (!result.append(":")) {
+ return nullptr;
+ }
+
+ const ModuleHash& hash = metadata().debugHash;
+ for (size_t i = 0; i < sizeof(ModuleHash); i++) {
+ char digit1 = hash[i] / 16, digit2 = hash[i] % 16;
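+ // Emit the high nibble, then the low nibble, as lowercase hex characters.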
+ if (!result.append(
+ (char)(digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10))) {
+ return nullptr;
+ }
+ if (!result.append(
+ (char)(digit2 < 10 ? digit2 + '0' : digit2 + 'a' - 10))) {
+ return nullptr;
+ }
+ }
+ }
+
+ return result.finishString();
+}
+
+WasmBreakpointSite* Instance::getOrCreateBreakpointSite(JSContext* cx,
+ uint32_t offset) {
+ MOZ_ASSERT(debugEnabled());
+ return debug().getOrCreateBreakpointSite(cx, this, offset);
+}
+
+void Instance::destroyBreakpointSite(JSFreeOp* fop, uint32_t offset) {
+ MOZ_ASSERT(debugEnabled());
+ return debug().destroyBreakpointSite(fop, this, offset);
+}
+
+void Instance::disassembleExport(JSContext* cx, uint32_t funcIndex, Tier tier,
+ PrintCallback callback) const {
+ const MetadataTier& metadataTier = metadata(tier);
+ const FuncExport& funcExport = metadataTier.lookupFuncExport(funcIndex);
+ const CodeRange& range = metadataTier.codeRange(funcExport);
+ const CodeTier& codeTier = code(tier);
+ const ModuleSegment& segment = codeTier.segment();
+
+ MOZ_ASSERT(range.begin() < segment.length());
+ MOZ_ASSERT(range.end() < segment.length());
+
+ uint8_t* functionCode = segment.base() + range.begin();
+ jit::Disassemble(functionCode, range.end() - range.begin(), callback);
+}
+
+void Instance::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode,
+ Table::SeenSet* seenTables, size_t* code,
+ size_t* data) const {
+ *data += mallocSizeOf(this);
+ *data += mallocSizeOf(tlsData_.get());
+ for (const SharedTable& table : tables_) {
+ *data += table->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenTables);
+ }
+
+ if (maybeDebug_) {
+ maybeDebug_->addSizeOfMisc(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+ }
+
+ code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+}
diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
new file mode 100644
index 0000000000..130bb1cfdb
--- /dev/null
+++ b/js/src/wasm/WasmInstance.h
@@ -0,0 +1,236 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_instance_h
+#define wasm_instance_h
+
+#include "gc/Barrier.h"
+#include "gc/Zone.h"
+#include "vm/SharedMem.h"
+#include "wasm/TypedObject.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmDebug.h"
+#include "wasm/WasmFrameIter.h" // js::wasm::WasmFrameIter
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmTable.h"
+
+namespace js {
+namespace wasm {
+
+// Instance represents a wasm instance and provides all the support for runtime
+// execution of code in the instance. Instances share various immutable data
+// structures with the Module from which they were instantiated and other
+// instances instantiated from the same Module. However, an Instance has no
+// direct reference to its source Module, which allows a Module to be destroyed
+// while it still has live Instances.
+//
+// The instance's code may be shared among multiple instances provided none of
+// those instances are being debugged. Instances that are being debugged own
+// their code.
+
+class Instance {
+ JS::Realm* const realm_;
+ WeakHeapPtrWasmInstanceObject object_;
+ void* jsJitArgsRectifier_;
+ void* jsJitExceptionHandler_;
+ void* preBarrierCode_;
+ const SharedCode code_;
+ const UniqueTlsData tlsData_;
+ const GCPtrWasmMemoryObject memory_;
+ const SharedExceptionTagVector exceptionTags_;
+ const SharedTableVector tables_;
+ DataSegmentVector passiveDataSegments_;
+ ElemSegmentVector passiveElemSegments_;
+ const UniqueDebugState maybeDebug_;
+ bool hasGcTypes_;
+
+ // Internal helpers:
+ const void** addressOfTypeId(const TypeIdDesc& typeId) const;
+ FuncImportTls& funcImportTls(const FuncImport& fi);
+ TableTls& tableTls(const TableDesc& td) const;
+
+ // Only WasmInstanceObject can call the private trace function.
+ friend class js::WasmInstanceObject;
+ void tracePrivate(JSTracer* trc);
+
+ bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc,
+ uint64_t* argv);
+
+ public:
+ Instance(JSContext* cx, HandleWasmInstanceObject object, SharedCode code,
+ UniqueTlsData tlsData, HandleWasmMemoryObject memory,
+ SharedExceptionTagVector&& exceptionTags, SharedTableVector&& tables,
+ UniqueDebugState maybeDebug);
+ ~Instance();
+ bool init(JSContext* cx, const JSFunctionVector& funcImports,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ const DataSegmentVector& dataSegments,
+ const ElemSegmentVector& elemSegments);
+ void trace(JSTracer* trc);
+
+ // Trace any GC roots on the stack, for the frame associated with |wfi|,
+ // whose next instruction to execute is |nextPC|.
+ //
+ // For consistency checking of StackMap sizes in debug builds, this also
+ // takes |highestByteVisitedInPrevFrame|, which is the address of the
+ // highest byte scanned in the frame below this one on the stack, and in
+ // turn it returns the address of the highest byte scanned in this frame.
+ uintptr_t traceFrame(JSTracer* trc, const wasm::WasmFrameIter& wfi,
+ uint8_t* nextPC,
+ uintptr_t highestByteVisitedInPrevFrame);
+
+ JS::Realm* realm() const { return realm_; }
+ const Code& code() const { return *code_; }
+ const CodeTier& code(Tier t) const { return code_->codeTier(t); }
+ bool debugEnabled() const { return !!maybeDebug_; }
+ DebugState& debug() { return *maybeDebug_; }
+ const ModuleSegment& moduleSegment(Tier t) const { return code_->segment(t); }
+ TlsData* tlsData() const { return tlsData_.get(); }
+ uint8_t* globalData() const { return (uint8_t*)&tlsData_->globalArea; }
+ uint8_t* codeBase(Tier t) const { return code_->segment(t).base(); }
+ const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
+ const Metadata& metadata() const { return code_->metadata(); }
+ bool isAsmJS() const { return metadata().isAsmJS(); }
+ const SharedTableVector& tables() const { return tables_; }
+ SharedMem<uint8_t*> memoryBase() const;
+ WasmMemoryObject* memory() const;
+ size_t memoryMappedSize() const;
+ SharedArrayRawBuffer* sharedMemoryBuffer() const; // never null
+ bool memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const;
+ bool memoryAccessInBounds(uint8_t* addr, unsigned numBytes) const;
+ const SharedExceptionTagVector& exceptionTags() const {
+ return exceptionTags_;
+ }
+
+ static constexpr size_t offsetOfJSJitArgsRectifier() {
+ return offsetof(Instance, jsJitArgsRectifier_);
+ }
+ static constexpr size_t offsetOfJSJitExceptionHandler() {
+ return offsetof(Instance, jsJitExceptionHandler_);
+ }
+ static constexpr size_t offsetOfPreBarrierCode() {
+ return offsetof(Instance, preBarrierCode_);
+ }
+
+ // This method returns a pointer to the GC object that owns this Instance.
+ // Instances may be reached via weak edges (e.g., Realm::instances_)
+ // so this perform a read-barrier on the returned object unless the barrier
+ // is explicitly waived.
+
+ WasmInstanceObject* object() const;
+ WasmInstanceObject* objectUnbarriered() const;
+
+ // Execute the given export given the JS call arguments, storing the return
+ // value in args.rval.
+
+ [[nodiscard]] bool callExport(JSContext* cx, uint32_t funcIndex,
+ CallArgs args);
+
+ // Return the name associated with a given function index, or generate one
+ // if none was given by the module.
+
+ JSAtom* getFuncDisplayAtom(JSContext* cx, uint32_t funcIndex) const;
+ void ensureProfilingLabels(bool profilingEnabled) const;
+
+ // Called by Wasm(Memory|Table)Object when a moving resize occurs:
+
+ void onMovingGrowMemory();
+ void onMovingGrowTable(const Table* theTable);
+
+ // Called to apply a single ElemSegment at a given offset, assuming
+ // that all bounds validation has already been performed.
+
+ [[nodiscard]] bool initElems(uint32_t tableIndex, const ElemSegment& seg,
+ uint32_t dstOffset, uint32_t srcOffset,
+ uint32_t len);
+
+ // Debugger support:
+
+ JSString* createDisplayURL(JSContext* cx);
+ WasmBreakpointSite* getOrCreateBreakpointSite(JSContext* cx, uint32_t offset);
+ void destroyBreakpointSite(JSFreeOp* fop, uint32_t offset);
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, Table::SeenSet* seenTables,
+ size_t* code, size_t* data) const;
+
+ // Wasm disassembly support
+
+ void disassembleExport(JSContext* cx, uint32_t funcIndex, Tier tier,
+ PrintCallback callback) const;
+
+ public:
+ // Functions to be called directly from wasm code.
+ static int32_t callImport_general(Instance*, int32_t, int32_t, uint64_t*);
+ static uint32_t memoryGrow_i32(Instance* instance, uint32_t delta);
+ static uint32_t memorySize_i32(Instance* instance);
+ static int32_t wait_i32(Instance* instance, uint32_t byteOffset,
+ int32_t value, int64_t timeout);
+ static int32_t wait_i64(Instance* instance, uint32_t byteOffset,
+ int64_t value, int64_t timeout);
+ static int32_t wake(Instance* instance, uint32_t byteOffset, int32_t count);
+ static int32_t memCopy(Instance* instance, uint32_t destByteOffset,
+ uint32_t srcByteOffset, uint32_t len,
+ uint8_t* memBase);
+ static int32_t memCopyShared(Instance* instance, uint32_t destByteOffset,
+ uint32_t srcByteOffset, uint32_t len,
+ uint8_t* memBase);
+ static int32_t dataDrop(Instance* instance, uint32_t segIndex);
+ static int32_t memFill(Instance* instance, uint32_t byteOffset,
+ uint32_t value, uint32_t len, uint8_t* memBase);
+ static int32_t memFillShared(Instance* instance, uint32_t byteOffset,
+ uint32_t value, uint32_t len, uint8_t* memBase);
+ static int32_t memInit(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len, uint32_t segIndex);
+ static int32_t tableCopy(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len,
+ uint32_t dstTableIndex, uint32_t srcTableIndex);
+ static int32_t elemDrop(Instance* instance, uint32_t segIndex);
+ static int32_t tableFill(Instance* instance, uint32_t start, void* value,
+ uint32_t len, uint32_t tableIndex);
+ static void* tableGet(Instance* instance, uint32_t index,
+ uint32_t tableIndex);
+ static uint32_t tableGrow(Instance* instance, void* initValue, uint32_t delta,
+ uint32_t tableIndex);
+ static int32_t tableSet(Instance* instance, uint32_t index, void* value,
+ uint32_t tableIndex);
+ static uint32_t tableSize(Instance* instance, uint32_t tableIndex);
+ static int32_t tableInit(Instance* instance, uint32_t dstOffset,
+ uint32_t srcOffset, uint32_t len, uint32_t segIndex,
+ uint32_t tableIndex);
+ static void* refFunc(Instance* instance, uint32_t funcIndex);
+ static void preBarrierFiltering(Instance* instance, gc::Cell** location);
+ static void postBarrier(Instance* instance, gc::Cell** location);
+ static void postBarrierFiltering(Instance* instance, gc::Cell** location);
+ static void* structNew(Instance* instance, void* structDescr);
+ static void* structNarrow(Instance* instance, void* outputStructDescr,
+ void* maybeNullPtr);
+};
+
+using UniqueInstance = UniquePtr<Instance>;
+
+bool ResultsToJSValue(JSContext* cx, ResultType type, void* registerResultLoc,
+ Maybe<char*> stackResultsLoc, MutableHandleValue rval);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_instance_h
diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
new file mode 100644
index 0000000000..d0c3298cd8
--- /dev/null
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -0,0 +1,5593 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmIonCompile.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "jit/CodeGenerator.h"
+#include "jit/CompileInfo.h"
+#include "jit/Ion.h"
+#include "jit/IonOptimizationLevels.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmGC.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmOpIter.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+namespace {
+
+typedef Vector<MBasicBlock*, 8, SystemAllocPolicy> BlockVector;
+typedef Vector<MDefinition*, 8, SystemAllocPolicy> DefVector;
+
+struct IonCompilePolicy {
+ // We store SSA definitions in the value stack.
+ using Value = MDefinition*;
+ using ValueVector = DefVector;
+
+ // We store loop headers and then/else blocks in the control flow stack.
+ using ControlItem = MBasicBlock*;
+};
+
+using IonOpIter = OpIter<IonCompilePolicy>;
+
+class FunctionCompiler;
+
+// CallCompileState describes a call that is being compiled.
+
+class CallCompileState {
+ // A generator object that is passed each argument as it is compiled.
+ WasmABIArgGenerator abi_;
+
+ // Accumulates the register arguments while compiling arguments.
+ MWasmCall::Args regArgs_;
+
+ // Reserved argument for passing Instance* to builtin instance method calls.
+ ABIArg instanceArg_;
+
+ // The stack area in which the callee will write stack return values, or
+ // nullptr if no stack results.
+ MWasmStackResultArea* stackResultArea_ = nullptr;
+
+ // Only FunctionCompiler should be directly manipulating CallCompileState.
+ friend class FunctionCompiler;
+};
+
+// Encapsulates the compilation of a single wasm or asm.js function. The
+// function compiler handles the creation and final backend compilation of the
+// MIR graph.
+class FunctionCompiler {
+ struct ControlFlowPatch {
+ MControlInstruction* ins;
+ uint32_t index;
+ ControlFlowPatch(MControlInstruction* ins, uint32_t index)
+ : ins(ins), index(index) {}
+ };
+
+ typedef Vector<ControlFlowPatch, 0, SystemAllocPolicy> ControlFlowPatchVector;
+ typedef Vector<ControlFlowPatchVector, 0, SystemAllocPolicy>
+ ControlFlowPatchsVector;
+
+ const ModuleEnvironment& moduleEnv_;
+ IonOpIter iter_;
+ const FuncCompileInput& func_;
+ const ValTypeVector& locals_;
+ size_t lastReadCallSite_;
+
+ TempAllocator& alloc_;
+ MIRGraph& graph_;
+ const CompileInfo& info_;
+ MIRGenerator& mirGen_;
+
+ MBasicBlock* curBlock_;
+ uint32_t maxStackArgBytes_;
+
+ uint32_t loopDepth_;
+ uint32_t blockDepth_;
+ ControlFlowPatchsVector blockPatches_;
+
+ // TLS pointer argument to the current function.
+ MWasmParameter* tlsPointer_;
+ MWasmParameter* stackResultPointer_;
+
+ public:
+ FunctionCompiler(const ModuleEnvironment& moduleEnv, Decoder& decoder,
+ const FuncCompileInput& func, const ValTypeVector& locals,
+ MIRGenerator& mirGen)
+ : moduleEnv_(moduleEnv),
+ iter_(moduleEnv, decoder),
+ func_(func),
+ locals_(locals),
+ lastReadCallSite_(0),
+ alloc_(mirGen.alloc()),
+ graph_(mirGen.graph()),
+ info_(mirGen.outerInfo()),
+ mirGen_(mirGen),
+ curBlock_(nullptr),
+ maxStackArgBytes_(0),
+ loopDepth_(0),
+ blockDepth_(0),
+ tlsPointer_(nullptr),
+ stackResultPointer_(nullptr) {}
+
+ const ModuleEnvironment& moduleEnv() const { return moduleEnv_; }
+
+ IonOpIter& iter() { return iter_; }
+ TempAllocator& alloc() const { return alloc_; }
+ // FIXME(1401675): Replace with BlockType.
+ uint32_t funcIndex() const { return func_.index; }
+ const FuncType& funcType() const {
+ return *moduleEnv_.funcs[func_.index].type;
+ }
+
+ BytecodeOffset bytecodeOffset() const { return iter_.bytecodeOffset(); }
+ BytecodeOffset bytecodeIfNotAsmJS() const {
+ return moduleEnv_.isAsmJS() ? BytecodeOffset() : iter_.bytecodeOffset();
+ }
+
+ bool init() {
+ // Prepare the entry block for MIR generation:
+
+ const ArgTypeVector args(funcType());
+
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ if (!newBlock(/* prev */ nullptr, &curBlock_)) {
+ return false;
+ }
+
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
+ curBlock_->add(ins);
+ if (args.isSyntheticStackResultPointerArg(i.index())) {
+ MOZ_ASSERT(stackResultPointer_ == nullptr);
+ stackResultPointer_ = ins;
+ } else {
+ curBlock_->initSlot(info().localSlot(args.naturalIndex(i.index())),
+ ins);
+ }
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ }
+
+ // Set up a parameter that receives the hidden TLS pointer argument.
+ tlsPointer_ =
+ MWasmParameter::New(alloc(), ABIArg(WasmTlsReg), MIRType::Pointer);
+ curBlock_->add(tlsPointer_);
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+
+ for (size_t i = args.lengthWithoutStackResults(); i < locals_.length();
+ i++) {
+ MInstruction* ins = nullptr;
+ switch (locals_[i].kind()) {
+ case ValType::I32:
+ ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
+ break;
+ case ValType::I64:
+ ins = MConstant::NewInt64(alloc(), 0);
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ ins =
+ MWasmFloatConstant::NewSimd128(alloc(), SimdConstant::SplatX4(0));
+ break;
+#else
+ return iter().fail("Ion has no SIMD support yet");
+#endif
+ case ValType::F32:
+ ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
+ break;
+ case ValType::F64:
+ ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
+ break;
+ case ValType::Ref:
+ ins = MWasmNullConstant::New(alloc());
+ break;
+ }
+
+ curBlock_->add(ins);
+ curBlock_->initSlot(info().localSlot(i), ins);
+ if (!mirGen_.ensureBallast()) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ void finish() {
+ mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
+
+ MOZ_ASSERT(loopDepth_ == 0);
+ MOZ_ASSERT(blockDepth_ == 0);
+#ifdef DEBUG
+ for (ControlFlowPatchVector& patches : blockPatches_) {
+ MOZ_ASSERT(patches.empty());
+ }
+#endif
+ MOZ_ASSERT(inDeadCode());
+ MOZ_ASSERT(done(), "all bytes must be consumed");
+ MOZ_ASSERT(func_.callSiteLineNums.length() == lastReadCallSite_);
+ }
+
+ /************************* Read-only interface (after local scope setup) */
+
+ MIRGenerator& mirGen() const { return mirGen_; }
+ MIRGraph& mirGraph() const { return graph_; }
+ const CompileInfo& info() const { return info_; }
+
+ MDefinition* getLocalDef(unsigned slot) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ return curBlock_->getSlot(info().localSlot(slot));
+ }
+
+ const ValTypeVector& locals() const { return locals_; }
+
+ /***************************** Code generation (after local scope setup) */
+
+ MDefinition* constant(const Value& v, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MConstant* constant = MConstant::New(alloc(), v, type);
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ MDefinition* constant(float f) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* cst = MWasmFloatConstant::NewFloat32(alloc(), f);
+ curBlock_->add(cst);
+ return cst;
+ }
+
+ MDefinition* constant(double d) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* cst = MWasmFloatConstant::NewDouble(alloc(), d);
+ curBlock_->add(cst);
+ return cst;
+ }
+
+ MDefinition* constant(int64_t i) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MConstant* constant = MConstant::NewInt64(alloc(), i);
+ curBlock_->add(constant);
+ return constant;
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ MDefinition* constant(V128 v) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MWasmFloatConstant* constant = MWasmFloatConstant::NewSimd128(
+ alloc(), SimdConstant::CreateSimd128((int8_t*)v.bytes));
+ curBlock_->add(constant);
+ return constant;
+ }
+#endif
+
+ MDefinition* nullRefConstant() {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ // MConstant has a lot of baggage so we don't use that here.
+ MWasmNullConstant* constant = MWasmNullConstant::New(alloc());
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ void fence() {
+ if (inDeadCode()) {
+ return;
+ }
+ MWasmFence* ins = MWasmFence::New(alloc());
+ curBlock_->add(ins);
+ }
+
+ template <class T>
+ MDefinition* unary(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* unary(MDefinition* op, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), op, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* binary(MDefinition* lhs, MDefinition* rhs) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), lhs, rhs);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ T* ins = T::New(alloc(), lhs, rhs, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* ursh(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MUrsh::NewWasm(alloc(), lhs, rhs, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* add(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MAdd::NewWasm(alloc(), lhs, rhs, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ bool mustPreserveNaN(MIRType type) {
+ return IsFloatingPointType(type) && !moduleEnv().isAsmJS();
+ }
+
+ MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ // wasm can't fold x - 0.0 because of NaN with custom payloads.
+ MSub* ins = MSub::NewWasm(alloc(), lhs, rhs, type, mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* nearbyInt(MDefinition* input, RoundingMode roundingMode) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ auto* ins = MNearbyInt::New(alloc(), input, input->type(), roundingMode);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ bool isMax) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ if (mustPreserveNaN(type)) {
+ // Convert signaling NaNs to quiet NaNs.
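+ // (Running each operand through an arithmetic op -- here a subtraction of
+ // 0.0 -- turns any signaling NaN into a quiet NaN, so MMinMax itself never
+ // sees an sNaN.)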
+ MDefinition* zero = constant(DoubleValue(0.0), type);
+ lhs = sub(lhs, zero, type);
+ rhs = sub(rhs, zero, type);
+ }
+
+ MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ MMul::Mode mode) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ // wasm can't fold x * 1.0 because of NaN with custom payloads.
+ auto* ins =
+ MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ bool unsignd) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ bool trapOnError = !moduleEnv().isAsmJS();
+ if (!unsignd && type == MIRType::Int32) {
+ // Enforce the signedness of the operation by coercing the operands
+ // to signed. Otherwise, operands that "look" unsigned to Ion but
+ // are not unsigned to Baldr (e.g., unsigned right shifts) may lead to
+ // the operation being executed unsigned. Applies to mod() as well.
+ //
+ // Do this for Int32 only since Int64 is not subject to the same
+ // issues.
+ //
+ // Note the offsets passed to MWasmBuiltinTruncateToInt32 are wrong here,
+ // but it doesn't matter: they're not codegen'd to calls since inputs
+ // already are int32.
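+ //
+ // A sketch of the failure mode this guards against: an operand produced by
+ // an unsigned right shift is typed as unsigned in Ion, so without the
+ // explicit truncation below a signed i32 division on that operand could be
+ // lowered as an unsigned division.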
+ auto* lhs2 = createTruncateToInt32(lhs);
+ curBlock_->add(lhs2);
+ lhs = lhs2;
+ auto* rhs2 = createTruncateToInt32(rhs);
+ curBlock_->add(rhs2);
+ rhs = rhs2;
+ }
+
+ // For x86 and arm we implement i64 div via a C++ builtin.
+ // A call to a C++ builtin requires the TLS pointer.
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ if (type == MIRType::Int64) {
+ auto* ins =
+ MWasmBuiltinDivI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,
+ trapOnError, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+#endif
+
+ auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
+ bytecodeOffset(), mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MInstruction* createTruncateToInt32(MDefinition* op) {
+ if (op->type() == MIRType::Double || op->type() == MIRType::Float32) {
+ return MWasmBuiltinTruncateToInt32::New(alloc(), op, tlsPointer_);
+ }
+
+ return MTruncateToInt32::New(alloc(), op);
+ }
+
+ MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type,
+ bool unsignd) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ bool trapOnError = !moduleEnv().isAsmJS();
+ if (!unsignd && type == MIRType::Int32) {
+ // See block comment in div().
+ auto* lhs2 = createTruncateToInt32(lhs);
+ curBlock_->add(lhs2);
+ lhs = lhs2;
+ auto* rhs2 = createTruncateToInt32(rhs);
+ curBlock_->add(rhs2);
+ rhs = rhs2;
+ }
+
+ // For x86 and arm we implement i64 mod via a C++ builtin.
+ // A call to a C++ builtin requires the TLS pointer.
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ if (type == MIRType::Int64) {
+ auto* ins =
+ MWasmBuiltinModI64::New(alloc(), lhs, rhs, tlsPointer_, unsignd,
+ trapOnError, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+#endif
+
+    // This case must be handled separately because we call the builtin thunk
+    // for it and therefore need to add a dependency on tlsPointer.
+ if (type == MIRType::Double) {
+ auto* ins = MWasmBuiltinModD::New(alloc(), lhs, rhs, tlsPointer_, type,
+ bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError,
+ bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* bitnot(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MBitNot::New(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr,
+ MDefinition* condExpr) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* extendI32(MDefinition* op, bool isUnsigned) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* signExtend(MDefinition* op, uint32_t srcSize,
+ uint32_t targetSize) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ MInstruction* ins;
+ switch (targetSize) {
+ case 4: {
+ MSignExtendInt32::Mode mode;
+ switch (srcSize) {
+ case 1:
+ mode = MSignExtendInt32::Byte;
+ break;
+ case 2:
+ mode = MSignExtendInt32::Half;
+ break;
+ default:
+ MOZ_CRASH("Bad sign extension");
+ }
+ ins = MSignExtendInt32::New(alloc(), op, mode);
+ break;
+ }
+ case 8: {
+ MSignExtendInt64::Mode mode;
+ switch (srcSize) {
+ case 1:
+ mode = MSignExtendInt64::Byte;
+ break;
+ case 2:
+ mode = MSignExtendInt64::Half;
+ break;
+ case 4:
+ mode = MSignExtendInt64::Word;
+ break;
+ default:
+ MOZ_CRASH("Bad sign extension");
+ }
+ ins = MSignExtendInt64::New(alloc(), op, mode);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Bad sign extension");
+ }
+ }
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type,
+ bool isUnsigned) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+#if defined(JS_CODEGEN_ARM)
+ auto* ins = MBuiltinInt64ToFloatingPoint::New(
+ alloc(), op, tlsPointer_, type, bytecodeOffset(), isUnsigned);
+#else
+ auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, bytecodeOffset(),
+ isUnsigned);
+#endif
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type,
+ bool left) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MRotate::New(alloc(), input, count, type, left);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* truncate(MDefinition* op, TruncFlags flags) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = T::New(alloc(), op, flags, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* truncateWithTls(MDefinition* op, TruncFlags flags) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MWasmBuiltinTruncateToInt64::New(alloc(), op, tlsPointer_,
+ flags, bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op,
+ MCompare::CompareType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MCompare::NewWasm(alloc(), lhs, rhs, op, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ void assign(unsigned slot, MDefinition* def) {
+ if (inDeadCode()) {
+ return;
+ }
+ curBlock_->setSlot(info().localSlot(slot), def);
+ }
+
+#ifdef ENABLE_WASM_SIMD
+ // About Wasm SIMD as supported by Ion:
+ //
+ // The expectation is that Ion will only ever support SIMD on x86 and x64,
+ // since Cranelift will be the optimizing compiler for Arm64, ARMv7 will cease
+ // to be a tier-1 platform soon, and MIPS32 and MIPS64 will never implement
+ // SIMD.
+ //
+ // The division of the operations into MIR nodes reflects that expectation,
+ // and is a good fit for x86/x64. Should the expectation change we'll
+ // possibly want to re-architect the SIMD support to be a little more general.
+ //
+ // Most SIMD operations map directly to a single MIR node that ultimately ends
+ // up being expanded in the macroassembler.
+ //
+ // Some SIMD operations that do have a complete macroassembler expansion are
+ // open-coded into multiple MIR nodes here; in some cases that's just
+ // convenience, in other cases it may also allow them to benefit from Ion
+ // optimizations. The reason for the expansions will be documented by a
+ // comment.
+
+ // (v128,v128) -> v128 effect-free binary operations
+ MDefinition* binarySimd128(MDefinition* lhs, MDefinition* rhs,
+ bool commutative, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
+ rhs->type() == MIRType::Simd128);
+
+ auto* ins = MWasmBinarySimd128::New(alloc(), lhs, rhs, commutative, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128,i32) -> v128 effect-free shift operations
+ MDefinition* shiftSimd128(MDefinition* lhs, MDefinition* rhs, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128 &&
+ rhs->type() == MIRType::Int32);
+
+ // Do something vector-based when the platform allows it.
+ if ((rhs->isConstant() && !MacroAssembler::MustScalarizeShiftSimd128(
+ op, Imm32(rhs->toConstant()->toInt32()))) ||
+ (!rhs->isConstant() &&
+ !MacroAssembler::MustScalarizeShiftSimd128(op))) {
+ int32_t maskBits;
+ if (!rhs->isConstant() &&
+ MacroAssembler::MustMaskShiftCountSimd128(op, &maskBits)) {
+ MConstant* mask = MConstant::New(alloc(), Int32Value(maskBits));
+ curBlock_->add(mask);
+ MBitAnd* maskedShift = MBitAnd::New(alloc(), rhs, mask, MIRType::Int32);
+ curBlock_->add(maskedShift);
+ rhs = maskedShift;
+ }
+
+ auto* ins = MWasmShiftSimd128::New(alloc(), lhs, rhs, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+# ifdef DEBUG
+ js::wasm::ReportSimdAnalysis("shift -> variable scalarized shift");
+# endif
+
+ // Otherwise just scalarize using existing primitive operations.
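+    // In practice this path handles the 64x2 arithmetic right shift: each
+    // lane is extracted as an i64, shifted with an ordinary MRsh, and the
+    // results are reassembled with replace-lane.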
+ auto* lane0 = reduceSimd128(lhs, SimdOp::I64x2ExtractLane, ValType::I64, 0);
+ auto* lane1 = reduceSimd128(lhs, SimdOp::I64x2ExtractLane, ValType::I64, 1);
+ auto* shiftCount = extendI32(rhs, /*isUnsigned=*/false);
+ auto* shifted0 = binary<MRsh>(lane0, shiftCount, MIRType::Int64);
+ auto* shifted1 = binary<MRsh>(lane1, shiftCount, MIRType::Int64);
+ V128 zero;
+ auto* res0 = constant(zero);
+ auto* res1 =
+ replaceLaneSimd128(res0, shifted0, 0, SimdOp::I64x2ReplaceLane);
+ auto* ins = replaceLaneSimd128(res1, shifted1, 1, SimdOp::I64x2ReplaceLane);
+ return ins;
+ }
+
+ // (v128,scalar,imm) -> v128
+ MDefinition* replaceLaneSimd128(MDefinition* lhs, MDefinition* rhs,
+ uint32_t laneIndex, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(lhs->type() == MIRType::Simd128);
+
+ auto* ins = MWasmReplaceLaneSimd128::New(alloc(), lhs, rhs, laneIndex, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (scalar) -> v128 effect-free unary operations
+ MDefinition* scalarToSimd128(MDefinition* src, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ auto* ins = MWasmScalarToSimd128::New(alloc(), src, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128) -> v128 effect-free unary operations
+ MDefinition* unarySimd128(MDefinition* src, SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(src->type() == MIRType::Simd128);
+ auto* ins = MWasmUnarySimd128::New(alloc(), src, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128, imm) -> scalar effect-free unary operations
+ MDefinition* reduceSimd128(MDefinition* src, SimdOp op, ValType outType,
+ uint32_t imm = 0) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(src->type() == MIRType::Simd128);
+ auto* ins =
+ MWasmReduceSimd128::New(alloc(), src, op, ToMIRType(outType), imm);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128, v128, v128) -> v128 effect-free operations
+ MDefinition* bitselectSimd128(MDefinition* v1, MDefinition* v2,
+ MDefinition* control) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(v1->type() == MIRType::Simd128);
+ MOZ_ASSERT(v2->type() == MIRType::Simd128);
+ MOZ_ASSERT(control->type() == MIRType::Simd128);
+ auto* ins = MWasmBitselectSimd128::New(alloc(), v1, v2, control);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // (v128, v128, imm_v128) -> v128 effect-free operations
+ MDefinition* shuffleSimd128(MDefinition* v1, MDefinition* v2, V128 control) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(v1->type() == MIRType::Simd128);
+ MOZ_ASSERT(v2->type() == MIRType::Simd128);
+ auto* ins = MWasmShuffleSimd128::New(
+ alloc(), v1, v2,
+ SimdConstant::CreateX16(reinterpret_cast<int8_t*>(control.bytes)));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* loadSplatSimd128(Scalar::Type viewType,
+ const LinearMemoryAddress<MDefinition*>& addr,
+ wasm::SimdOp splatOp) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+
+    // Generate better code (on x86) for the 64-bit case by folding the load
+    // and the splat into a single splatting load.
+ if (viewType == Scalar::Float64) {
+ access.setSplatSimd128Load();
+ return load(addr.base, &access, ValType::V128);
+ }
+
+ ValType resultType = ValType::I32;
+ if (viewType == Scalar::Float32) {
+ resultType = ValType::F32;
+ splatOp = wasm::SimdOp::F32x4Splat;
+ }
+ auto* scalar = load(addr.base, &access, resultType);
+ if (!inDeadCode() && !scalar) {
+ return nullptr;
+ }
+ return scalarToSimd128(scalar, splatOp);
+ }
+
+ MDefinition* loadExtendSimd128(const LinearMemoryAddress<MDefinition*>& addr,
+ wasm::SimdOp op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+    // Generate better code (on x86) by loading as a double with an
+    // operation that widens (sign- or zero-extends) directly.
+ MemoryAccessDesc access(Scalar::Float64, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+ access.setWidenSimd128Load(op);
+ return load(addr.base, &access, ValType::V128);
+ }
+
+ MDefinition* loadZeroSimd128(Scalar::Type viewType, size_t numBytes,
+ const LinearMemoryAddress<MDefinition*>& addr) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ bytecodeIfNotAsmJS());
+ access.setZeroExtendSimd128Load();
+ return load(addr.base, &access, ValType::V128);
+ }
+#endif // ENABLE_WASM_SIMD
+
+ private:
+ MWasmLoadTls* maybeLoadMemoryBase() {
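+    // Only x86 needs to (re)load the memory base from TlsData; the other
+    // platforms keep the heap base in a pinned register, so returning
+    // nullptr lets the consumer use that register instead.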
+ MWasmLoadTls* load = nullptr;
+#ifdef JS_CODEGEN_X86
+ AliasSet aliases = moduleEnv_.maxMemoryLength.isSome()
+ ? AliasSet::None()
+ : AliasSet::Load(AliasSet::WasmHeapMeta);
+ load = MWasmLoadTls::New(alloc(), tlsPointer_,
+ offsetof(wasm::TlsData, memoryBase),
+ MIRType::Pointer, aliases);
+ curBlock_->add(load);
+#endif
+ return load;
+ }
+
+ MWasmLoadTls* maybeLoadBoundsCheckLimit32() {
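+    // With huge memory the guard region catches out-of-bounds accesses, so
+    // no explicit bounds check (and hence no limit load) is needed.
+    // Otherwise the limit lives in TlsData and is treated as constant only
+    // when the maximum memory length is known.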
+ if (moduleEnv_.hugeMemoryEnabled()) {
+ return nullptr;
+ }
+ AliasSet aliases = moduleEnv_.maxMemoryLength.isSome()
+ ? AliasSet::None()
+ : AliasSet::Load(AliasSet::WasmHeapMeta);
+ auto load = MWasmLoadTls::New(alloc(), tlsPointer_,
+ offsetof(wasm::TlsData, boundsCheckLimit32),
+ MIRType::Int32, aliases);
+ curBlock_->add(load);
+ return load;
+ }
+
+ public:
+ MWasmHeapBase* memoryBase() {
+ MWasmHeapBase* base = nullptr;
+ AliasSet aliases = moduleEnv_.maxMemoryLength.isSome()
+ ? AliasSet::None()
+ : AliasSet::Load(AliasSet::WasmHeapMeta);
+ base = MWasmHeapBase::New(alloc(), tlsPointer_, aliases);
+ curBlock_->add(base);
+ return base;
+ }
+
+ private:
+ // Only sets *mustAdd if it also returns true.
+ bool needAlignmentCheck(MemoryAccessDesc* access, MDefinition* base,
+ bool* mustAdd) {
+ MOZ_ASSERT(!*mustAdd);
+
+    // asm.js accesses are always aligned and need no checks, and only
+    // atomic accesses require an alignment check at all.
+ if (moduleEnv_.isAsmJS() || !access->isAtomic()) {
+ return false;
+ }
+
+ if (base->isConstant()) {
+ int32_t ptr = base->toConstant()->toInt32();
+ // OK to wrap around the address computation here.
+ if (((ptr + access->offset()) & (access->byteSize() - 1)) == 0) {
+ return false;
+ }
+ }
+
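+    // An alignment check is needed. *mustAdd signals that the constant
+    // offset itself perturbs alignment, so the offset must be folded into
+    // the base before the check is performed.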
+ *mustAdd = (access->offset() & (access->byteSize() - 1)) != 0;
+ return true;
+ }
+
+ void checkOffsetAndAlignmentAndBounds(MemoryAccessDesc* access,
+ MDefinition** base) {
+ MOZ_ASSERT(!inDeadCode());
+
+ uint32_t offsetGuardLimit =
+ GetMaxOffsetGuardLimit(moduleEnv_.hugeMemoryEnabled());
+
+ // Fold a constant base into the offset and make the base 0, provided the
+ // offset stays below the guard limit. The reason for folding the base into
+ // the offset rather than vice versa is that a small offset can be ignored
+ // by both explicit bounds checking and bounds check elimination.
+ if ((*base)->isConstant()) {
+ uint32_t basePtr = (*base)->toConstant()->toInt32();
+ uint32_t offset = access->offset();
+
+ if (offset < offsetGuardLimit && basePtr < offsetGuardLimit - offset) {
+ auto* ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
+ curBlock_->add(ins);
+ *base = ins;
+ access->setOffset(access->offset() + basePtr);
+ }
+ }
+
+ bool mustAdd = false;
+ bool alignmentCheck = needAlignmentCheck(access, *base, &mustAdd);
+
+ // If the offset is bigger than the guard region, a separate instruction is
+ // necessary to add the offset to the base and check for overflow.
+ //
+ // Also add the offset if we have a Wasm atomic access that needs alignment
+ // checking and the offset affects alignment.
+ if (access->offset() >= offsetGuardLimit || mustAdd ||
+ !JitOptions.wasmFoldOffsets) {
+ *base = computeEffectiveAddress(*base, access);
+ }
+
+ if (alignmentCheck) {
+ curBlock_->add(MWasmAlignmentCheck::New(
+ alloc(), *base, access->byteSize(), bytecodeOffset()));
+ }
+
+ MWasmLoadTls* boundsCheckLimit32 = maybeLoadBoundsCheckLimit32();
+ if (boundsCheckLimit32) {
+ auto* ins = MWasmBoundsCheck::New(alloc(), *base, boundsCheckLimit32,
+ bytecodeOffset());
+ curBlock_->add(ins);
+ if (JitOptions.spectreIndexMasking) {
+ *base = ins;
+ }
+ }
+ }
+
+ bool isSmallerAccessForI64(ValType result, const MemoryAccessDesc* access) {
+ if (result == ValType::I64 && access->byteSize() <= 4) {
+ // These smaller accesses should all be zero-extending.
+ MOZ_ASSERT(!isSignedIntType(access->type()));
+ return true;
+ }
+ return false;
+ }
+
+ public:
+ MDefinition* computeEffectiveAddress(MDefinition* base,
+ MemoryAccessDesc* access) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ if (!access->offset()) {
+ return base;
+ }
+ auto* ins =
+ MWasmAddOffset::New(alloc(), base, access->offset(), bytecodeOffset());
+ curBlock_->add(ins);
+ access->clearOffset();
+ return ins;
+ }
+
+ MDefinition* load(MDefinition* base, MemoryAccessDesc* access,
+ ValType result) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
+ MInstruction* load = nullptr;
+ if (moduleEnv_.isAsmJS()) {
+ MOZ_ASSERT(access->offset() == 0);
+ MWasmLoadTls* boundsCheckLimit32 = maybeLoadBoundsCheckLimit32();
+ load = MAsmJSLoadHeap::New(alloc(), memoryBase, base, boundsCheckLimit32,
+ access->type());
+ } else {
+ checkOffsetAndAlignmentAndBounds(access, &base);
+ load =
+ MWasmLoad::New(alloc(), memoryBase, base, *access, ToMIRType(result));
+ }
+ if (!load) {
+ return nullptr;
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
+ void store(MDefinition* base, MemoryAccessDesc* access, MDefinition* v) {
+ if (inDeadCode()) {
+ return;
+ }
+
+ MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
+ MInstruction* store = nullptr;
+ if (moduleEnv_.isAsmJS()) {
+ MOZ_ASSERT(access->offset() == 0);
+ MWasmLoadTls* boundsCheckLimit32 = maybeLoadBoundsCheckLimit32();
+ store = MAsmJSStoreHeap::New(alloc(), memoryBase, base,
+ boundsCheckLimit32, access->type(), v);
+ } else {
+ checkOffsetAndAlignmentAndBounds(access, &base);
+ store = MWasmStore::New(alloc(), memoryBase, base, *access, v);
+ }
+ if (!store) {
+ return;
+ }
+ curBlock_->add(store);
+ }
+
+ MDefinition* atomicCompareExchangeHeap(MDefinition* base,
+ MemoryAccessDesc* access,
+ ValType result, MDefinition* oldv,
+ MDefinition* newv) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ checkOffsetAndAlignmentAndBounds(access, &base);
+
+ if (isSmallerAccessForI64(result, access)) {
+ auto* cvtOldv =
+ MWrapInt64ToInt32::New(alloc(), oldv, /*bottomHalf=*/true);
+ curBlock_->add(cvtOldv);
+ oldv = cvtOldv;
+
+ auto* cvtNewv =
+ MWrapInt64ToInt32::New(alloc(), newv, /*bottomHalf=*/true);
+ curBlock_->add(cvtNewv);
+ newv = cvtNewv;
+ }
+
+ MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
+ MInstruction* cas =
+ MWasmCompareExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
+ base, *access, oldv, newv, tlsPointer_);
+ if (!cas) {
+ return nullptr;
+ }
+ curBlock_->add(cas);
+
+ if (isSmallerAccessForI64(result, access)) {
+ cas = MExtendInt32ToInt64::New(alloc(), cas, true);
+ curBlock_->add(cas);
+ }
+
+ return cas;
+ }
+
+ MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc* access,
+ ValType result, MDefinition* value) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ checkOffsetAndAlignmentAndBounds(access, &base);
+
+ if (isSmallerAccessForI64(result, access)) {
+ auto* cvtValue =
+ MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
+ curBlock_->add(cvtValue);
+ value = cvtValue;
+ }
+
+ MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
+ MInstruction* xchg =
+ MWasmAtomicExchangeHeap::New(alloc(), bytecodeOffset(), memoryBase,
+ base, *access, value, tlsPointer_);
+ if (!xchg) {
+ return nullptr;
+ }
+ curBlock_->add(xchg);
+
+ if (isSmallerAccessForI64(result, access)) {
+ xchg = MExtendInt32ToInt64::New(alloc(), xchg, true);
+ curBlock_->add(xchg);
+ }
+
+ return xchg;
+ }
+
+ MDefinition* atomicBinopHeap(AtomicOp op, MDefinition* base,
+ MemoryAccessDesc* access, ValType result,
+ MDefinition* value) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ checkOffsetAndAlignmentAndBounds(access, &base);
+
+ if (isSmallerAccessForI64(result, access)) {
+ auto* cvtValue =
+ MWrapInt64ToInt32::New(alloc(), value, /*bottomHalf=*/true);
+ curBlock_->add(cvtValue);
+ value = cvtValue;
+ }
+
+ MWasmLoadTls* memoryBase = maybeLoadMemoryBase();
+ MInstruction* binop =
+ MWasmAtomicBinopHeap::New(alloc(), bytecodeOffset(), op, memoryBase,
+ base, *access, value, tlsPointer_);
+ if (!binop) {
+ return nullptr;
+ }
+ curBlock_->add(binop);
+
+ if (isSmallerAccessForI64(result, access)) {
+ binop = MExtendInt32ToInt64::New(alloc(), binop, true);
+ curBlock_->add(binop);
+ }
+
+ return binop;
+ }
+
+ MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst,
+ bool isIndirect, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MInstruction* load;
+ if (isIndirect) {
+ // Pull a pointer to the value out of TlsData::globalArea, then
+ // load from that pointer. Note that the pointer is immutable
+ // even though the value it points at may change, hence the use of
+ // |true| for the first node's |isConst| value, irrespective of
+ // the |isConst| formal parameter to this method. The latter
+ // applies to the denoted value as a whole.
+ auto* cellPtr =
+ MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
+ /*isConst=*/true, tlsPointer_);
+ curBlock_->add(cellPtr);
+ load = MWasmLoadGlobalCell::New(alloc(), type, cellPtr);
+ } else {
+ // Pull the value directly out of TlsData::globalArea.
+ load = MWasmLoadGlobalVar::New(alloc(), type, globalDataOffset, isConst,
+ tlsPointer_);
+ }
+ curBlock_->add(load);
+ return load;
+ }
+
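+  // Returns the address of the written cell when a reference value was
+  // stored, so the caller can emit the required post-barrier; returns
+  // nullptr otherwise.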
+ MInstruction* storeGlobalVar(uint32_t globalDataOffset, bool isIndirect,
+ MDefinition* v) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+
+ MInstruction* store;
+ MInstruction* valueAddr = nullptr;
+ if (isIndirect) {
+ // Pull a pointer to the value out of TlsData::globalArea, then
+ // store through that pointer.
+ auto* cellPtr =
+ MWasmLoadGlobalVar::New(alloc(), MIRType::Pointer, globalDataOffset,
+ /*isConst=*/true, tlsPointer_);
+ curBlock_->add(cellPtr);
+ if (v->type() == MIRType::RefOrNull) {
+ valueAddr = cellPtr;
+ store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
+ AliasSet::WasmGlobalCell);
+ } else {
+ store = MWasmStoreGlobalCell::New(alloc(), v, cellPtr);
+ }
+ } else {
+ // Store the value directly in TlsData::globalArea.
+ if (v->type() == MIRType::RefOrNull) {
+ valueAddr = MWasmDerivedPointer::New(
+ alloc(), tlsPointer_,
+ offsetof(wasm::TlsData, globalArea) + globalDataOffset);
+ curBlock_->add(valueAddr);
+ store = MWasmStoreRef::New(alloc(), tlsPointer_, valueAddr, v,
+ AliasSet::WasmGlobalVar);
+ } else {
+ store =
+ MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v, tlsPointer_);
+ }
+ }
+ curBlock_->add(store);
+
+ return valueAddr;
+ }
+
+ void addInterruptCheck() {
+ if (inDeadCode()) {
+ return;
+ }
+ curBlock_->add(
+ MWasmInterruptCheck::New(alloc(), tlsPointer_, bytecodeOffset()));
+ }
+
+ /***************************************************************** Calls */
+
+ // The IonMonkey backend maintains a single stack offset (from the stack
+ // pointer to the base of the frame) by adding the total amount of spill
+ // space required plus the maximum stack required for argument passing.
+ // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
+ // manually accumulate, for the entire function, the maximum required stack
+ // space for argument passing. (This is passed to the CodeGenerator via
+  // MIRGenerator::maxWasmStackArgBytes.) This is just the maximum of the
+ // stack space required for each individual call (as determined by the call
+ // ABI).
+
+ // Operations that modify a CallCompileState.
+
+ bool passInstance(MIRType instanceType, CallCompileState* args) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ // Should only pass an instance once. And it must be a non-GC pointer.
+ MOZ_ASSERT(args->instanceArg_ == ABIArg());
+ MOZ_ASSERT(instanceType == MIRType::Pointer);
+ args->instanceArg_ = args->abi_.next(MIRType::Pointer);
+ return true;
+ }
+
+ // Do not call this directly. Call one of the passArg() variants instead.
+ bool passArgWorker(MDefinition* argDef, MIRType type,
+ CallCompileState* call) {
+ ABIArg arg = call->abi_.next(type);
+ switch (arg.kind()) {
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR: {
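+        // On targets that pass an i64 argument in a pair of GPRs, split the
+        // value into its low and high 32-bit halves.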
+ auto mirLow =
+ MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
+ curBlock_->add(mirLow);
+ auto mirHigh =
+ MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
+ curBlock_->add(mirHigh);
+ return call->regArgs_.append(
+ MWasmCall::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
+ call->regArgs_.append(
+ MWasmCall::Arg(AnyRegister(arg.gpr64().high), mirHigh));
+ }
+#endif
+ case ABIArg::GPR:
+ case ABIArg::FPU:
+ return call->regArgs_.append(MWasmCall::Arg(arg.reg(), argDef));
+ case ABIArg::Stack: {
+ auto* mir =
+ MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
+ curBlock_->add(mir);
+ return true;
+ }
+ case ABIArg::Uninitialized:
+ MOZ_ASSERT_UNREACHABLE("Uninitialized ABIArg kind");
+ }
+ MOZ_CRASH("Unknown ABIArg kind.");
+ }
+
+ bool passArg(MDefinition* argDef, MIRType type, CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+ return passArgWorker(argDef, type, call);
+ }
+
+ bool passArg(MDefinition* argDef, ValType type, CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+ return passArgWorker(argDef, ToMIRType(type), call);
+ }
+
+ // If the call returns results on the stack, prepare a stack area to receive
+ // them, and pass the address of the stack area to the callee as an additional
+ // argument.
+ bool passStackResultAreaCallArg(const ResultType& resultType,
+ CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+ ABIResultIter iter(resultType);
+ while (!iter.done() && iter.cur().inRegister()) {
+ iter.next();
+ }
+ if (iter.done()) {
+ // No stack results.
+ return true;
+ }
+
+ auto* stackResultArea = MWasmStackResultArea::New(alloc());
+ if (!stackResultArea) {
+ return false;
+ }
+ if (!stackResultArea->init(alloc(), iter.remaining())) {
+ return false;
+ }
+ for (uint32_t base = iter.index(); !iter.done(); iter.next()) {
+ MWasmStackResultArea::StackResult loc(iter.cur().stackOffset(),
+ ToMIRType(iter.cur().type()));
+ stackResultArea->initResult(iter.index() - base, loc);
+ }
+ curBlock_->add(stackResultArea);
+ if (!passArg(stackResultArea, MIRType::Pointer, call)) {
+ return false;
+ }
+ call->stackResultArea_ = stackResultArea;
+ return true;
+ }
+
+ bool finishCall(CallCompileState* call) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ if (!call->regArgs_.append(
+ MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_))) {
+ return false;
+ }
+
+ uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
+
+ maxStackArgBytes_ = std::max(maxStackArgBytes_, stackBytes);
+ return true;
+ }
+
+ // Wrappers for creating various kinds of calls.
+
+ bool collectUnaryCallResult(MIRType type, MDefinition** result) {
+ MInstruction* def;
+ switch (type) {
+ case MIRType::Int32:
+ def = MWasmRegisterResult::New(alloc(), MIRType::Int32, ReturnReg);
+ break;
+ case MIRType::Int64:
+ def = MWasmRegister64Result::New(alloc(), ReturnReg64);
+ break;
+ case MIRType::Float32:
+ def = MWasmFloatRegisterResult::New(alloc(), type, ReturnFloat32Reg);
+ break;
+ case MIRType::Double:
+ def = MWasmFloatRegisterResult::New(alloc(), type, ReturnDoubleReg);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case MIRType::Simd128:
+ def = MWasmFloatRegisterResult::New(alloc(), type, ReturnSimd128Reg);
+ break;
+#endif
+ case MIRType::RefOrNull:
+ def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull, ReturnReg);
+ break;
+ default:
+ MOZ_CRASH("unexpected MIRType result for builtin call");
+ }
+
+ if (!def) {
+ return false;
+ }
+
+ curBlock_->add(def);
+ *result = def;
+
+ return true;
+ }
+
+ bool collectCallResults(const ResultType& type,
+ MWasmStackResultArea* stackResultArea,
+ DefVector* results) {
+ if (!results->reserve(type.length())) {
+ return false;
+ }
+
+ // The result iterator goes in the order in which results would be popped
+ // off; we want the order in which they would be pushed.
+ ABIResultIter iter(type);
+ uint32_t stackResultCount = 0;
+ while (!iter.done()) {
+ if (iter.cur().onStack()) {
+ stackResultCount++;
+ }
+ iter.next();
+ }
+
+ for (iter.switchToPrev(); !iter.done(); iter.prev()) {
+ if (!mirGen().ensureBallast()) {
+ return false;
+ }
+ const ABIResult& result = iter.cur();
+ MInstruction* def;
+ if (result.inRegister()) {
+ switch (result.type().kind()) {
+ case wasm::ValType::I32:
+ def =
+ MWasmRegisterResult::New(alloc(), MIRType::Int32, result.gpr());
+ break;
+ case wasm::ValType::I64:
+ def = MWasmRegister64Result::New(alloc(), result.gpr64());
+ break;
+ case wasm::ValType::F32:
+ def = MWasmFloatRegisterResult::New(alloc(), MIRType::Float32,
+ result.fpr());
+ break;
+ case wasm::ValType::F64:
+ def = MWasmFloatRegisterResult::New(alloc(), MIRType::Double,
+ result.fpr());
+ break;
+ case wasm::ValType::Ref:
+ def = MWasmRegisterResult::New(alloc(), MIRType::RefOrNull,
+ result.gpr());
+ break;
+ case wasm::ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ def = MWasmFloatRegisterResult::New(alloc(), MIRType::Simd128,
+ result.fpr());
+#else
+ return this->iter().fail("Ion has no SIMD support yet");
+#endif
+ }
+ } else {
+ MOZ_ASSERT(stackResultArea);
+ MOZ_ASSERT(stackResultCount);
+ uint32_t idx = --stackResultCount;
+ def = MWasmStackResult::New(alloc(), stackResultArea, idx);
+ }
+
+ if (!def) {
+ return false;
+ }
+ curBlock_->add(def);
+ results->infallibleAppend(def);
+ }
+
+ MOZ_ASSERT(results->length() == type.length());
+
+ return true;
+ }
+
+ bool callDirect(const FuncType& funcType, uint32_t funcIndex,
+ uint32_t lineOrBytecode, const CallCompileState& call,
+ DefVector* results) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Func);
+ ResultType resultType = ResultType::Vector(funcType.results());
+ auto callee = CalleeDesc::function(funcIndex);
+ ArgTypeVector args(funcType);
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
+ StackArgAreaSizeUnaligned(args));
+ if (!ins) {
+ return false;
+ }
+
+ curBlock_->add(ins);
+
+ return collectCallResults(resultType, call.stackResultArea_, results);
+ }
+
+ bool callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
+ MDefinition* index, uint32_t lineOrBytecode,
+ const CallCompileState& call, DefVector* results) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ const FuncType& funcType = moduleEnv_.types[funcTypeIndex].funcType();
+ const TypeIdDesc& funcTypeId = moduleEnv_.typeIds[funcTypeIndex];
+
+ CalleeDesc callee;
+ if (moduleEnv_.isAsmJS()) {
+ MOZ_ASSERT(tableIndex == 0);
+ MOZ_ASSERT(funcTypeId.kind() == TypeIdDescKind::None);
+ const TableDesc& table =
+ moduleEnv_.tables[moduleEnv_.asmJSSigToTableIndex[funcTypeIndex]];
+ MOZ_ASSERT(IsPowerOfTwo(table.initialLength));
+
+ MConstant* mask =
+ MConstant::New(alloc(), Int32Value(table.initialLength - 1));
+ curBlock_->add(mask);
+ MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
+ curBlock_->add(maskedIndex);
+
+ index = maskedIndex;
+ callee = CalleeDesc::asmJSTable(table);
+ } else {
+ MOZ_ASSERT(funcTypeId.kind() != TypeIdDescKind::None);
+ const TableDesc& table = moduleEnv_.tables[tableIndex];
+ callee = CalleeDesc::wasmTable(table, funcTypeId);
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
+ ArgTypeVector args(funcType);
+ ResultType resultType = ResultType::Vector(funcType.results());
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
+ StackArgAreaSizeUnaligned(args), index);
+ if (!ins) {
+ return false;
+ }
+
+ curBlock_->add(ins);
+
+ return collectCallResults(resultType, call.stackResultArea_, results);
+ }
+
+ bool callImport(unsigned globalDataOffset, uint32_t lineOrBytecode,
+ const CallCompileState& call, const FuncType& funcType,
+ DefVector* results) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Dynamic);
+ auto callee = CalleeDesc::import(globalDataOffset);
+ ArgTypeVector args(funcType);
+ ResultType resultType = ResultType::Vector(funcType.results());
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
+ StackArgAreaSizeUnaligned(args));
+ if (!ins) {
+ return false;
+ }
+
+ curBlock_->add(ins);
+
+ return collectCallResults(resultType, call.stackResultArea_, results);
+ }
+
+ bool builtinCall(const SymbolicAddressSignature& builtin,
+ uint32_t lineOrBytecode, const CallCompileState& call,
+ MDefinition** def) {
+ if (inDeadCode()) {
+ *def = nullptr;
+ return true;
+ }
+
+ MOZ_ASSERT(builtin.failureMode == FailureMode::Infallible);
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
+ auto callee = CalleeDesc::builtin(builtin.identity);
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_,
+ StackArgAreaSizeUnaligned(builtin));
+ if (!ins) {
+ return false;
+ }
+
+ curBlock_->add(ins);
+
+ return collectUnaryCallResult(builtin.retType, def);
+ }
+
+ bool builtinInstanceMethodCall(const SymbolicAddressSignature& builtin,
+ uint32_t lineOrBytecode,
+ const CallCompileState& call,
+ MDefinition** def = nullptr) {
+ MOZ_ASSERT_IF(!def, builtin.retType == MIRType::None);
+ if (inDeadCode()) {
+ if (def) {
+ *def = nullptr;
+ }
+ return true;
+ }
+
+ CallSiteDesc desc(lineOrBytecode, CallSiteDesc::Symbolic);
+ auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(
+ alloc(), desc, builtin.identity, builtin.failureMode, call.instanceArg_,
+ call.regArgs_, StackArgAreaSizeUnaligned(builtin));
+ if (!ins) {
+ return false;
+ }
+
+ curBlock_->add(ins);
+
+ return def ? collectUnaryCallResult(builtin.retType, def) : true;
+ }
+
+ /*********************************************** Control flow generation */
+
+ inline bool inDeadCode() const { return curBlock_ == nullptr; }
+
+ bool returnValues(const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ if (values.empty()) {
+ curBlock_->end(MWasmReturnVoid::New(alloc(), tlsPointer_));
+ } else {
+ ResultType resultType = ResultType::Vector(funcType().results());
+ ABIResultIter iter(resultType);
+ // Switch to iterate in FIFO order instead of the default LIFO.
+ while (!iter.done()) {
+ iter.next();
+ }
+ iter.switchToPrev();
+ for (uint32_t i = 0; !iter.done(); iter.prev(), i++) {
+ if (!mirGen().ensureBallast()) {
+ return false;
+ }
+ const ABIResult& result = iter.cur();
+ if (result.onStack()) {
+ MOZ_ASSERT(iter.remaining() > 1);
+ if (result.type().isReference()) {
+ auto* loc = MWasmDerivedPointer::New(alloc(), stackResultPointer_,
+ result.stackOffset());
+ curBlock_->add(loc);
+ auto* store =
+ MWasmStoreRef::New(alloc(), tlsPointer_, loc, values[i],
+ AliasSet::WasmStackResult);
+ curBlock_->add(store);
+ } else {
+ auto* store = MWasmStoreStackResult::New(
+ alloc(), stackResultPointer_, result.stackOffset(), values[i]);
+ curBlock_->add(store);
+ }
+ } else {
+ MOZ_ASSERT(iter.remaining() == 1);
+ MOZ_ASSERT(i + 1 == values.length());
+ curBlock_->end(MWasmReturn::New(alloc(), values[i], tlsPointer_));
+ }
+ }
+ }
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ void unreachableTrap() {
+ if (inDeadCode()) {
+ return;
+ }
+
+ auto* ins =
+ MWasmTrap::New(alloc(), wasm::Trap::Unreachable, bytecodeOffset());
+ curBlock_->end(ins);
+ curBlock_ = nullptr;
+ }
+
+ private:
+ static uint32_t numPushed(MBasicBlock* block) {
+ return block->stackDepth() - block->info().firstStackSlot();
+ }
+
+ public:
+ [[nodiscard]] bool pushDefs(const DefVector& defs) {
+ if (inDeadCode()) {
+ return true;
+ }
+ MOZ_ASSERT(numPushed(curBlock_) == 0);
+ if (!curBlock_->ensureHasSlots(defs.length())) {
+ return false;
+ }
+ for (MDefinition* def : defs) {
+ MOZ_ASSERT(def->type() != MIRType::None);
+ curBlock_->push(def);
+ }
+ return true;
+ }
+
+ bool popPushedDefs(DefVector* defs) {
+ size_t n = numPushed(curBlock_);
+ if (!defs->resizeUninitialized(n)) {
+ return false;
+ }
+ for (; n > 0; n--) {
+ MDefinition* def = curBlock_->pop();
+ MOZ_ASSERT(def->type() != MIRType::Value);
+ (*defs)[n - 1] = def;
+ }
+ return true;
+ }
+
+ private:
+ bool addJoinPredecessor(const DefVector& defs, MBasicBlock** joinPred) {
+ *joinPred = curBlock_;
+ if (inDeadCode()) {
+ return true;
+ }
+ return pushDefs(defs);
+ }
+
+ public:
+ bool branchAndStartThen(MDefinition* cond, MBasicBlock** elseBlock) {
+ if (inDeadCode()) {
+ *elseBlock = nullptr;
+ } else {
+ MBasicBlock* thenBlock;
+ if (!newBlock(curBlock_, &thenBlock)) {
+ return false;
+ }
+ if (!newBlock(curBlock_, elseBlock)) {
+ return false;
+ }
+
+ curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));
+
+ curBlock_ = thenBlock;
+ mirGraph().moveBlockToEnd(curBlock_);
+ }
+
+ return startBlock();
+ }
+
+ bool switchToElse(MBasicBlock* elseBlock, MBasicBlock** thenJoinPred) {
+ DefVector values;
+ if (!finishBlock(&values)) {
+ return false;
+ }
+
+ if (!elseBlock) {
+ *thenJoinPred = nullptr;
+ } else {
+ if (!addJoinPredecessor(values, thenJoinPred)) {
+ return false;
+ }
+
+ curBlock_ = elseBlock;
+ mirGraph().moveBlockToEnd(curBlock_);
+ }
+
+ return startBlock();
+ }
+
+ bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
+ DefVector values;
+ if (!finishBlock(&values)) {
+ return false;
+ }
+
+ if (!thenJoinPred && inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* elseJoinPred;
+ if (!addJoinPredecessor(values, &elseJoinPred)) {
+ return false;
+ }
+
+ mozilla::Array<MBasicBlock*, 2> blocks;
+ size_t numJoinPreds = 0;
+ if (thenJoinPred) {
+ blocks[numJoinPreds++] = thenJoinPred;
+ }
+ if (elseJoinPred) {
+ blocks[numJoinPreds++] = elseJoinPred;
+ }
+
+ if (numJoinPreds == 0) {
+ return true;
+ }
+
+ MBasicBlock* join;
+ if (!goToNewBlock(blocks[0], &join)) {
+ return false;
+ }
+ for (size_t i = 1; i < numJoinPreds; ++i) {
+ if (!goToExistingBlock(blocks[i], join)) {
+ return false;
+ }
+ }
+
+ curBlock_ = join;
+ return popPushedDefs(defs);
+ }
+
+ bool startBlock() {
+ MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(),
+ blockPatches_[blockDepth_].empty());
+ blockDepth_++;
+ return true;
+ }
+
+ bool finishBlock(DefVector* defs) {
+ MOZ_ASSERT(blockDepth_);
+ uint32_t topLabel = --blockDepth_;
+ return bindBranches(topLabel, defs);
+ }
+
+ bool startLoop(MBasicBlock** loopHeader, size_t paramCount) {
+ *loopHeader = nullptr;
+
+ blockDepth_++;
+ loopDepth_++;
+
+ if (inDeadCode()) {
+ return true;
+ }
+
+ // Create the loop header.
+ MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
+ *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
+ MBasicBlock::PENDING_LOOP_HEADER);
+ if (!*loopHeader) {
+ return false;
+ }
+
+ (*loopHeader)->setLoopDepth(loopDepth_);
+ mirGraph().addBlock(*loopHeader);
+ curBlock_->end(MGoto::New(alloc(), *loopHeader));
+
+ DefVector loopParams;
+ if (!iter().getResults(paramCount, &loopParams)) {
+ return false;
+ }
+ for (size_t i = 0; i < paramCount; i++) {
+ MPhi* phi = MPhi::New(alloc(), loopParams[i]->type());
+ if (!phi) {
+ return false;
+ }
+ if (!phi->reserveLength(2)) {
+ return false;
+ }
+ (*loopHeader)->addPhi(phi);
+ phi->addInput(loopParams[i]);
+ loopParams[i] = phi;
+ }
+ iter().setResults(paramCount, loopParams);
+
+ MBasicBlock* body;
+ if (!goToNewBlock(*loopHeader, &body)) {
+ return false;
+ }
+ curBlock_ = body;
+ return true;
+ }
+
+ private:
+ void fixupRedundantPhis(MBasicBlock* b) {
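+    // Phis flagged as unused by setLoopBackedge are redundant (both inputs
+    // are the same definition); make the block's slots refer to that single
+    // definition instead.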
+ for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
+ MDefinition* def = b->getSlot(i);
+ if (def->isUnused()) {
+ b->setSlot(i, def->toPhi()->getOperand(0));
+ }
+ }
+ }
+
+ bool setLoopBackedge(MBasicBlock* loopEntry, MBasicBlock* loopBody,
+ MBasicBlock* backedge, size_t paramCount) {
+ if (!loopEntry->setBackedgeWasm(backedge, paramCount)) {
+ return false;
+ }
+
+ // Flag all redundant phis as unused.
+ for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd();
+ phi++) {
+ MOZ_ASSERT(phi->numOperands() == 2);
+ if (phi->getOperand(0) == phi->getOperand(1)) {
+ phi->setUnused();
+ }
+ }
+
+ // Fix up phis stored in the slots Vector of pending blocks.
+ for (ControlFlowPatchVector& patches : blockPatches_) {
+ for (ControlFlowPatch& p : patches) {
+ MBasicBlock* block = p.ins->block();
+ if (block->loopDepth() >= loopEntry->loopDepth()) {
+ fixupRedundantPhis(block);
+ }
+ }
+ }
+
+ // The loop body, if any, might be referencing recycled phis too.
+ if (loopBody) {
+ fixupRedundantPhis(loopBody);
+ }
+
+ // Discard redundant phis and add to the free list.
+ for (MPhiIterator phi = loopEntry->phisBegin();
+ phi != loopEntry->phisEnd();) {
+ MPhi* entryDef = *phi++;
+ if (!entryDef->isUnused()) {
+ continue;
+ }
+
+ entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
+ loopEntry->discardPhi(entryDef);
+ mirGraph().addPhiToFreeList(entryDef);
+ }
+
+ return true;
+ }
+
+ public:
+ bool closeLoop(MBasicBlock* loopHeader, DefVector* loopResults) {
+ MOZ_ASSERT(blockDepth_ >= 1);
+ MOZ_ASSERT(loopDepth_);
+
+ uint32_t headerLabel = blockDepth_ - 1;
+
+ if (!loopHeader) {
+ MOZ_ASSERT(inDeadCode());
+ MOZ_ASSERT(headerLabel >= blockPatches_.length() ||
+ blockPatches_[headerLabel].empty());
+ blockDepth_--;
+ loopDepth_--;
+ return true;
+ }
+
+ // Op::Loop doesn't have an implicit backedge so temporarily set
+ // aside the end of the loop body to bind backedges.
+ MBasicBlock* loopBody = curBlock_;
+ curBlock_ = nullptr;
+
+ // As explained in bug 1253544, Ion apparently has an invariant that
+ // there is only one backedge to loop headers. To handle wasm's ability
+ // to have multiple backedges to the same loop header, we bind all those
+ // branches as forward jumps to a single backward jump. This is
+ // unfortunate but the optimizer is able to fold these into single jumps
+ // to backedges.
+ DefVector backedgeValues;
+ if (!bindBranches(headerLabel, &backedgeValues)) {
+ return false;
+ }
+
+ MOZ_ASSERT(loopHeader->loopDepth() == loopDepth_);
+
+ if (curBlock_) {
+ // We're on the loop backedge block, created by bindBranches.
+ for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
+ curBlock_->pop();
+ }
+
+ if (!pushDefs(backedgeValues)) {
+ return false;
+ }
+
+ MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_);
+ curBlock_->end(MGoto::New(alloc(), loopHeader));
+ if (!setLoopBackedge(loopHeader, loopBody, curBlock_,
+ backedgeValues.length())) {
+ return false;
+ }
+ }
+
+ curBlock_ = loopBody;
+
+ loopDepth_--;
+
+    // If the loop depth is still that of the inner loop body, correct it.
+ if (curBlock_ && curBlock_->loopDepth() != loopDepth_) {
+ MBasicBlock* out;
+ if (!goToNewBlock(curBlock_, &out)) {
+ return false;
+ }
+ curBlock_ = out;
+ }
+
+ blockDepth_ -= 1;
+ return inDeadCode() || popPushedDefs(loopResults);
+ }
+
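+  // Record a forward branch to the control item |relative| levels up the
+  // control stack. The branch target block does not exist yet, so |ins|'s
+  // successor slot |index| is patched later, in bindBranches.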
+ bool addControlFlowPatch(MControlInstruction* ins, uint32_t relative,
+ uint32_t index) {
+ MOZ_ASSERT(relative < blockDepth_);
+ uint32_t absolute = blockDepth_ - 1 - relative;
+
+ if (absolute >= blockPatches_.length() &&
+ !blockPatches_.resize(absolute + 1)) {
+ return false;
+ }
+
+ return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
+ }
+
+ bool br(uint32_t relativeDepth, const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MGoto* jump = MGoto::New(alloc());
+ if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex)) {
+ return false;
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(jump);
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ bool brIf(uint32_t relativeDepth, const DefVector& values,
+ MDefinition* condition) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ MBasicBlock* joinBlock = nullptr;
+ if (!newBlock(curBlock_, &joinBlock)) {
+ return false;
+ }
+
+ MTest* test = MTest::New(alloc(), condition, joinBlock);
+ if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex)) {
+ return false;
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(test);
+ curBlock_ = joinBlock;
+ return true;
+ }
+
+ bool brTable(MDefinition* operand, uint32_t defaultDepth,
+ const Uint32Vector& depths, const DefVector& values) {
+ if (inDeadCode()) {
+ return true;
+ }
+
+ size_t numCases = depths.length();
+ MOZ_ASSERT(numCases <= INT32_MAX);
+ MOZ_ASSERT(numCases);
+
+ MTableSwitch* table =
+ MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));
+
+ size_t defaultIndex;
+ if (!table->addDefault(nullptr, &defaultIndex)) {
+ return false;
+ }
+ if (!addControlFlowPatch(table, defaultDepth, defaultIndex)) {
+ return false;
+ }
+
+ typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>,
+ SystemAllocPolicy>
+ IndexToCaseMap;
+
+ IndexToCaseMap indexToCase;
+ if (!indexToCase.put(defaultDepth, defaultIndex)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < numCases; i++) {
+ uint32_t depth = depths[i];
+
+ size_t caseIndex;
+ IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
+ if (!p) {
+ if (!table->addSuccessor(nullptr, &caseIndex)) {
+ return false;
+ }
+ if (!addControlFlowPatch(table, depth, caseIndex)) {
+ return false;
+ }
+ if (!indexToCase.add(p, depth, caseIndex)) {
+ return false;
+ }
+ } else {
+ caseIndex = p->value();
+ }
+
+ if (!table->addCase(caseIndex)) {
+ return false;
+ }
+ }
+
+ if (!pushDefs(values)) {
+ return false;
+ }
+
+ curBlock_->end(table);
+ curBlock_ = nullptr;
+
+ return true;
+ }
+
+ /************************************************************ DECODING ***/
+
+ uint32_t readCallSiteLineOrBytecode() {
+ if (!func_.callSiteLineNums.empty()) {
+ return func_.callSiteLineNums[lastReadCallSite_++];
+ }
+ return iter_.lastOpcodeOffset();
+ }
+
+#if DEBUG
+ bool done() const { return iter_.done(); }
+#endif
+
+ /*************************************************************************/
+ private:
+ bool newBlock(MBasicBlock* pred, MBasicBlock** block) {
+ *block = MBasicBlock::New(mirGraph(), info(), pred, MBasicBlock::NORMAL);
+ if (!*block) {
+ return false;
+ }
+ mirGraph().addBlock(*block);
+ (*block)->setLoopDepth(loopDepth_);
+ return true;
+ }
+
+ bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block) {
+ if (!newBlock(pred, block)) {
+ return false;
+ }
+ pred->end(MGoto::New(alloc(), *block));
+ return true;
+ }
+
+ bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next) {
+ MOZ_ASSERT(prev);
+ MOZ_ASSERT(next);
+ prev->end(MGoto::New(alloc(), next));
+ return next->addPredecessor(alloc(), prev);
+ }
+
+ bool bindBranches(uint32_t absolute, DefVector* defs) {
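+    // Rewire every pending branch targeting this block depth (if any) to a
+    // fresh join block, make the current block (if live) fall through to it,
+    // and pop the values that the branches pushed.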
+ if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
+ return inDeadCode() || popPushedDefs(defs);
+ }
+
+ ControlFlowPatchVector& patches = blockPatches_[absolute];
+ MControlInstruction* ins = patches[0].ins;
+ MBasicBlock* pred = ins->block();
+
+ MBasicBlock* join = nullptr;
+ if (!newBlock(pred, &join)) {
+ return false;
+ }
+
+ pred->mark();
+ ins->replaceSuccessor(patches[0].index, join);
+
+ for (size_t i = 1; i < patches.length(); i++) {
+ ins = patches[i].ins;
+
+ pred = ins->block();
+ if (!pred->isMarked()) {
+ if (!join->addPredecessor(alloc(), pred)) {
+ return false;
+ }
+ pred->mark();
+ }
+
+ ins->replaceSuccessor(patches[i].index, join);
+ }
+
+ MOZ_ASSERT_IF(curBlock_, !curBlock_->isMarked());
+ for (uint32_t i = 0; i < join->numPredecessors(); i++) {
+ join->getPredecessor(i)->unmark();
+ }
+
+ if (curBlock_ && !goToExistingBlock(curBlock_, join)) {
+ return false;
+ }
+
+ curBlock_ = join;
+
+ if (!popPushedDefs(defs)) {
+ return false;
+ }
+
+ patches.clear();
+ return true;
+ }
+};
+
+template <>
+MDefinition* FunctionCompiler::unary<MToFloat32>(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MToFloat32::New(alloc(), op, mustPreserveNaN(op->type()));
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MWasmBuiltinTruncateToInt32>(
+ MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MWasmBuiltinTruncateToInt32::New(alloc(), op, tlsPointer_,
+ bytecodeOffset());
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MNot>(MDefinition* op) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MNot::NewInt32(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MAbs>(MDefinition* op, MIRType type) {
+ if (inDeadCode()) {
+ return nullptr;
+ }
+ auto* ins = MAbs::NewWasm(alloc(), op, type);
+ curBlock_->add(ins);
+ return ins;
+}
+
+} // end anonymous namespace
+
+static bool EmitI32Const(FunctionCompiler& f) {
+ int32_t i32;
+ if (!f.iter().readI32Const(&i32)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constant(Int32Value(i32), MIRType::Int32));
+ return true;
+}
+
+static bool EmitI64Const(FunctionCompiler& f) {
+ int64_t i64;
+ if (!f.iter().readI64Const(&i64)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constant(i64));
+ return true;
+}
+
+static bool EmitF32Const(FunctionCompiler& f) {
+ float f32;
+ if (!f.iter().readF32Const(&f32)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constant(f32));
+ return true;
+}
+
+static bool EmitF64Const(FunctionCompiler& f) {
+ double f64;
+ if (!f.iter().readF64Const(&f64)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constant(f64));
+ return true;
+}
+
+static bool EmitBlock(FunctionCompiler& f) {
+ ResultType params;
+ return f.iter().readBlock(&params) && f.startBlock();
+}
+
+static bool EmitLoop(FunctionCompiler& f) {
+ ResultType params;
+ if (!f.iter().readLoop(&params)) {
+ return false;
+ }
+
+ MBasicBlock* loopHeader;
+ if (!f.startLoop(&loopHeader, params.length())) {
+ return false;
+ }
+
+ f.addInterruptCheck();
+
+ f.iter().controlItem() = loopHeader;
+ return true;
+}
+
+static bool EmitIf(FunctionCompiler& f) {
+ ResultType params;
+ MDefinition* condition = nullptr;
+ if (!f.iter().readIf(&params, &condition)) {
+ return false;
+ }
+
+ MBasicBlock* elseBlock;
+ if (!f.branchAndStartThen(condition, &elseBlock)) {
+ return false;
+ }
+
+ f.iter().controlItem() = elseBlock;
+ return true;
+}
+
+static bool EmitElse(FunctionCompiler& f) {
+ ResultType paramType;
+ ResultType resultType;
+ DefVector thenValues;
+ if (!f.iter().readElse(&paramType, &resultType, &thenValues)) {
+ return false;
+ }
+
+ if (!f.pushDefs(thenValues)) {
+ return false;
+ }
+
+ if (!f.switchToElse(f.iter().controlItem(), &f.iter().controlItem())) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool EmitEnd(FunctionCompiler& f) {
+ LabelKind kind;
+ ResultType type;
+ DefVector preJoinDefs;
+ DefVector resultsForEmptyElse;
+ if (!f.iter().readEnd(&kind, &type, &preJoinDefs, &resultsForEmptyElse)) {
+ return false;
+ }
+
+ MBasicBlock* block = f.iter().controlItem();
+ f.iter().popEnd();
+
+ if (!f.pushDefs(preJoinDefs)) {
+ return false;
+ }
+
+ DefVector postJoinDefs;
+ switch (kind) {
+ case LabelKind::Body:
+ MOZ_ASSERT(f.iter().controlStackEmpty());
+ if (!f.finishBlock(&postJoinDefs)) {
+ return false;
+ }
+ if (!f.returnValues(postJoinDefs)) {
+ return false;
+ }
+ return f.iter().readFunctionEnd(f.iter().end());
+ case LabelKind::Block:
+ if (!f.finishBlock(&postJoinDefs)) {
+ return false;
+ }
+ break;
+ case LabelKind::Loop:
+ if (!f.closeLoop(block, &postJoinDefs)) {
+ return false;
+ }
+ break;
+ case LabelKind::Then: {
+      // If we didn't see an Else, create a trivial else block so that we
+      // still create a diamond, preserving Ion's invariants.
+ if (!f.switchToElse(block, &block)) {
+ return false;
+ }
+
+ if (!f.pushDefs(resultsForEmptyElse)) {
+ return false;
+ }
+
+ if (!f.joinIfElse(block, &postJoinDefs)) {
+ return false;
+ }
+ break;
+ }
+ case LabelKind::Else:
+ if (!f.joinIfElse(block, &postJoinDefs)) {
+ return false;
+ }
+ break;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case LabelKind::Try:
+ MOZ_CRASH("NYI");
+ break;
+ case LabelKind::Catch:
+ MOZ_CRASH("NYI");
+ break;
+#endif
+ }
+
+ MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == type.length());
+ f.iter().setResults(postJoinDefs.length(), postJoinDefs);
+
+ return true;
+}
+
+static bool EmitBr(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ ResultType type;
+ DefVector values;
+ if (!f.iter().readBr(&relativeDepth, &type, &values)) {
+ return false;
+ }
+
+ return f.br(relativeDepth, values);
+}
+
+static bool EmitBrIf(FunctionCompiler& f) {
+ uint32_t relativeDepth;
+ ResultType type;
+ DefVector values;
+ MDefinition* condition;
+ if (!f.iter().readBrIf(&relativeDepth, &type, &values, &condition)) {
+ return false;
+ }
+
+ return f.brIf(relativeDepth, values, condition);
+}
+
+static bool EmitBrTable(FunctionCompiler& f) {
+ Uint32Vector depths;
+ uint32_t defaultDepth;
+ ResultType branchValueType;
+ DefVector branchValues;
+ MDefinition* index;
+ if (!f.iter().readBrTable(&depths, &defaultDepth, &branchValueType,
+ &branchValues, &index)) {
+ return false;
+ }
+
+ // If all the targets are the same, or there are no targets, we can just
+ // use a goto. This is not just an optimization: MaybeFoldConditionBlock
+ // assumes that tables have more than one successor.
+ bool allSameDepth = true;
+ for (uint32_t depth : depths) {
+ if (depth != defaultDepth) {
+ allSameDepth = false;
+ break;
+ }
+ }
+
+ if (allSameDepth) {
+ return f.br(defaultDepth, branchValues);
+ }
+
+ return f.brTable(index, defaultDepth, depths, branchValues);
+}
+
+static bool EmitReturn(FunctionCompiler& f) {
+ DefVector values;
+ if (!f.iter().readReturn(&values)) {
+ return false;
+ }
+
+ return f.returnValues(values);
+}
+
+static bool EmitUnreachable(FunctionCompiler& f) {
+ if (!f.iter().readUnreachable()) {
+ return false;
+ }
+
+ f.unreachableTrap();
+ return true;
+}
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+static bool EmitTry(FunctionCompiler& f) {
+ ResultType params;
+ if (!f.iter().readTry(&params)) {
+ return false;
+ }
+
+ MOZ_CRASH("NYI");
+}
+
+static bool EmitCatch(FunctionCompiler& f) {
+ LabelKind kind;
+ uint32_t eventIndex;
+ ResultType paramType, resultType;
+ DefVector tryValues;
+ if (!f.iter().readCatch(&kind, &eventIndex, &paramType, &resultType,
+ &tryValues)) {
+ return false;
+ }
+
+ MOZ_CRASH("NYI");
+}
+
+static bool EmitThrow(FunctionCompiler& f) {
+ uint32_t exnIndex;
+ DefVector argValues;
+ if (!f.iter().readThrow(&exnIndex, &argValues)) {
+ return false;
+ }
+
+ MOZ_CRASH("NYI");
+}
+#endif
+
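+// Pass each argument according to the call ABI, then, if some results are
+// returned via the stack, pass a pointer to the stack-result area, and
+// finally record the stack space consumed by this call.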
+static bool EmitCallArgs(FunctionCompiler& f, const FuncType& funcType,
+ const DefVector& args, CallCompileState* call) {
+ for (size_t i = 0, n = funcType.args().length(); i < n; ++i) {
+ if (!f.mirGen().ensureBallast()) {
+ return false;
+ }
+ if (!f.passArg(args[i], funcType.args()[i], call)) {
+ return false;
+ }
+ }
+
+ ResultType resultType = ResultType::Vector(funcType.results());
+ if (!f.passStackResultAreaCallArg(resultType, call)) {
+ return false;
+ }
+
+ return f.finishCall(call);
+}
+
+static bool EmitCall(FunctionCompiler& f, bool asmJSFuncDef) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t funcIndex;
+ DefVector args;
+ if (asmJSFuncDef) {
+ if (!f.iter().readOldCallDirect(f.moduleEnv().numFuncImports(), &funcIndex,
+ &args)) {
+ return false;
+ }
+ } else {
+ if (!f.iter().readCall(&funcIndex, &args)) {
+ return false;
+ }
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const FuncType& funcType = *f.moduleEnv().funcs[funcIndex].type;
+
+ CallCompileState call;
+ if (!EmitCallArgs(f, funcType, args, &call)) {
+ return false;
+ }
+
+ DefVector results;
+ if (f.moduleEnv().funcIsImport(funcIndex)) {
+ uint32_t globalDataOffset =
+ f.moduleEnv().funcImportGlobalDataOffsets[funcIndex];
+ if (!f.callImport(globalDataOffset, lineOrBytecode, call, funcType,
+ &results)) {
+ return false;
+ }
+ } else {
+ if (!f.callDirect(funcType, funcIndex, lineOrBytecode, call, &results)) {
+ return false;
+ }
+ }
+
+ f.iter().setResults(results.length(), results);
+ return true;
+}
+
+static bool EmitCallIndirect(FunctionCompiler& f, bool oldStyle) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t funcTypeIndex;
+ uint32_t tableIndex;
+ MDefinition* callee;
+ DefVector args;
+ if (oldStyle) {
+ tableIndex = 0;
+ if (!f.iter().readOldCallIndirect(&funcTypeIndex, &callee, &args)) {
+ return false;
+ }
+ } else {
+ if (!f.iter().readCallIndirect(&funcTypeIndex, &tableIndex, &callee,
+ &args)) {
+ return false;
+ }
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ const FuncType& funcType = f.moduleEnv().types[funcTypeIndex].funcType();
+
+ CallCompileState call;
+ if (!EmitCallArgs(f, funcType, args, &call)) {
+ return false;
+ }
+
+ DefVector results;
+ if (!f.callIndirect(funcTypeIndex, tableIndex, callee, lineOrBytecode, call,
+ &results)) {
+ return false;
+ }
+
+ f.iter().setResults(results.length(), results);
+ return true;
+}
+
+static bool EmitGetLocal(FunctionCompiler& f) {
+ uint32_t id;
+ if (!f.iter().readGetLocal(f.locals(), &id)) {
+ return false;
+ }
+
+ f.iter().setResult(f.getLocalDef(id));
+ return true;
+}
+
+static bool EmitSetLocal(FunctionCompiler& f) {
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readSetLocal(f.locals(), &id, &value)) {
+ return false;
+ }
+
+ f.assign(id, value);
+ return true;
+}
+
+static bool EmitTeeLocal(FunctionCompiler& f) {
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readTeeLocal(f.locals(), &id, &value)) {
+ return false;
+ }
+
+ f.assign(id, value);
+ return true;
+}
+
+static bool EmitGetGlobal(FunctionCompiler& f) {
+ uint32_t id;
+ if (!f.iter().readGetGlobal(&id)) {
+ return false;
+ }
+
+ const GlobalDesc& global = f.moduleEnv().globals[id];
+ if (!global.isConstant()) {
+ f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
+ global.isIndirect(),
+ ToMIRType(global.type())));
+ return true;
+ }
+
+ LitVal value = global.constantValue();
+ MIRType mirType = ToMIRType(value.type());
+
+ MDefinition* result;
+ switch (value.type().kind()) {
+ case ValType::I32:
+ result = f.constant(Int32Value(value.i32()), mirType);
+ break;
+ case ValType::I64:
+ result = f.constant(int64_t(value.i64()));
+ break;
+ case ValType::F32:
+ result = f.constant(value.f32());
+ break;
+ case ValType::F64:
+ result = f.constant(value.f64());
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ result = f.constant(value.v128());
+ break;
+#else
+ return f.iter().fail("Ion has no SIMD support yet");
+#endif
+ case ValType::Ref:
+ switch (value.type().refTypeKind()) {
+ case RefType::Func:
+ case RefType::Extern:
+ case RefType::Eq:
+ MOZ_ASSERT(value.ref().isNull());
+ result = f.nullRefConstant();
+ break;
+ case RefType::TypeIndex:
+ MOZ_CRASH("unexpected reference type in EmitGetGlobal");
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected type in EmitGetGlobal");
+ }
+
+ f.iter().setResult(result);
+ return true;
+}
+
+static bool EmitSetGlobal(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readSetGlobal(&id, &value)) {
+ return false;
+ }
+
+ const GlobalDesc& global = f.moduleEnv().globals[id];
+ MOZ_ASSERT(global.isMutable());
+ MInstruction* barrierAddr =
+ f.storeGlobalVar(global.offset(), global.isIndirect(), value);
+
+ // We always call the C++ postbarrier because the location will never be in
+ // the nursery, and the value stored will very frequently be in the nursery.
+ // The C++ postbarrier performs any necessary filtering.
+
+ if (barrierAddr) {
+ const SymbolicAddressSignature& callee = SASigPostBarrierFiltering;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+ if (!f.passArg(barrierAddr, callee.argTypes[1], &args)) {
+ return false;
+ }
+    if (!f.finishCall(&args)) {
+      return false;
+    }
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool EmitTeeGlobal(FunctionCompiler& f) {
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readTeeGlobal(&id, &value)) {
+ return false;
+ }
+
+ const GlobalDesc& global = f.moduleEnv().globals[id];
+ MOZ_ASSERT(global.isMutable());
+
+ f.storeGlobalVar(global.offset(), global.isIndirect(), value);
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitUnary(FunctionCompiler& f, ValType operandType) {
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitConversion(FunctionCompiler& f, ValType operandType,
+ ValType resultType) {
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitUnaryWithType(FunctionCompiler& f, ValType operandType,
+ MIRType mirType) {
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input, mirType));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitConversionWithType(FunctionCompiler& f, ValType operandType,
+ ValType resultType, MIRType mirType) {
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MIRClass>(input, mirType));
+ return true;
+}
+
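+// Emits float-to-integer truncation. |isSaturating| selects the non-trapping
+// variants, which clamp out-of-range inputs instead of trapping; asm.js
+// truncation to i32 never traps and uses the JS-style truncate nodes instead.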
+static bool EmitTruncate(FunctionCompiler& f, ValType operandType,
+ ValType resultType, bool isUnsigned,
+ bool isSaturating) {
+ MDefinition* input = nullptr;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ TruncFlags flags = 0;
+ if (isUnsigned) {
+ flags |= TRUNC_UNSIGNED;
+ }
+ if (isSaturating) {
+ flags |= TRUNC_SATURATING;
+ }
+ if (resultType == ValType::I32) {
+ if (f.moduleEnv().isAsmJS()) {
+ if (input && (input->type() == MIRType::Double ||
+ input->type() == MIRType::Float32)) {
+ f.iter().setResult(f.unary<MWasmBuiltinTruncateToInt32>(input));
+ } else {
+ f.iter().setResult(f.unary<MTruncateToInt32>(input));
+ }
+ } else {
+ f.iter().setResult(f.truncate<MWasmTruncateToInt32>(input, flags));
+ }
+ } else {
+ MOZ_ASSERT(resultType == ValType::I64);
+ MOZ_ASSERT(!f.moduleEnv().isAsmJS());
+#if defined(JS_CODEGEN_ARM)
+ f.iter().setResult(f.truncateWithTls(input, flags));
+#else
+ f.iter().setResult(f.truncate<MWasmTruncateToInt64>(input, flags));
+#endif
+ }
+ return true;
+}
+
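+// Emits the sign-extension operators. |srcSize| and |targetSize| are in
+// bytes, so e.g. (1, 4) corresponds to i32.extend8_s and (4, 8) to
+// i64.extend32_s.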
+static bool EmitSignExtend(FunctionCompiler& f, uint32_t srcSize,
+ uint32_t targetSize) {
+ MDefinition* input;
+ ValType type = targetSize == 4 ? ValType::I32 : ValType::I64;
+ if (!f.iter().readConversion(type, type, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.signExtend(input, srcSize, targetSize));
+ return true;
+}
+
+static bool EmitExtendI32(FunctionCompiler& f, bool isUnsigned) {
+ MDefinition* input;
+ if (!f.iter().readConversion(ValType::I32, ValType::I64, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.extendI32(input, isUnsigned));
+ return true;
+}
+
+static bool EmitConvertI64ToFloatingPoint(FunctionCompiler& f,
+ ValType resultType, MIRType mirType,
+ bool isUnsigned) {
+ MDefinition* input;
+ if (!f.iter().readConversion(ValType::I64, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.convertI64ToFloatingPoint(input, mirType, isUnsigned));
+ return true;
+}
+
+static bool EmitReinterpret(FunctionCompiler& f, ValType resultType,
+ ValType operandType, MIRType mirType) {
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unary<MWasmReinterpret>(input, mirType));
+ return true;
+}
+
+static bool EmitAdd(FunctionCompiler& f, ValType type, MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.add(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitSub(FunctionCompiler& f, ValType type, MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.sub(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitRotate(FunctionCompiler& f, ValType type, bool isLeftRotation) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs)) {
+ return false;
+ }
+
+ MDefinition* result = f.rotate(lhs, rhs, ToMIRType(type), isLeftRotation);
+ f.iter().setResult(result);
+ return true;
+}
+
+static bool EmitBitNot(FunctionCompiler& f, ValType operandType) {
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input)) {
+ return false;
+ }
+
+ f.iter().setResult(f.bitnot(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool EmitBitwise(FunctionCompiler& f, ValType operandType,
+ MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.binary<MIRClass>(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitUrsh(FunctionCompiler& f, ValType operandType,
+ MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.ursh(lhs, rhs, mirType));
+ return true;
+}
+
+static bool EmitMul(FunctionCompiler& f, ValType operandType, MIRType mirType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(
+ f.mul(lhs, rhs, mirType,
+ mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal));
+ return true;
+}
+
+static bool EmitDiv(FunctionCompiler& f, ValType operandType, MIRType mirType,
+ bool isUnsigned) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.div(lhs, rhs, mirType, isUnsigned));
+ return true;
+}
+
+static bool EmitRem(FunctionCompiler& f, ValType operandType, MIRType mirType,
+ bool isUnsigned) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.mod(lhs, rhs, mirType, isUnsigned));
+ return true;
+}
+
+static bool EmitMinMax(FunctionCompiler& f, ValType operandType,
+ MIRType mirType, bool isMax) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.minMax(lhs, rhs, mirType, isMax));
+ return true;
+}
+
+static bool EmitCopySign(FunctionCompiler& f, ValType operandType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.binary<MCopySign>(lhs, rhs, ToMIRType(operandType)));
+ return true;
+}
+
+static bool EmitComparison(FunctionCompiler& f, ValType operandType,
+ JSOp compareOp, MCompare::CompareType compareType) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readComparison(operandType, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.compare(lhs, rhs, compareOp, compareType));
+ return true;
+}
+
+static bool EmitSelect(FunctionCompiler& f, bool typed) {
+ StackType type;
+ MDefinition* trueValue;
+ MDefinition* falseValue;
+ MDefinition* condition;
+ if (!f.iter().readSelect(typed, &type, &trueValue, &falseValue, &condition)) {
+ return false;
+ }
+
+ f.iter().setResult(f.select(trueValue, falseValue, condition));
+ return true;
+}
+
+static bool EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+ auto* ins = f.load(addr.base, &access, type);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitStore(FunctionCompiler& f, ValType resultType,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr,
+ &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+
+ f.store(addr.base, &access, value);
+ return true;
+}
+
+static bool EmitTeeStore(FunctionCompiler& f, ValType resultType,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
+ &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+
+ f.store(addr.base, &access, value);
+ return true;
+}
+
+static bool EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr,
+ &value)) {
+ return false;
+ }
+
+ if (resultType == ValType::F32 && viewType == Scalar::Float64) {
+ value = f.unary<MToDouble>(value);
+ } else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
+ value = f.unary<MToFloat32>(value);
+ } else {
+ MOZ_CRASH("unexpected coerced store");
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset,
+ f.bytecodeIfNotAsmJS());
+
+ f.store(addr.base, &access, value);
+ return true;
+}
+
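+// If |callee| is one of the rounding builtins (ceil/floor/trunc/nearbyint)
+// and the target has a native rounding instruction for that mode, emit an
+// inline MNearbyInt instead of an out-of-line call and return true.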
+static bool TryInlineUnaryBuiltin(FunctionCompiler& f, SymbolicAddress callee,
+ MDefinition* input) {
+ if (!input) {
+ return false;
+ }
+
+ MOZ_ASSERT(IsFloatingPointType(input->type()));
+
+ RoundingMode mode;
+ if (!IsRoundingFunction(callee, &mode)) {
+ return false;
+ }
+
+ if (!MNearbyInt::HasAssemblerSupport(mode)) {
+ return false;
+ }
+
+ f.iter().setResult(f.nearbyInt(input, mode));
+ return true;
+}
+
+static bool EmitUnaryMathBuiltinCall(FunctionCompiler& f,
+ const SymbolicAddressSignature& callee) {
+ MOZ_ASSERT(callee.numArgs == 1);
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ MDefinition* input;
+ if (!f.iter().readUnary(ValType(callee.argTypes[0]), &input)) {
+ return false;
+ }
+
+ if (TryInlineUnaryBuiltin(f, callee.identity, input)) {
+ return true;
+ }
+
+ CallCompileState call;
+ if (!f.passArg(input, callee.argTypes[0], &call)) {
+ return false;
+ }
+
+ if (!f.finishCall(&call)) {
+ return false;
+ }
+
+ MDefinition* def;
+ if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
+ return false;
+ }
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool EmitBinaryMathBuiltinCall(FunctionCompiler& f,
+ const SymbolicAddressSignature& callee) {
+ MOZ_ASSERT(callee.numArgs == 2);
+ MOZ_ASSERT(callee.argTypes[0] == callee.argTypes[1]);
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ CallCompileState call;
+ MDefinition* lhs;
+ MDefinition* rhs;
+ // This call to readBinary assumes both operands have the same type.
+ if (!f.iter().readBinary(ValType(callee.argTypes[0]), &lhs, &rhs)) {
+ return false;
+ }
+
+ if (!f.passArg(lhs, callee.argTypes[0], &call)) {
+ return false;
+ }
+
+ if (!f.passArg(rhs, callee.argTypes[1], &call)) {
+ return false;
+ }
+
+ if (!f.finishCall(&call)) {
+ return false;
+ }
+
+ MDefinition* def;
+ if (!f.builtinCall(callee, lineOrBytecode, call, &def)) {
+ return false;
+ }
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool EmitMemoryGrow(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigMemoryGrow;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ MDefinition* delta;
+ if (!f.iter().readMemoryGrow(&delta)) {
+ return false;
+ }
+
+ if (!f.passArg(delta, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+  if (!f.finishCall(&args)) {
+    return false;
+  }
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitMemorySize(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigMemorySize;
+ CallCompileState args;
+
+ if (!f.iter().readMemorySize()) {
+ return false;
+ }
+
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+  if (!f.finishCall(&args)) {
+    return false;
+  }
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
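+// Atomic accesses attach the required synchronization to the
+// MemoryAccessDesc: full barriers for cmpxchg/RMW/xchg, and load/store
+// barriers for atomic loads and stores.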
+static bool EmitAtomicCmpXchg(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* oldValue;
+ MDefinition* newValue;
+ if (!f.iter().readAtomicCmpXchg(&addr, type, byteSize(viewType), &oldValue,
+ &newValue)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Full());
+ auto* ins =
+ f.atomicCompareExchangeHeap(addr.base, &access, type, oldValue, newValue);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitAtomicLoad(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readAtomicLoad(&addr, type, byteSize(viewType))) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Load());
+ auto* ins = f.load(addr.base, &access, type);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitAtomicRMW(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType, jit::AtomicOp op) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Full());
+ auto* ins = f.atomicBinopHeap(op, addr.base, &access, type, value);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitAtomicStore(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readAtomicStore(&addr, type, byteSize(viewType), &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Store());
+ f.store(addr.base, &access, value);
+ return true;
+}
+
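+// Emits memory.atomic.wait32/wait64 as a call into the instance. The
+// effective byte address (base plus constant offset) is computed inline and
+// passed to the builtin together with the expected value and the timeout.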
+static bool EmitWait(FunctionCompiler& f, ValType type, uint32_t byteSize) {
+ MOZ_ASSERT(type == ValType::I32 || type == ValType::I64);
+ MOZ_ASSERT(SizeOf(type) == byteSize);
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee =
+ type == ValType::I32 ? SASigWaitI32 : SASigWaitI64;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* expected;
+ MDefinition* timeout;
+ if (!f.iter().readWait(&addr, type, byteSize, &expected, &timeout)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(type == ValType::I32 ? Scalar::Int32 : Scalar::Int64,
+ addr.align, addr.offset, f.bytecodeOffset());
+ MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
+ if (!f.inDeadCode() && !ptr) {
+ return false;
+ }
+
+ if (!f.passArg(ptr, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ MOZ_ASSERT(ToMIRType(type) == callee.argTypes[2]);
+ if (!f.passArg(expected, callee.argTypes[2], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(timeout, callee.argTypes[3], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitFence(FunctionCompiler& f) {
+ if (!f.iter().readFence()) {
+ return false;
+ }
+
+ f.fence();
+ return true;
+}
+
+static bool EmitWake(FunctionCompiler& f) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigWake;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* count;
+ if (!f.iter().readWake(&addr, &count)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(Scalar::Int32, addr.align, addr.offset,
+ f.bytecodeOffset());
+ MDefinition* ptr = f.computeEffectiveAddress(addr.base, &access);
+ if (!f.inDeadCode() && !ptr) {
+ return false;
+ }
+
+ if (!f.passArg(ptr, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(count, callee.argTypes[2], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitAtomicXchg(FunctionCompiler& f, ValType type,
+ Scalar::Type viewType) {
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readAtomicRMW(&addr, type, byteSize(viewType), &value)) {
+ return false;
+ }
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.bytecodeOffset(),
+ Synchronization::Full());
+ MDefinition* ins = f.atomicExchangeHeap(addr.base, &access, type, value);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+
+ f.iter().setResult(ins);
+ return true;
+}
+
+static bool EmitMemCopyCall(FunctionCompiler& f, MDefinition* dst,
+ MDefinition* src, MDefinition* len) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee =
+ (f.moduleEnv().usesSharedMemory() ? SASigMemCopyShared : SASigMemCopy);
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(dst, callee.argTypes[1], &args)) {
+ return false;
+ }
+ if (!f.passArg(src, callee.argTypes[2], &args)) {
+ return false;
+ }
+ if (!f.passArg(len, callee.argTypes[3], &args)) {
+ return false;
+ }
+ MDefinition* memoryBase = f.memoryBase();
+ if (!f.passArg(memoryBase, callee.argTypes[4], &args)) {
+ return false;
+ }
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
+}
+
+static bool EmitMemCopyInline(FunctionCompiler& f, MDefinition* dst,
+ MDefinition* src, MDefinition* len) {
+ MOZ_ASSERT(MaxInlineMemoryCopyLength != 0);
+
+ MOZ_ASSERT(len->isConstant() && len->type() == MIRType::Int32);
+ uint32_t length = len->toConstant()->toInt32();
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryCopyLength);
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
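+  // For example, on a 64-bit target a 13-byte copy decomposes into one
+  // 8-byte, one 4-byte and one 1-byte transfer.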
+
+ // Load all source bytes from low to high using the widest transfer width we
+ // can for the system. We will trap without writing anything if any source
+ // byte is out-of-bounds.
+ size_t offset = 0;
+ DefVector loadedValues;
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I64);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+
+ offset += sizeof(uint64_t);
+ }
+#endif
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I32);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+
+ offset += sizeof(uint32_t);
+ }
+
+ if (numCopies2) {
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I32);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+
+ offset += sizeof(uint16_t);
+ }
+
+ if (numCopies1) {
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
+ auto* load = f.load(src, &access, ValType::I32);
+ if (!load || !loadedValues.append(load)) {
+ return false;
+ }
+ }
+
+ // Store all source bytes to the destination from high to low. We will trap
+ // without writing anything on the first store if any dest byte is
+ // out-of-bounds.
+ offset = length;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
+ auto* value = loadedValues.popCopy();
+ f.store(dst, &access, value);
+ }
+#endif
+
+ return true;
+}
+
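+// memory.copy is emitted inline only when the platform supports fast
+// unaligned accesses and the length is a non-zero constant no larger than
+// MaxInlineMemoryCopyLength; otherwise it becomes a call into the instance.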
+static bool EmitMemCopy(FunctionCompiler& f) {
+ MDefinition *dst, *src, *len;
+ uint32_t dstMemIndex;
+ uint32_t srcMemIndex;
+ if (!f.iter().readMemOrTableCopy(true, &dstMemIndex, &dst, &srcMemIndex, &src,
+ &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ if (MacroAssembler::SupportsFastUnalignedAccesses() && len->isConstant() &&
+ len->type() == MIRType::Int32 && len->toConstant()->toInt32() != 0 &&
+ uint32_t(len->toConstant()->toInt32()) <= MaxInlineMemoryCopyLength) {
+ return EmitMemCopyInline(f, dst, src, len);
+ }
+ return EmitMemCopyCall(f, dst, src, len);
+}
+
+static bool EmitTableCopy(FunctionCompiler& f) {
+ MDefinition *dst, *src, *len;
+ uint32_t dstTableIndex;
+ uint32_t srcTableIndex;
+ if (!f.iter().readMemOrTableCopy(false, &dstTableIndex, &dst, &srcTableIndex,
+ &src, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigTableCopy;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(dst, callee.argTypes[1], &args)) {
+ return false;
+ }
+ if (!f.passArg(src, callee.argTypes[2], &args)) {
+ return false;
+ }
+ if (!f.passArg(len, callee.argTypes[3], &args)) {
+ return false;
+ }
+ MDefinition* dti = f.constant(Int32Value(dstTableIndex), MIRType::Int32);
+ if (!dti) {
+ return false;
+ }
+ if (!f.passArg(dti, callee.argTypes[4], &args)) {
+ return false;
+ }
+ MDefinition* sti = f.constant(Int32Value(srcTableIndex), MIRType::Int32);
+ if (!sti) {
+ return false;
+ }
+ if (!f.passArg(sti, callee.argTypes[5], &args)) {
+ return false;
+ }
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
+}
+
+static bool EmitDataOrElemDrop(FunctionCompiler& f, bool isData) {
+ uint32_t segIndexVal = 0;
+ if (!f.iter().readDataOrElemDrop(isData, &segIndexVal)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee =
+ isData ? SASigDataDrop : SASigElemDrop;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ MDefinition* segIndex =
+ f.constant(Int32Value(int32_t(segIndexVal)), MIRType::Int32);
+ if (!f.passArg(segIndex, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
+}
+
+static bool EmitMemFillCall(FunctionCompiler& f, MDefinition* start,
+ MDefinition* val, MDefinition* len) {
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee =
+ f.moduleEnv().usesSharedMemory() ? SASigMemFillShared : SASigMemFill;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(start, callee.argTypes[1], &args)) {
+ return false;
+ }
+ if (!f.passArg(val, callee.argTypes[2], &args)) {
+ return false;
+ }
+ if (!f.passArg(len, callee.argTypes[3], &args)) {
+ return false;
+ }
+ MDefinition* memoryBase = f.memoryBase();
+ if (!f.passArg(memoryBase, callee.argTypes[4], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
+}
+
+static bool EmitMemFillInline(FunctionCompiler& f, MDefinition* start,
+ MDefinition* val, MDefinition* len) {
+ MOZ_ASSERT(MaxInlineMemoryFillLength != 0);
+
+ MOZ_ASSERT(len->isConstant() && len->type() == MIRType::Int32 &&
+ val->isConstant() && val->type() == MIRType::Int32);
+
+ uint32_t length = len->toConstant()->toInt32();
+ uint32_t value = val->toConstant()->toInt32();
+ MOZ_ASSERT(length != 0 && length <= MaxInlineMemoryFillLength);
+
+ // Compute the number of copies of each width we will need to do
+ size_t remainder = length;
+#ifdef JS_64BIT
+ size_t numCopies8 = remainder / sizeof(uint64_t);
+ remainder %= sizeof(uint64_t);
+#endif
+ size_t numCopies4 = remainder / sizeof(uint32_t);
+ remainder %= sizeof(uint32_t);
+ size_t numCopies2 = remainder / sizeof(uint16_t);
+ remainder %= sizeof(uint16_t);
+ size_t numCopies1 = remainder;
+
+ // Generate splatted definitions for wider fills as needed
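+  // SplatByteToUInt replicates the low byte of |value| across the wider
+  // integer, so e.g. a fill byte of 0xAB yields 0xABAB for the 16-bit stores
+  // and 0xABABABAB for the 32-bit stores.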
+#ifdef JS_64BIT
+ MDefinition* val8 =
+ numCopies8 ? f.constant(int64_t(SplatByteToUInt<uint64_t>(value, 8)))
+ : nullptr;
+#endif
+ MDefinition* val4 =
+ numCopies4 ? f.constant(Int32Value(SplatByteToUInt<uint32_t>(value, 4)),
+ MIRType::Int32)
+ : nullptr;
+ MDefinition* val2 =
+ numCopies2 ? f.constant(Int32Value(SplatByteToUInt<uint32_t>(value, 2)),
+ MIRType::Int32)
+ : nullptr;
+
+ // Store the fill value to the destination from high to low. We will trap
+ // without writing anything on the first store if any dest byte is
+ // out-of-bounds.
+ size_t offset = length;
+
+ if (numCopies1) {
+ offset -= sizeof(uint8_t);
+
+ MemoryAccessDesc access(Scalar::Uint8, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val);
+ }
+
+ if (numCopies2) {
+ offset -= sizeof(uint16_t);
+
+ MemoryAccessDesc access(Scalar::Uint16, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val2);
+ }
+
+ for (uint32_t i = 0; i < numCopies4; i++) {
+ offset -= sizeof(uint32_t);
+
+ MemoryAccessDesc access(Scalar::Uint32, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val4);
+ }
+
+#ifdef JS_64BIT
+ for (uint32_t i = 0; i < numCopies8; i++) {
+ offset -= sizeof(uint64_t);
+
+ MemoryAccessDesc access(Scalar::Int64, 1, offset, f.bytecodeOffset());
+ f.store(start, &access, val8);
+ }
+#endif
+
+ return true;
+}
+
+static bool EmitMemFill(FunctionCompiler& f) {
+ MDefinition *start, *val, *len;
+ if (!f.iter().readMemFill(&start, &val, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ if (MacroAssembler::SupportsFastUnalignedAccesses() && len->isConstant() &&
+ len->type() == MIRType::Int32 && len->toConstant()->toInt32() != 0 &&
+ uint32_t(len->toConstant()->toInt32()) <= MaxInlineMemoryFillLength &&
+ val->isConstant() && val->type() == MIRType::Int32) {
+ return EmitMemFillInline(f, start, val, len);
+ }
+ return EmitMemFillCall(f, start, val, len);
+}
+
+static bool EmitMemOrTableInit(FunctionCompiler& f, bool isMem) {
+ uint32_t segIndexVal = 0, dstTableIndex = 0;
+ MDefinition *dstOff, *srcOff, *len;
+ if (!f.iter().readMemOrTableInit(isMem, &segIndexVal, &dstTableIndex, &dstOff,
+ &srcOff, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee =
+ isMem ? SASigMemInit : SASigTableInit;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(dstOff, callee.argTypes[1], &args)) {
+ return false;
+ }
+ if (!f.passArg(srcOff, callee.argTypes[2], &args)) {
+ return false;
+ }
+ if (!f.passArg(len, callee.argTypes[3], &args)) {
+ return false;
+ }
+
+ MDefinition* segIndex =
+ f.constant(Int32Value(int32_t(segIndexVal)), MIRType::Int32);
+ if (!f.passArg(segIndex, callee.argTypes[4], &args)) {
+ return false;
+ }
+ if (!isMem) {
+ MDefinition* dti = f.constant(Int32Value(dstTableIndex), MIRType::Int32);
+ if (!dti) {
+ return false;
+ }
+ if (!f.passArg(dti, callee.argTypes[5], &args)) {
+ return false;
+ }
+ }
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
+}
+
+#ifdef ENABLE_WASM_REFTYPES
+// Note: table.{get,grow,set} on table(funcref) are currently rejected by the
+// verifier.
+
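+// The table operations below are all emitted as calls into the instance; the
+// table index is passed as a trailing i32 constant argument.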
+static bool EmitTableFill(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition *start, *val, *len;
+ if (!f.iter().readTableFill(&tableIndex, &start, &val, &len)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigTableFill;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(start, callee.argTypes[1], &args)) {
+ return false;
+ }
+ if (!f.passArg(val, callee.argTypes[2], &args)) {
+ return false;
+ }
+ if (!f.passArg(len, callee.argTypes[3], &args)) {
+ return false;
+ }
+
+ MDefinition* tableIndexArg =
+ f.constant(Int32Value(tableIndex), MIRType::Int32);
+ if (!tableIndexArg) {
+ return false;
+ }
+ if (!f.passArg(tableIndexArg, callee.argTypes[4], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
+}
+
+static bool EmitTableGet(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition* index;
+ if (!f.iter().readTableGet(&tableIndex, &index)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigTableGet;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(index, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ MDefinition* tableIndexArg =
+ f.constant(Int32Value(tableIndex), MIRType::Int32);
+ if (!tableIndexArg) {
+ return false;
+ }
+ if (!f.passArg(tableIndexArg, callee.argTypes[2], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ // The return value here is either null, denoting an error, or a short-lived
+ // pointer to a location containing a possibly-null ref.
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitTableGrow(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition* initValue;
+ MDefinition* delta;
+ if (!f.iter().readTableGrow(&tableIndex, &initValue, &delta)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigTableGrow;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(initValue, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(delta, callee.argTypes[2], &args)) {
+ return false;
+ }
+
+ MDefinition* tableIndexArg =
+ f.constant(Int32Value(tableIndex), MIRType::Int32);
+ if (!tableIndexArg) {
+ return false;
+ }
+ if (!f.passArg(tableIndexArg, callee.argTypes[3], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitTableSet(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ MDefinition* index;
+ MDefinition* value;
+ if (!f.iter().readTableSet(&tableIndex, &index, &value)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigTableSet;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(index, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ if (!f.passArg(value, callee.argTypes[2], &args)) {
+ return false;
+ }
+
+ MDefinition* tableIndexArg =
+ f.constant(Int32Value(tableIndex), MIRType::Int32);
+ if (!tableIndexArg) {
+ return false;
+ }
+ if (!f.passArg(tableIndexArg, callee.argTypes[3], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ return f.builtinInstanceMethodCall(callee, lineOrBytecode, args);
+}
+
+static bool EmitTableSize(FunctionCompiler& f) {
+ uint32_t tableIndex;
+ if (!f.iter().readTableSize(&tableIndex)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigTableSize;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ MDefinition* tableIndexArg =
+ f.constant(Int32Value(tableIndex), MIRType::Int32);
+ if (!tableIndexArg) {
+ return false;
+ }
+ if (!f.passArg(tableIndexArg, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitRefFunc(FunctionCompiler& f) {
+ uint32_t funcIndex;
+ if (!f.iter().readRefFunc(&funcIndex)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ const SymbolicAddressSignature& callee = SASigRefFunc;
+ CallCompileState args;
+ if (!f.passInstance(callee.argTypes[0], &args)) {
+ return false;
+ }
+
+ MDefinition* funcIndexArg = f.constant(Int32Value(funcIndex), MIRType::Int32);
+ if (!funcIndexArg) {
+ return false;
+ }
+ if (!f.passArg(funcIndexArg, callee.argTypes[1], &args)) {
+ return false;
+ }
+
+ if (!f.finishCall(&args)) {
+ return false;
+ }
+
+ // The return value here is either null, denoting an error, or a short-lived
+ // pointer to a location containing a possibly-null ref.
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(callee, lineOrBytecode, args, &ret)) {
+ return false;
+ }
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool EmitRefNull(FunctionCompiler& f) {
+ if (!f.iter().readRefNull()) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MDefinition* nullVal = f.nullRefConstant();
+ if (!nullVal) {
+ return false;
+ }
+ f.iter().setResult(nullVal);
+ return true;
+}
+
+static bool EmitRefIsNull(FunctionCompiler& f) {
+ MDefinition* input;
+ if (!f.iter().readRefIsNull(&input)) {
+ return false;
+ }
+
+ if (f.inDeadCode()) {
+ return true;
+ }
+
+ MDefinition* nullVal = f.nullRefConstant();
+ if (!nullVal) {
+ return false;
+ }
+ f.iter().setResult(
+ f.compare(input, nullVal, JSOp::Eq, MCompare::Compare_RefOrNull));
+ return true;
+}
+#endif // ENABLE_WASM_REFTYPES
+
+#ifdef ENABLE_WASM_SIMD
+static bool EmitConstSimd128(FunctionCompiler& f) {
+ V128 v128;
+ if (!f.iter().readV128Const(&v128)) {
+ return false;
+ }
+
+ f.iter().setResult(f.constant(v128));
+ return true;
+}
+
+static bool EmitBinarySimd128(FunctionCompiler& f, bool commutative,
+ SimdOp op) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(ValType::V128, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.binarySimd128(lhs, rhs, commutative, op));
+ return true;
+}
+
+static bool EmitShiftSimd128(FunctionCompiler& f, SimdOp op) {
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readVectorShift(&lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.shiftSimd128(lhs, rhs, op));
+ return true;
+}
+
+static bool EmitSplatSimd128(FunctionCompiler& f, ValType inType, SimdOp op) {
+ MDefinition* src;
+ if (!f.iter().readConversion(inType, ValType::V128, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.scalarToSimd128(src, op));
+ return true;
+}
+
+static bool EmitUnarySimd128(FunctionCompiler& f, SimdOp op) {
+ MDefinition* src;
+ if (!f.iter().readUnary(ValType::V128, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.unarySimd128(src, op));
+ return true;
+}
+
+static bool EmitReduceSimd128(FunctionCompiler& f, SimdOp op) {
+ MDefinition* src;
+ if (!f.iter().readConversion(ValType::V128, ValType::I32, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.reduceSimd128(src, op, ValType::I32));
+ return true;
+}
+
+static bool EmitExtractLaneSimd128(FunctionCompiler& f, ValType outType,
+ uint32_t laneLimit, SimdOp op) {
+ uint32_t laneIndex;
+ MDefinition* src;
+ if (!f.iter().readExtractLane(outType, laneLimit, &laneIndex, &src)) {
+ return false;
+ }
+
+ f.iter().setResult(f.reduceSimd128(src, op, outType, laneIndex));
+ return true;
+}
+
+static bool EmitReplaceLaneSimd128(FunctionCompiler& f, ValType laneType,
+ uint32_t laneLimit, SimdOp op) {
+ uint32_t laneIndex;
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readReplaceLane(laneType, laneLimit, &laneIndex, &lhs, &rhs)) {
+ return false;
+ }
+
+ f.iter().setResult(f.replaceLaneSimd128(lhs, rhs, laneIndex, op));
+ return true;
+}
+
+static bool EmitBitselectSimd128(FunctionCompiler& f) {
+ MDefinition* v1;
+ MDefinition* v2;
+ MDefinition* control;
+ if (!f.iter().readVectorSelect(&v1, &v2, &control)) {
+ return false;
+ }
+
+ f.iter().setResult(f.bitselectSimd128(v1, v2, control));
+ return true;
+}
+
+static bool EmitShuffleSimd128(FunctionCompiler& f) {
+ MDefinition* v1;
+ MDefinition* v2;
+ V128 control;
+ if (!f.iter().readVectorShuffle(&v1, &v2, &control)) {
+ return false;
+ }
+
+# ifdef ENABLE_WASM_SIMD_WORMHOLE
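+  // When the wormhole feature is enabled, a shuffle whose first fifteen mask
+  // bytes match |trigger| is intercepted, and the sixteenth byte selects the
+  // intrinsic to emit.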
+ static const uint8_t trigger[] = {31, 0, 30, 2, 29, 4, 28, 6,
+ 27, 8, 26, 10, 25, 12, 24};
+ static_assert(sizeof(trigger) == 15);
+
+ if (f.moduleEnv().features.simdWormhole &&
+ memcmp(control.bytes, trigger, sizeof(trigger)) == 0) {
+ switch (control.bytes[15]) {
+ case 0:
+ f.iter().setResult(
+ f.binarySimd128(v1, v2, false, wasm::SimdOp::MozWHSELFTEST));
+ return true;
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ case 1:
+ f.iter().setResult(
+ f.binarySimd128(v1, v2, false, wasm::SimdOp::MozWHPMADDUBSW));
+ return true;
+ case 2:
+ f.iter().setResult(
+ f.binarySimd128(v1, v2, false, wasm::SimdOp::MozWHPMADDWD));
+ return true;
+# endif
+ default:
+ return f.iter().fail("Unrecognized wormhole opcode");
+ }
+ }
+# endif
+
+ f.iter().setResult(f.shuffleSimd128(v1, v2, control));
+ return true;
+}
+
+static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
+ wasm::SimdOp splatOp) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoadSplat(Scalar::byteSize(viewType), &addr)) {
+ return false;
+ }
+
+ f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
+ return true;
+}
+
+static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoadExtend(&addr)) {
+ return false;
+ }
+
+ f.iter().setResult(f.loadExtendSimd128(addr, op));
+ return true;
+}
+
+static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
+ size_t numBytes) {
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoadSplat(numBytes, &addr)) {
+ return false;
+ }
+
+ f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
+ return true;
+}
+#endif
+
+static bool EmitBodyExprs(FunctionCompiler& f) {
+ if (!f.iter().readFunctionStart(f.funcIndex())) {
+ return false;
+ }
+
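+// CHECK(c) evaluates an emitter, bails out of EmitBodyExprs on failure, and
+// otherwise breaks out of the opcode switch to fetch the next opcode.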
+#define CHECK(c) \
+ if (!(c)) return false; \
+ break
+
+#ifdef ENABLE_WASM_SIMD_EXPERIMENTAL
+# define CHECK_SIMD_EXPERIMENTAL() (void)(0)
+#else
+# define CHECK_SIMD_EXPERIMENTAL() return f.iter().unrecognizedOpcode(&op)
+#endif
+
+ while (true) {
+ if (!f.mirGen().ensureBallast()) {
+ return false;
+ }
+
+ OpBytes op;
+ if (!f.iter().readOp(&op)) {
+ return false;
+ }
+
+ switch (op.b0) {
+ case uint16_t(Op::End):
+ if (!EmitEnd(f)) {
+ return false;
+ }
+ if (f.iter().controlStackEmpty()) {
+ return true;
+ }
+ break;
+
+ // Control opcodes
+ case uint16_t(Op::Unreachable):
+ CHECK(EmitUnreachable(f));
+ case uint16_t(Op::Nop):
+ CHECK(f.iter().readNop());
+ case uint16_t(Op::Block):
+ CHECK(EmitBlock(f));
+ case uint16_t(Op::Loop):
+ CHECK(EmitLoop(f));
+ case uint16_t(Op::If):
+ CHECK(EmitIf(f));
+ case uint16_t(Op::Else):
+ CHECK(EmitElse(f));
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case uint16_t(Op::Try):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitTry(f));
+ case uint16_t(Op::Catch):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitCatch(f));
+ case uint16_t(Op::Throw):
+ if (!f.moduleEnv().exceptionsEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitThrow(f));
+#endif
+ case uint16_t(Op::Br):
+ CHECK(EmitBr(f));
+ case uint16_t(Op::BrIf):
+ CHECK(EmitBrIf(f));
+ case uint16_t(Op::BrTable):
+ CHECK(EmitBrTable(f));
+ case uint16_t(Op::Return):
+ CHECK(EmitReturn(f));
+
+ // Calls
+ case uint16_t(Op::Call):
+ CHECK(EmitCall(f, /* asmJSFuncDef = */ false));
+ case uint16_t(Op::CallIndirect):
+ CHECK(EmitCallIndirect(f, /* oldStyle = */ false));
+
+ // Parametric operators
+ case uint16_t(Op::Drop):
+ CHECK(f.iter().readDrop());
+ case uint16_t(Op::SelectNumeric):
+ CHECK(EmitSelect(f, /*typed*/ false));
+ case uint16_t(Op::SelectTyped):
+ if (!f.moduleEnv().refTypesEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitSelect(f, /*typed*/ true));
+
+ // Locals and globals
+ case uint16_t(Op::GetLocal):
+ CHECK(EmitGetLocal(f));
+ case uint16_t(Op::SetLocal):
+ CHECK(EmitSetLocal(f));
+ case uint16_t(Op::TeeLocal):
+ CHECK(EmitTeeLocal(f));
+ case uint16_t(Op::GetGlobal):
+ CHECK(EmitGetGlobal(f));
+ case uint16_t(Op::SetGlobal):
+ CHECK(EmitSetGlobal(f));
+#ifdef ENABLE_WASM_REFTYPES
+ case uint16_t(Op::TableGet):
+ CHECK(EmitTableGet(f));
+ case uint16_t(Op::TableSet):
+ CHECK(EmitTableSet(f));
+#endif
+
+ // Memory-related operators
+ case uint16_t(Op::I32Load):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I64Load):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int64));
+ case uint16_t(Op::F32Load):
+ CHECK(EmitLoad(f, ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F64Load):
+ CHECK(EmitLoad(f, ValType::F64, Scalar::Float64));
+ case uint16_t(Op::I32Load8S):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Load8U):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Uint8));
+ case uint16_t(Op::I32Load16S):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Load16U):
+ CHECK(EmitLoad(f, ValType::I32, Scalar::Uint16));
+ case uint16_t(Op::I64Load8S):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Load8U):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Uint8));
+ case uint16_t(Op::I64Load16S):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Load16U):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Uint16));
+ case uint16_t(Op::I64Load32S):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Load32U):
+ CHECK(EmitLoad(f, ValType::I64, Scalar::Uint32));
+ case uint16_t(Op::I32Store):
+ CHECK(EmitStore(f, ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I64Store):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int64));
+ case uint16_t(Op::F32Store):
+ CHECK(EmitStore(f, ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F64Store):
+ CHECK(EmitStore(f, ValType::F64, Scalar::Float64));
+ case uint16_t(Op::I32Store8):
+ CHECK(EmitStore(f, ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Store16):
+ CHECK(EmitStore(f, ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I64Store8):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Store16):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Store32):
+ CHECK(EmitStore(f, ValType::I64, Scalar::Int32));
+ case uint16_t(Op::MemorySize):
+ CHECK(EmitMemorySize(f));
+ case uint16_t(Op::MemoryGrow):
+ CHECK(EmitMemoryGrow(f));
+
+ // Constants
+ case uint16_t(Op::I32Const):
+ CHECK(EmitI32Const(f));
+ case uint16_t(Op::I64Const):
+ CHECK(EmitI64Const(f));
+ case uint16_t(Op::F32Const):
+ CHECK(EmitF32Const(f));
+ case uint16_t(Op::F64Const):
+ CHECK(EmitF64Const(f));
+
+ // Comparison operators
+ case uint16_t(Op::I32Eqz):
+ CHECK(EmitConversion<MNot>(f, ValType::I32, ValType::I32));
+ case uint16_t(Op::I32Eq):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Eq, MCompare::Compare_Int32));
+ case uint16_t(Op::I32Ne):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Ne, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LtS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Lt, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LtU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Lt,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I32GtS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Gt, MCompare::Compare_Int32));
+ case uint16_t(Op::I32GtU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Gt,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I32LeS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Le, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LeU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Le,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I32GeS):
+ CHECK(
+ EmitComparison(f, ValType::I32, JSOp::Ge, MCompare::Compare_Int32));
+ case uint16_t(Op::I32GeU):
+ CHECK(EmitComparison(f, ValType::I32, JSOp::Ge,
+ MCompare::Compare_UInt32));
+ case uint16_t(Op::I64Eqz):
+ CHECK(EmitConversion<MNot>(f, ValType::I64, ValType::I32));
+ case uint16_t(Op::I64Eq):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Eq, MCompare::Compare_Int64));
+ case uint16_t(Op::I64Ne):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Ne, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LtS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Lt, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LtU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Lt,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::I64GtS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Gt, MCompare::Compare_Int64));
+ case uint16_t(Op::I64GtU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Gt,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::I64LeS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Le, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LeU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Le,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::I64GeS):
+ CHECK(
+ EmitComparison(f, ValType::I64, JSOp::Ge, MCompare::Compare_Int64));
+ case uint16_t(Op::I64GeU):
+ CHECK(EmitComparison(f, ValType::I64, JSOp::Ge,
+ MCompare::Compare_UInt64));
+ case uint16_t(Op::F32Eq):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Eq,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Ne):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Ne,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Lt):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Lt,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Gt):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Gt,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Le):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Le,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F32Ge):
+ CHECK(EmitComparison(f, ValType::F32, JSOp::Ge,
+ MCompare::Compare_Float32));
+ case uint16_t(Op::F64Eq):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Eq,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Ne):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Ne,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Lt):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Lt,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Gt):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Gt,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Le):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Le,
+ MCompare::Compare_Double));
+ case uint16_t(Op::F64Ge):
+ CHECK(EmitComparison(f, ValType::F64, JSOp::Ge,
+ MCompare::Compare_Double));
+
+ // Numeric operators
+ case uint16_t(Op::I32Clz):
+ CHECK(EmitUnaryWithType<MClz>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Ctz):
+ CHECK(EmitUnaryWithType<MCtz>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Popcnt):
+ CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Add):
+ CHECK(EmitAdd(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Sub):
+ CHECK(EmitSub(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Mul):
+ CHECK(EmitMul(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32DivS):
+ case uint16_t(Op::I32DivU):
+ CHECK(
+ EmitDiv(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32DivU));
+ case uint16_t(Op::I32RemS):
+ case uint16_t(Op::I32RemU):
+ CHECK(
+ EmitRem(f, ValType::I32, MIRType::Int32, Op(op.b0) == Op::I32RemU));
+ case uint16_t(Op::I32And):
+ CHECK(EmitBitwise<MBitAnd>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Or):
+ CHECK(EmitBitwise<MBitOr>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Xor):
+ CHECK(EmitBitwise<MBitXor>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Shl):
+ CHECK(EmitBitwise<MLsh>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32ShrS):
+ CHECK(EmitBitwise<MRsh>(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32ShrU):
+ CHECK(EmitUrsh(f, ValType::I32, MIRType::Int32));
+ case uint16_t(Op::I32Rotl):
+ case uint16_t(Op::I32Rotr):
+ CHECK(EmitRotate(f, ValType::I32, Op(op.b0) == Op::I32Rotl));
+ case uint16_t(Op::I64Clz):
+ CHECK(EmitUnaryWithType<MClz>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Ctz):
+ CHECK(EmitUnaryWithType<MCtz>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Popcnt):
+ CHECK(EmitUnaryWithType<MPopcnt>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Add):
+ CHECK(EmitAdd(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Sub):
+ CHECK(EmitSub(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Mul):
+ CHECK(EmitMul(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64DivS):
+ case uint16_t(Op::I64DivU):
+ CHECK(
+ EmitDiv(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64DivU));
+ case uint16_t(Op::I64RemS):
+ case uint16_t(Op::I64RemU):
+ CHECK(
+ EmitRem(f, ValType::I64, MIRType::Int64, Op(op.b0) == Op::I64RemU));
+ case uint16_t(Op::I64And):
+ CHECK(EmitBitwise<MBitAnd>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Or):
+ CHECK(EmitBitwise<MBitOr>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Xor):
+ CHECK(EmitBitwise<MBitXor>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Shl):
+ CHECK(EmitBitwise<MLsh>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64ShrS):
+ CHECK(EmitBitwise<MRsh>(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64ShrU):
+ CHECK(EmitUrsh(f, ValType::I64, MIRType::Int64));
+ case uint16_t(Op::I64Rotl):
+ case uint16_t(Op::I64Rotr):
+ CHECK(EmitRotate(f, ValType::I64, Op(op.b0) == Op::I64Rotl));
+ case uint16_t(Op::F32Abs):
+ CHECK(EmitUnaryWithType<MAbs>(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Neg):
+ CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Ceil):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilF));
+ case uint16_t(Op::F32Floor):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorF));
+ case uint16_t(Op::F32Trunc):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncF));
+ case uint16_t(Op::F32Nearest):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntF));
+ case uint16_t(Op::F32Sqrt):
+ CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Add):
+ CHECK(EmitAdd(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Sub):
+ CHECK(EmitSub(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Mul):
+ CHECK(EmitMul(f, ValType::F32, MIRType::Float32));
+ case uint16_t(Op::F32Div):
+ CHECK(EmitDiv(f, ValType::F32, MIRType::Float32,
+ /* isUnsigned = */ false));
+ case uint16_t(Op::F32Min):
+ case uint16_t(Op::F32Max):
+ CHECK(EmitMinMax(f, ValType::F32, MIRType::Float32,
+ Op(op.b0) == Op::F32Max));
+ case uint16_t(Op::F32CopySign):
+ CHECK(EmitCopySign(f, ValType::F32));
+ case uint16_t(Op::F64Abs):
+ CHECK(EmitUnaryWithType<MAbs>(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Neg):
+ CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Ceil):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigCeilD));
+ case uint16_t(Op::F64Floor):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigFloorD));
+ case uint16_t(Op::F64Trunc):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigTruncD));
+ case uint16_t(Op::F64Nearest):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigNearbyIntD));
+ case uint16_t(Op::F64Sqrt):
+ CHECK(EmitUnaryWithType<MSqrt>(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Add):
+ CHECK(EmitAdd(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Sub):
+ CHECK(EmitSub(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Mul):
+ CHECK(EmitMul(f, ValType::F64, MIRType::Double));
+ case uint16_t(Op::F64Div):
+ CHECK(EmitDiv(f, ValType::F64, MIRType::Double,
+ /* isUnsigned = */ false));
+ case uint16_t(Op::F64Min):
+ case uint16_t(Op::F64Max):
+ CHECK(EmitMinMax(f, ValType::F64, MIRType::Double,
+ Op(op.b0) == Op::F64Max));
+ case uint16_t(Op::F64CopySign):
+ CHECK(EmitCopySign(f, ValType::F64));
+
+ // Conversions
+ case uint16_t(Op::I32WrapI64):
+ CHECK(EmitConversion<MWrapInt64ToInt32>(f, ValType::I64, ValType::I32));
+ case uint16_t(Op::I32TruncSF32):
+ case uint16_t(Op::I32TruncUF32):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
+ Op(op.b0) == Op::I32TruncUF32, false));
+ case uint16_t(Op::I32TruncSF64):
+ case uint16_t(Op::I32TruncUF64):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
+ Op(op.b0) == Op::I32TruncUF64, false));
+ case uint16_t(Op::I64ExtendSI32):
+ case uint16_t(Op::I64ExtendUI32):
+ CHECK(EmitExtendI32(f, Op(op.b0) == Op::I64ExtendUI32));
+ case uint16_t(Op::I64TruncSF32):
+ case uint16_t(Op::I64TruncUF32):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
+ Op(op.b0) == Op::I64TruncUF32, false));
+ case uint16_t(Op::I64TruncSF64):
+ case uint16_t(Op::I64TruncUF64):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
+ Op(op.b0) == Op::I64TruncUF64, false));
+ case uint16_t(Op::F32ConvertSI32):
+ CHECK(EmitConversion<MToFloat32>(f, ValType::I32, ValType::F32));
+ case uint16_t(Op::F32ConvertUI32):
+ CHECK(EmitConversion<MWasmUnsignedToFloat32>(f, ValType::I32,
+ ValType::F32));
+ case uint16_t(Op::F32ConvertSI64):
+ case uint16_t(Op::F32ConvertUI64):
+ CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32,
+ Op(op.b0) == Op::F32ConvertUI64));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK(EmitConversion<MToFloat32>(f, ValType::F64, ValType::F32));
+ case uint16_t(Op::F64ConvertSI32):
+ CHECK(EmitConversion<MToDouble>(f, ValType::I32, ValType::F64));
+ case uint16_t(Op::F64ConvertUI32):
+ CHECK(EmitConversion<MWasmUnsignedToDouble>(f, ValType::I32,
+ ValType::F64));
+ case uint16_t(Op::F64ConvertSI64):
+ case uint16_t(Op::F64ConvertUI64):
+ CHECK(EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double,
+ Op(op.b0) == Op::F64ConvertUI64));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK(EmitConversion<MToDouble>(f, ValType::F32, ValType::F64));
+
+ // Reinterpretations
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK(EmitReinterpret(f, ValType::I32, ValType::F32, MIRType::Int32));
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK(EmitReinterpret(f, ValType::I64, ValType::F64, MIRType::Int64));
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK(EmitReinterpret(f, ValType::F32, ValType::I32, MIRType::Float32));
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK(EmitReinterpret(f, ValType::F64, ValType::I64, MIRType::Double));
+
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::RefEq):
+ if (!f.moduleEnv().gcTypesEnabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ CHECK(EmitComparison(f, RefType::extern_(), JSOp::Eq,
+ MCompare::Compare_RefOrNull));
+#endif
+#ifdef ENABLE_WASM_REFTYPES
+ case uint16_t(Op::RefFunc):
+ CHECK(EmitRefFunc(f));
+ case uint16_t(Op::RefNull):
+ CHECK(EmitRefNull(f));
+ case uint16_t(Op::RefIsNull):
+ CHECK(EmitRefIsNull(f));
+#endif
+
+ // Sign extensions
+ case uint16_t(Op::I32Extend8S):
+ CHECK(EmitSignExtend(f, 1, 4));
+ case uint16_t(Op::I32Extend16S):
+ CHECK(EmitSignExtend(f, 2, 4));
+ case uint16_t(Op::I64Extend8S):
+ CHECK(EmitSignExtend(f, 1, 8));
+ case uint16_t(Op::I64Extend16S):
+ CHECK(EmitSignExtend(f, 2, 8));
+ case uint16_t(Op::I64Extend32S):
+ CHECK(EmitSignExtend(f, 4, 8));
+
+    // GC operations
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::GcPrefix): {
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew):
+ case uint32_t(GcOp::StructGet):
+ case uint32_t(GcOp::StructSet):
+ case uint32_t(GcOp::StructNarrow):
+ // Not yet supported
+ return f.iter().unrecognizedOpcode(&op);
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ }
+#endif
+
+ // SIMD operations
+#ifdef ENABLE_WASM_SIMD
+ case uint16_t(Op::SimdPrefix): {
+ if (!f.moduleEnv().v128Enabled()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(SimdOp::V128Const):
+ CHECK(EmitConstSimd128(f));
+ case uint32_t(SimdOp::V128Load):
+ CHECK(EmitLoad(f, ValType::V128, Scalar::Simd128));
+ case uint32_t(SimdOp::V128Store):
+ CHECK(EmitStore(f, ValType::V128, Scalar::Simd128));
+ case uint32_t(SimdOp::V128And):
+ case uint32_t(SimdOp::V128Or):
+ case uint32_t(SimdOp::V128Xor):
+ case uint32_t(SimdOp::I8x16AvgrU):
+ case uint32_t(SimdOp::I16x8AvgrU):
+ case uint32_t(SimdOp::I8x16Add):
+ case uint32_t(SimdOp::I8x16AddSaturateS):
+ case uint32_t(SimdOp::I8x16AddSaturateU):
+ case uint32_t(SimdOp::I8x16MinS):
+ case uint32_t(SimdOp::I8x16MinU):
+ case uint32_t(SimdOp::I8x16MaxS):
+ case uint32_t(SimdOp::I8x16MaxU):
+ case uint32_t(SimdOp::I16x8Add):
+ case uint32_t(SimdOp::I16x8AddSaturateS):
+ case uint32_t(SimdOp::I16x8AddSaturateU):
+ case uint32_t(SimdOp::I16x8Mul):
+ case uint32_t(SimdOp::I16x8MinS):
+ case uint32_t(SimdOp::I16x8MinU):
+ case uint32_t(SimdOp::I16x8MaxS):
+ case uint32_t(SimdOp::I16x8MaxU):
+ case uint32_t(SimdOp::I32x4Add):
+ case uint32_t(SimdOp::I32x4Mul):
+ case uint32_t(SimdOp::I32x4MinS):
+ case uint32_t(SimdOp::I32x4MinU):
+ case uint32_t(SimdOp::I32x4MaxS):
+ case uint32_t(SimdOp::I32x4MaxU):
+ case uint32_t(SimdOp::I64x2Add):
+ case uint32_t(SimdOp::I64x2Mul):
+ case uint32_t(SimdOp::F32x4Add):
+ case uint32_t(SimdOp::F32x4Mul):
+ case uint32_t(SimdOp::F32x4Min):
+ case uint32_t(SimdOp::F32x4Max):
+ case uint32_t(SimdOp::F64x2Add):
+ case uint32_t(SimdOp::F64x2Mul):
+ case uint32_t(SimdOp::F64x2Min):
+ case uint32_t(SimdOp::F64x2Max):
+ case uint32_t(SimdOp::I8x16Eq):
+ case uint32_t(SimdOp::I8x16Ne):
+ case uint32_t(SimdOp::I16x8Eq):
+ case uint32_t(SimdOp::I16x8Ne):
+ case uint32_t(SimdOp::I32x4Eq):
+ case uint32_t(SimdOp::I32x4Ne):
+ case uint32_t(SimdOp::F32x4Eq):
+ case uint32_t(SimdOp::F32x4Ne):
+ case uint32_t(SimdOp::F64x2Eq):
+ case uint32_t(SimdOp::F64x2Ne):
+ case uint32_t(SimdOp::I32x4DotSI16x8):
+ CHECK(EmitBinarySimd128(f, /* commutative= */ true, SimdOp(op.b1)));
+ case uint32_t(SimdOp::V128AndNot):
+ case uint32_t(SimdOp::I8x16Sub):
+ case uint32_t(SimdOp::I8x16SubSaturateS):
+ case uint32_t(SimdOp::I8x16SubSaturateU):
+ case uint32_t(SimdOp::I16x8Sub):
+ case uint32_t(SimdOp::I16x8SubSaturateS):
+ case uint32_t(SimdOp::I16x8SubSaturateU):
+ case uint32_t(SimdOp::I32x4Sub):
+ case uint32_t(SimdOp::I64x2Sub):
+ case uint32_t(SimdOp::F32x4Sub):
+ case uint32_t(SimdOp::F32x4Div):
+ case uint32_t(SimdOp::F64x2Sub):
+ case uint32_t(SimdOp::F64x2Div):
+ case uint32_t(SimdOp::I8x16NarrowSI16x8):
+ case uint32_t(SimdOp::I8x16NarrowUI16x8):
+ case uint32_t(SimdOp::I16x8NarrowSI32x4):
+ case uint32_t(SimdOp::I16x8NarrowUI32x4):
+ case uint32_t(SimdOp::I8x16LtS):
+ case uint32_t(SimdOp::I8x16LtU):
+ case uint32_t(SimdOp::I8x16GtS):
+ case uint32_t(SimdOp::I8x16GtU):
+ case uint32_t(SimdOp::I8x16LeS):
+ case uint32_t(SimdOp::I8x16LeU):
+ case uint32_t(SimdOp::I8x16GeS):
+ case uint32_t(SimdOp::I8x16GeU):
+ case uint32_t(SimdOp::I16x8LtS):
+ case uint32_t(SimdOp::I16x8LtU):
+ case uint32_t(SimdOp::I16x8GtS):
+ case uint32_t(SimdOp::I16x8GtU):
+ case uint32_t(SimdOp::I16x8LeS):
+ case uint32_t(SimdOp::I16x8LeU):
+ case uint32_t(SimdOp::I16x8GeS):
+ case uint32_t(SimdOp::I16x8GeU):
+ case uint32_t(SimdOp::I32x4LtS):
+ case uint32_t(SimdOp::I32x4LtU):
+ case uint32_t(SimdOp::I32x4GtS):
+ case uint32_t(SimdOp::I32x4GtU):
+ case uint32_t(SimdOp::I32x4LeS):
+ case uint32_t(SimdOp::I32x4LeU):
+ case uint32_t(SimdOp::I32x4GeS):
+ case uint32_t(SimdOp::I32x4GeU):
+ case uint32_t(SimdOp::F32x4Lt):
+ case uint32_t(SimdOp::F32x4Gt):
+ case uint32_t(SimdOp::F32x4Le):
+ case uint32_t(SimdOp::F32x4Ge):
+ case uint32_t(SimdOp::F64x2Lt):
+ case uint32_t(SimdOp::F64x2Gt):
+ case uint32_t(SimdOp::F64x2Le):
+ case uint32_t(SimdOp::F64x2Ge):
+ case uint32_t(SimdOp::V8x16Swizzle):
+ case uint32_t(SimdOp::F32x4PMax):
+ case uint32_t(SimdOp::F32x4PMin):
+ case uint32_t(SimdOp::F64x2PMax):
+ case uint32_t(SimdOp::F64x2PMin):
+ CHECK(
+ EmitBinarySimd128(f, /* commutative= */ false, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16Splat):
+ case uint32_t(SimdOp::I16x8Splat):
+ case uint32_t(SimdOp::I32x4Splat):
+ CHECK(EmitSplatSimd128(f, ValType::I32, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I64x2Splat):
+ CHECK(EmitSplatSimd128(f, ValType::I64, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F32x4Splat):
+ CHECK(EmitSplatSimd128(f, ValType::F32, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F64x2Splat):
+ CHECK(EmitSplatSimd128(f, ValType::F64, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16Neg):
+ case uint32_t(SimdOp::I16x8Neg):
+ case uint32_t(SimdOp::I16x8WidenLowSI8x16):
+ case uint32_t(SimdOp::I16x8WidenHighSI8x16):
+ case uint32_t(SimdOp::I16x8WidenLowUI8x16):
+ case uint32_t(SimdOp::I16x8WidenHighUI8x16):
+ case uint32_t(SimdOp::I32x4Neg):
+ case uint32_t(SimdOp::I32x4WidenLowSI16x8):
+ case uint32_t(SimdOp::I32x4WidenHighSI16x8):
+ case uint32_t(SimdOp::I32x4WidenLowUI16x8):
+ case uint32_t(SimdOp::I32x4WidenHighUI16x8):
+ case uint32_t(SimdOp::I32x4TruncSSatF32x4):
+ case uint32_t(SimdOp::I32x4TruncUSatF32x4):
+ case uint32_t(SimdOp::I64x2Neg):
+ case uint32_t(SimdOp::F32x4Abs):
+ case uint32_t(SimdOp::F32x4Neg):
+ case uint32_t(SimdOp::F32x4Sqrt):
+ case uint32_t(SimdOp::F32x4ConvertSI32x4):
+ case uint32_t(SimdOp::F32x4ConvertUI32x4):
+ case uint32_t(SimdOp::F64x2Abs):
+ case uint32_t(SimdOp::F64x2Neg):
+ case uint32_t(SimdOp::F64x2Sqrt):
+ case uint32_t(SimdOp::V128Not):
+ case uint32_t(SimdOp::I8x16Abs):
+ case uint32_t(SimdOp::I16x8Abs):
+ case uint32_t(SimdOp::I32x4Abs):
+ case uint32_t(SimdOp::F32x4Ceil):
+ case uint32_t(SimdOp::F32x4Floor):
+ case uint32_t(SimdOp::F32x4Trunc):
+ case uint32_t(SimdOp::F32x4Nearest):
+ case uint32_t(SimdOp::F64x2Ceil):
+ case uint32_t(SimdOp::F64x2Floor):
+ case uint32_t(SimdOp::F64x2Trunc):
+ case uint32_t(SimdOp::F64x2Nearest):
+ CHECK(EmitUnarySimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16AnyTrue):
+ case uint32_t(SimdOp::I16x8AnyTrue):
+ case uint32_t(SimdOp::I32x4AnyTrue):
+ case uint32_t(SimdOp::I8x16AllTrue):
+ case uint32_t(SimdOp::I16x8AllTrue):
+ case uint32_t(SimdOp::I32x4AllTrue):
+ case uint32_t(SimdOp::I8x16Bitmask):
+ case uint32_t(SimdOp::I16x8Bitmask):
+ case uint32_t(SimdOp::I32x4Bitmask):
+ CHECK(EmitReduceSimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16Shl):
+ case uint32_t(SimdOp::I8x16ShrS):
+ case uint32_t(SimdOp::I8x16ShrU):
+ case uint32_t(SimdOp::I16x8Shl):
+ case uint32_t(SimdOp::I16x8ShrS):
+ case uint32_t(SimdOp::I16x8ShrU):
+ case uint32_t(SimdOp::I32x4Shl):
+ case uint32_t(SimdOp::I32x4ShrS):
+ case uint32_t(SimdOp::I32x4ShrU):
+ case uint32_t(SimdOp::I64x2Shl):
+ case uint32_t(SimdOp::I64x2ShrS):
+ case uint32_t(SimdOp::I64x2ShrU):
+ CHECK(EmitShiftSimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16ExtractLaneS):
+ case uint32_t(SimdOp::I8x16ExtractLaneU):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I16x8ExtractLaneS):
+ case uint32_t(SimdOp::I16x8ExtractLaneU):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I32x4ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I64x2ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F32x4ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F64x2ExtractLane):
+ CHECK(EmitExtractLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I8x16ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 16, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I16x8ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 8, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I32x4ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::I64x2ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::I64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F32x4ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::F32, 4, SimdOp(op.b1)));
+ case uint32_t(SimdOp::F64x2ReplaceLane):
+ CHECK(EmitReplaceLaneSimd128(f, ValType::F64, 2, SimdOp(op.b1)));
+ case uint32_t(SimdOp::V128Bitselect):
+ CHECK(EmitBitselectSimd128(f));
+ case uint32_t(SimdOp::V8x16Shuffle):
+ CHECK(EmitShuffleSimd128(f));
+ case uint32_t(SimdOp::V8x16LoadSplat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Uint8, SimdOp::I8x16Splat));
+ case uint32_t(SimdOp::V16x8LoadSplat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Uint16, SimdOp::I16x8Splat));
+ case uint32_t(SimdOp::V32x4LoadSplat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Float32, SimdOp::I32x4Splat));
+ case uint32_t(SimdOp::V64x2LoadSplat):
+ CHECK(EmitLoadSplatSimd128(f, Scalar::Float64, SimdOp::I64x2Splat));
+ case uint32_t(SimdOp::I16x8LoadS8x8):
+ case uint32_t(SimdOp::I16x8LoadU8x8):
+ case uint32_t(SimdOp::I32x4LoadS16x4):
+ case uint32_t(SimdOp::I32x4LoadU16x4):
+ case uint32_t(SimdOp::I64x2LoadS32x2):
+ case uint32_t(SimdOp::I64x2LoadU32x2):
+ CHECK(EmitLoadExtendSimd128(f, SimdOp(op.b1)));
+ case uint32_t(SimdOp::V128Load32Zero):
+ CHECK(EmitLoadZeroSimd128(f, Scalar::Float32, 4));
+ case uint32_t(SimdOp::V128Load64Zero):
+ CHECK(EmitLoadZeroSimd128(f, Scalar::Float64, 8));
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ } // switch (op.b1)
+ break;
+ }
+#endif
+
+ // Miscellaneous operations
+ case uint16_t(Op::MiscPrefix): {
+ switch (op.b1) {
+ case uint32_t(MiscOp::I32TruncSSatF32):
+ case uint32_t(MiscOp::I32TruncUSatF32):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I32,
+ MiscOp(op.b1) == MiscOp::I32TruncUSatF32, true));
+ case uint32_t(MiscOp::I32TruncSSatF64):
+ case uint32_t(MiscOp::I32TruncUSatF64):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I32,
+ MiscOp(op.b1) == MiscOp::I32TruncUSatF64, true));
+ case uint32_t(MiscOp::I64TruncSSatF32):
+ case uint32_t(MiscOp::I64TruncUSatF32):
+ CHECK(EmitTruncate(f, ValType::F32, ValType::I64,
+ MiscOp(op.b1) == MiscOp::I64TruncUSatF32, true));
+ case uint32_t(MiscOp::I64TruncSSatF64):
+ case uint32_t(MiscOp::I64TruncUSatF64):
+ CHECK(EmitTruncate(f, ValType::F64, ValType::I64,
+ MiscOp(op.b1) == MiscOp::I64TruncUSatF64, true));
+ case uint32_t(MiscOp::MemCopy):
+ CHECK(EmitMemCopy(f));
+ case uint32_t(MiscOp::DataDrop):
+ CHECK(EmitDataOrElemDrop(f, /*isData=*/true));
+ case uint32_t(MiscOp::MemFill):
+ CHECK(EmitMemFill(f));
+ case uint32_t(MiscOp::MemInit):
+ CHECK(EmitMemOrTableInit(f, /*isMem=*/true));
+ case uint32_t(MiscOp::TableCopy):
+ CHECK(EmitTableCopy(f));
+ case uint32_t(MiscOp::ElemDrop):
+ CHECK(EmitDataOrElemDrop(f, /*isData=*/false));
+ case uint32_t(MiscOp::TableInit):
+ CHECK(EmitMemOrTableInit(f, /*isMem=*/false));
+#ifdef ENABLE_WASM_REFTYPES
+ case uint32_t(MiscOp::TableFill):
+ CHECK(EmitTableFill(f));
+ case uint32_t(MiscOp::TableGrow):
+ CHECK(EmitTableGrow(f));
+ case uint32_t(MiscOp::TableSize):
+ CHECK(EmitTableSize(f));
+#endif
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ // Thread operations
+ case uint16_t(Op::ThreadPrefix): {
+ if (f.moduleEnv().sharedMemoryEnabled() == Shareable::False) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(ThreadOp::Wake):
+ CHECK(EmitWake(f));
+
+ case uint32_t(ThreadOp::I32Wait):
+ CHECK(EmitWait(f, ValType::I32, 4));
+ case uint32_t(ThreadOp::I64Wait):
+ CHECK(EmitWait(f, ValType::I64, 8));
+ case uint32_t(ThreadOp::Fence):
+ CHECK(EmitFence(f));
+
+ case uint32_t(ThreadOp::I32AtomicLoad):
+ CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicLoad):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicLoad8U):
+ CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicLoad16U):
+ CHECK(EmitAtomicLoad(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad8U):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicLoad16U):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicLoad32U):
+ CHECK(EmitAtomicLoad(f, ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicStore):
+ CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicStore):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicStore8U):
+ CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicStore16U):
+ CHECK(EmitAtomicStore(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore8U):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicStore16U):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicStore32U):
+ CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicAdd):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I32AtomicAdd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchAddOp));
+ case uint32_t(ThreadOp::I64AtomicAdd32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchAddOp));
+
+ case uint32_t(ThreadOp::I32AtomicSub):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I32AtomicSub16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchSubOp));
+ case uint32_t(ThreadOp::I64AtomicSub32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchSubOp));
+
+ case uint32_t(ThreadOp::I32AtomicAnd):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I32AtomicAnd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchAndOp));
+ case uint32_t(ThreadOp::I64AtomicAnd32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchAndOp));
+
+ case uint32_t(ThreadOp::I32AtomicOr):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr8U):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I32AtomicOr16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr8U):
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchOrOp));
+ case uint32_t(ThreadOp::I64AtomicOr32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchOrOp));
+
+ case uint32_t(ThreadOp::I32AtomicXor):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor8U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I32AtomicXor16U):
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor8U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor16U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
+ AtomicFetchXorOp));
+ case uint32_t(ThreadOp::I64AtomicXor32U):
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
+ AtomicFetchXorOp));
+
+ case uint32_t(ThreadOp::I32AtomicXchg):
+ CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicXchg):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicXchg8U):
+ CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicXchg16U):
+ CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg8U):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicXchg16U):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicXchg32U):
+ CHECK(EmitAtomicXchg(f, ValType::I64, Scalar::Uint32));
+
+ case uint32_t(ThreadOp::I32AtomicCmpXchg):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Int32));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Int64));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg8U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint8));
+ case uint32_t(ThreadOp::I32AtomicCmpXchg16U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I32, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg8U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint8));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg16U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint16));
+ case uint32_t(ThreadOp::I64AtomicCmpXchg32U):
+ CHECK(EmitAtomicCmpXchg(f, ValType::I64, Scalar::Uint32));
+
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ // asm.js-specific operators
+ case uint16_t(Op::MozPrefix): {
+ if (!f.moduleEnv().isAsmJS()) {
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(MozOp::TeeGlobal):
+ CHECK(EmitTeeGlobal(f));
+ case uint32_t(MozOp::I32Min):
+ case uint32_t(MozOp::I32Max):
+ CHECK(EmitMinMax(f, ValType::I32, MIRType::Int32,
+ MozOp(op.b1) == MozOp::I32Max));
+ case uint32_t(MozOp::I32Neg):
+ CHECK(EmitUnaryWithType<MWasmNeg>(f, ValType::I32, MIRType::Int32));
+ case uint32_t(MozOp::I32BitNot):
+ CHECK(EmitBitNot(f, ValType::I32));
+ case uint32_t(MozOp::I32Abs):
+ CHECK(EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32));
+ case uint32_t(MozOp::F32TeeStoreF64):
+ CHECK(EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64));
+ case uint32_t(MozOp::F64TeeStoreF32):
+ CHECK(EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32));
+ case uint32_t(MozOp::I32TeeStore8):
+ CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int8));
+ case uint32_t(MozOp::I32TeeStore16):
+ CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int16));
+ case uint32_t(MozOp::I64TeeStore8):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int8));
+ case uint32_t(MozOp::I64TeeStore16):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int16));
+ case uint32_t(MozOp::I64TeeStore32):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int32));
+ case uint32_t(MozOp::I32TeeStore):
+ CHECK(EmitTeeStore(f, ValType::I32, Scalar::Int32));
+ case uint32_t(MozOp::I64TeeStore):
+ CHECK(EmitTeeStore(f, ValType::I64, Scalar::Int64));
+ case uint32_t(MozOp::F32TeeStore):
+ CHECK(EmitTeeStore(f, ValType::F32, Scalar::Float32));
+ case uint32_t(MozOp::F64TeeStore):
+ CHECK(EmitTeeStore(f, ValType::F64, Scalar::Float64));
+ case uint32_t(MozOp::F64Mod):
+ CHECK(EmitRem(f, ValType::F64, MIRType::Double,
+ /* isUnsigned = */ false));
+ case uint32_t(MozOp::F64Sin):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigSinD));
+ case uint32_t(MozOp::F64Cos):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigCosD));
+ case uint32_t(MozOp::F64Tan):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigTanD));
+ case uint32_t(MozOp::F64Asin):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigASinD));
+ case uint32_t(MozOp::F64Acos):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigACosD));
+ case uint32_t(MozOp::F64Atan):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigATanD));
+ case uint32_t(MozOp::F64Exp):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigExpD));
+ case uint32_t(MozOp::F64Log):
+ CHECK(EmitUnaryMathBuiltinCall(f, SASigLogD));
+ case uint32_t(MozOp::F64Pow):
+ CHECK(EmitBinaryMathBuiltinCall(f, SASigPowD));
+ case uint32_t(MozOp::F64Atan2):
+ CHECK(EmitBinaryMathBuiltinCall(f, SASigATan2D));
+ case uint32_t(MozOp::OldCallDirect):
+ CHECK(EmitCall(f, /* asmJSFuncDef = */ true));
+ case uint32_t(MozOp::OldCallIndirect):
+ CHECK(EmitCallIndirect(f, /* oldStyle = */ true));
+
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ break;
+ }
+
+ default:
+ return f.iter().unrecognizedOpcode(&op);
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+
+#undef CHECK
+#undef CHECK_SIMD_EXPERIMENTAL
+}
+
+bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo,
+ const FuncCompileInputVector& inputs,
+ CompiledCode* code, UniqueChars* error) {
+ MOZ_ASSERT(compilerEnv.tier() == Tier::Optimized);
+ MOZ_ASSERT(compilerEnv.debug() == DebugEnabled::False);
+ MOZ_ASSERT(compilerEnv.optimizedBackend() == OptimizedBackend::Ion);
+
+ TempAllocator alloc(&lifo);
+ JitContext jitContext(&alloc);
+ MOZ_ASSERT(IsCompilingWasm());
+ WasmMacroAssembler masm(alloc, moduleEnv);
+
+ // Swap in already-allocated empty vectors to avoid malloc/free.
+ MOZ_ASSERT(code->empty());
+ if (!code->swap(masm)) {
+ return false;
+ }
+
+ // Create a description of the stack layout created by GenerateTrapExit().
+ MachineState trapExitLayout;
+ size_t trapExitLayoutNumWords;
+ GenerateTrapExitMachineState(&trapExitLayout, &trapExitLayoutNumWords);
+
+ for (const FuncCompileInput& func : inputs) {
+ JitSpewCont(JitSpew_Codegen, "\n");
+ JitSpew(JitSpew_Codegen,
+ "# ================================"
+ "==================================");
+ JitSpew(JitSpew_Codegen, "# ==");
+ JitSpew(JitSpew_Codegen,
+ "# wasm::IonCompileFunctions: starting on function index %d",
+ (int)func.index);
+
+ Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+ // Build the local types vector.
+
+ const FuncType& funcType = *moduleEnv.funcs[func.index].type;
+ const TypeIdDesc& funcTypeId = *moduleEnv.funcs[func.index].typeId;
+ ValTypeVector locals;
+ if (!locals.appendAll(funcType.args())) {
+ return false;
+ }
+ if (!DecodeLocalEntries(d, moduleEnv.types, moduleEnv.features, &locals)) {
+ return false;
+ }
+
+ // Set up for Ion compilation.
+
+ const JitCompileOptions options;
+ MIRGraph graph(&alloc);
+ CompileInfo compileInfo(locals.length());
+ MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
+ IonOptimizations.get(OptimizationLevel::Wasm));
+ mir.initMinWasmHeapLength(moduleEnv.minMemoryLength);
+
+ // Build MIR graph
+ {
+ FunctionCompiler f(moduleEnv, d, func, locals, mir);
+ if (!f.init()) {
+ return false;
+ }
+
+ if (!f.startBlock()) {
+ return false;
+ }
+
+ if (!EmitBodyExprs(f)) {
+ return false;
+ }
+
+ f.finish();
+ }
+
+ // Compile MIR graph
+ {
+ jit::SpewBeginWasmFunction(&mir, func.index);
+ jit::AutoSpewEndFunction spewEndFunction(&mir);
+
+ if (!OptimizeMIR(&mir)) {
+ return false;
+ }
+
+ LIRGraph* lir = GenerateLIR(&mir);
+ if (!lir) {
+ return false;
+ }
+
+ CodeGenerator codegen(&mir, lir, &masm);
+
+ BytecodeOffset prologueTrapOffset(func.lineOrBytecode);
+ FuncOffsets offsets;
+ ArgTypeVector args(funcType);
+ if (!codegen.generateWasm(funcTypeId, prologueTrapOffset, args,
+ trapExitLayout, trapExitLayoutNumWords,
+ &offsets, &code->stackMaps)) {
+ return false;
+ }
+
+ if (!code->codeRanges.emplaceBack(func.index, func.lineOrBytecode,
+ offsets)) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen,
+ "# wasm::IonCompileFunctions: completed function index %d",
+ (int)func.index);
+ JitSpew(JitSpew_Codegen, "# ==");
+ JitSpew(JitSpew_Codegen,
+ "# ================================"
+ "==================================");
+ JitSpewCont(JitSpew_Codegen, "\n");
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
+
+bool js::wasm::IonPlatformSupport() {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
+ defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ return true;
+#else
+ return false;
+#endif
+}
diff --git a/js/src/wasm/WasmIonCompile.h b/js/src/wasm/WasmIonCompile.h
new file mode 100644
index 0000000000..c5c916d667
--- /dev/null
+++ b/js/src/wasm/WasmIonCompile.h
@@ -0,0 +1,43 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_ion_compile_h
+#define wasm_ion_compile_h
+
+#include "mozilla/Attributes.h"
+
+#include "wasm/WasmGenerator.h"
+
+namespace js {
+namespace wasm {
+
+// Return whether IonCompileFunctions() can generate code on the current
+// device. Usually you do *not* want this; you want IonAvailable().
+[[nodiscard]] bool IonPlatformSupport();
+
+// Generates very fast code at the expense of compilation time.
+[[nodiscard]] bool IonCompileFunctions(const ModuleEnvironment& moduleEnv,
+ const CompilerEnvironment& compilerEnv,
+ LifoAlloc& lifo,
+ const FuncCompileInputVector& inputs,
+ CompiledCode* code, UniqueChars* error);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_ion_compile_h
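For orientation, the sketch below shows how a compilation driver might route a batch of functions to this entry point, assuming only the CompilerEnvironment accessors that IonCompileFunctions() itself asserts (tier() and optimizedBackend()); the helper name and the fallthrough handling are hypothetical, not the real generator code.

    // Hypothetical dispatch sketch; the real driver also handles the
    // Baseline and Cranelift backends.
    static bool CompileFunctionBatch(const ModuleEnvironment& moduleEnv,
                                     const CompilerEnvironment& compilerEnv,
                                     LifoAlloc& lifo,
                                     const FuncCompileInputVector& inputs,
                                     CompiledCode* code, UniqueChars* error) {
      if (compilerEnv.tier() == Tier::Optimized &&
          compilerEnv.optimizedBackend() == OptimizedBackend::Ion) {
        return IonCompileFunctions(moduleEnv, compilerEnv, lifo, inputs, code,
                                   error);
      }
      // Other batches would be routed to their own backend entry points.
      return false;
    }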
diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
new file mode 100644
index 0000000000..6e84445083
--- /dev/null
+++ b/js/src/wasm/WasmJS.cpp
@@ -0,0 +1,4485 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmJS.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/RangedPtr.h"
+
+#include <algorithm>
+
+#include "gc/FreeOp.h"
+#include "jit/AtomicOperations.h"
+#include "jit/JitOptions.h"
+#include "jit/JitRuntime.h"
+#include "jit/Simulator.h"
+#if defined(JS_CODEGEN_X64) // Assembler::HasSSE41
+# include "jit/x64/Assembler-x64.h"
+# include "jit/x86-shared/Architecture-x86-shared.h"
+# include "jit/x86-shared/Assembler-x86-shared.h"
+#endif
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "js/Printf.h"
+#include "js/PropertySpec.h" // JS_{PS,FN}{,_END}
+#include "util/StringBuffer.h"
+#include "util/Text.h"
+#include "vm/ErrorObject.h"
+#include "vm/FunctionFlags.h" // js::FunctionFlags
+#include "vm/GlobalObject.h" // js::GlobalObject
+#include "vm/HelperThreadState.h" // js::PromiseHelperTask
+#include "vm/Interpreter.h"
+#include "vm/PlainObject.h" // js::PlainObject
+#include "vm/PromiseObject.h" // js::PromiseObject
+#include "vm/StringType.h"
+#include "vm/Warnings.h" // js::WarnNumberASCII
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmCraneliftCompile.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmProcess.h"
+#include "wasm/WasmSignalHandlers.h"
+#include "wasm/WasmStubs.h"
+#include "wasm/WasmValidate.h"
+
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::Nothing;
+using mozilla::RangedPtr;
+using mozilla::Span;
+
+extern mozilla::Atomic<bool> fuzzingSafe;
+
+// About the fuzzer intercession points: if fuzzing is enabled and only a
+// single compiler has been selected, we disable any features that are not
+// supported by that compiler. This is strictly a concession to the fuzzer
+// infrastructure.
+
+static inline bool IsFuzzing() {
+#ifdef FUZZING
+ return true;
+#else
+ return fuzzingSafe;
+#endif
+}
+
+static inline bool IsFuzzingIon(JSContext* cx) {
+ return IsFuzzing() && !cx->options().wasmBaseline() &&
+ cx->options().wasmIon() && !cx->options().wasmCranelift();
+}
+
+static inline bool IsFuzzingCranelift(JSContext* cx) {
+ return IsFuzzing() && !cx->options().wasmBaseline() &&
+ !cx->options().wasmIon() && cx->options().wasmCranelift();
+}
+
+// These functions read flags and apply fuzzing intercession policies. Never go
+// directly to the flags in the code below; always go via these accessors.
+
+static inline bool WasmMultiValueFlag(JSContext* cx) {
+#ifdef ENABLE_WASM_MULTI_VALUE
+ return cx->options().wasmMultiValue();
+#else
+ return false;
+#endif
+}
+
+static inline bool WasmSimdFlag(JSContext* cx) {
+#ifdef ENABLE_WASM_SIMD
+ if (IsFuzzingCranelift(cx)) {
+ return false;
+ }
+ return cx->options().wasmSimd() && js::jit::JitSupportsWasmSimd();
+#else
+ return false;
+#endif
+}
+
+static inline bool WasmSimdWormholeFlag(JSContext* cx) {
+#ifdef ENABLE_WASM_SIMD_WORMHOLE
+ return cx->options().wasmSimdWormhole();
+#else
+ return false;
+#endif
+}
+
+static inline bool WasmReftypesFlag(JSContext* cx) {
+#ifdef ENABLE_WASM_REFTYPES
+ return cx->options().wasmReftypes();
+#else
+ return false;
+#endif
+}
+
+static inline bool WasmFunctionReferencesFlag(JSContext* cx) {
+ if (IsFuzzingIon(cx) || IsFuzzingCranelift(cx)) {
+ return false;
+ }
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ return WasmReftypesFlag(cx) && cx->options().wasmFunctionReferences();
+#else
+ return false;
+#endif
+}
+
+static inline bool WasmGcFlag(JSContext* cx) {
+ if (IsFuzzingIon(cx) || IsFuzzingCranelift(cx)) {
+ return false;
+ }
+#ifdef ENABLE_WASM_GC
+ return WasmFunctionReferencesFlag(cx) && cx->options().wasmGc();
+#else
+ return false;
+#endif
+}
+
+static inline bool WasmThreadsFlag(JSContext* cx) {
+ return cx->realm() &&
+ cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled();
+}
+
+static inline bool WasmExceptionsFlag(JSContext* cx) {
+#ifdef ENABLE_WASM_EXCEPTIONS
+ return cx->options().wasmExceptions();
+#else
+ return false;
+#endif
+}
+
+static inline bool WasmDebuggerActive(JSContext* cx) {
+ if (IsFuzzingIon(cx) || IsFuzzingCranelift(cx)) {
+ return false;
+ }
+ return cx->realm() && cx->realm()->debuggerObservesAsmJS();
+}
+
+/*
+ * [SMDOC] Compiler and feature selection; compiler and feature availability.
+ *
+ * In order to make the computation of whether a wasm feature or wasm compiler
+ * is available predictable, we have established some rules, and implemented
+ * those rules.
+ *
+ * Code elsewhere should use the predicates below to test for features and
+ * compilers; it should never try to compute feature and compiler availability
+ * in other ways.
+ *
+ * At the outset, there is a set of selected compilers C containing at most one
+ * baseline compiler [*] and at most one optimizing compiler [**], and a set of
+ * selected features F. These selections come from defaults and from overrides
+ * by command line switches in the shell and javascript.option.wasm_X in the
+ * browser. Defaults for both features and compilers may be platform specific,
+ * for example, some compilers may not be available on some platforms because
+ * they do not support the architecture at all or they do not support features
+ * that must be enabled by default on the platform.
+ *
+ * [*] Currently we have only one, "baseline" aka "Rabaldr", but other
+ * implementations have additional baseline translators, eg from wasm
+ * bytecode to an internal code processed by an interpreter.
+ *
+ * [**] Currently we have two, "ion" aka "Baldr", and "Cranelift".
+ *
+ *
+ * Compiler availability:
+ *
+ * The set of features F induces a set of available compilers A: these are the
+ * compilers that each support all the features in F. (Some of these compilers
+ * may not be in the set C.)
+ *
+ * The sets C and A are intersected, yielding a set of enabled compilers E.
+ * Notably, the set E may be empty, in which case wasm is effectively disabled
+ * (though the WebAssembly object is still present in the global environment).
+ *
+ * An important consequence is that selecting a feature that is not supported by
+ * a particular compiler disables that compiler completely -- there is no notion
+ * of a compiler being available but suddenly failing when an unsupported
+ * feature is used by a program. If a compiler is available, it supports all
+ * the features that have been selected.
+ *
+ * Equally important, a feature cannot be enabled by default on a platform if
+ * the feature is not supported by all the compilers we wish to have enabled by
+ * default on the platform. We MUST disable by default any features on a
+ * platform that are not supported by all the compilers on the platform.
+ *
+ * As an example:
+ *
+ * On ARM64 the default compilers are Baseline and Cranelift. Say Cranelift
+ * does not support feature X. Thus X cannot be enabled by default on ARM64.
+ * However, X support can be compiled-in to SpiderMonkey, and the user can opt
+ * to enable X. Doing so will disable Cranelift.
+ *
+ * In contrast, X can be enabled by default on x64, where the default
+ * compilers are Baseline and Ion, both of which support X.
+ *
+ * A subtlety is worth noting: on x64, enabling Cranelift (thus disabling Ion)
+ * will not disable X. Instead, the presence of X in the selected feature set
+ * will disable Cranelift, leaving only Baseline. This follows from the logic
+ * described above.
+ *
+ * In a shell build, the testing functions wasmCompilersPresent,
+ * wasmCompileMode, wasmCraneliftDisabledByFeatures, and
+ * wasmIonDisabledByFeatures can be used to probe compiler availability and the
+ * reasons for a compiler being unavailable.
+ *
+ *
+ * Feature availability:
+ *
+ * A feature is available if it is selected and there is at least one available
+ * compiler that implements it.
+ *
+ * For example, --wasm-gc selects the GC feature, and if Baseline is available
+ * then the feature is available.
+ *
+ * In a shell build, there are per-feature testing functions (of the form
+ * wasmFeatureEnabled) to probe whether specific features are available.
+ */
+
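The SMDOC above reduces to simple set arithmetic. Below is a minimal sketch of that model; the Compiler/Feature enums and the Selected()/Supports() helpers are toy stand-ins invented for illustration, not SpiderMonkey APIs, and the capability data is made up.

    // Illustrative sketch only: all names and data here are hypothetical.
    #include <algorithm>
    #include <initializer_list>
    #include <vector>

    enum class Compiler { Baseline, Ion, Cranelift };
    enum class Feature { Gc, Simd, Exceptions };

    // Toy stand-ins for pref/flag reads and per-compiler capabilities.
    static bool Selected(Compiler c) { return c != Compiler::Cranelift; }
    static bool Supports(Compiler c, Feature f) {
      return c == Compiler::Baseline || f == Feature::Simd;
    }

    // A selected compiler is enabled iff it supports every selected feature;
    // selecting an unsupported feature drops that compiler entirely.
    static bool CompilerEnabled(Compiler c, const std::vector<Feature>& F) {
      return Selected(c) && std::all_of(F.begin(), F.end(), [&](Feature f) {
               return Supports(c, f);
             });
    }

    // A feature is available iff it was selected and at least one enabled
    // compiler implements it.
    static bool FeatureAvailable(Feature f, const std::vector<Feature>& F) {
      if (std::find(F.begin(), F.end(), f) == F.end()) {
        return false;
      }
      for (Compiler c :
           {Compiler::Baseline, Compiler::Ion, Compiler::Cranelift}) {
        if (CompilerEnabled(c, F) && Supports(c, f)) {
          return true;
        }
      }
      return false;
    }

With F = {Gc}, this sketch leaves only Baseline enabled while GC remains available, mirroring the example above in which selecting a feature disables a compiler that cannot handle it rather than failing at run time.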
+// Compiler availability predicates. These must be kept in sync with the
+// feature predicates in the next section below.
+//
+// These can't call the feature predicates since the feature predicates call
+// back to these predicates. So there will be a small amount of duplicated
+// logic here, but as compilers reach feature parity that duplication will go
+// away.
+//
+// There's a static precedence order between the optimizing compilers. This
+// order currently ranks Cranelift over Ion on all platforms because Cranelift
+// is disabled by default on all platforms: anyone who has enabled Cranelift
+// will wish to use it instead of Ion.
+//
+// The precedence order is implemented by guards in IonAvailable() and
+// CraneliftAvailable(). We expect that it will become more complex as the
+// default settings change. But it should remain static.
+
+bool wasm::BaselineAvailable(JSContext* cx) {
+ // Baseline supports every feature supported by any compiler.
+ return cx->options().wasmBaseline() && BaselinePlatformSupport();
+}
+
+bool wasm::IonAvailable(JSContext* cx) {
+ if (!cx->options().wasmIon() || !IonPlatformSupport()) {
+ return false;
+ }
+ bool isDisabled = false;
+ MOZ_ALWAYS_TRUE(IonDisabledByFeatures(cx, &isDisabled));
+ return !isDisabled && !CraneliftAvailable(cx);
+}
+
+template <size_t ArrayLength>
+static inline bool Append(JSStringBuilder* reason, const char (&s)[ArrayLength],
+ char* sep) {
+ if ((*sep && !reason->append(*sep)) || !reason->append(s)) {
+ return false;
+ }
+ *sep = ',';
+ return true;
+}
+
+bool wasm::IonDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason) {
+  // Ion has no support for debugging, function-references, gc, or exceptions.
+ bool debug = WasmDebuggerActive(cx);
+ bool functionReferences = WasmFunctionReferencesFlag(cx);
+ bool gc = WasmGcFlag(cx);
+ bool exn = WasmExceptionsFlag(cx);
+ if (reason) {
+ char sep = 0;
+ if (debug && !Append(reason, "debug", &sep)) {
+ return false;
+ }
+ if (functionReferences && !Append(reason, "function-references", &sep)) {
+ return false;
+ }
+ if (gc && !Append(reason, "gc", &sep)) {
+ return false;
+ }
+ if (exn && !Append(reason, "exceptions", &sep)) {
+ return false;
+ }
+ }
+ *isDisabled = debug || functionReferences || gc || exn;
+ return true;
+}
+
+bool wasm::CraneliftAvailable(JSContext* cx) {
+ if (!cx->options().wasmCranelift() || !CraneliftPlatformSupport()) {
+ return false;
+ }
+ bool isDisabled = false;
+ MOZ_ALWAYS_TRUE(CraneliftDisabledByFeatures(cx, &isDisabled));
+ return !isDisabled;
+}
+
+bool wasm::CraneliftDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason) {
+  // Cranelift has no support for debugging, function-references, gc, or
+  // exceptions, and no SIMD support except on aarch64.
+ bool debug = WasmDebuggerActive(cx);
+ bool functionReferences = WasmFunctionReferencesFlag(cx);
+ bool gc = WasmGcFlag(cx);
+#ifdef JS_CODEGEN_ARM64
+ // Cranelift aarch64 has full SIMD support.
+ bool simdOnNonAarch64 = false;
+#else
+ bool simdOnNonAarch64 = WasmSimdFlag(cx);
+#endif
+ bool exn = WasmExceptionsFlag(cx);
+ if (reason) {
+ char sep = 0;
+ if (debug && !Append(reason, "debug", &sep)) {
+ return false;
+ }
+ if (functionReferences && !Append(reason, "function-references", &sep)) {
+ return false;
+ }
+ if (gc && !Append(reason, "gc", &sep)) {
+ return false;
+ }
+ if (simdOnNonAarch64 && !Append(reason, "simd", &sep)) {
+ return false;
+ }
+ if (exn && !Append(reason, "exceptions", &sep)) {
+ return false;
+ }
+ }
+ *isDisabled = debug || functionReferences || gc || simdOnNonAarch64 || exn;
+ return true;
+}
+
+bool wasm::AnyCompilerAvailable(JSContext* cx) {
+ return wasm::BaselineAvailable(cx) || wasm::IonAvailable(cx) ||
+ wasm::CraneliftAvailable(cx);
+}
+
+// Feature predicates. These must be kept in sync with the predicates in the
+// section above.
+//
+// The meaning of these predicates is tricky: A predicate is true for a feature
+// if the feature is enabled and/or compiled-in *and* we have *at least one*
+// compiler that can support the feature. Subsequent compiler selection must
+// ensure that only compilers that actually support the feature are used.
+
+bool wasm::ReftypesAvailable(JSContext* cx) {
+ // All compilers support reference types.
+ return WasmReftypesFlag(cx) && AnyCompilerAvailable(cx);
+}
+
+bool wasm::FunctionReferencesAvailable(JSContext* cx) {
+ // Cranelift and Ion do not support function-references.
+ return WasmFunctionReferencesFlag(cx) && BaselineAvailable(cx);
+}
+
+bool wasm::GcTypesAvailable(JSContext* cx) {
+ // Cranelift and Ion do not support GC.
+ return WasmGcFlag(cx) && BaselineAvailable(cx);
+}
+
+bool wasm::MultiValuesAvailable(JSContext* cx) {
+ return WasmMultiValueFlag(cx) && AnyCompilerAvailable(cx);
+}
+
+bool wasm::SimdAvailable(JSContext* cx) {
+ return WasmSimdFlag(cx) &&
+ (BaselineAvailable(cx) || IonAvailable(cx) || CraneliftAvailable(cx));
+}
+
+bool wasm::SimdWormholeAvailable(JSContext* cx) {
+ return WasmSimdWormholeFlag(cx) && SimdAvailable(cx) && IonAvailable(cx) &&
+ !BaselineAvailable(cx) && !CraneliftAvailable(cx);
+}
+
+bool wasm::ThreadsAvailable(JSContext* cx) {
+ return WasmThreadsFlag(cx) && AnyCompilerAvailable(cx);
+}
+
+bool wasm::ExceptionsAvailable(JSContext* cx) {
+ // Ion & Cranelift do not support Exceptions (for now).
+ // Exceptions require multi-value.
+ return WasmExceptionsFlag(cx) && MultiValuesAvailable(cx) &&
+ BaselineAvailable(cx);
+}
+
+bool wasm::HasPlatformSupport(JSContext* cx) {
+#if !MOZ_LITTLE_ENDIAN() || defined(JS_CODEGEN_NONE)
+ return false;
+#endif
+
+ if (gc::SystemPageSize() > wasm::PageSize) {
+ return false;
+ }
+
+ if (!JitOptions.supportsFloatingPoint) {
+ return false;
+ }
+
+ if (!JitOptions.supportsUnalignedAccesses) {
+ return false;
+ }
+
+ if (!wasm::EnsureFullSignalHandlers(cx)) {
+ return false;
+ }
+
+ if (!jit::JitSupportsAtomics()) {
+ return false;
+ }
+
+ // Wasm threads require 8-byte lock-free atomics.
+ if (!jit::AtomicOperations::isLockfree8()) {
+ return false;
+ }
+
+ // Lazily initialize the global type context
+ if (!cx->wasm().ensureTypeContext(cx)) {
+ return false;
+ }
+
+ // Test only whether the compilers are supported on the hardware, not whether
+ // they are enabled.
+ return BaselinePlatformSupport() || IonPlatformSupport() ||
+ CraneliftPlatformSupport();
+}
+
+bool wasm::HasSupport(JSContext* cx) {
+ // If the general wasm pref is on, it's on for everything.
+ bool prefEnabled = cx->options().wasm();
+ // If the general pref is off, check trusted principals.
+ if (MOZ_UNLIKELY(!prefEnabled)) {
+ prefEnabled = cx->options().wasmForTrustedPrinciples() && cx->realm() &&
+ cx->realm()->principals() &&
+ cx->realm()->principals()->isSystemOrAddonPrincipal();
+ }
+ // Do not check for compiler availability, as that may be run-time variant.
+ // For HasSupport() we want a stable answer depending only on prefs.
+ return prefEnabled && HasPlatformSupport(cx);
+}
+
+bool wasm::StreamingCompilationAvailable(JSContext* cx) {
+ // This should match EnsureStreamSupport().
+ return HasSupport(cx) && AnyCompilerAvailable(cx) &&
+ cx->runtime()->offThreadPromiseState.ref().initialized() &&
+ CanUseExtraThreads() && cx->runtime()->consumeStreamCallback &&
+ cx->runtime()->reportStreamErrorCallback;
+}
+
+bool wasm::CodeCachingAvailable(JSContext* cx) {
+ // At the moment, we require Ion support for code caching. The main reason
+ // for this is that wasm::CompileAndSerialize() does not have access to
+ // information about which optimizing compiler it should use. See comments in
+ // CompileAndSerialize(), below.
+ return StreamingCompilationAvailable(cx) && IonAvailable(cx);
+}
+
+// As the return values from the underlying buffer accessors will become size_t
+// before long, they are captured as size_t here.
+
+uint32_t wasm::ByteLength32(Handle<ArrayBufferObjectMaybeShared*> buffer) {
+ size_t len = buffer->byteLength().get();
+ MOZ_ASSERT(len <= size_t(MaxMemory32Pages) * PageSize);
+ return uint32_t(len);
+}
+
+uint32_t wasm::ByteLength32(const ArrayBufferObjectMaybeShared& buffer) {
+ size_t len = buffer.byteLength().get();
+ MOZ_ASSERT(len <= size_t(MaxMemory32Pages) * PageSize);
+ return uint32_t(len);
+}
+
+uint32_t wasm::ByteLength32(const WasmArrayRawBuffer* buffer) {
+ size_t len = buffer->byteLength().get();
+ MOZ_ASSERT(len <= size_t(MaxMemory32Pages) * PageSize);
+ return uint32_t(len);
+}
+
+uint32_t wasm::ByteLength32(const ArrayBufferObject& buffer) {
+ size_t len = buffer.byteLength().get();
+ MOZ_ASSERT(len <= size_t(MaxMemory32Pages) * PageSize);
+ return uint32_t(len);
+}
+
+uint32_t wasm::VolatileByteLength32(const SharedArrayRawBuffer* buffer) {
+ size_t len = buffer->volatileByteLength().get();
+ MOZ_ASSERT(len <= size_t(MaxMemory32Pages) * PageSize);
+ return uint32_t(len);
+}
+
+// ============================================================================
+// Imports
+
+static bool ThrowBadImportArg(JSContext* cx) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_ARG);
+ return false;
+}
+
+static bool ThrowBadImportType(JSContext* cx, const char* field,
+ const char* str) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_TYPE, field, str);
+ return false;
+}
+
+static bool GetProperty(JSContext* cx, HandleObject obj, const char* chars,
+ MutableHandleValue v) {
+ JSAtom* atom = AtomizeUTF8Chars(cx, chars, strlen(chars));
+ if (!atom) {
+ return false;
+ }
+
+ RootedId id(cx, AtomToId(atom));
+ return GetProperty(cx, obj, obj, id, v);
+}
+
+bool js::wasm::GetImports(JSContext* cx, const Module& module,
+ HandleObject importObj, ImportValues* imports) {
+ if (!module.imports().empty() && !importObj) {
+ return ThrowBadImportArg(cx);
+ }
+
+ const Metadata& metadata = module.metadata();
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+ uint32_t eventIndex = 0;
+ const EventDescVector& events = metadata.events;
+#endif
+ uint32_t globalIndex = 0;
+ const GlobalDescVector& globals = metadata.globals;
+ uint32_t tableIndex = 0;
+ const TableDescVector& tables = metadata.tables;
+ for (const Import& import : module.imports()) {
+ RootedValue v(cx);
+ if (!GetProperty(cx, importObj, import.module.get(), &v)) {
+ return false;
+ }
+
+ if (!v.isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_FIELD,
+ import.module.get());
+ return false;
+ }
+
+ RootedObject obj(cx, &v.toObject());
+ if (!GetProperty(cx, obj, import.field.get(), &v)) {
+ return false;
+ }
+
+ switch (import.kind) {
+ case DefinitionKind::Function: {
+ if (!IsFunctionObject(v)) {
+ return ThrowBadImportType(cx, import.field.get(), "Function");
+ }
+
+ if (!imports->funcs.append(&v.toObject().as<JSFunction>())) {
+ return false;
+ }
+
+ break;
+ }
+ case DefinitionKind::Table: {
+ const uint32_t index = tableIndex++;
+ if (!v.isObject() || !v.toObject().is<WasmTableObject>()) {
+ return ThrowBadImportType(cx, import.field.get(), "Table");
+ }
+
+ RootedWasmTableObject obj(cx, &v.toObject().as<WasmTableObject>());
+ if (obj->table().elemType() != tables[index].elemType) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_TBL_TYPE_LINK);
+ return false;
+ }
+
+ if (!imports->tables.append(obj)) {
+ return false;
+ }
+ break;
+ }
+ case DefinitionKind::Memory: {
+ if (!v.isObject() || !v.toObject().is<WasmMemoryObject>()) {
+ return ThrowBadImportType(cx, import.field.get(), "Memory");
+ }
+
+ MOZ_ASSERT(!imports->memory);
+ imports->memory = &v.toObject().as<WasmMemoryObject>();
+ break;
+ }
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case DefinitionKind::Event: {
+ const uint32_t index = eventIndex++;
+ if (!v.isObject() || !v.toObject().is<WasmExceptionObject>()) {
+ return ThrowBadImportType(cx, import.field.get(), "Exception");
+ }
+
+ RootedWasmExceptionObject obj(cx,
+ &v.toObject().as<WasmExceptionObject>());
+
+ // Checks whether the signature of the imported exception object matches
+ // the signature declared in the exception import's EventDesc.
+ if (obj->resultType() != events[index].resultType()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EXN_SIG, import.module.get(),
+ import.field.get());
+ return false;
+ }
+
+ if (!imports->exceptionObjs.append(obj)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ break;
+ }
+#endif
+ case DefinitionKind::Global: {
+ const uint32_t index = globalIndex++;
+ const GlobalDesc& global = globals[index];
+ MOZ_ASSERT(global.importIndex() == index);
+
+ RootedVal val(cx);
+ if (v.isObject() && v.toObject().is<WasmGlobalObject>()) {
+ RootedWasmGlobalObject obj(cx, &v.toObject().as<WasmGlobalObject>());
+
+ if (obj->isMutable() != global.isMutable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_GLOB_MUT_LINK);
+ return false;
+ }
+ if (obj->type() != global.type()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_GLOB_TYPE_LINK);
+ return false;
+ }
+
+ if (imports->globalObjs.length() <= index &&
+ !imports->globalObjs.resize(index + 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ imports->globalObjs[index] = obj;
+ val = obj->val();
+ } else {
+ if (IsNumberType(global.type())) {
+ if (global.type() == ValType::I64 && !v.isBigInt()) {
+ return ThrowBadImportType(cx, import.field.get(), "BigInt");
+ }
+ if (global.type() != ValType::I64 && !v.isNumber()) {
+ return ThrowBadImportType(cx, import.field.get(), "Number");
+ }
+ } else {
+ MOZ_ASSERT(global.type().isReference());
+ if (!global.type().isExternRef() && !v.isObjectOrNull()) {
+ return ThrowBadImportType(cx, import.field.get(),
+ "Object-or-null value required for "
+ "non-externref reference type");
+ }
+ }
+
+ if (global.isMutable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_GLOB_MUT_LINK);
+ return false;
+ }
+
+ if (!Val::fromJSValue(cx, global.type(), v, &val)) {
+ return false;
+ }
+ }
+
+ if (!imports->globalValues.append(val)) {
+ return false;
+ }
+
+ break;
+ }
+ }
+ }
+
+ MOZ_ASSERT(globalIndex == globals.length() ||
+ !globals[globalIndex].isImport());
+
+ return true;
+}
+
+static bool DescribeScriptedCaller(JSContext* cx, ScriptedCaller* caller,
+ const char* introducer) {
+ // Note: JS::DescribeScriptedCaller returns whether a scripted caller was
+ // found, not whether an error was thrown. This wrapper function converts
+ // back to the more ordinary false-if-error form.
+
+ JS::AutoFilename af;
+ if (JS::DescribeScriptedCaller(cx, &af, &caller->line)) {
+ caller->filename =
+ FormatIntroducedFilename(cx, af.get(), caller->line, introducer);
+ if (!caller->filename) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// ============================================================================
+// Testing / Fuzzing support
+
+bool wasm::Eval(JSContext* cx, Handle<TypedArrayObject*> code,
+ HandleObject importObj,
+ MutableHandleWasmInstanceObject instanceObj) {
+ if (!GlobalObject::ensureConstructor(cx, cx->global(), JSProto_WebAssembly)) {
+ return false;
+ }
+
+ MutableBytes bytecode = cx->new_<ShareableBytes>();
+ if (!bytecode) {
+ return false;
+ }
+
+ if (!bytecode->append((uint8_t*)code->dataPointerEither().unwrap(),
+ code->byteLength().get())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ ScriptedCaller scriptedCaller;
+ if (!DescribeScriptedCaller(cx, &scriptedCaller, "wasm_eval")) {
+ return false;
+ }
+
+ SharedCompileArgs compileArgs =
+ CompileArgs::build(cx, std::move(scriptedCaller));
+ if (!compileArgs) {
+ return false;
+ }
+
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ JSTelemetrySender sender(cx->runtime());
+ SharedModule module = CompileBuffer(*compileArgs, *bytecode, &error,
+ &warnings, nullptr, sender);
+ if (!module) {
+ if (error) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_COMPILE_ERROR, error.get());
+ return false;
+ }
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ Rooted<ImportValues> imports(cx);
+ if (!GetImports(cx, *module, importObj, imports.address())) {
+ return false;
+ }
+
+ return module->instantiate(cx, imports.get(), nullptr, instanceObj);
+}
+
+struct MOZ_STACK_CLASS SerializeListener : JS::OptimizedEncodingListener {
+ // MOZ_STACK_CLASS means these can be nops.
+ MozExternalRefCountType MOZ_XPCOM_ABI AddRef() override { return 0; }
+ MozExternalRefCountType MOZ_XPCOM_ABI Release() override { return 0; }
+
+ DebugOnly<bool> called = false;
+ Bytes* serialized;
+ explicit SerializeListener(Bytes* serialized) : serialized(serialized) {}
+
+ void storeOptimizedEncoding(JS::UniqueOptimizedEncodingBytes bytes) override {
+ MOZ_ASSERT(!called);
+ called = true;
+ if (serialized->resize(bytes->length())) {
+ memcpy(serialized->begin(), bytes->begin(), bytes->length());
+ }
+ }
+};
+
+bool wasm::CompileAndSerialize(const ShareableBytes& bytecode,
+ Bytes* serialized) {
+ MutableCompileArgs compileArgs = js_new<CompileArgs>(ScriptedCaller());
+ if (!compileArgs) {
+ return false;
+ }
+
+ // The caller has ensured CodeCachingAvailable(). Moreover, we want to ensure
+ // we go straight to tier-2 so that we synchronously call
+ // JS::OptimizedEncodingListener::storeOptimizedEncoding().
+ compileArgs->baselineEnabled = false;
+
+  // We always pick Ion here, and we depend on CodeCachingAvailable() having
+  // determined that Ion is available; see comments at CodeCachingAvailable().
+  // To do better, we need to pass information about which compiler should be
+  // used into CompileAndSerialize().
+ compileArgs->ionEnabled = true;
+
+ // The caller must ensure that huge memory support is configured the same in
+ // the receiving process of this serialized module.
+ compileArgs->features.hugeMemory = wasm::IsHugeMemoryEnabled();
+
+ SerializeListener listener(serialized);
+
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ SharedModule module =
+ CompileBuffer(*compileArgs, bytecode, &error, &warnings, &listener);
+ if (!module) {
+ fprintf(stderr, "Compilation error: %s\n", error ? error.get() : "oom");
+ return false;
+ }
+
+ MOZ_ASSERT(module->code().hasTier(Tier::Serialized));
+ MOZ_ASSERT(listener.called);
+ return !listener.serialized->empty();
+}
+
+bool wasm::DeserializeModule(JSContext* cx, const Bytes& serialized,
+ MutableHandleObject moduleObj) {
+ MutableModule module =
+ Module::deserialize(serialized.begin(), serialized.length());
+ if (!module) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ moduleObj.set(module->createObject(cx));
+ return !!moduleObj;
+}
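Taken together, CompileAndSerialize() and DeserializeModule() form the code-caching round trip. The sketch below shows how an in-engine caller might combine them; the helper name is hypothetical, error reporting is elided, and per the comments above the caller is expected to have checked CodeCachingAvailable() first.

    // Hypothetical round trip: serialize optimized code, then rebuild a
    // module object from the serialized bytes (possibly in another process).
    static bool RoundTripModule(JSContext* cx, const ShareableBytes& bytecode,
                                MutableHandleObject moduleObj) {
      Bytes serialized;
      if (!wasm::CompileAndSerialize(bytecode, &serialized)) {
        return false;  // compile error or OOM; details went to stderr
      }
      return wasm::DeserializeModule(cx, serialized, moduleObj);
    }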
+
+// ============================================================================
+// Common functions
+
+// '[EnforceRange] unsigned long' types are coerced with
+// ConvertToInt(v, 32, 'unsigned')
+// defined in Web IDL Section 3.2.4.9.
+static bool EnforceRangeU32(JSContext* cx, HandleValue v, const char* kind,
+ const char* noun, uint32_t* u32) {
+ // Step 4.
+ double x;
+ if (!ToNumber(cx, v, &x)) {
+ return false;
+ }
+
+ // Step 5.
+ if (mozilla::IsNegativeZero(x)) {
+ x = 0.0;
+ }
+
+ // Step 6.1.
+ if (!mozilla::IsFinite(x)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_UINT32, kind, noun);
+ return false;
+ }
+
+ // Step 6.2.
+ x = JS::ToInteger(x);
+
+ // Step 6.3.
+ if (x < 0 || x > double(UINT32_MAX)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_UINT32, kind, noun);
+ return false;
+ }
+
+ *u32 = uint32_t(x);
+ MOZ_ASSERT(double(*u32) == x);
+ return true;
+}
+
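+// Parse the 'initial' (or, with type reflection enabled, 'minimum'),
+// 'maximum', and (when shared memories are allowed) 'shared' members of a
+// descriptor object, checking them against 'maximumField'.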
+static bool GetLimits(JSContext* cx, HandleObject obj, uint32_t maximumField,
+ const char* kind, Limits* limits, Shareable allowShared) {
+ JSAtom* initialAtom = Atomize(cx, "initial", strlen("initial"));
+ if (!initialAtom) {
+ return false;
+ }
+ RootedId initialId(cx, AtomToId(initialAtom));
+
+ RootedValue initialVal(cx);
+ if (!GetProperty(cx, obj, obj, initialId, &initialVal)) {
+ return false;
+ }
+
+ uint32_t initial = 0;
+ if (!initialVal.isUndefined() &&
+ !EnforceRangeU32(cx, initialVal, kind, "initial size", &initial)) {
+ return false;
+ }
+ limits->initial = initial;
+
+ if (limits->initial > maximumField) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_RANGE,
+ kind, "initial size");
+ return false;
+ }
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ // Get minimum parameter.
+ JSAtom* minimumAtom = Atomize(cx, "minimum", strlen("minimum"));
+ if (!minimumAtom) {
+ return false;
+ }
+ RootedId minimumId(cx, AtomToId(minimumAtom));
+
+ RootedValue minimumVal(cx);
+ if (!GetProperty(cx, obj, obj, minimumId, &minimumVal)) {
+ return false;
+ }
+
+ uint32_t minimum = 0;
+ if (!minimumVal.isUndefined() &&
+ !EnforceRangeU32(cx, minimumVal, kind, "initial size", &minimum)) {
+ return false;
+ }
+ if (!minimumVal.isUndefined()) {
+ limits->initial = minimum;
+ }
+#endif
+
+ // Get maximum parameter.
+ JSAtom* maximumAtom = Atomize(cx, "maximum", strlen("maximum"));
+ if (!maximumAtom) {
+ return false;
+ }
+ RootedId maximumId(cx, AtomToId(maximumAtom));
+
+ RootedValue maxVal(cx);
+ if (!GetProperty(cx, obj, obj, maximumId, &maxVal)) {
+ return false;
+ }
+
+ // maxVal does not have a default value.
+ if (!maxVal.isUndefined()) {
+ uint32_t maximum = 0;
+ if (!EnforceRangeU32(cx, maxVal, kind, "maximum size", &maximum)) {
+ return false;
+ }
+ limits->maximum = Some(maximum);
+
+ if (*limits->maximum > maximumField || limits->initial > *limits->maximum) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_RANGE, kind, "maximum size");
+ return false;
+ }
+ }
+
+ limits->shared = Shareable::False;
+
+ if (allowShared == Shareable::True) {
+ JSAtom* sharedAtom = Atomize(cx, "shared", strlen("shared"));
+ if (!sharedAtom) {
+ return false;
+ }
+ RootedId sharedId(cx, AtomToId(sharedAtom));
+
+ RootedValue sharedVal(cx);
+ if (!GetProperty(cx, obj, obj, sharedId, &sharedVal)) {
+ return false;
+ }
+
+ // shared's default value is false, which is already the value set above.
+ if (!sharedVal.isUndefined()) {
+ limits->shared =
+ ToBoolean(sharedVal) ? Shareable::True : Shareable::False;
+
+ if (limits->shared == Shareable::True) {
+ if (maxVal.isUndefined()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MISSING_MAXIMUM, kind);
+ return false;
+ }
+
+ if (!cx->realm()
+ ->creationOptions()
+ .getSharedMemoryAndAtomicsEnabled()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_SHMEM_LINK);
+ return false;
+ }
+ }
+ }
+ }
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+  // Exactly one of 'minimum' and 'initial' must be supplied.
+ if (minimumVal.isUndefined() == initialVal.isUndefined()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_SUPPLY_ONLY_ONE, "minimum", "initial");
+ return false;
+ }
+#else
+ if (initialVal.isUndefined()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MISSING_REQUIRED, "initial");
+ return false;
+ }
+#endif
+
+ return true;
+}
+
+template <class Class, const char* name>
+static JSObject* CreateWasmConstructor(JSContext* cx, JSProtoKey key) {
+ RootedAtom className(cx, Atomize(cx, name, strlen(name)));
+ if (!className) {
+ return nullptr;
+ }
+
+ return NewNativeConstructor(cx, Class::construct, 1, className);
+}
+
+// ============================================================================
+// WebAssembly.Module class and methods
+
+const JSClassOps WasmModuleObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmModuleObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass WasmModuleObject::class_ = {
+ "WebAssembly.Module",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmModuleObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmModuleObject::classOps_,
+ &WasmModuleObject::classSpec_,
+};
+
+const JSClass& WasmModuleObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmModuleName[] = "Module";
+
+const ClassSpec WasmModuleObject::classSpec_ = {
+ CreateWasmConstructor<WasmModuleObject, WasmModuleName>,
+ GenericCreatePrototype<WasmModuleObject>,
+ WasmModuleObject::static_methods,
+ nullptr,
+ WasmModuleObject::methods,
+ WasmModuleObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+const JSPropertySpec WasmModuleObject::properties[] = {
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Module", JSPROP_READONLY),
+ JS_PS_END};
+
+const JSFunctionSpec WasmModuleObject::methods[] = {JS_FS_END};
+
+const JSFunctionSpec WasmModuleObject::static_methods[] = {
+ JS_FN("imports", WasmModuleObject::imports, 1, JSPROP_ENUMERATE),
+ JS_FN("exports", WasmModuleObject::exports, 1, JSPROP_ENUMERATE),
+ JS_FN("customSections", WasmModuleObject::customSections, 2,
+ JSPROP_ENUMERATE),
+ JS_FS_END};
+
+/* static */
+void WasmModuleObject::finalize(JSFreeOp* fop, JSObject* obj) {
+ const Module& module = obj->as<WasmModuleObject>().module();
+ obj->zone()->decJitMemory(module.codeLength(module.code().stableTier()));
+ fop->release(obj, &module, module.gcMallocBytesExcludingCode(),
+ MemoryUse::WasmModule);
+}
+
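+// Return true and set *module if 'obj', possibly behind a wrapper, is a
+// WasmModuleObject; return false otherwise without reporting an error.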
+static bool IsModuleObject(JSObject* obj, const Module** module) {
+ WasmModuleObject* mobj = obj->maybeUnwrapIf<WasmModuleObject>();
+ if (!mobj) {
+ return false;
+ }
+
+ *module = &mobj->module();
+ return true;
+}
+
+static bool GetModuleArg(JSContext* cx, CallArgs args, uint32_t numRequired,
+ const char* name, const Module** module) {
+ if (!args.requireAtLeast(cx, name, numRequired)) {
+ return false;
+ }
+
+ if (!args[0].isObject() || !IsModuleObject(&args[0].toObject(), module)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_MOD_ARG);
+ return false;
+ }
+
+ return true;
+}
+
+struct KindNames {
+ RootedPropertyName kind;
+ RootedPropertyName table;
+ RootedPropertyName memory;
+ RootedPropertyName event;
+ RootedPropertyName signature;
+
+ explicit KindNames(JSContext* cx)
+ : kind(cx), table(cx), memory(cx), event(cx), signature(cx) {}
+};
+
+static bool InitKindNames(JSContext* cx, KindNames* names) {
+ JSAtom* kind = Atomize(cx, "kind", strlen("kind"));
+ if (!kind) {
+ return false;
+ }
+ names->kind = kind->asPropertyName();
+
+ JSAtom* table = Atomize(cx, "table", strlen("table"));
+ if (!table) {
+ return false;
+ }
+ names->table = table->asPropertyName();
+
+ JSAtom* memory = Atomize(cx, "memory", strlen("memory"));
+ if (!memory) {
+ return false;
+ }
+ names->memory = memory->asPropertyName();
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+ JSAtom* event = Atomize(cx, "event", strlen("event"));
+ if (!event) {
+ return false;
+ }
+ names->event = event->asPropertyName();
+#endif
+
+ JSAtom* signature = Atomize(cx, "signature", strlen("signature"));
+ if (!signature) {
+ return false;
+ }
+ names->signature = signature->asPropertyName();
+
+ return true;
+}
+
+static JSString* KindToString(JSContext* cx, const KindNames& names,
+ DefinitionKind kind) {
+ switch (kind) {
+ case DefinitionKind::Function:
+ return cx->names().function;
+ case DefinitionKind::Table:
+ return names.table;
+ case DefinitionKind::Memory:
+ return names.memory;
+ case DefinitionKind::Global:
+ return cx->names().global;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case DefinitionKind::Event:
+ return names.event;
+#endif
+ }
+
+ MOZ_CRASH("invalid kind");
+}
+
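+// Render a function signature as "(arg, ...) -> (result, ...)"; used below
+// for the 'signature' property added when fuzzingSafe is set.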
+static JSString* FuncTypeToString(JSContext* cx, const FuncType& funcType) {
+ JSStringBuilder buf(cx);
+ if (!buf.append('(')) {
+ return nullptr;
+ }
+
+ bool first = true;
+ for (ValType arg : funcType.args()) {
+ if (!first && !buf.append(", ", strlen(", "))) {
+ return nullptr;
+ }
+
+ UniqueChars argStr = ToString(arg);
+ if (!argStr) {
+ return nullptr;
+ }
+
+ if (!buf.append(argStr.get(), strlen(argStr.get()))) {
+ return nullptr;
+ }
+
+ first = false;
+ }
+
+ if (!buf.append(") -> (", strlen(") -> ("))) {
+ return nullptr;
+ }
+
+ first = true;
+ for (ValType result : funcType.results()) {
+ if (!first && !buf.append(", ", strlen(", "))) {
+ return nullptr;
+ }
+
+ UniqueChars resultStr = ToString(result);
+ if (!resultStr) {
+ return nullptr;
+ }
+
+ if (!buf.append(resultStr.get(), strlen(resultStr.get()))) {
+ return nullptr;
+ }
+
+ first = false;
+ }
+
+ if (!buf.append(')')) {
+ return nullptr;
+ }
+
+ return buf.finishString();
+}
+
+static JSString* UTF8CharsToString(JSContext* cx, const char* chars) {
+ return NewStringCopyUTF8Z<CanGC>(cx,
+ JS::ConstUTF8CharsZ(chars, strlen(chars)));
+}
+
+/* static */
+bool WasmModuleObject::imports(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ const Module* module;
+ if (!GetModuleArg(cx, args, 1, "WebAssembly.Module.imports", &module)) {
+ return false;
+ }
+
+ KindNames names(cx);
+ if (!InitKindNames(cx, &names)) {
+ return false;
+ }
+
+ RootedValueVector elems(cx);
+ if (!elems.reserve(module->imports().length())) {
+ return false;
+ }
+
+ const FuncImportVector& funcImports =
+ module->metadata(module->code().stableTier()).funcImports;
+
+ size_t numFuncImport = 0;
+ for (const Import& import : module->imports()) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ if (!props.reserve(3)) {
+ return false;
+ }
+
+ JSString* moduleStr = UTF8CharsToString(cx, import.module.get());
+ if (!moduleStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(cx->names().module), StringValue(moduleStr)));
+
+ JSString* nameStr = UTF8CharsToString(cx, import.field.get());
+ if (!nameStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(cx->names().name), StringValue(nameStr)));
+
+ JSString* kindStr = KindToString(cx, names, import.kind);
+ if (!kindStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(names.kind), StringValue(kindStr)));
+
+ if (fuzzingSafe && import.kind == DefinitionKind::Function) {
+ JSString* ftStr =
+ FuncTypeToString(cx, funcImports[numFuncImport++].funcType());
+ if (!ftStr) {
+ return false;
+ }
+ if (!props.append(
+ IdValuePair(NameToId(names.signature), StringValue(ftStr)))) {
+ return false;
+ }
+ }
+
+ JSObject* obj = NewPlainObjectWithProperties(cx, props.begin(),
+ props.length(), GenericObject);
+ if (!obj) {
+ return false;
+ }
+
+ elems.infallibleAppend(ObjectValue(*obj));
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr) {
+ return false;
+ }
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
+/* static */
+bool WasmModuleObject::exports(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ const Module* module;
+ if (!GetModuleArg(cx, args, 1, "WebAssembly.Module.exports", &module)) {
+ return false;
+ }
+
+ KindNames names(cx);
+ if (!InitKindNames(cx, &names)) {
+ return false;
+ }
+
+ RootedValueVector elems(cx);
+ if (!elems.reserve(module->exports().length())) {
+ return false;
+ }
+
+ for (const Export& exp : module->exports()) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ if (!props.reserve(2)) {
+ return false;
+ }
+
+ JSString* nameStr = UTF8CharsToString(cx, exp.fieldName());
+ if (!nameStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(cx->names().name), StringValue(nameStr)));
+
+ JSString* kindStr = KindToString(cx, names, exp.kind());
+ if (!kindStr) {
+ return false;
+ }
+ props.infallibleAppend(
+ IdValuePair(NameToId(names.kind), StringValue(kindStr)));
+
+ if (fuzzingSafe && exp.kind() == DefinitionKind::Function) {
+ const FuncExport& fe = module->metadata(module->code().stableTier())
+ .lookupFuncExport(exp.funcIndex());
+ JSString* ftStr = FuncTypeToString(cx, fe.funcType());
+ if (!ftStr) {
+ return false;
+ }
+ if (!props.append(
+ IdValuePair(NameToId(names.signature), StringValue(ftStr)))) {
+ return false;
+ }
+ }
+
+ JSObject* obj = NewPlainObjectWithProperties(cx, props.begin(),
+ props.length(), GenericObject);
+ if (!obj) {
+ return false;
+ }
+
+ elems.infallibleAppend(ObjectValue(*obj));
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr) {
+ return false;
+ }
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
+/* static */
+bool WasmModuleObject::customSections(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ const Module* module;
+ if (!GetModuleArg(cx, args, 2, "WebAssembly.Module.customSections",
+ &module)) {
+ return false;
+ }
+
+ Vector<char, 8> name(cx);
+ {
+ RootedString str(cx, ToString(cx, args.get(1)));
+ if (!str) {
+ return false;
+ }
+
+ Rooted<JSLinearString*> linear(cx, str->ensureLinear(cx));
+ if (!linear) {
+ return false;
+ }
+
+ if (!name.initLengthUninitialized(
+ JS::GetDeflatedUTF8StringLength(linear))) {
+ return false;
+ }
+
+ mozilla::Unused << JS::DeflateStringToUTF8Buffer(
+ linear, Span(name.begin(), name.length()));
+ }
+
+ RootedValueVector elems(cx);
+ RootedArrayBufferObject buf(cx);
+ for (const CustomSection& cs : module->customSections()) {
+ if (name.length() != cs.name.length()) {
+ continue;
+ }
+ if (memcmp(name.begin(), cs.name.begin(), name.length())) {
+ continue;
+ }
+
+ buf = ArrayBufferObject::createZeroed(cx, BufferSize(cs.payload->length()));
+ if (!buf) {
+ return false;
+ }
+
+ memcpy(buf->dataPointer(), cs.payload->begin(), cs.payload->length());
+ if (!elems.append(ObjectValue(*buf))) {
+ return false;
+ }
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr) {
+ return false;
+ }
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
+/* static */
+WasmModuleObject* WasmModuleObject::create(JSContext* cx, const Module& module,
+ HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<WasmModuleObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+
+  // This accounts for the module allocation size (excluding code, which is
+  // handled separately below). It assumes that the size of the associated
+  // data doesn't change for the life of the WasmModuleObject. The size is
+  // counted once per WasmModuleObject referencing a Module.
+ InitReservedSlot(obj, MODULE_SLOT, const_cast<Module*>(&module),
+ module.gcMallocBytesExcludingCode(), MemoryUse::WasmModule);
+ module.AddRef();
+
+ // Bug 1569888: We account for the first tier here; the second tier, if
+ // different, also needs to be accounted for.
+ cx->zone()->incJitMemory(module.codeLength(module.code().stableTier()));
+ return obj;
+}
+
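+// Copy the contents of a buffer-source object into a freshly allocated
+// ShareableBytes, reporting 'errorNumber' if 'obj' is not a valid buffer
+// source.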
+static bool GetBufferSource(JSContext* cx, JSObject* obj, unsigned errorNumber,
+ MutableBytes* bytecode) {
+ *bytecode = cx->new_<ShareableBytes>();
+ if (!*bytecode) {
+ return false;
+ }
+
+ JSObject* unwrapped = CheckedUnwrapStatic(obj);
+
+ SharedMem<uint8_t*> dataPointer;
+ size_t byteLength;
+ if (!unwrapped || !IsBufferSource(unwrapped, &dataPointer, &byteLength)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
+ return false;
+ }
+
+ if (!(*bytecode)->append(dataPointer.unwrap(), byteLength)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+static SharedCompileArgs InitCompileArgs(JSContext* cx,
+ const char* introducer) {
+ ScriptedCaller scriptedCaller;
+ if (!DescribeScriptedCaller(cx, &scriptedCaller, introducer)) {
+ return nullptr;
+ }
+
+ return CompileArgs::build(cx, std::move(scriptedCaller));
+}
+
+static bool ReportCompileWarnings(JSContext* cx,
+ const UniqueCharsVector& warnings) {
+ // Avoid spamming the console.
+ size_t numWarnings = std::min<size_t>(warnings.length(), 3);
+
+ for (size_t i = 0; i < numWarnings; i++) {
+ if (!WarnNumberASCII(cx, JSMSG_WASM_COMPILE_WARNING, warnings[i].get())) {
+ return false;
+ }
+ }
+
+ if (warnings.length() > numWarnings) {
+ if (!WarnNumberASCII(cx, JSMSG_WASM_COMPILE_WARNING,
+ "other warnings suppressed")) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* static */
+bool WasmModuleObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ Log(cx, "sync new Module() started");
+
+ if (!ThrowIfNotConstructing(cx, callArgs, "Module")) {
+ return false;
+ }
+
+ if (!callArgs.requireAtLeast(cx, "WebAssembly.Module", 1)) {
+ return false;
+ }
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_BUF_ARG);
+ return false;
+ }
+
+ MutableBytes bytecode;
+ if (!GetBufferSource(cx, &callArgs[0].toObject(), JSMSG_WASM_BAD_BUF_ARG,
+ &bytecode)) {
+ return false;
+ }
+
+ SharedCompileArgs compileArgs = InitCompileArgs(cx, "WebAssembly.Module");
+ if (!compileArgs) {
+ return false;
+ }
+
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ JSTelemetrySender sender(cx->runtime());
+ SharedModule module = CompileBuffer(*compileArgs, *bytecode, &error,
+ &warnings, nullptr, sender);
+ if (!module) {
+ if (error) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_COMPILE_ERROR, error.get());
+ return false;
+ }
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!ReportCompileWarnings(cx, warnings)) {
+ return false;
+ }
+
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, callArgs, JSProto_WasmModule,
+ &proto)) {
+ return false;
+ }
+ if (!proto) {
+ proto = GlobalObject::getOrCreatePrototype(cx, JSProto_WasmModule);
+ }
+
+ RootedObject moduleObj(cx, WasmModuleObject::create(cx, *module, proto));
+ if (!moduleObj) {
+ return false;
+ }
+
+ Log(cx, "sync new Module() succeded");
+
+ callArgs.rval().setObject(*moduleObj);
+ return true;
+}
+
+const Module& WasmModuleObject::module() const {
+ MOZ_ASSERT(is<WasmModuleObject>());
+ return *(const Module*)getReservedSlot(MODULE_SLOT).toPrivate();
+}
+
+// ============================================================================
+// WebAssembly.Instance class and methods
+
+const JSClassOps WasmInstanceObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmInstanceObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ WasmInstanceObject::trace, // trace
+};
+
+const JSClass WasmInstanceObject::class_ = {
+ "WebAssembly.Instance",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmInstanceObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmInstanceObject::classOps_,
+ &WasmInstanceObject::classSpec_,
+};
+
+const JSClass& WasmInstanceObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmInstanceName[] = "Instance";
+
+const ClassSpec WasmInstanceObject::classSpec_ = {
+ CreateWasmConstructor<WasmInstanceObject, WasmInstanceName>,
+ GenericCreatePrototype<WasmInstanceObject>,
+ WasmInstanceObject::static_methods,
+ nullptr,
+ WasmInstanceObject::methods,
+ WasmInstanceObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+static bool IsInstance(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmInstanceObject>();
+}
+
+/* static */
+bool WasmInstanceObject::exportsGetterImpl(JSContext* cx,
+ const CallArgs& args) {
+ args.rval().setObject(
+ args.thisv().toObject().as<WasmInstanceObject>().exportsObj());
+ return true;
+}
+
+/* static */
+bool WasmInstanceObject::exportsGetter(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsInstance, exportsGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmInstanceObject::properties[] = {
+ JS_PSG("exports", WasmInstanceObject::exportsGetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Instance", JSPROP_READONLY),
+ JS_PS_END};
+
+const JSFunctionSpec WasmInstanceObject::methods[] = {JS_FS_END};
+
+const JSFunctionSpec WasmInstanceObject::static_methods[] = {JS_FS_END};
+
+bool WasmInstanceObject::isNewborn() const {
+ MOZ_ASSERT(is<WasmInstanceObject>());
+ return getReservedSlot(INSTANCE_SLOT).isUndefined();
+}
+
+// WeakScopeMap maps from function index to js::Scope. This map is weak to
+// avoid keeping scope objects alive. The scopes are normally created during
+// debugging.
+//
+// This is defined here in order to avoid a recursive dependency between
+// WasmJS.h and Scope.h.
+using WasmFunctionScopeMap =
+ JS::WeakCache<GCHashMap<uint32_t, WeakHeapPtr<WasmFunctionScope*>,
+ DefaultHasher<uint32_t>, ZoneAllocPolicy>>;
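+// UnspecifiedScopeMap is an opaque stand-in for WasmFunctionScopeMap in
+// WasmJS.h; asWasmFunctionScopeMap() recovers the concrete type.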
+class WasmInstanceObject::UnspecifiedScopeMap {
+ public:
+ WasmFunctionScopeMap& asWasmFunctionScopeMap() {
+ return *(WasmFunctionScopeMap*)this;
+ }
+};
+
+/* static */
+void WasmInstanceObject::finalize(JSFreeOp* fop, JSObject* obj) {
+ WasmInstanceObject& instance = obj->as<WasmInstanceObject>();
+ fop->delete_(obj, &instance.exports(), MemoryUse::WasmInstanceExports);
+ fop->delete_(obj, &instance.scopes().asWasmFunctionScopeMap(),
+ MemoryUse::WasmInstanceScopes);
+ fop->delete_(obj, &instance.indirectGlobals(),
+ MemoryUse::WasmInstanceGlobals);
+ if (!instance.isNewborn()) {
+ if (instance.instance().debugEnabled()) {
+ instance.instance().debug().finalize(fop);
+ }
+ fop->delete_(obj, &instance.instance(), MemoryUse::WasmInstanceInstance);
+ }
+}
+
+/* static */
+void WasmInstanceObject::trace(JSTracer* trc, JSObject* obj) {
+ WasmInstanceObject& instanceObj = obj->as<WasmInstanceObject>();
+ instanceObj.exports().trace(trc);
+ instanceObj.indirectGlobals().trace(trc);
+ if (!instanceObj.isNewborn()) {
+ instanceObj.instance().tracePrivate(trc);
+ }
+}
+
+/* static */
+WasmInstanceObject* WasmInstanceObject::create(
+ JSContext* cx, SharedCode code, const DataSegmentVector& dataSegments,
+ const ElemSegmentVector& elemSegments, UniqueTlsData tlsData,
+ HandleWasmMemoryObject memory, SharedExceptionTagVector&& exceptionTags,
+ SharedTableVector&& tables, const JSFunctionVector& funcImports,
+ const GlobalDescVector& globals, const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs, HandleObject proto,
+ UniqueDebugState maybeDebug) {
+ UniquePtr<ExportMap> exports = js::MakeUnique<ExportMap>(cx->zone());
+ if (!exports) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ UniquePtr<WasmFunctionScopeMap> scopes =
+ js::MakeUnique<WasmFunctionScopeMap>(cx->zone(), cx->zone());
+ if (!scopes) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
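+  // Count the globals backed by a WasmGlobalObject cell so that the
+  // indirect-global vector below can be sized up front.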
+ uint32_t indirectGlobals = 0;
+
+ for (uint32_t i = 0; i < globalObjs.length(); i++) {
+ if (globalObjs[i] && globals[i].isIndirect()) {
+ indirectGlobals++;
+ }
+ }
+
+ Rooted<UniquePtr<GlobalObjectVector>> indirectGlobalObjs(
+ cx, js::MakeUnique<GlobalObjectVector>(cx->zone()));
+ if (!indirectGlobalObjs || !indirectGlobalObjs->resize(indirectGlobals)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ {
+ uint32_t next = 0;
+ for (uint32_t i = 0; i < globalObjs.length(); i++) {
+ if (globalObjs[i] && globals[i].isIndirect()) {
+ (*indirectGlobalObjs)[next++] = globalObjs[i];
+ }
+ }
+ }
+
+ Instance* instance = nullptr;
+ RootedWasmInstanceObject obj(cx);
+
+ {
+ // We must delay creating metadata for this object until after all its
+ // slots have been initialized. We must also create the metadata before
+ // calling Instance::init as that may allocate new objects.
+ AutoSetNewObjectMetadata metadata(cx);
+ obj = NewObjectWithGivenProto<WasmInstanceObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isTenured(), "assumed by WasmTableObject write barriers");
+
+ // Finalization assumes these slots are always initialized:
+ InitReservedSlot(obj, EXPORTS_SLOT, exports.release(),
+ MemoryUse::WasmInstanceExports);
+
+ InitReservedSlot(obj, SCOPES_SLOT, scopes.release(),
+ MemoryUse::WasmInstanceScopes);
+
+ InitReservedSlot(obj, GLOBALS_SLOT, indirectGlobalObjs.release(),
+ MemoryUse::WasmInstanceGlobals);
+
+ obj->initReservedSlot(INSTANCE_SCOPE_SLOT, UndefinedValue());
+
+ // The INSTANCE_SLOT may not be initialized if Instance allocation fails,
+ // leading to an observable "newborn" state in tracing/finalization.
+ MOZ_ASSERT(obj->isNewborn());
+
+ // Root the Instance via WasmInstanceObject before any possible GC.
+ instance = cx->new_<Instance>(cx, obj, code, std::move(tlsData), memory,
+ std::move(exceptionTags), std::move(tables),
+ std::move(maybeDebug));
+ if (!instance) {
+ return nullptr;
+ }
+
+ InitReservedSlot(obj, INSTANCE_SLOT, instance,
+ MemoryUse::WasmInstanceInstance);
+ MOZ_ASSERT(!obj->isNewborn());
+ }
+
+ if (!instance->init(cx, funcImports, globalImportValues, globalObjs,
+ dataSegments, elemSegments)) {
+ return nullptr;
+ }
+
+ return obj;
+}
+
+void WasmInstanceObject::initExportsObj(JSObject& exportsObj) {
+ MOZ_ASSERT(getReservedSlot(EXPORTS_OBJ_SLOT).isUndefined());
+ setReservedSlot(EXPORTS_OBJ_SLOT, ObjectValue(exportsObj));
+}
+
+static bool GetImportArg(JSContext* cx, CallArgs callArgs,
+ MutableHandleObject importObj) {
+ if (!callArgs.get(1).isUndefined()) {
+ if (!callArgs[1].isObject()) {
+ return ThrowBadImportArg(cx);
+ }
+ importObj.set(&callArgs[1].toObject());
+ }
+ return true;
+}
+
+/* static */
+bool WasmInstanceObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ Log(cx, "sync new Instance() started");
+
+ if (!ThrowIfNotConstructing(cx, args, "Instance")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Instance", 1)) {
+ return false;
+ }
+
+ const Module* module;
+ if (!args[0].isObject() || !IsModuleObject(&args[0].toObject(), &module)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_MOD_ARG);
+ return false;
+ }
+
+ RootedObject importObj(cx);
+ if (!GetImportArg(cx, args, &importObj)) {
+ return false;
+ }
+
+ RootedObject instanceProto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_WasmInstance,
+ &instanceProto)) {
+ return false;
+ }
+ if (!instanceProto) {
+ instanceProto =
+ GlobalObject::getOrCreatePrototype(cx, JSProto_WasmInstance);
+ }
+
+ Rooted<ImportValues> imports(cx);
+ if (!GetImports(cx, *module, importObj, imports.address())) {
+ return false;
+ }
+
+ RootedWasmInstanceObject instanceObj(cx);
+ if (!module->instantiate(cx, imports.get(), instanceProto, &instanceObj)) {
+ return false;
+ }
+
+ Log(cx, "sync new Instance() succeeded");
+
+ args.rval().setObject(*instanceObj);
+ return true;
+}
+
+Instance& WasmInstanceObject::instance() const {
+ MOZ_ASSERT(!isNewborn());
+ return *(Instance*)getReservedSlot(INSTANCE_SLOT).toPrivate();
+}
+
+JSObject& WasmInstanceObject::exportsObj() const {
+ return getReservedSlot(EXPORTS_OBJ_SLOT).toObject();
+}
+
+WasmInstanceObject::ExportMap& WasmInstanceObject::exports() const {
+ return *(ExportMap*)getReservedSlot(EXPORTS_SLOT).toPrivate();
+}
+
+WasmInstanceObject::UnspecifiedScopeMap& WasmInstanceObject::scopes() const {
+ return *(UnspecifiedScopeMap*)(getReservedSlot(SCOPES_SLOT).toPrivate());
+}
+
+WasmInstanceObject::GlobalObjectVector& WasmInstanceObject::indirectGlobals()
+ const {
+ return *(GlobalObjectVector*)getReservedSlot(GLOBALS_SLOT).toPrivate();
+}
+
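+// Native function backing every exported wasm (and asm.js) function: recovers
+// the owning instance and function index from the callee and calls the
+// export.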
+static bool WasmCall(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ RootedFunction callee(cx, &args.callee().as<JSFunction>());
+
+ Instance& instance = ExportedFunctionToInstance(callee);
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(callee);
+ return instance.callExport(cx, funcIndex, args);
+}
+
+/* static */
+bool WasmInstanceObject::getExportedFunction(
+ JSContext* cx, HandleWasmInstanceObject instanceObj, uint32_t funcIndex,
+ MutableHandleFunction fun) {
+ if (ExportMap::Ptr p = instanceObj->exports().lookup(funcIndex)) {
+ fun.set(p->value());
+ return true;
+ }
+
+ const Instance& instance = instanceObj->instance();
+ const FuncExport& funcExport =
+ instance.metadata(instance.code().bestTier()).lookupFuncExport(funcIndex);
+ unsigned numArgs = funcExport.funcType().args().length();
+
+ if (instance.isAsmJS()) {
+    // asm.js needs to act like a normal JS function, which means having the
+    // name from the original source and being callable as a constructor.
+ RootedAtom name(cx, instance.getFuncDisplayAtom(cx, funcIndex));
+ if (!name) {
+ return false;
+ }
+ fun.set(NewNativeConstructor(cx, WasmCall, numArgs, name,
+ gc::AllocKind::FUNCTION_EXTENDED,
+ TenuredObject, FunctionFlags::ASMJS_CTOR));
+ if (!fun) {
+ return false;
+ }
+
+ // asm.js does not support jit entries.
+ fun->setWasmFuncIndex(funcIndex);
+ } else {
+ RootedAtom name(cx, NumberToAtom(cx, funcIndex));
+ if (!name) {
+ return false;
+ }
+
+ fun.set(NewNativeFunction(cx, WasmCall, numArgs, name,
+ gc::AllocKind::FUNCTION_EXTENDED, TenuredObject,
+ FunctionFlags::WASM));
+ if (!fun) {
+ return false;
+ }
+
+ // Some applications eagerly access all table elements which currently
+ // triggers worst-case behavior for lazy stubs, since each will allocate a
+ // separate 4kb code page. Most eagerly-accessed functions are not called,
+ // so use a shared, provisional (and slow) stub as JitEntry and wait until
+ // Instance::callExport() to create the fast entry stubs.
+ if (funcExport.canHaveJitEntry()) {
+ if (!funcExport.hasEagerStubs()) {
+ if (!EnsureBuiltinThunksInitialized()) {
+ return false;
+ }
+ void* provisionalJitEntryStub = ProvisionalJitEntryStub();
+ MOZ_ASSERT(provisionalJitEntryStub);
+ instance.code().setJitEntryIfNull(funcIndex, provisionalJitEntryStub);
+ }
+ fun->setWasmJitEntry(instance.code().getAddressOfJitEntry(funcIndex));
+ } else {
+ fun->setWasmFuncIndex(funcIndex);
+ }
+ }
+
+ fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT,
+ ObjectValue(*instanceObj));
+
+ void* tlsData = instanceObj->instance().tlsData();
+ fun->setExtendedSlot(FunctionExtended::WASM_TLSDATA_SLOT,
+ PrivateValue(tlsData));
+
+ if (!instanceObj->exports().putNew(funcIndex, fun)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+const CodeRange& WasmInstanceObject::getExportedFunctionCodeRange(
+ JSFunction* fun, Tier tier) {
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(fun);
+ MOZ_ASSERT(exports().lookup(funcIndex)->value() == fun);
+ const MetadataTier& metadata = instance().metadata(tier);
+ return metadata.codeRange(metadata.lookupFuncExport(funcIndex));
+}
+
+/* static */
+WasmInstanceScope* WasmInstanceObject::getScope(
+ JSContext* cx, HandleWasmInstanceObject instanceObj) {
+ if (!instanceObj->getReservedSlot(INSTANCE_SCOPE_SLOT).isUndefined()) {
+ return (WasmInstanceScope*)instanceObj->getReservedSlot(INSTANCE_SCOPE_SLOT)
+ .toGCThing();
+ }
+
+ Rooted<WasmInstanceScope*> instanceScope(
+ cx, WasmInstanceScope::create(cx, instanceObj));
+ if (!instanceScope) {
+ return nullptr;
+ }
+
+ instanceObj->setReservedSlot(INSTANCE_SCOPE_SLOT,
+ PrivateGCThingValue(instanceScope));
+
+ return instanceScope;
+}
+
+/* static */
+WasmFunctionScope* WasmInstanceObject::getFunctionScope(
+ JSContext* cx, HandleWasmInstanceObject instanceObj, uint32_t funcIndex) {
+ if (auto p =
+ instanceObj->scopes().asWasmFunctionScopeMap().lookup(funcIndex)) {
+ return p->value();
+ }
+
+ Rooted<WasmInstanceScope*> instanceScope(
+ cx, WasmInstanceObject::getScope(cx, instanceObj));
+ if (!instanceScope) {
+ return nullptr;
+ }
+
+ Rooted<WasmFunctionScope*> funcScope(
+ cx, WasmFunctionScope::create(cx, instanceScope, funcIndex));
+ if (!funcScope) {
+ return nullptr;
+ }
+
+ if (!instanceObj->scopes().asWasmFunctionScopeMap().putNew(funcIndex,
+ funcScope)) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ return funcScope;
+}
+
+bool wasm::IsWasmExportedFunction(JSFunction* fun) {
+ return fun->kind() == FunctionFlags::Wasm;
+}
+
+Instance& wasm::ExportedFunctionToInstance(JSFunction* fun) {
+ return ExportedFunctionToInstanceObject(fun)->instance();
+}
+
+WasmInstanceObject* wasm::ExportedFunctionToInstanceObject(JSFunction* fun) {
+ MOZ_ASSERT(fun->kind() == FunctionFlags::Wasm ||
+ fun->kind() == FunctionFlags::AsmJS);
+ const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT);
+ return &v.toObject().as<WasmInstanceObject>();
+}
+
+uint32_t wasm::ExportedFunctionToFuncIndex(JSFunction* fun) {
+ Instance& instance = ExportedFunctionToInstanceObject(fun)->instance();
+ return instance.code().getFuncIndex(fun);
+}
+
+// ============================================================================
+// WebAssembly.Memory class and methods
+
+const JSClassOps WasmMemoryObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmMemoryObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass WasmMemoryObject::class_ = {
+ "WebAssembly.Memory",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmMemoryObject::classOps_, &WasmMemoryObject::classSpec_};
+
+const JSClass& WasmMemoryObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmMemoryName[] = "Memory";
+
+const ClassSpec WasmMemoryObject::classSpec_ = {
+ CreateWasmConstructor<WasmMemoryObject, WasmMemoryName>,
+ GenericCreatePrototype<WasmMemoryObject>,
+ WasmMemoryObject::static_methods,
+ nullptr,
+ WasmMemoryObject::methods,
+ WasmMemoryObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+/* static */
+void WasmMemoryObject::finalize(JSFreeOp* fop, JSObject* obj) {
+ WasmMemoryObject& memory = obj->as<WasmMemoryObject>();
+ if (memory.hasObservers()) {
+ fop->delete_(obj, &memory.observers(), MemoryUse::WasmMemoryObservers);
+ }
+}
+
+/* static */
+WasmMemoryObject* WasmMemoryObject::create(
+ JSContext* cx, HandleArrayBufferObjectMaybeShared buffer,
+ HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<WasmMemoryObject>(cx, proto);
+ if (!obj) {
+ return nullptr;
+ }
+
+ obj->initReservedSlot(BUFFER_SLOT, ObjectValue(*buffer));
+ MOZ_ASSERT(!obj->hasObservers());
+ return obj;
+}
+
+/* static */
+bool WasmMemoryObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Memory")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Memory", 1)) {
+ return false;
+ }
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "memory");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+ Limits limits;
+ if (!GetLimits(cx, obj, MaxMemory32LimitField, "Memory", &limits,
+ Shareable::True)) {
+ return false;
+ }
+
+ if (limits.initial > MaxMemory32Pages) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MEM_IMP_LIMIT);
+ return false;
+ }
+
+ ConvertMemoryPagesToBytes(&limits);
+
+ RootedArrayBufferObjectMaybeShared buffer(cx);
+ if (!CreateWasmBuffer(cx, MemoryKind::Memory32, limits, &buffer)) {
+ return false;
+ }
+
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_WasmMemory,
+ &proto)) {
+ return false;
+ }
+ if (!proto) {
+ proto = GlobalObject::getOrCreatePrototype(cx, JSProto_WasmMemory);
+ }
+
+ RootedWasmMemoryObject memoryObj(cx,
+ WasmMemoryObject::create(cx, buffer, proto));
+ if (!memoryObj) {
+ return false;
+ }
+
+ args.rval().setObject(*memoryObj);
+ return true;
+}
+
+static bool IsMemory(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmMemoryObject>();
+}
+
+/* static */
+bool WasmMemoryObject::bufferGetterImpl(JSContext* cx, const CallArgs& args) {
+ RootedWasmMemoryObject memoryObj(
+ cx, &args.thisv().toObject().as<WasmMemoryObject>());
+ RootedArrayBufferObjectMaybeShared buffer(cx, &memoryObj->buffer());
+
+ if (memoryObj->isShared()) {
+ uint32_t memoryLength = memoryObj->volatileMemoryLength32();
+ MOZ_ASSERT(memoryLength >= ByteLength32(buffer));
+
+ if (memoryLength > ByteLength32(buffer)) {
+ RootedSharedArrayBufferObject newBuffer(
+ cx,
+ SharedArrayBufferObject::New(cx, memoryObj->sharedArrayRawBuffer(),
+ BufferSize(memoryLength)));
+ if (!newBuffer) {
+ return false;
+ }
+ // OK to addReference after we try to allocate because the memoryObj
+ // keeps the rawBuffer alive.
+ if (!memoryObj->sharedArrayRawBuffer()->addReference()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_SC_SAB_REFCNT_OFLO);
+ return false;
+ }
+ buffer = newBuffer;
+ memoryObj->setReservedSlot(BUFFER_SLOT, ObjectValue(*newBuffer));
+ }
+ }
+
+ args.rval().setObject(*buffer);
+ return true;
+}
+
+/* static */
+bool WasmMemoryObject::bufferGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, bufferGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmMemoryObject::properties[] = {
+ JS_PSG("buffer", WasmMemoryObject::bufferGetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Memory", JSPROP_READONLY),
+ JS_PS_END};
+
+/* static */
+bool WasmMemoryObject::growImpl(JSContext* cx, const CallArgs& args) {
+ RootedWasmMemoryObject memory(
+ cx, &args.thisv().toObject().as<WasmMemoryObject>());
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Memory.grow", 1)) {
+ return false;
+ }
+
+ uint32_t delta;
+ if (!EnforceRangeU32(cx, args.get(0), "Memory", "grow delta", &delta)) {
+ return false;
+ }
+
+ uint32_t ret = grow(memory, delta, cx);
+
+ if (ret == uint32_t(-1)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GROW,
+ "memory");
+ return false;
+ }
+
+ args.rval().setInt32(ret);
+ return true;
+}
+
+/* static */
+bool WasmMemoryObject::grow(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, growImpl>(cx, args);
+}
+
+const JSFunctionSpec WasmMemoryObject::methods[] = {
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ JS_FN("type", WasmMemoryObject::type, 0, JSPROP_ENUMERATE),
+#endif
+ JS_FN("grow", WasmMemoryObject::grow, 1, JSPROP_ENUMERATE), JS_FS_END};
+
+const JSFunctionSpec WasmMemoryObject::static_methods[] = {JS_FS_END};
+
+ArrayBufferObjectMaybeShared& WasmMemoryObject::buffer() const {
+ return getReservedSlot(BUFFER_SLOT)
+ .toObject()
+ .as<ArrayBufferObjectMaybeShared>();
+}
+
+SharedArrayRawBuffer* WasmMemoryObject::sharedArrayRawBuffer() const {
+ MOZ_ASSERT(isShared());
+ return buffer().as<SharedArrayBufferObject>().rawBufferObject();
+}
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+bool WasmMemoryObject::typeImpl(JSContext* cx, const CallArgs& args) {
+ RootedWasmMemoryObject memoryObj(
+ cx, &args.thisv().toObject().as<WasmMemoryObject>());
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+
+ Maybe<uint32_t> bufferMaxSize = memoryObj->buffer().wasmMaxSize();
+ if (bufferMaxSize.isSome()) {
+ uint32_t maximumPages = bufferMaxSize.value() / wasm::PageSize;
+ if (!props.append(IdValuePair(NameToId(cx->names().maximum),
+ Int32Value(maximumPages)))) {
+ return false;
+ }
+ }
+
+ uint32_t minimumPages = mozilla::AssertedCast<uint32_t>(
+ memoryObj->volatileMemoryLength32() / wasm::PageSize);
+ if (!props.append(IdValuePair(NameToId(cx->names().minimum),
+ Int32Value(minimumPages)))) {
+ return false;
+ }
+
+ if (!props.append(IdValuePair(NameToId(cx->names().shared),
+ BooleanValue(memoryObj->isShared())))) {
+ return false;
+ }
+
+ JSObject* memoryType = NewPlainObjectWithProperties(
+ cx, props.begin(), props.length(), GenericObject);
+ if (!memoryType) {
+ return false;
+ }
+ args.rval().setObject(*memoryType);
+ return true;
+}
+
+bool WasmMemoryObject::type(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, typeImpl>(cx, args);
+}
+#endif
+
+uint32_t WasmMemoryObject::volatileMemoryLength32() const {
+ if (isShared()) {
+ return VolatileByteLength32(sharedArrayRawBuffer());
+ }
+ return ByteLength32(buffer());
+}
+
+bool WasmMemoryObject::isShared() const {
+ return buffer().is<SharedArrayBufferObject>();
+}
+
+bool WasmMemoryObject::hasObservers() const {
+ return !getReservedSlot(OBSERVERS_SLOT).isUndefined();
+}
+
+WasmMemoryObject::InstanceSet& WasmMemoryObject::observers() const {
+ MOZ_ASSERT(hasObservers());
+ return *reinterpret_cast<InstanceSet*>(
+ getReservedSlot(OBSERVERS_SLOT).toPrivate());
+}
+
+WasmMemoryObject::InstanceSet* WasmMemoryObject::getOrCreateObservers(
+ JSContext* cx) {
+ if (!hasObservers()) {
+ auto observers = MakeUnique<InstanceSet>(cx->zone(), cx->zone());
+ if (!observers) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ InitReservedSlot(this, OBSERVERS_SLOT, observers.release(),
+ MemoryUse::WasmMemoryObservers);
+ }
+
+ return &observers();
+}
+
+bool WasmMemoryObject::isHuge() const {
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+ static_assert(MaxMemory32Bytes < HugeMappedSize,
+ "Non-huge buffer may be confused as huge");
+ return buffer().wasmMappedSize() >= HugeMappedSize;
+#else
+ return false;
+#endif
+}
+
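+// A memory's buffer may be moved when growing only if the memory is not
+// huge-mapped and has no declared maximum size.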
+bool WasmMemoryObject::movingGrowable() const {
+ return !isHuge() && !buffer().wasmMaxSize();
+}
+
+uint32_t WasmMemoryObject::boundsCheckLimit32() const {
+ if (!buffer().isWasm() || isHuge()) {
+ return ByteLength32(buffer());
+ }
+ size_t mappedSize = buffer().wasmMappedSize();
+ MOZ_ASSERT(mappedSize <= UINT32_MAX);
+ MOZ_ASSERT(mappedSize >= wasm::GuardSize);
+ MOZ_ASSERT(wasm::IsValidBoundsCheckImmediate(mappedSize - wasm::GuardSize));
+ return mappedSize - wasm::GuardSize;
+}
+
+bool WasmMemoryObject::addMovingGrowObserver(JSContext* cx,
+ WasmInstanceObject* instance) {
+ MOZ_ASSERT(movingGrowable());
+
+ InstanceSet* observers = getOrCreateObservers(cx);
+ if (!observers) {
+ return false;
+ }
+
+ if (!observers->putNew(instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
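+// Grow a shared memory in place under the SharedArrayRawBuffer lock. Returns
+// the old length in pages, or uint32_t(-1) on failure.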
+/* static */
+uint32_t WasmMemoryObject::growShared(HandleWasmMemoryObject memory,
+ uint32_t delta) {
+ SharedArrayRawBuffer* rawBuf = memory->sharedArrayRawBuffer();
+ SharedArrayRawBuffer::Lock lock(rawBuf);
+
+ MOZ_ASSERT(VolatileByteLength32(rawBuf) % PageSize == 0);
+ uint32_t oldNumPages = VolatileByteLength32(rawBuf) / PageSize;
+
+ CheckedInt<uint32_t> newSize = oldNumPages;
+ newSize += delta;
+ newSize *= PageSize;
+ if (!newSize.isValid()) {
+ return -1;
+ }
+
+ if (newSize.value() > rawBuf->maxSize()) {
+ return -1;
+ }
+
+ if (!rawBuf->wasmGrowToSizeInPlace(lock, BufferSize(newSize.value()))) {
+ return -1;
+ }
+
+ // New buffer objects will be created lazily in all agents (including in
+ // this agent) by bufferGetterImpl, above, so no more work to do here.
+
+ return oldNumPages;
+}
+
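+// Grow a memory by 'delta' pages, returning the old length in pages or
+// uint32_t(-1) on failure. For non-shared memories the BUFFER_SLOT is updated
+// and any moving-grow observers are notified on success.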
+/* static */
+uint32_t WasmMemoryObject::grow(HandleWasmMemoryObject memory, uint32_t delta,
+ JSContext* cx) {
+ if (memory->isShared()) {
+ return growShared(memory, delta);
+ }
+
+ RootedArrayBufferObject oldBuf(cx, &memory->buffer().as<ArrayBufferObject>());
+
+ MOZ_ASSERT(ByteLength32(oldBuf) % PageSize == 0);
+ uint32_t oldNumPages = ByteLength32(oldBuf) / PageSize;
+
+ // FIXME (large ArrayBuffer): This does not allow 65536 pages, which is
+ // technically the max. That may be a webcompat problem. We can fix this
+ // once wasmMovingGrowToSize and wasmGrowToSizeInPlace accept size_t rather
+ // than uint32_t. See the FIXME in WasmConstants.h for additional
+ // information.
+ static_assert(MaxMemory32Pages <= UINT32_MAX / PageSize, "Avoid overflows");
+
+ CheckedInt<uint32_t> newSize = oldNumPages;
+ newSize += delta;
+ newSize *= PageSize;
+ if (!newSize.isValid()) {
+ return -1;
+ }
+
+  // Always check against the max here; do not rely on the buffer resizers to
+  // use the correct limit, as they don't have enough context.
+ if (newSize.value() > MaxMemory32Pages * PageSize) {
+ return -1;
+ }
+
+ RootedArrayBufferObject newBuf(cx);
+
+ if (memory->movingGrowable()) {
+ MOZ_ASSERT(!memory->isHuge());
+ if (!ArrayBufferObject::wasmMovingGrowToSize(BufferSize(newSize.value()),
+ oldBuf, &newBuf, cx)) {
+ return -1;
+ }
+ } else {
+ if (Maybe<uint64_t> maxSize = oldBuf->wasmMaxSize()) {
+ if (newSize.value() > maxSize.value()) {
+ return -1;
+ }
+ }
+
+ if (!ArrayBufferObject::wasmGrowToSizeInPlace(BufferSize(newSize.value()),
+ oldBuf, &newBuf, cx)) {
+ return -1;
+ }
+ }
+
+ memory->setReservedSlot(BUFFER_SLOT, ObjectValue(*newBuf));
+
+ // Only notify moving-grow-observers after the BUFFER_SLOT has been updated
+ // since observers will call buffer().
+ if (memory->hasObservers()) {
+ for (InstanceSet::Range r = memory->observers().all(); !r.empty();
+ r.popFront()) {
+ r.front()->instance().onMovingGrowMemory();
+ }
+ }
+
+ return oldNumPages;
+}
+
+bool js::wasm::IsSharedWasmMemoryObject(JSObject* obj) {
+ WasmMemoryObject* mobj = obj->maybeUnwrapIf<WasmMemoryObject>();
+ return mobj && mobj->isShared();
+}
+
+// ============================================================================
+// WebAssembly.Table class and methods
+
+const JSClassOps WasmTableObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmTableObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ WasmTableObject::trace, // trace
+};
+
+const JSClass WasmTableObject::class_ = {
+ "WebAssembly.Table",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmTableObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmTableObject::classOps_, &WasmTableObject::classSpec_};
+
+const JSClass& WasmTableObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmTableName[] = "Table";
+
+const ClassSpec WasmTableObject::classSpec_ = {
+ CreateWasmConstructor<WasmTableObject, WasmTableName>,
+ GenericCreatePrototype<WasmTableObject>,
+ WasmTableObject::static_methods,
+ nullptr,
+ WasmTableObject::methods,
+ WasmTableObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+bool WasmTableObject::isNewborn() const {
+ MOZ_ASSERT(is<WasmTableObject>());
+ return getReservedSlot(TABLE_SLOT).isUndefined();
+}
+
+/* static */
+void WasmTableObject::finalize(JSFreeOp* fop, JSObject* obj) {
+ WasmTableObject& tableObj = obj->as<WasmTableObject>();
+ if (!tableObj.isNewborn()) {
+ auto& table = tableObj.table();
+ fop->release(obj, &table, table.gcMallocBytes(), MemoryUse::WasmTableTable);
+ }
+}
+
+/* static */
+void WasmTableObject::trace(JSTracer* trc, JSObject* obj) {
+ WasmTableObject& tableObj = obj->as<WasmTableObject>();
+ if (!tableObj.isNewborn()) {
+ tableObj.table().tracePrivate(trc);
+ }
+}
+
+// Return the JS value to use when a parameter to a function requiring a table
+// value is omitted. An implementation of [1].
+//
+// [1]
+// https://webassembly.github.io/reference-types/js-api/index.html#defaultvalue
+static Value TableDefaultValue(wasm::RefType tableType) {
+ return tableType.isExtern() ? UndefinedValue() : NullValue();
+}
+
+/* static */
+WasmTableObject* WasmTableObject::create(JSContext* cx, uint32_t initialLength,
+ Maybe<uint32_t> maximumLength,
+ wasm::RefType tableType,
+ HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ RootedWasmTableObject obj(
+ cx, NewObjectWithGivenProto<WasmTableObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isNewborn());
+
+  TableDesc td(tableType, initialLength, maximumLength, /*isAsmJS=*/false,
+               /*importedOrExported=*/true);
+
+ SharedTable table = Table::create(cx, td, obj);
+ if (!table) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ size_t size = table->gcMallocBytes();
+ InitReservedSlot(obj, TABLE_SLOT, table.forget().take(), size,
+ MemoryUse::WasmTableTable);
+
+ MOZ_ASSERT(!obj->isNewborn());
+ return obj;
+}
+
+/* static */
+bool WasmTableObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Table")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table", 1)) {
+ return false;
+ }
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "table");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+
+ JSAtom* elementAtom = Atomize(cx, "element", strlen("element"));
+ if (!elementAtom) {
+ return false;
+ }
+ RootedId elementId(cx, AtomToId(elementAtom));
+
+ RootedValue elementVal(cx);
+ if (!GetProperty(cx, obj, obj, elementId, &elementVal)) {
+ return false;
+ }
+
+ RootedString elementStr(cx, ToString(cx, elementVal));
+ if (!elementStr) {
+ return false;
+ }
+
+ RootedLinearString elementLinearStr(cx, elementStr->ensureLinear(cx));
+ if (!elementLinearStr) {
+ return false;
+ }
+
+ RefType tableType;
+ if (StringEqualsLiteral(elementLinearStr, "anyfunc") ||
+ StringEqualsLiteral(elementLinearStr, "funcref")) {
+ tableType = RefType::func();
+#ifdef ENABLE_WASM_REFTYPES
+ } else if (StringEqualsLiteral(elementLinearStr, "externref")) {
+ if (!ReftypesAvailable(cx)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ELEMENT);
+ return false;
+ }
+ tableType = RefType::extern_();
+#endif
+#ifdef ENABLE_WASM_GC
+ } else if (StringEqualsLiteral(elementLinearStr, "eqref")) {
+ if (!GcTypesAvailable(cx)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ELEMENT);
+ return false;
+ }
+ tableType = RefType::eq();
+#endif
+ } else {
+#ifdef ENABLE_WASM_REFTYPES
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ELEMENT_GENERALIZED);
+#else
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_ELEMENT);
+#endif
+ return false;
+ }
+
+ Limits limits;
+ if (!GetLimits(cx, obj, MaxTableLimitField, "Table", &limits,
+ Shareable::False)) {
+ return false;
+ }
+
+ if (limits.initial > MaxTableLength) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_TABLE_IMP_LIMIT);
+ return false;
+ }
+
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_WasmTable,
+ &proto)) {
+ return false;
+ }
+ if (!proto) {
+ proto = GlobalObject::getOrCreatePrototype(cx, JSProto_WasmTable);
+ }
+
+ // The rest of the runtime expects table limits to be within a 32-bit range.
+ static_assert(MaxTableLimitField <= UINT32_MAX, "invariant");
+ uint32_t initialLength = uint32_t(limits.initial);
+ Maybe<uint32_t> maximumLength;
+ if (limits.maximum) {
+ maximumLength = Some(uint32_t(*limits.maximum));
+ }
+
+ RootedWasmTableObject table(
+ cx, WasmTableObject::create(cx, initialLength, maximumLength, tableType,
+ proto));
+ if (!table) {
+ return false;
+ }
+
+ // Initialize the table to a default value
+ RootedValue initValue(
+ cx, args.length() < 2 ? TableDefaultValue(tableType) : args[1]);
+
+ // Skip initializing the table if the fill value is null, as that is the
+ // default value.
+ if (!initValue.isNull() &&
+ !table->fillRange(cx, 0, initialLength, initValue)) {
+ return false;
+ }
+#ifdef DEBUG
+ // Assert that null is the default value of a new table.
+ if (initValue.isNull()) {
+ table->assertRangeNull(0, initialLength);
+ }
+#endif
+
+ args.rval().setObject(*table);
+ return true;
+}
+
+static bool IsTable(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmTableObject>();
+}
+
+/* static */
+bool WasmTableObject::lengthGetterImpl(JSContext* cx, const CallArgs& args) {
+ args.rval().setNumber(
+ args.thisv().toObject().as<WasmTableObject>().table().length());
+ return true;
+}
+
+/* static */
+bool WasmTableObject::lengthGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, lengthGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmTableObject::properties[] = {
+ JS_PSG("length", WasmTableObject::lengthGetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Table", JSPROP_READONLY),
+ JS_PS_END};
+
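+// Coerce 'v' to a u32 table index, reporting an error if it is out of range
+// for the table's current length.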
+static bool ToTableIndex(JSContext* cx, HandleValue v, const Table& table,
+ const char* noun, uint32_t* index) {
+ if (!EnforceRangeU32(cx, v, "Table", noun, index)) {
+ return false;
+ }
+
+ if (*index >= table.length()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_RANGE, "Table", noun);
+ return false;
+ }
+
+ return true;
+}
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+/* static */
+bool WasmTableObject::typeImpl(JSContext* cx, const CallArgs& args) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ Table& table = args.thisv().toObject().as<WasmTableObject>().table();
+
+ const char* elementValue;
+ switch (table.repr()) {
+ case TableRepr::Func:
+ elementValue = "funcref";
+ break;
+ case TableRepr::Ref:
+ elementValue = "externref";
+ break;
+ default:
+ MOZ_CRASH("Should not happen");
+ }
+ JSString* elementString = UTF8CharsToString(cx, elementValue);
+ if (!elementString) {
+ return false;
+ }
+ if (!props.append(IdValuePair(NameToId(cx->names().element),
+ StringValue(elementString)))) {
+ return false;
+ }
+
+ if (table.maximum().isSome()) {
+ if (!props.append(IdValuePair(NameToId(cx->names().maximum),
+ Int32Value(table.maximum().value())))) {
+ return false;
+ }
+ }
+
+ if (!props.append(IdValuePair(NameToId(cx->names().minimum),
+ Int32Value(table.length())))) {
+ return false;
+ }
+
+ JSObject* tableType = NewPlainObjectWithProperties(
+ cx, props.begin(), props.length(), GenericObject);
+ if (!tableType) {
+ return false;
+ }
+ args.rval().setObject(*tableType);
+ return true;
+}
+
+/* static */
+bool WasmTableObject::type(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, typeImpl>(cx, args);
+}
+#endif
+
+/* static */
+bool WasmTableObject::getImpl(JSContext* cx, const CallArgs& args) {
+ RootedWasmTableObject tableObj(
+ cx, &args.thisv().toObject().as<WasmTableObject>());
+ const Table& table = tableObj->table();
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table.get", 1)) {
+ return false;
+ }
+
+ uint32_t index;
+ if (!ToTableIndex(cx, args.get(0), table, "get index", &index)) {
+ return false;
+ }
+
+ switch (table.repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!table.isAsmJS());
+ RootedFunction fun(cx);
+ if (!table.getFuncRef(cx, index, &fun)) {
+ return false;
+ }
+ args.rval().setObjectOrNull(fun);
+ break;
+ }
+ case TableRepr::Ref: {
+ args.rval().set(UnboxAnyRef(table.getAnyRef(index)));
+ break;
+ }
+ }
+ return true;
+}
+
+/* static */
+bool WasmTableObject::get(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, getImpl>(cx, args);
+}
+
+/* static */
+bool WasmTableObject::setImpl(JSContext* cx, const CallArgs& args) {
+ RootedWasmTableObject tableObj(
+ cx, &args.thisv().toObject().as<WasmTableObject>());
+ Table& table = tableObj->table();
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table.set", 1)) {
+ return false;
+ }
+
+ uint32_t index;
+ if (!ToTableIndex(cx, args.get(0), table, "set index", &index)) {
+ return false;
+ }
+
+ RootedValue fillValue(
+ cx, args.length() < 2 ? TableDefaultValue(table.elemType()) : args[1]);
+ if (!tableObj->fillRange(cx, index, 1, fillValue)) {
+ return false;
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
+
+/* static */
+bool WasmTableObject::set(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, setImpl>(cx, args);
+}
+
+/* static */
+bool WasmTableObject::growImpl(JSContext* cx, const CallArgs& args) {
+ RootedWasmTableObject tableObj(
+ cx, &args.thisv().toObject().as<WasmTableObject>());
+ Table& table = tableObj->table();
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table.grow", 1)) {
+ return false;
+ }
+
+ uint32_t delta;
+ if (!EnforceRangeU32(cx, args.get(0), "Table", "grow delta", &delta)) {
+ return false;
+ }
+
+ uint32_t oldLength = table.grow(delta);
+
+ if (oldLength == uint32_t(-1)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GROW,
+ "table");
+ return false;
+ }
+
+ // Fill the grown range of the table
+ RootedValue fillValue(
+ cx, args.length() < 2 ? TableDefaultValue(table.elemType()) : args[1]);
+
+ // Skip filling the grown range of the table if the fill value is null, as
+ // that is the default value.
+ if (!fillValue.isNull() &&
+ !tableObj->fillRange(cx, oldLength, delta, fillValue)) {
+ return false;
+ }
+#ifdef DEBUG
+ // Assert that null is the default value of the grown range.
+ if (fillValue.isNull()) {
+ tableObj->assertRangeNull(oldLength, delta);
+ }
+#endif
+
+ args.rval().setInt32(oldLength);
+ return true;
+}
+
+/* static */
+bool WasmTableObject::grow(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, growImpl>(cx, args);
+}
+
+const JSFunctionSpec WasmTableObject::methods[] = {
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ JS_FN("type", WasmTableObject::type, 0, JSPROP_ENUMERATE),
+#endif
+ JS_FN("get", WasmTableObject::get, 1, JSPROP_ENUMERATE),
+ JS_FN("set", WasmTableObject::set, 2, JSPROP_ENUMERATE),
+ JS_FN("grow", WasmTableObject::grow, 1, JSPROP_ENUMERATE), JS_FS_END};
+
+const JSFunctionSpec WasmTableObject::static_methods[] = {JS_FS_END};
+
+Table& WasmTableObject::table() const {
+ return *(Table*)getReservedSlot(TABLE_SLOT).toPrivate();
+}
+
+bool WasmTableObject::fillRange(JSContext* cx, uint32_t index, uint32_t length,
+ HandleValue value) const {
+ Table& tab = table();
+
+ // All consumers are required to either bounds check or statically be in
+  // bounds.
+ MOZ_ASSERT(uint64_t(index) + uint64_t(length) <= tab.length());
+
+ RootedFunction fun(cx);
+ RootedAnyRef any(cx, AnyRef::null());
+ if (!CheckRefType(cx, tab.elemType(), value, &fun, &any)) {
+ return false;
+ }
+ switch (tab.repr()) {
+ case TableRepr::Func:
+ MOZ_RELEASE_ASSERT(!tab.isAsmJS());
+ tab.fillFuncRef(index, length, FuncRef::fromJSFunction(fun), cx);
+ break;
+ case TableRepr::Ref:
+ tab.fillAnyRef(index, length, any);
+ break;
+ }
+ return true;
+}
+
+#ifdef DEBUG
+void WasmTableObject::assertRangeNull(uint32_t index, uint32_t length) const {
+ Table& tab = table();
+ switch (tab.repr()) {
+ case TableRepr::Func:
+ for (uint32_t i = index; i < index + length; i++) {
+ MOZ_ASSERT(tab.getFuncRef(i).code == nullptr);
+ }
+ break;
+ case TableRepr::Ref:
+ for (uint32_t i = index; i < index + length; i++) {
+ MOZ_ASSERT(tab.getAnyRef(i).isNull());
+ }
+ break;
+ }
+}
+#endif
+
+// ============================================================================
+// WebAssembly.global class and methods
+
+const JSClassOps WasmGlobalObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmGlobalObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ WasmGlobalObject::trace, // trace
+};
+
+const JSClass WasmGlobalObject::class_ = {
+ "WebAssembly.Global",
+ JSCLASS_HAS_RESERVED_SLOTS(WasmGlobalObject::RESERVED_SLOTS) |
+ JSCLASS_BACKGROUND_FINALIZE,
+ &WasmGlobalObject::classOps_, &WasmGlobalObject::classSpec_};
+
+const JSClass& WasmGlobalObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmGlobalName[] = "Global";
+
+const ClassSpec WasmGlobalObject::classSpec_ = {
+ CreateWasmConstructor<WasmGlobalObject, WasmGlobalName>,
+ GenericCreatePrototype<WasmGlobalObject>,
+ WasmGlobalObject::static_methods,
+ nullptr,
+ WasmGlobalObject::methods,
+ WasmGlobalObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+/* static */
+void WasmGlobalObject::trace(JSTracer* trc, JSObject* obj) {
+ WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+ if (global->isNewborn()) {
+ // This can happen while we're allocating the object, in which case
+    // none of the object's slots are defined yet. In particular,
+ // there's nothing to trace yet.
+ return;
+ }
+ global->val().get().trace(trc);
+}
+
+/* static */
+void WasmGlobalObject::finalize(JSFreeOp* fop, JSObject* obj) {
+ WasmGlobalObject* global = reinterpret_cast<WasmGlobalObject*>(obj);
+ if (!global->isNewborn()) {
+ fop->delete_(obj, &global->val(), MemoryUse::WasmGlobalCell);
+ }
+}
+
+/* static */
+WasmGlobalObject* WasmGlobalObject::create(JSContext* cx, HandleVal hval,
+ bool isMutable, HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ RootedWasmGlobalObject obj(
+ cx, NewObjectWithGivenProto<WasmGlobalObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isNewborn());
+ MOZ_ASSERT(obj->isTenured(), "assumed by global.set post barriers");
+
+ GCPtrVal* val = js_new<GCPtrVal>(Val(hval.get().type()));
+ if (!val) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ obj->initReservedSlot(MUTABLE_SLOT, JS::BooleanValue(isMutable));
+ InitReservedSlot(obj, VAL_SLOT, val, MemoryUse::WasmGlobalCell);
+
+ // It's simpler to initialize the cell after the object has been created,
+ // to avoid needing to root the cell before the object creation.
+ obj->val() = hval.get();
+
+ MOZ_ASSERT(!obj->isNewborn());
+
+ return obj;
+}
+
+/* static */
+bool WasmGlobalObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Global")) {
+ return false;
+ }
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Global", 1)) {
+ return false;
+ }
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_DESC_ARG, "global");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+
+ // Extract properties in lexicographic order per spec.
+
+ RootedValue mutableVal(cx);
+ if (!JS_GetProperty(cx, obj, "mutable", &mutableVal)) {
+ return false;
+ }
+
+ RootedValue typeVal(cx);
+ if (!JS_GetProperty(cx, obj, "value", &typeVal)) {
+ return false;
+ }
+
+ RootedString typeStr(cx, ToString(cx, typeVal));
+ if (!typeStr) {
+ return false;
+ }
+
+ RootedLinearString typeLinearStr(cx, typeStr->ensureLinear(cx));
+ if (!typeLinearStr) {
+ return false;
+ }
+
+ ValType globalType;
+ if (StringEqualsLiteral(typeLinearStr, "i32")) {
+ globalType = ValType::I32;
+ } else if (StringEqualsLiteral(typeLinearStr, "i64")) {
+ globalType = ValType::I64;
+ } else if (StringEqualsLiteral(typeLinearStr, "f32")) {
+ globalType = ValType::F32;
+ } else if (StringEqualsLiteral(typeLinearStr, "f64")) {
+ globalType = ValType::F64;
+#ifdef ENABLE_WASM_SIMD
+ } else if (SimdAvailable(cx) && StringEqualsLiteral(typeLinearStr, "v128")) {
+ globalType = ValType::V128;
+#endif
+#ifdef ENABLE_WASM_REFTYPES
+ } else if (ReftypesAvailable(cx) &&
+ StringEqualsLiteral(typeLinearStr, "funcref")) {
+ globalType = RefType::func();
+ } else if (ReftypesAvailable(cx) &&
+ StringEqualsLiteral(typeLinearStr, "externref")) {
+ globalType = RefType::extern_();
+#endif
+#ifdef ENABLE_WASM_GC
+ } else if (GcTypesAvailable(cx) &&
+ StringEqualsLiteral(typeLinearStr, "eqref")) {
+ globalType = RefType::eq();
+#endif
+ } else {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_GLOBAL_TYPE);
+ return false;
+ }
+
+ bool isMutable = ToBoolean(mutableVal);
+
+ // Extract the initial value, or provide a suitable default.
+ RootedVal globalVal(cx, globalType);
+
+ // Override with non-undefined value, if provided.
+ RootedValue valueVal(cx, args.get(1));
+ if (!valueVal.isUndefined() ||
+ (args.length() >= 2 && globalType.isReference())) {
+ if (!Val::fromJSValue(cx, globalType, valueVal, &globalVal)) {
+ return false;
+ }
+ }
+
+ RootedObject proto(cx);
+ if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_WasmGlobal,
+ &proto)) {
+ return false;
+ }
+ if (!proto) {
+ proto = GlobalObject::getOrCreatePrototype(cx, JSProto_WasmGlobal);
+ }
+
+ WasmGlobalObject* global =
+ WasmGlobalObject::create(cx, globalVal, isMutable, proto);
+ if (!global) {
+ return false;
+ }
+
+ args.rval().setObject(*global);
+ return true;
+}
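+
+// Illustrative note (not part of the original source): the constructor above
+// services calls such as `new WebAssembly.Global({value: "i64", mutable: true},
+// 0n)`. The descriptor's "mutable" and "value" properties are read in
+// lexicographic order, and when the second argument is omitted the global is
+// initialized to the type's default (zero or null) value.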
+
+static bool IsGlobal(HandleValue v) {
+ return v.isObject() && v.toObject().is<WasmGlobalObject>();
+}
+
+/* static */
+bool WasmGlobalObject::valueGetterImpl(JSContext* cx, const CallArgs& args) {
+ const WasmGlobalObject& globalObj =
+ args.thisv().toObject().as<WasmGlobalObject>();
+ if (!globalObj.type().isExposable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+ }
+ return globalObj.val().get().toJSValue(cx, args.rval());
+}
+
+/* static */
+bool WasmGlobalObject::valueGetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsGlobal, valueGetterImpl>(cx, args);
+}
+
+/* static */
+bool WasmGlobalObject::valueSetterImpl(JSContext* cx, const CallArgs& args) {
+ if (!args.requireAtLeast(cx, "WebAssembly.Global setter", 1)) {
+ return false;
+ }
+
+ RootedWasmGlobalObject global(
+ cx, &args.thisv().toObject().as<WasmGlobalObject>());
+ if (!global->isMutable()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_GLOBAL_IMMUTABLE);
+ return false;
+ }
+
+ RootedVal val(cx);
+ if (!Val::fromJSValue(cx, global->type(), args.get(0), &val)) {
+ return false;
+ }
+ global->val() = val.get();
+
+ args.rval().setUndefined();
+ return true;
+}
+
+/* static */
+bool WasmGlobalObject::valueSetter(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsGlobal, valueSetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmGlobalObject::properties[] = {
+ JS_PSGS("value", WasmGlobalObject::valueGetter,
+ WasmGlobalObject::valueSetter, JSPROP_ENUMERATE),
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Global", JSPROP_READONLY),
+ JS_PS_END};
+
+const JSFunctionSpec WasmGlobalObject::methods[] = {
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+ JS_FN("type", WasmGlobalObject::type, 0, JSPROP_ENUMERATE),
+#endif
+ JS_FN(js_valueOf_str, WasmGlobalObject::valueGetter, 0, JSPROP_ENUMERATE),
+ JS_FS_END};
+
+const JSFunctionSpec WasmGlobalObject::static_methods[] = {JS_FS_END};
+
+bool WasmGlobalObject::isMutable() const {
+ return getReservedSlot(MUTABLE_SLOT).toBoolean();
+}
+
+ValType WasmGlobalObject::type() const { return val().get().type(); }
+
+GCPtrVal& WasmGlobalObject::val() const {
+ return *reinterpret_cast<GCPtrVal*>(getReservedSlot(VAL_SLOT).toPrivate());
+}
+
+#ifdef ENABLE_WASM_TYPE_REFLECTIONS
+/* static */
+bool WasmGlobalObject::typeImpl(JSContext* cx, const CallArgs& args) {
+ RootedWasmGlobalObject global(
+ cx, &args.thisv().toObject().as<WasmGlobalObject>());
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+
+ if (!props.append(IdValuePair(NameToId(cx->names().mutable_),
+ BooleanValue(global->isMutable())))) {
+ return false;
+ }
+
+ JSString* valueType = UTF8CharsToString(cx, ToString(global->type()).get());
+ if (!valueType) {
+ return false;
+ }
+ if (!props.append(
+ IdValuePair(NameToId(cx->names().value), StringValue(valueType)))) {
+ return false;
+ }
+
+ JSObject* globalType = NewPlainObjectWithProperties(
+ cx, props.begin(), props.length(), GenericObject);
+ if (!globalType) {
+ return false;
+ }
+ args.rval().setObject(*globalType);
+ return true;
+}
+
+/* static */
+bool WasmGlobalObject::type(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsGlobal, typeImpl>(cx, args);
+}
+#endif
+
+// ============================================================================
+// WebAssembly.Exception class and methods
+
+const JSClassOps WasmExceptionObject::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ WasmExceptionObject::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass WasmExceptionObject::class_ = {
+ "WebAssembly.Exception",
+ JSCLASS_HAS_RESERVED_SLOTS(WasmExceptionObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmExceptionObject::classOps_, &WasmExceptionObject::classSpec_};
+
+const JSClass& WasmExceptionObject::protoClass_ = PlainObject::class_;
+
+static constexpr char WasmExceptionName[] = "Exception";
+
+const ClassSpec WasmExceptionObject::classSpec_ = {
+ CreateWasmConstructor<WasmExceptionObject, WasmExceptionName>,
+ GenericCreatePrototype<WasmExceptionObject>,
+ WasmExceptionObject::static_methods,
+ nullptr,
+ WasmExceptionObject::methods,
+ WasmExceptionObject::properties,
+ nullptr,
+ ClassSpec::DontDefineConstructor};
+
+/* static */
+void WasmExceptionObject::finalize(JSFreeOp* fop, JSObject* obj) {
+ WasmExceptionObject& exnObj = obj->as<WasmExceptionObject>();
+ if (!exnObj.isNewborn()) {
+ fop->release(obj, &exnObj.tag(), MemoryUse::WasmExceptionTag);
+ fop->delete_(obj, &exnObj.valueTypes(), MemoryUse::WasmExceptionType);
+ }
+}
+
+bool WasmExceptionObject::construct(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Exception")) {
+ return false;
+ }
+
+ // FIXME: The JS API is not finalized and may specify a different behavior
+ // here.
+ // For now, we implement the same behavior as V8 and error when called.
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_EXN_CONSTRUCTOR);
+
+ return false;
+}
+
+/* static */
+WasmExceptionObject* WasmExceptionObject::create(JSContext* cx,
+ const ValTypeVector& type,
+ HandleObject proto) {
+ AutoSetNewObjectMetadata metadata(cx);
+ RootedWasmExceptionObject obj(
+ cx, NewObjectWithGivenProto<WasmExceptionObject>(cx, proto));
+ if (!obj) {
+ return nullptr;
+ }
+
+ MOZ_ASSERT(obj->isNewborn());
+
+ SharedExceptionTag tag = SharedExceptionTag(cx->new_<ExceptionTag>());
+ if (!tag) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ InitReservedSlot(obj, TAG_SLOT, tag.forget().take(),
+ MemoryUse::WasmExceptionTag);
+
+  wasm::ValTypeVector* newValueTypes = js_new<ValTypeVector>();
+  if (!newValueTypes) {
+    ReportOutOfMemory(cx);
+    return nullptr;
+  }
+  for (uint32_t i = 0; i < type.length(); i++) {
+    if (!newValueTypes->append(type[i])) {
+      return nullptr;
+    }
+  }
+ InitReservedSlot(obj, TYPE_SLOT, newValueTypes, MemoryUse::WasmExceptionType);
+
+ MOZ_ASSERT(!obj->isNewborn());
+
+ return obj;
+}
+
+bool WasmExceptionObject::isNewborn() const {
+ MOZ_ASSERT(is<WasmExceptionObject>());
+ return getReservedSlot(TYPE_SLOT).isUndefined();
+}
+
+const JSPropertySpec WasmExceptionObject::properties[] = {
+ JS_STRING_SYM_PS(toStringTag, "WebAssembly.Exception", JSPROP_READONLY),
+ JS_PS_END};
+
+const JSFunctionSpec WasmExceptionObject::methods[] = {JS_FS_END};
+
+const JSFunctionSpec WasmExceptionObject::static_methods[] = {JS_FS_END};
+
+wasm::ValTypeVector& WasmExceptionObject::valueTypes() const {
+ return *(ValTypeVector*)getFixedSlot(TYPE_SLOT).toPrivate();
+}
+
+wasm::ResultType WasmExceptionObject::resultType() const {
+ return wasm::ResultType::Vector(valueTypes());
+}
+
+ExceptionTag& WasmExceptionObject::tag() const {
+ return *(ExceptionTag*)getReservedSlot(TAG_SLOT).toPrivate();
+}
+
+// ============================================================================
+// WebAssembly class and static methods
+
+static bool WebAssembly_toSource(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setString(cx->names().WebAssembly);
+ return true;
+}
+
+static bool RejectWithPendingException(JSContext* cx,
+ Handle<PromiseObject*> promise) {
+ if (!cx->isExceptionPending()) {
+ return false;
+ }
+
+ RootedValue rejectionValue(cx);
+ if (!GetAndClearException(cx, &rejectionValue)) {
+ return false;
+ }
+
+ return PromiseObject::reject(cx, promise, rejectionValue);
+}
+
+static bool Reject(JSContext* cx, const CompileArgs& args,
+ Handle<PromiseObject*> promise, const UniqueChars& error) {
+ if (!error) {
+ ReportOutOfMemory(cx);
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedObject stack(cx, promise->allocationSite());
+ RootedString filename(
+ cx, JS_NewStringCopyZ(cx, args.scriptedCaller.filename.get()));
+ if (!filename) {
+ return false;
+ }
+
+ unsigned line = args.scriptedCaller.line;
+
+ // Ideally we'd report a JSMSG_WASM_COMPILE_ERROR here, but there's no easy
+ // way to create an ErrorObject for an arbitrary error code with multiple
+ // replacements.
+ UniqueChars str(JS_smprintf("wasm validation error: %s", error.get()));
+ if (!str) {
+ return false;
+ }
+
+ size_t len = strlen(str.get());
+ RootedString message(cx, NewStringCopyN<CanGC>(cx, str.get(), len));
+ if (!message) {
+ return false;
+ }
+
+ RootedObject errorObj(
+ cx, ErrorObject::create(cx, JSEXN_WASMCOMPILEERROR, stack, filename, 0,
+ line, 0, nullptr, message));
+ if (!errorObj) {
+ return false;
+ }
+
+ RootedValue rejectionValue(cx, ObjectValue(*errorObj));
+ return PromiseObject::reject(cx, promise, rejectionValue);
+}
+
+static void LogAsync(JSContext* cx, const char* funcName,
+ const Module& module) {
+ Log(cx, "async %s succeeded%s", funcName,
+ module.loggingDeserialized() ? " (loaded from cache)" : "");
+}
+
+enum class Ret { Pair, Instance };
+
+class AsyncInstantiateTask : public OffThreadPromiseTask {
+ SharedModule module_;
+ PersistentRooted<ImportValues> imports_;
+ Ret ret_;
+
+ public:
+ AsyncInstantiateTask(JSContext* cx, const Module& module, Ret ret,
+ Handle<PromiseObject*> promise)
+ : OffThreadPromiseTask(cx, promise),
+ module_(&module),
+ imports_(cx),
+ ret_(ret) {}
+
+ ImportValues& imports() { return imports_.get(); }
+
+ bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
+ RootedObject instanceProto(
+ cx, &cx->global()->getPrototype(JSProto_WasmInstance).toObject());
+
+ RootedWasmInstanceObject instanceObj(cx);
+ if (!module_->instantiate(cx, imports_.get(), instanceProto,
+ &instanceObj)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedValue resolutionValue(cx);
+ if (ret_ == Ret::Instance) {
+ resolutionValue = ObjectValue(*instanceObj);
+ } else {
+ RootedObject resultObj(cx, JS_NewPlainObject(cx));
+ if (!resultObj) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedObject moduleProto(
+ cx, &cx->global()->getPrototype(JSProto_WasmModule).toObject());
+ RootedObject moduleObj(
+ cx, WasmModuleObject::create(cx, *module_, moduleProto));
+ if (!moduleObj) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedValue val(cx, ObjectValue(*moduleObj));
+ if (!JS_DefineProperty(cx, resultObj, "module", val, JSPROP_ENUMERATE)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ val = ObjectValue(*instanceObj);
+ if (!JS_DefineProperty(cx, resultObj, "instance", val,
+ JSPROP_ENUMERATE)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ resolutionValue = ObjectValue(*resultObj);
+ }
+
+ if (!PromiseObject::resolve(cx, promise, resolutionValue)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ LogAsync(cx, "instantiate", *module_);
+ return true;
+ }
+};
+
+static bool AsyncInstantiate(JSContext* cx, const Module& module,
+ HandleObject importObj, Ret ret,
+ Handle<PromiseObject*> promise) {
+ auto task = js::MakeUnique<AsyncInstantiateTask>(cx, module, ret, promise);
+ if (!task || !task->init(cx)) {
+ return false;
+ }
+
+ if (!GetImports(cx, module, importObj, &task->imports())) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ task.release()->dispatchResolveAndDestroy();
+ return true;
+}
+
+static bool ResolveCompile(JSContext* cx, const Module& module,
+ Handle<PromiseObject*> promise) {
+ RootedObject proto(
+ cx, &cx->global()->getPrototype(JSProto_WasmModule).toObject());
+ RootedObject moduleObj(cx, WasmModuleObject::create(cx, module, proto));
+ if (!moduleObj) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ RootedValue resolutionValue(cx, ObjectValue(*moduleObj));
+ if (!PromiseObject::resolve(cx, promise, resolutionValue)) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ LogAsync(cx, "compile", module);
+ return true;
+}
+
+struct CompileBufferTask : PromiseHelperTask {
+ MutableBytes bytecode;
+ SharedCompileArgs compileArgs;
+ UniqueChars error;
+ UniqueCharsVector warnings;
+ SharedModule module;
+ bool instantiate;
+ PersistentRootedObject importObj;
+ JSTelemetrySender sender;
+
+ CompileBufferTask(JSContext* cx, Handle<PromiseObject*> promise,
+ HandleObject importObj)
+ : PromiseHelperTask(cx, promise),
+ instantiate(true),
+ importObj(cx, importObj),
+ sender(cx->runtime()) {}
+
+ CompileBufferTask(JSContext* cx, Handle<PromiseObject*> promise)
+ : PromiseHelperTask(cx, promise), instantiate(false) {}
+
+ bool init(JSContext* cx, const char* introducer) {
+ compileArgs = InitCompileArgs(cx, introducer);
+ if (!compileArgs) {
+ return false;
+ }
+ return PromiseHelperTask::init(cx);
+ }
+
+ void execute() override {
+ module = CompileBuffer(*compileArgs, *bytecode, &error, &warnings, nullptr,
+ sender);
+ }
+
+ bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
+ if (!module) {
+ return Reject(cx, *compileArgs, promise, error);
+ }
+ if (!ReportCompileWarnings(cx, warnings)) {
+ return false;
+ }
+ if (instantiate) {
+ return AsyncInstantiate(cx, *module, importObj, Ret::Pair, promise);
+ }
+ return ResolveCompile(cx, *module, promise);
+ }
+};
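+
+// Summary note (not part of the original source): CompileBufferTask backs both
+// WebAssembly.compile() and the buffer form of WebAssembly.instantiate().
+// execute() runs the compilation off the main thread, and resolve() then
+// either resolves the promise with a WebAssembly.Module or, when instantiate
+// is set, goes on to AsyncInstantiate() and resolves with a {module, instance}
+// pair.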
+
+static bool RejectWithPendingException(JSContext* cx,
+ Handle<PromiseObject*> promise,
+ CallArgs& callArgs) {
+ if (!RejectWithPendingException(cx, promise)) {
+ return false;
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool EnsurePromiseSupport(JSContext* cx) {
+ if (!cx->runtime()->offThreadPromiseState.ref().initialized()) {
+ JS_ReportErrorASCII(
+ cx, "WebAssembly Promise APIs not supported in this runtime.");
+ return false;
+ }
+ return true;
+}
+
+static bool GetBufferSource(JSContext* cx, CallArgs callArgs, const char* name,
+ MutableBytes* bytecode) {
+ if (!callArgs.requireAtLeast(cx, name, 1)) {
+ return false;
+ }
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_BUF_ARG);
+ return false;
+ }
+
+ return GetBufferSource(cx, &callArgs[0].toObject(), JSMSG_WASM_BAD_BUF_ARG,
+ bytecode);
+}
+
+static bool WebAssembly_compile(JSContext* cx, unsigned argc, Value* vp) {
+ if (!EnsurePromiseSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async compile() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ auto task = cx->make_unique<CompileBufferTask>(cx, promise);
+ if (!task || !task->init(cx, "WebAssembly.compile")) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ if (!GetBufferSource(cx, callArgs, "WebAssembly.compile", &task->bytecode)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ if (!StartOffThreadPromiseHelperTask(cx, std::move(task))) {
+ return false;
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool GetInstantiateArgs(JSContext* cx, CallArgs callArgs,
+ MutableHandleObject firstArg,
+ MutableHandleObject importObj) {
+ if (!callArgs.requireAtLeast(cx, "WebAssembly.instantiate", 1)) {
+ return false;
+ }
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_BUF_MOD_ARG);
+ return false;
+ }
+
+ firstArg.set(&callArgs[0].toObject());
+
+ return GetImportArg(cx, callArgs, importObj);
+}
+
+static bool WebAssembly_instantiate(JSContext* cx, unsigned argc, Value* vp) {
+ if (!EnsurePromiseSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async instantiate() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ RootedObject firstArg(cx);
+ RootedObject importObj(cx);
+ if (!GetInstantiateArgs(cx, callArgs, &firstArg, &importObj)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ const Module* module;
+ if (IsModuleObject(firstArg, &module)) {
+ if (!AsyncInstantiate(cx, *module, importObj, Ret::Instance, promise)) {
+ return false;
+ }
+ } else {
+ auto task = cx->make_unique<CompileBufferTask>(cx, promise, importObj);
+ if (!task || !task->init(cx, "WebAssembly.instantiate")) {
+ return false;
+ }
+
+ if (!GetBufferSource(cx, firstArg, JSMSG_WASM_BAD_BUF_MOD_ARG,
+ &task->bytecode)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ if (!StartOffThreadPromiseHelperTask(cx, std::move(task))) {
+ return false;
+ }
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool WebAssembly_validate(JSContext* cx, unsigned argc, Value* vp) {
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ MutableBytes bytecode;
+ if (!GetBufferSource(cx, callArgs, "WebAssembly.validate", &bytecode)) {
+ return false;
+ }
+
+ UniqueChars error;
+ bool validated = Validate(cx, *bytecode, &error);
+
+ // If the reason for validation failure was OOM (signalled by null error
+ // message), report out-of-memory so that validate's return is always
+ // correct.
+ if (!validated && !error) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (error) {
+ MOZ_ASSERT(!validated);
+ Log(cx, "validate() failed with: %s", error.get());
+ }
+
+ callArgs.rval().setBoolean(validated);
+ return true;
+}
+
+static bool EnsureStreamSupport(JSContext* cx) {
+ // This should match wasm::StreamingCompilationAvailable().
+
+ if (!EnsurePromiseSupport(cx)) {
+ return false;
+ }
+
+ if (!CanUseExtraThreads()) {
+ JS_ReportErrorASCII(
+ cx, "WebAssembly.compileStreaming not supported with --no-threads");
+ return false;
+ }
+
+ if (!cx->runtime()->consumeStreamCallback) {
+ JS_ReportErrorASCII(cx,
+ "WebAssembly streaming not supported in this runtime");
+ return false;
+ }
+
+ return true;
+}
+
+// This value is chosen and asserted to be disjoint from any host error code.
+static const size_t StreamOOMCode = 0;
+
+static bool RejectWithStreamErrorNumber(JSContext* cx, size_t errorCode,
+ Handle<PromiseObject*> promise) {
+ if (errorCode == StreamOOMCode) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ cx->runtime()->reportStreamErrorCallback(cx, errorCode);
+ return RejectWithPendingException(cx, promise);
+}
+
+class CompileStreamTask : public PromiseHelperTask, public JS::StreamConsumer {
+ // The stream progresses monotonically through these states; the helper
+ // thread wait()s for streamState_ to reach Closed.
+ enum StreamState { Env, Code, Tail, Closed };
+ ExclusiveWaitableData<StreamState> streamState_;
+
+ // Immutable:
+ const bool instantiate_;
+ const PersistentRootedObject importObj_;
+
+ // Immutable after noteResponseURLs() which is called at most once before
+ // first call on stream thread:
+ const MutableCompileArgs compileArgs_;
+
+ // Immutable after Env state:
+ Bytes envBytes_;
+ SectionRange codeSection_;
+
+ // The code section vector is resized once during the Env state and filled
+ // in chunk by chunk during the Code state, updating the end-pointer after
+ // each chunk:
+ Bytes codeBytes_;
+ uint8_t* codeBytesEnd_;
+ ExclusiveBytesPtr exclusiveCodeBytesEnd_;
+
+ // Immutable after Tail state:
+ Bytes tailBytes_;
+ ExclusiveStreamEndData exclusiveStreamEnd_;
+
+ // Written once before Closed state and read in Closed state on main thread:
+ SharedModule module_;
+ Maybe<size_t> streamError_;
+ UniqueChars compileError_;
+ UniqueCharsVector warnings_;
+
+ // Set on stream thread and read racily on helper thread to abort compilation:
+ Atomic<bool> streamFailed_;
+
+ JSTelemetrySender sender_;
+
+  // Called on some thread before consumeChunk(), streamEnd(), streamError():
+
+ void noteResponseURLs(const char* url, const char* sourceMapUrl) override {
+ if (url) {
+ compileArgs_->scriptedCaller.filename = DuplicateString(url);
+ compileArgs_->scriptedCaller.filenameIsURL = true;
+ }
+ if (sourceMapUrl) {
+ compileArgs_->sourceMapURL = DuplicateString(sourceMapUrl);
+ }
+ }
+
+ // Called on a stream thread:
+
+ // Until StartOffThreadPromiseHelperTask succeeds, we are responsible for
+ // dispatching ourselves back to the JS thread.
+ //
+ // Warning: After this function returns, 'this' can be deleted at any time, so
+ // the caller must immediately return from the stream callback.
+ void setClosedAndDestroyBeforeHelperThreadStarted() {
+ streamState_.lock().get() = Closed;
+ dispatchResolveAndDestroy();
+ }
+
+ // See setClosedAndDestroyBeforeHelperThreadStarted() comment.
+ bool rejectAndDestroyBeforeHelperThreadStarted(size_t errorNumber) {
+ MOZ_ASSERT(streamState_.lock() == Env);
+ MOZ_ASSERT(!streamError_);
+ streamError_ = Some(errorNumber);
+ setClosedAndDestroyBeforeHelperThreadStarted();
+ return false;
+ }
+
+ // Once StartOffThreadPromiseHelperTask succeeds, the helper thread will
+ // dispatchResolveAndDestroy() after execute() returns, but execute()
+ // wait()s for state to be Closed.
+ //
+ // Warning: After this function returns, 'this' can be deleted at any time, so
+ // the caller must immediately return from the stream callback.
+ void setClosedAndDestroyAfterHelperThreadStarted() {
+ auto streamState = streamState_.lock();
+ MOZ_ASSERT(streamState != Closed);
+ streamState.get() = Closed;
+ streamState.notify_one(/* stream closed */);
+ }
+
+ // See setClosedAndDestroyAfterHelperThreadStarted() comment.
+ bool rejectAndDestroyAfterHelperThreadStarted(size_t errorNumber) {
+ MOZ_ASSERT(!streamError_);
+ streamError_ = Some(errorNumber);
+ streamFailed_ = true;
+ exclusiveCodeBytesEnd_.lock().notify_one();
+ exclusiveStreamEnd_.lock().notify_one();
+ setClosedAndDestroyAfterHelperThreadStarted();
+ return false;
+ }
+
+ bool consumeChunk(const uint8_t* begin, size_t length) override {
+ switch (streamState_.lock().get()) {
+ case Env: {
+ if (!envBytes_.append(begin, length)) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ if (!StartsCodeSection(envBytes_.begin(), envBytes_.end(),
+ &codeSection_)) {
+ return true;
+ }
+
+ uint32_t extraBytes = envBytes_.length() - codeSection_.start;
+ if (extraBytes) {
+ envBytes_.shrinkTo(codeSection_.start);
+ }
+
+ if (codeSection_.size > MaxCodeSectionBytes) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ if (!codeBytes_.resize(codeSection_.size)) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ codeBytesEnd_ = codeBytes_.begin();
+ exclusiveCodeBytesEnd_.lock().get() = codeBytesEnd_;
+
+ if (!StartOffThreadPromiseHelperTask(this)) {
+ return rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ }
+
+ // Set the state to Code iff StartOffThreadPromiseHelperTask()
+ // succeeds so that the state tells us whether we are before or
+ // after the helper thread started.
+ streamState_.lock().get() = Code;
+
+ if (extraBytes) {
+ return consumeChunk(begin + length - extraBytes, extraBytes);
+ }
+
+ return true;
+ }
+ case Code: {
+ size_t copyLength =
+ std::min<size_t>(length, codeBytes_.end() - codeBytesEnd_);
+ memcpy(codeBytesEnd_, begin, copyLength);
+ codeBytesEnd_ += copyLength;
+
+ {
+ auto codeStreamEnd = exclusiveCodeBytesEnd_.lock();
+ codeStreamEnd.get() = codeBytesEnd_;
+ codeStreamEnd.notify_one();
+ }
+
+ if (codeBytesEnd_ != codeBytes_.end()) {
+ return true;
+ }
+
+ streamState_.lock().get() = Tail;
+
+ if (uint32_t extraBytes = length - copyLength) {
+ return consumeChunk(begin + copyLength, extraBytes);
+ }
+
+ return true;
+ }
+ case Tail: {
+ if (!tailBytes_.append(begin, length)) {
+ return rejectAndDestroyAfterHelperThreadStarted(StreamOOMCode);
+ }
+
+ return true;
+ }
+ case Closed:
+ MOZ_CRASH("consumeChunk() in Closed state");
+ }
+ MOZ_CRASH("unreachable");
+ }
+
+ void streamEnd(JS::OptimizedEncodingListener* tier2Listener) override {
+ switch (streamState_.lock().get()) {
+ case Env: {
+ SharedBytes bytecode = js_new<ShareableBytes>(std::move(envBytes_));
+ if (!bytecode) {
+ rejectAndDestroyBeforeHelperThreadStarted(StreamOOMCode);
+ return;
+ }
+ module_ = CompileBuffer(*compileArgs_, *bytecode, &compileError_,
+ &warnings_, nullptr, sender_);
+ setClosedAndDestroyBeforeHelperThreadStarted();
+ return;
+ }
+ case Code:
+ case Tail:
+ // Unlock exclusiveStreamEnd_ before locking streamState_.
+ {
+ auto streamEnd = exclusiveStreamEnd_.lock();
+ MOZ_ASSERT(!streamEnd->reached);
+ streamEnd->reached = true;
+ streamEnd->tailBytes = &tailBytes_;
+ streamEnd->tier2Listener = tier2Listener;
+ streamEnd.notify_one();
+ }
+ setClosedAndDestroyAfterHelperThreadStarted();
+ return;
+ case Closed:
+ MOZ_CRASH("streamEnd() in Closed state");
+ }
+ }
+
+ void streamError(size_t errorCode) override {
+ MOZ_ASSERT(errorCode != StreamOOMCode);
+ switch (streamState_.lock().get()) {
+ case Env:
+ rejectAndDestroyBeforeHelperThreadStarted(errorCode);
+ return;
+ case Tail:
+ case Code:
+ rejectAndDestroyAfterHelperThreadStarted(errorCode);
+ return;
+ case Closed:
+ MOZ_CRASH("streamError() in Closed state");
+ }
+ }
+
+ void consumeOptimizedEncoding(const uint8_t* begin, size_t length) override {
+ module_ = Module::deserialize(begin, length);
+
+ MOZ_ASSERT(streamState_.lock().get() == Env);
+ setClosedAndDestroyBeforeHelperThreadStarted();
+ }
+
+ // Called on a helper thread:
+
+ void execute() override {
+ module_ =
+ CompileStreaming(*compileArgs_, envBytes_, codeBytes_,
+ exclusiveCodeBytesEnd_, exclusiveStreamEnd_,
+ streamFailed_, &compileError_, &warnings_, sender_);
+
+ // When execute() returns, the CompileStreamTask will be dispatched
+ // back to its JS thread to call resolve() and then be destroyed. We
+ // can't let this happen until the stream has been closed lest
+ // consumeChunk() or streamEnd() be called on a dead object.
+ auto streamState = streamState_.lock();
+ while (streamState != Closed) {
+ streamState.wait(/* stream closed */);
+ }
+ }
+
+ // Called on a JS thread after streaming compilation completes/errors:
+
+ bool resolve(JSContext* cx, Handle<PromiseObject*> promise) override {
+ MOZ_ASSERT(streamState_.lock() == Closed);
+
+ if (module_) {
+ MOZ_ASSERT(!streamFailed_ && !streamError_ && !compileError_);
+ if (!ReportCompileWarnings(cx, warnings_)) {
+ return false;
+ }
+ if (instantiate_) {
+ return AsyncInstantiate(cx, *module_, importObj_, Ret::Pair, promise);
+ }
+ return ResolveCompile(cx, *module_, promise);
+ }
+
+ if (streamError_) {
+ return RejectWithStreamErrorNumber(cx, *streamError_, promise);
+ }
+
+ return Reject(cx, *compileArgs_, promise, compileError_);
+ }
+
+ public:
+ CompileStreamTask(JSContext* cx, Handle<PromiseObject*> promise,
+ CompileArgs& compileArgs, bool instantiate,
+ HandleObject importObj)
+ : PromiseHelperTask(cx, promise),
+ streamState_(mutexid::WasmStreamStatus, Env),
+ instantiate_(instantiate),
+ importObj_(cx, importObj),
+ compileArgs_(&compileArgs),
+ codeSection_{},
+ codeBytesEnd_(nullptr),
+ exclusiveCodeBytesEnd_(mutexid::WasmCodeBytesEnd, nullptr),
+ exclusiveStreamEnd_(mutexid::WasmStreamEnd),
+ streamFailed_(false),
+ sender_(cx->runtime()) {
+ MOZ_ASSERT_IF(importObj_, instantiate_);
+ }
+};
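+
+// Summary note (not part of the original source): a streaming compilation
+// moves monotonically through Env -> Code -> Tail -> Closed. consumeChunk()
+// buffers the module environment until the code section starts, then feeds
+// code bytes to the helper thread through exclusiveCodeBytesEnd_ and collects
+// trailing sections in tailBytes_. execute() waits for the Closed state so the
+// task cannot be destroyed while stream callbacks might still run.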
+
+// A short-lived object that captures the arguments of a
+// WebAssembly.{compileStreaming,instantiateStreaming} while waiting for
+// the Promise<Response> to resolve to a (hopefully) Response.
+class ResolveResponseClosure : public NativeObject {
+ static const unsigned COMPILE_ARGS_SLOT = 0;
+ static const unsigned PROMISE_OBJ_SLOT = 1;
+ static const unsigned INSTANTIATE_SLOT = 2;
+ static const unsigned IMPORT_OBJ_SLOT = 3;
+ static const JSClassOps classOps_;
+
+ static void finalize(JSFreeOp* fop, JSObject* obj) {
+ auto& closure = obj->as<ResolveResponseClosure>();
+ fop->release(obj, &closure.compileArgs(),
+ MemoryUse::WasmResolveResponseClosure);
+ }
+
+ public:
+ static const unsigned RESERVED_SLOTS = 4;
+ static const JSClass class_;
+
+ static ResolveResponseClosure* create(JSContext* cx, const CompileArgs& args,
+ HandleObject promise, bool instantiate,
+ HandleObject importObj) {
+ MOZ_ASSERT_IF(importObj, instantiate);
+
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<ResolveResponseClosure>(cx, nullptr);
+ if (!obj) {
+ return nullptr;
+ }
+
+ args.AddRef();
+ InitReservedSlot(obj, COMPILE_ARGS_SLOT, const_cast<CompileArgs*>(&args),
+ MemoryUse::WasmResolveResponseClosure);
+ obj->setReservedSlot(PROMISE_OBJ_SLOT, ObjectValue(*promise));
+ obj->setReservedSlot(INSTANTIATE_SLOT, BooleanValue(instantiate));
+ obj->setReservedSlot(IMPORT_OBJ_SLOT, ObjectOrNullValue(importObj));
+ return obj;
+ }
+
+ CompileArgs& compileArgs() const {
+ return *(CompileArgs*)getReservedSlot(COMPILE_ARGS_SLOT).toPrivate();
+ }
+ PromiseObject& promise() const {
+ return getReservedSlot(PROMISE_OBJ_SLOT).toObject().as<PromiseObject>();
+ }
+ bool instantiate() const {
+ return getReservedSlot(INSTANTIATE_SLOT).toBoolean();
+ }
+ JSObject* importObj() const {
+ return getReservedSlot(IMPORT_OBJ_SLOT).toObjectOrNull();
+ }
+};
+
+const JSClassOps ResolveResponseClosure::classOps_ = {
+ nullptr, // addProperty
+ nullptr, // delProperty
+ nullptr, // enumerate
+ nullptr, // newEnumerate
+ nullptr, // resolve
+ nullptr, // mayResolve
+ ResolveResponseClosure::finalize, // finalize
+ nullptr, // call
+ nullptr, // hasInstance
+ nullptr, // construct
+ nullptr, // trace
+};
+
+const JSClass ResolveResponseClosure::class_ = {
+ "WebAssembly ResolveResponseClosure",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(ResolveResponseClosure::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &ResolveResponseClosure::classOps_,
+};
+
+static ResolveResponseClosure* ToResolveResponseClosure(CallArgs args) {
+ return &args.callee()
+ .as<JSFunction>()
+ .getExtendedSlot(0)
+ .toObject()
+ .as<ResolveResponseClosure>();
+}
+
+static bool RejectWithErrorNumber(JSContext* cx, uint32_t errorNumber,
+ Handle<PromiseObject*> promise) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr, errorNumber);
+ return RejectWithPendingException(cx, promise);
+}
+
+static bool ResolveResponse_OnFulfilled(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ Rooted<ResolveResponseClosure*> closure(cx,
+ ToResolveResponseClosure(callArgs));
+ Rooted<PromiseObject*> promise(cx, &closure->promise());
+ CompileArgs& compileArgs = closure->compileArgs();
+ bool instantiate = closure->instantiate();
+ Rooted<JSObject*> importObj(cx, closure->importObj());
+
+ auto task = cx->make_unique<CompileStreamTask>(cx, promise, compileArgs,
+ instantiate, importObj);
+ if (!task || !task->init(cx)) {
+ return false;
+ }
+
+ if (!callArgs.get(0).isObject()) {
+ return RejectWithErrorNumber(cx, JSMSG_WASM_BAD_RESPONSE_VALUE, promise);
+ }
+
+ RootedObject response(cx, &callArgs.get(0).toObject());
+ if (!cx->runtime()->consumeStreamCallback(cx, response, JS::MimeType::Wasm,
+ task.get())) {
+ return RejectWithPendingException(cx, promise);
+ }
+
+ Unused << task.release();
+
+ callArgs.rval().setUndefined();
+ return true;
+}
+
+static bool ResolveResponse_OnRejected(JSContext* cx, unsigned argc,
+ Value* vp) {
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ Rooted<ResolveResponseClosure*> closure(cx, ToResolveResponseClosure(args));
+ Rooted<PromiseObject*> promise(cx, &closure->promise());
+
+ if (!PromiseObject::reject(cx, promise, args.get(0))) {
+ return false;
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
+
+static bool ResolveResponse(JSContext* cx, CallArgs callArgs,
+ Handle<PromiseObject*> promise,
+ bool instantiate = false,
+ HandleObject importObj = nullptr) {
+ MOZ_ASSERT_IF(importObj, instantiate);
+
+ const char* introducer = instantiate ? "WebAssembly.instantiateStreaming"
+ : "WebAssembly.compileStreaming";
+
+ SharedCompileArgs compileArgs = InitCompileArgs(cx, introducer);
+ if (!compileArgs) {
+ return false;
+ }
+
+ RootedObject closure(
+ cx, ResolveResponseClosure::create(cx, *compileArgs, promise, instantiate,
+ importObj));
+ if (!closure) {
+ return false;
+ }
+
+ RootedFunction onResolved(
+ cx, NewNativeFunction(cx, ResolveResponse_OnFulfilled, 1, nullptr,
+ gc::AllocKind::FUNCTION_EXTENDED, GenericObject));
+ if (!onResolved) {
+ return false;
+ }
+
+ RootedFunction onRejected(
+ cx, NewNativeFunction(cx, ResolveResponse_OnRejected, 1, nullptr,
+ gc::AllocKind::FUNCTION_EXTENDED, GenericObject));
+ if (!onRejected) {
+ return false;
+ }
+
+ onResolved->setExtendedSlot(0, ObjectValue(*closure));
+ onRejected->setExtendedSlot(0, ObjectValue(*closure));
+
+ RootedObject resolve(cx,
+ PromiseObject::unforgeableResolve(cx, callArgs.get(0)));
+ if (!resolve) {
+ return false;
+ }
+
+ return JS::AddPromiseReactions(cx, resolve, onResolved, onRejected);
+}
+
+static bool WebAssembly_compileStreaming(JSContext* cx, unsigned argc,
+ Value* vp) {
+ if (!EnsureStreamSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async compileStreaming() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ if (!ResolveResponse(cx, callArgs, promise)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool WebAssembly_instantiateStreaming(JSContext* cx, unsigned argc,
+ Value* vp) {
+ if (!EnsureStreamSupport(cx)) {
+ return false;
+ }
+
+ Log(cx, "async instantiateStreaming() started");
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::createSkippingExecutor(cx));
+ if (!promise) {
+ return false;
+ }
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ RootedObject firstArg(cx);
+ RootedObject importObj(cx);
+ if (!GetInstantiateArgs(cx, callArgs, &firstArg, &importObj)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ if (!ResolveResponse(cx, callArgs, promise, true, importObj)) {
+ return RejectWithPendingException(cx, promise, callArgs);
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
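+
+// Illustrative note (not part of the original source): from script, these two
+// entry points back the usual pattern
+//
+//   const {module, instance} =
+//       await WebAssembly.instantiateStreaming(fetch("m.wasm"), imports);
+//
+// where the Response promise is unwrapped by ResolveResponse() above and the
+// bytes are streamed into a CompileStreamTask.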
+
+static const JSFunctionSpec WebAssembly_static_methods[] = {
+ JS_FN(js_toSource_str, WebAssembly_toSource, 0, 0),
+ JS_FN("compile", WebAssembly_compile, 1, JSPROP_ENUMERATE),
+ JS_FN("instantiate", WebAssembly_instantiate, 1, JSPROP_ENUMERATE),
+ JS_FN("validate", WebAssembly_validate, 1, JSPROP_ENUMERATE),
+ JS_FN("compileStreaming", WebAssembly_compileStreaming, 1,
+ JSPROP_ENUMERATE),
+ JS_FN("instantiateStreaming", WebAssembly_instantiateStreaming, 1,
+ JSPROP_ENUMERATE),
+ JS_FS_END};
+
+static JSObject* CreateWebAssemblyObject(JSContext* cx, JSProtoKey key) {
+ MOZ_RELEASE_ASSERT(HasSupport(cx));
+
+ Handle<GlobalObject*> global = cx->global();
+ RootedObject proto(cx, GlobalObject::getOrCreateObjectPrototype(cx, global));
+ if (!proto) {
+ return nullptr;
+ }
+ return NewTenuredObjectWithGivenProto(cx, &WasmNamespaceObject::class_,
+ proto);
+}
+
+static bool WebAssemblyClassFinish(JSContext* cx, HandleObject object,
+ HandleObject proto) {
+ Handle<WasmNamespaceObject*> wasm = object.as<WasmNamespaceObject>();
+
+ struct NameAndProtoKey {
+ const char* const name;
+ JSProtoKey key;
+ };
+
+ constexpr NameAndProtoKey entries[] = {
+ {"Module", JSProto_WasmModule},
+ {"Instance", JSProto_WasmInstance},
+ {"Memory", JSProto_WasmMemory},
+ {"Table", JSProto_WasmTable},
+ {"Global", JSProto_WasmGlobal},
+#ifdef ENABLE_WASM_EXCEPTIONS
+ {"Exception", JSProto_WasmException},
+#endif
+ {"CompileError", GetExceptionProtoKey(JSEXN_WASMCOMPILEERROR)},
+ {"LinkError", GetExceptionProtoKey(JSEXN_WASMLINKERROR)},
+ {"RuntimeError", GetExceptionProtoKey(JSEXN_WASMRUNTIMEERROR)},
+ };
+
+ RootedValue ctorValue(cx);
+ RootedId id(cx);
+ for (const auto& entry : entries) {
+ const char* name = entry.name;
+ JSProtoKey key = entry.key;
+
+ JSObject* ctor = GlobalObject::getOrCreateConstructor(cx, key);
+ if (!ctor) {
+ return false;
+ }
+ ctorValue.setObject(*ctor);
+
+ JSAtom* className = Atomize(cx, name, strlen(name));
+ if (!className) {
+ return false;
+ }
+ id.set(AtomToId(className));
+
+ if (!DefineDataProperty(cx, wasm, id, ctorValue, 0)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static const ClassSpec WebAssemblyClassSpec = {CreateWebAssemblyObject,
+ nullptr,
+ WebAssembly_static_methods,
+ nullptr,
+ nullptr,
+ nullptr,
+ WebAssemblyClassFinish};
+
+const JSClass js::WasmNamespaceObject::class_ = {
+ js_WebAssembly_str, JSCLASS_HAS_CACHED_PROTO(JSProto_WebAssembly),
+ JS_NULL_CLASS_OPS, &WebAssemblyClassSpec};
diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
new file mode 100644
index 0000000000..e3028c52df
--- /dev/null
+++ b/js/src/wasm/WasmJS.h
@@ -0,0 +1,527 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_js_h
+#define wasm_js_h
+
+#include "mozilla/HashTable.h" // DefaultHasher
+#include "mozilla/Maybe.h" // mozilla::Maybe
+
+#include <stdint.h> // int32_t, int64_t, uint32_t
+
+#include "gc/Barrier.h" // HeapPtr
+#include "gc/ZoneAllocator.h" // ZoneAllocPolicy
+#include "js/AllocPolicy.h" // SystemAllocPolicy
+#include "js/Class.h" // JSClassOps, ClassSpec
+#include "js/GCHashTable.h" // GCHashMap, GCHashSet
+#include "js/GCVector.h" // GCVector
+#include "js/PropertySpec.h" // JSPropertySpec, JSFunctionSpec
+#include "js/RootingAPI.h" // MovableCellHasher
+#include "js/SweepingAPI.h" // JS::WeakCache
+#include "js/TypeDecls.h" // HandleValue, HandleObject, MutableHandleObject, MutableHandleFunction
+#include "js/Vector.h" // JS::Vector
+#include "vm/JSFunction.h" // JSFunction
+#include "vm/NativeObject.h" // NativeObject
+#include "wasm/WasmTypes.h" // MutableHandleWasmInstanceObject, wasm::*
+
+class JSFreeOp;
+class JSObject;
+class JSTracer;
+struct JSContext;
+
+namespace JS {
+class CallArgs;
+class Value;
+} // namespace JS
+
+namespace js {
+
+class ArrayBufferObject;
+class ArrayBufferObjectMaybeShared;
+class JSStringBuilder;
+class SharedArrayRawBuffer;
+class StructTypeDescr;
+class TypedArrayObject;
+class WasmArrayRawBuffer;
+class WasmFunctionScope;
+class WasmInstanceScope;
+
+namespace wasm {
+
+struct ImportValues;
+
+// Return whether WebAssembly can in principle be compiled on this platform
+// (i.e., combination of hardware and OS), assuming at least one of the
+// compilers that supports the platform is not disabled by other settings.
+//
+// This predicate must be checked and must be true to call any of the top-level
+// wasm eval/compile methods.
+
+bool HasPlatformSupport(JSContext* cx);
+
+// Return whether WebAssembly is supported on this platform. This determines
+// whether the WebAssembly object is exposed to JS in this context / realm.
+//
+// It does *not* guarantee that a compiler is actually available; that has to be
+// checked separately, as it is sometimes run-time variant, depending on whether
+// a debugger has been created or not.
+
+bool HasSupport(JSContext* cx);
+
+// Predicates for compiler availability.
+//
+// These three predicates together select zero or one baseline compiler and zero
+// or one optimizing compiler, based on: what's compiled into the executable,
+// what's supported on the current platform, what's selected by options, and the
+// current run-time environment. As it is possible for the computed values to
+// change (when a value changes in about:config or the debugger pane is shown or
+// hidden), it is inadvisable to cache these values in such a way that they
+// could become invalid. Generally it is cheap always to recompute them.
+
+bool BaselineAvailable(JSContext* cx);
+bool IonAvailable(JSContext* cx);
+bool CraneliftAvailable(JSContext* cx);
+
+// Test all three.
+
+bool AnyCompilerAvailable(JSContext* cx);
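+
+// Example (illustrative only, not part of the original header): callers that
+// are about to compile typically re-check availability each time, e.g.
+//
+//   if (!wasm::HasSupport(cx) || !wasm::AnyCompilerAvailable(cx)) {
+//     return false;  // no wasm, or no compiler currently available
+//   }
+//
+// rather than caching the result, since the answer can change at run time.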
+
+// Predicates for white-box compiler disablement testing.
+//
+// These predicates determine whether the optimizing compilers were disabled by
+// features that are enabled at compile-time or run-time. They do not consider
+// the hardware platform, nor whether other compilers are enabled.
+//
+// If `reason` is not null then it is populated with a string that describes
+// the specific features that disable the compiler.
+//
+// Returns false on OOM (which happens only when a reason is requested),
+// otherwise true, with the result in `*isDisabled` and optionally the reason in
+// `*reason`.
+
+bool IonDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason = nullptr);
+bool CraneliftDisabledByFeatures(JSContext* cx, bool* isDisabled,
+ JSStringBuilder* reason = nullptr);
+
+// Predicates for feature availability.
+//
+// The following predicates check whether particular wasm features are enabled,
+// and for each, whether at least one compiler is (currently) available that
+// supports the feature.
+
+// Streaming compilation.
+bool StreamingCompilationAvailable(JSContext* cx);
+
+// Caching of optimized code. Implies both streaming compilation and an
+// optimizing compiler tier.
+bool CodeCachingAvailable(JSContext* cx);
+
+// General reference types (externref, funcref) and operations on them.
+bool ReftypesAvailable(JSContext* cx);
+
+// Typed function references support.
+bool FunctionReferencesAvailable(JSContext* cx);
+
+// Experimental (ref T) types and structure types.
+bool GcTypesAvailable(JSContext* cx);
+
+// Multi-value block and function returns.
+bool MultiValuesAvailable(JSContext* cx);
+
+// Shared memory and atomics.
+bool ThreadsAvailable(JSContext* cx);
+
+// SIMD data and operations.
+bool SimdAvailable(JSContext* cx);
+
+// Very experimental SIMD operations.
+bool SimdWormholeAvailable(JSContext* cx);
+
+#if defined(ENABLE_WASM_SIMD) && defined(DEBUG)
+// Report the result of a Simd simplification to the testing infrastructure.
+void ReportSimdAnalysis(const char* data);
+#endif
+
+// Returns true if WebAssembly as configured by compile-time flags and run-time
+// options can support try/catch, throw, rethrow, and branch_on_exn (evolving).
+bool ExceptionsAvailable(JSContext* cx);
+
+// Compiles the given binary wasm module given the ArrayBufferObject
+// and links the module's imports with the given import object.
+
+[[nodiscard]] bool Eval(JSContext* cx, Handle<TypedArrayObject*> code,
+ HandleObject importObj,
+ MutableHandleWasmInstanceObject instanceObj);
+
+// Extracts the various imports from the given import object into the given
+// ImportValues structure while checking the imports against the given module.
+// The resulting structure can be passed to WasmModule::instantiate.
+
+struct ImportValues;
+[[nodiscard]] bool GetImports(JSContext* cx, const Module& module,
+ HandleObject importObj, ImportValues* imports);
+
+// For testing cross-process (de)serialization, this pair of functions is
+// responsible for, in the child process, compiling the given wasm bytecode
+// to a wasm::Module that is serialized into the given byte array, and, in
+// the parent process, deserializing the given byte array into a
+// WebAssembly.Module object.
+
+[[nodiscard]] bool CompileAndSerialize(const ShareableBytes& bytecode,
+ Bytes* serialized);
+
+[[nodiscard]] bool DeserializeModule(JSContext* cx, const Bytes& serialized,
+ MutableHandleObject module);
+
+// A WebAssembly "Exported Function" is the spec name for the JS function
+// objects created to wrap wasm functions. This predicate returns false
+// for asm.js functions which are semantically just normal JS functions
+// (even if they are implemented via wasm under the hood). The accessor
+// functions for extracting the instance and func-index of a wasm function
+// can be used for both wasm and asm.js, however.
+
+bool IsWasmExportedFunction(JSFunction* fun);
+
+Instance& ExportedFunctionToInstance(JSFunction* fun);
+WasmInstanceObject* ExportedFunctionToInstanceObject(JSFunction* fun);
+uint32_t ExportedFunctionToFuncIndex(JSFunction* fun);
+
+bool IsSharedWasmMemoryObject(JSObject* obj);
+
+// Abstractions that clarify that we are working on a 32-bit memory and check
+// that the buffer length does not exceed that memory's fixed limits.
+//
+// Once the larger ArrayBuffers are stable these may become accessors on the
+// objects themselves: wasmByteLength32() etc.
+uint32_t ByteLength32(Handle<ArrayBufferObjectMaybeShared*> buffer);
+uint32_t ByteLength32(const ArrayBufferObjectMaybeShared& buffer);
+uint32_t ByteLength32(const WasmArrayRawBuffer* buffer);
+uint32_t ByteLength32(const ArrayBufferObject& buffer);
+uint32_t VolatileByteLength32(const SharedArrayRawBuffer* buffer);
+
+} // namespace wasm
+
+// The class of WebAssembly.Module. Each WasmModuleObject owns a
+// wasm::Module. These objects are used both as content-facing JS objects and as
+// internal implementation details of asm.js.
+
+class WasmModuleObject : public NativeObject {
+ static const unsigned MODULE_SLOT = 0;
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JSFreeOp* fop, JSObject* obj);
+ static bool imports(JSContext* cx, unsigned argc, Value* vp);
+ static bool exports(JSContext* cx, unsigned argc, Value* vp);
+ static bool customSections(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmModuleObject* create(JSContext* cx, const wasm::Module& module,
+ HandleObject proto);
+ const wasm::Module& module() const;
+};
+
+// The class of WebAssembly.Global. This wraps a storage location, and there is
+// a per-agent one-to-one relationship between the WasmGlobalObject and the
+// storage location (the Cell) it wraps: if a module re-exports an imported
+// global, the imported and exported WasmGlobalObjects are the same, and if a
+// module exports a global twice, the two exported WasmGlobalObjects are the
+// same.
+
+// TODO/AnyRef-boxing: With boxed immediates and strings, JSObject* is no longer
+// the most appropriate representation for Cell::anyref.
+STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+
+class WasmGlobalObject : public NativeObject {
+ static const unsigned MUTABLE_SLOT = 0;
+ static const unsigned VAL_SLOT = 1;
+
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JSFreeOp*, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+
+ static bool typeImpl(JSContext* cx, const CallArgs& args);
+ static bool type(JSContext* cx, unsigned argc, Value* vp);
+
+ static bool valueGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool valueGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool valueSetterImpl(JSContext* cx, const CallArgs& args);
+ static bool valueSetter(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmGlobalObject* create(JSContext* cx, wasm::HandleVal value,
+ bool isMutable, HandleObject proto);
+ bool isNewborn() { return getReservedSlot(VAL_SLOT).isUndefined(); }
+
+ bool isMutable() const;
+ wasm::ValType type() const;
+ wasm::GCPtrVal& val() const;
+};
+
+// The class of WebAssembly.Instance. Each WasmInstanceObject owns a
+// wasm::Instance. These objects are used both as content-facing JS objects and
+// as internal implementation details of asm.js.
+
+class WasmInstanceObject : public NativeObject {
+ static const unsigned INSTANCE_SLOT = 0;
+ static const unsigned EXPORTS_OBJ_SLOT = 1;
+ static const unsigned EXPORTS_SLOT = 2;
+ static const unsigned SCOPES_SLOT = 3;
+ static const unsigned INSTANCE_SCOPE_SLOT = 4;
+ static const unsigned GLOBALS_SLOT = 5;
+
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static bool exportsGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool exportsGetter(JSContext* cx, unsigned argc, Value* vp);
+ bool isNewborn() const;
+ static void finalize(JSFreeOp* fop, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+
+ // ExportMap maps from function index to exported function object.
+ // This allows the instance to lazily create exported function
+  // objects on demand (instead of up-front for all table elements) while
+ // correctly preserving observable function object identity.
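+  // Lookups therefore follow the usual get-or-create pattern. A rough,
+  // hypothetical sketch (the real logic lives in getExportedFunction below):
+  //
+  //   auto p = instanceObj->exports().lookupForAdd(funcIndex);
+  //   if (p) {
+  //     fun.set(p->value());
+  //   } else {
+  //     // ... create the JSFunction `fun`, then:
+  //     if (!instanceObj->exports().add(p, funcIndex, fun)) return false;
+  //   }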
+ using ExportMap = GCHashMap<uint32_t, HeapPtr<JSFunction*>,
+ DefaultHasher<uint32_t>, ZoneAllocPolicy>;
+ ExportMap& exports() const;
+
+ // See the definition inside WasmJS.cpp.
+ class UnspecifiedScopeMap;
+ UnspecifiedScopeMap& scopes() const;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 6;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmInstanceObject* create(
+ JSContext* cx, RefPtr<const wasm::Code> code,
+ const wasm::DataSegmentVector& dataSegments,
+ const wasm::ElemSegmentVector& elemSegments, wasm::UniqueTlsData tlsData,
+ HandleWasmMemoryObject memory,
+ Vector<RefPtr<wasm::ExceptionTag>, 0, SystemAllocPolicy>&& exceptionTags,
+ Vector<RefPtr<wasm::Table>, 0, SystemAllocPolicy>&& tables,
+ const JSFunctionVector& funcImports,
+ const wasm::GlobalDescVector& globals,
+ const wasm::ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs, HandleObject proto,
+ UniquePtr<wasm::DebugState> maybeDebug);
+ void initExportsObj(JSObject& exportsObj);
+
+ wasm::Instance& instance() const;
+ JSObject& exportsObj() const;
+
+ static bool getExportedFunction(JSContext* cx,
+ HandleWasmInstanceObject instanceObj,
+ uint32_t funcIndex,
+ MutableHandleFunction fun);
+
+ const wasm::CodeRange& getExportedFunctionCodeRange(JSFunction* fun,
+ wasm::Tier tier);
+
+ static WasmInstanceScope* getScope(JSContext* cx,
+ HandleWasmInstanceObject instanceObj);
+ static WasmFunctionScope* getFunctionScope(
+ JSContext* cx, HandleWasmInstanceObject instanceObj, uint32_t funcIndex);
+
+ using GlobalObjectVector =
+ GCVector<HeapPtr<WasmGlobalObject*>, 0, ZoneAllocPolicy>;
+ GlobalObjectVector& indirectGlobals() const;
+};
+
+// The class of WebAssembly.Memory. A WasmMemoryObject references an ArrayBuffer
+// or SharedArrayBuffer object which owns the actual memory.
+
+class WasmMemoryObject : public NativeObject {
+ static const unsigned BUFFER_SLOT = 0;
+ static const unsigned OBSERVERS_SLOT = 1;
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JSFreeOp* fop, JSObject* obj);
+ static bool bufferGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool bufferGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool typeImpl(JSContext* cx, const CallArgs& args);
+ static bool type(JSContext* cx, unsigned argc, Value* vp);
+ static bool growImpl(JSContext* cx, const CallArgs& args);
+ static bool grow(JSContext* cx, unsigned argc, Value* vp);
+ static uint32_t growShared(HandleWasmMemoryObject memory, uint32_t delta);
+
+ using InstanceSet =
+ JS::WeakCache<GCHashSet<WeakHeapPtrWasmInstanceObject,
+ MovableCellHasher<WeakHeapPtrWasmInstanceObject>,
+ ZoneAllocPolicy>>;
+ bool hasObservers() const;
+ InstanceSet& observers() const;
+ InstanceSet* getOrCreateObservers(JSContext* cx);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmMemoryObject* create(JSContext* cx,
+ Handle<ArrayBufferObjectMaybeShared*> buffer,
+ HandleObject proto);
+
+ // `buffer()` returns the current buffer object always. If the buffer
+ // represents shared memory then `buffer().byteLength()` never changes, and
+ // in particular it may be a smaller value than that returned from
+ // `volatileMemoryLength32()` below.
+ //
+  // Generally, do not call `buffer().byteLength()`; call
+  // `volatileMemoryLength32()` instead.
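+  //
+  // For example, a hypothetical caller copying the contents of a possibly
+  // shared memory would size the copy with the volatile length:
+  //
+  //   uint32_t len = memory->volatileMemoryLength32();
+  //   memcpy(dst, memory->buffer().dataPointerEither().unwrap(/* memcpy */),
+  //          len);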
+ ArrayBufferObjectMaybeShared& buffer() const;
+
+ // The current length of the memory. In the case of shared memory, the
+ // length can change at any time. Also note that this will acquire a lock
+ // for shared memory, so do not call this from a signal handler.
+ uint32_t volatileMemoryLength32() const;
+
+ bool isShared() const;
+ bool isHuge() const;
+ bool movingGrowable() const;
+ uint32_t boundsCheckLimit32() const;
+
+ // If isShared() is true then obtain the underlying buffer object.
+ SharedArrayRawBuffer* sharedArrayRawBuffer() const;
+
+ bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
+ static uint32_t grow(HandleWasmMemoryObject memory, uint32_t delta,
+ JSContext* cx);
+};
+
+// The class of WebAssembly.Table. A WasmTableObject holds a refcount on a
+// wasm::Table, allowing a Table to be shared between multiple Instances
+// (eventually between multiple threads).
+
+class WasmTableObject : public NativeObject {
+ static const unsigned TABLE_SLOT = 0;
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ bool isNewborn() const;
+ static void finalize(JSFreeOp* fop, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+ static bool lengthGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool lengthGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool typeImpl(JSContext* cx, const CallArgs& args);
+ static bool type(JSContext* cx, unsigned argc, Value* vp);
+ static bool getImpl(JSContext* cx, const CallArgs& args);
+ static bool get(JSContext* cx, unsigned argc, Value* vp);
+ static bool setImpl(JSContext* cx, const CallArgs& args);
+ static bool set(JSContext* cx, unsigned argc, Value* vp);
+ static bool growImpl(JSContext* cx, const CallArgs& args);
+ static bool grow(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ // Note that, after creation, a WasmTableObject's table() is not initialized
+ // and must be initialized before use.
+
+ static WasmTableObject* create(JSContext* cx, uint32_t initialLength,
+ mozilla::Maybe<uint32_t> maximumLength,
+ wasm::RefType tableType, HandleObject proto);
+ wasm::Table& table() const;
+
+ // Perform the standard `ToWebAssemblyValue` coercion on `value` and fill the
+ // range [index, index + length) in the table. Callers are required to ensure
+ // the range is within bounds. Returns false if the coercion failed.
+ bool fillRange(JSContext* cx, uint32_t index, uint32_t length,
+ HandleValue value) const;
+#ifdef DEBUG
+ void assertRangeNull(uint32_t index, uint32_t length) const;
+#endif
+};
+
+// The class of WebAssembly.Exception. This class is used to track exception
+// types for exports and imports.
+
+class WasmExceptionObject : public NativeObject {
+ static const unsigned TAG_SLOT = 0;
+ static const unsigned TYPE_SLOT = 1;
+
+ static const JSClassOps classOps_;
+ static const ClassSpec classSpec_;
+ static void finalize(JSFreeOp*, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+ static const JSClass class_;
+ static const JSClass& protoClass_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmExceptionObject* create(JSContext* cx,
+ const wasm::ValTypeVector& type,
+ HandleObject proto);
+ bool isNewborn() const;
+
+ wasm::ValTypeVector& valueTypes() const;
+ wasm::ResultType resultType() const;
+ wasm::ExceptionTag& tag() const;
+};
+
+// The class of the WebAssembly global namespace object.
+
+class WasmNamespaceObject : public NativeObject {
+ public:
+ static const JSClass class_;
+
+ private:
+ static const ClassSpec classSpec_;
+};
+
+} // namespace js
+
+#endif // wasm_js_h
diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
new file mode 100644
index 0000000000..033810948a
--- /dev/null
+++ b/js/src/wasm/WasmModule.cpp
@@ -0,0 +1,1360 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmModule.h"
+
+#include <chrono>
+#include <thread>
+
+#include "jit/JitOptions.h"
+#include "js/BuildId.h" // JS::BuildIdCharVector
+#include "js/experimental/TypedData.h" // JS_NewUint8Array
+#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
+#include "threading/LockGuard.h"
+#include "vm/HelperThreadState.h" // Tier2GeneratorTask
+#include "vm/PlainObject.h" // js::PlainObject
+#include "wasm/TypedObject.h"
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmUtility.h"
+
+#include "debugger/DebugAPI-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/JSAtom-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+class Module::Tier2GeneratorTaskImpl : public Tier2GeneratorTask {
+ SharedCompileArgs compileArgs_;
+ SharedBytes bytecode_;
+ SharedModule module_;
+ Atomic<bool> cancelled_;
+ JSTelemetrySender telemetrySender_;
+
+ public:
+ Tier2GeneratorTaskImpl(const CompileArgs& compileArgs,
+ const ShareableBytes& bytecode, Module& module,
+ JSTelemetrySender telemetrySender)
+ : compileArgs_(&compileArgs),
+ bytecode_(&bytecode),
+ module_(&module),
+ cancelled_(false),
+ telemetrySender_(telemetrySender) {}
+
+ ~Tier2GeneratorTaskImpl() override {
+ module_->tier2Listener_ = nullptr;
+ module_->testingTier2Active_ = false;
+ }
+
+ void cancel() override { cancelled_ = true; }
+
+ void runHelperThreadTask(AutoLockHelperThreadState& locked) override {
+ {
+ AutoUnlockHelperThreadState unlock(locked);
+ CompileTier2(*compileArgs_, bytecode_->bytes, *module_, &cancelled_,
+ telemetrySender_);
+ }
+
+ // During shutdown the main thread will wait for any ongoing (cancelled)
+ // tier-2 generation to shut down normally. To do so, it waits on the
+ // CONSUMER condition for the count of finished generators to rise.
+ HelperThreadState().incWasmTier2GeneratorsFinished(locked);
+
+ // The task is finished, release it.
+ js_delete(this);
+ }
+
+ ThreadType threadType() override {
+ return ThreadType::THREAD_TYPE_WASM_TIER2;
+ }
+};
+
+Module::~Module() {
+ // Note: Modules can be destroyed on any thread.
+ MOZ_ASSERT(!tier2Listener_);
+ MOZ_ASSERT(!testingTier2Active_);
+}
+
+void Module::startTier2(const CompileArgs& args, const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* listener,
+ JSTelemetrySender telemetrySender) {
+ MOZ_ASSERT(!testingTier2Active_);
+
+ auto task = MakeUnique<Tier2GeneratorTaskImpl>(args, bytecode, *this,
+ telemetrySender);
+ if (!task) {
+ return;
+ }
+
+ // These will be cleared asynchronously by ~Tier2GeneratorTaskImpl() if not
+ // sooner by finishTier2().
+ tier2Listener_ = listener;
+ testingTier2Active_ = true;
+
+ StartOffThreadWasmTier2Generator(std::move(task));
+}
+
+bool Module::finishTier2(const LinkData& linkData2,
+ UniqueCodeTier code2) const {
+ MOZ_ASSERT(code().bestTier() == Tier::Baseline &&
+ code2->tier() == Tier::Optimized);
+
+ // Install the data in the data structures. They will not be visible
+ // until commitTier2().
+
+ if (!code().setTier2(std::move(code2), linkData2)) {
+ return false;
+ }
+
+ // Before we can make tier-2 live, we need to compile tier2 versions of any
+ // extant tier1 lazy stubs (otherwise, tiering would break the assumption
+ // that any extant exported wasm function has had a lazy entry stub already
+ // compiled for it).
+ {
+    // We need to prevent new tier1 stubs from being generated until we've
+    // committed the new tier2 stubs; otherwise a function could gain a tier1
+    // stub after we enumerate the existing ones but before we commit, and it
+    // would be left without a tier2 stub.
+
+ const MetadataTier& metadataTier1 = metadata(Tier::Baseline);
+
+ auto stubs1 = code().codeTier(Tier::Baseline).lazyStubs().lock();
+ auto stubs2 = code().codeTier(Tier::Optimized).lazyStubs().lock();
+
+ MOZ_ASSERT(stubs2->empty());
+
+ Uint32Vector funcExportIndices;
+ for (size_t i = 0; i < metadataTier1.funcExports.length(); i++) {
+ const FuncExport& fe = metadataTier1.funcExports[i];
+ if (fe.hasEagerStubs()) {
+ continue;
+ }
+ if (!stubs1->hasStub(fe.funcIndex())) {
+ continue;
+ }
+ if (!funcExportIndices.emplaceBack(i)) {
+ return false;
+ }
+ }
+
+ const CodeTier& tier2 = code().codeTier(Tier::Optimized);
+
+ Maybe<size_t> stub2Index;
+ if (!stubs2->createTier2(funcExportIndices, tier2, &stub2Index)) {
+ return false;
+ }
+
+ // Now that we can't fail or otherwise abort tier2, make it live.
+
+ MOZ_ASSERT(!code().hasTier2());
+ code().commitTier2();
+
+ stubs2->setJitEntries(stub2Index, code());
+ }
+
+ // And we update the jump vector.
+
+ uint8_t* base = code().segment(Tier::Optimized).base();
+ for (const CodeRange& cr : metadata(Tier::Optimized).codeRanges) {
+ // These are racy writes that we just want to be visible, atomically,
+ // eventually. All hardware we care about will do this right. But
+ // we depend on the compiler not splitting the stores hidden inside the
+ // set*Entry functions.
+ if (cr.isFunction()) {
+ code().setTieringEntry(cr.funcIndex(), base + cr.funcTierEntry());
+ } else if (cr.isJitEntry()) {
+ code().setJitEntry(cr.funcIndex(), base + cr.begin());
+ }
+ }
+
+ // Tier-2 is done; let everyone know. Mark tier-2 active for testing
+ // purposes so that wasmHasTier2CompilationCompleted() only returns true
+ // after tier-2 has been fully cached.
+
+ if (tier2Listener_) {
+ serialize(linkData2, *tier2Listener_);
+ tier2Listener_ = nullptr;
+ }
+ testingTier2Active_ = false;
+
+ return true;
+}
+
+void Module::testingBlockOnTier2Complete() const {
+ while (testingTier2Active_) {
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+ }
+}
+
+/* virtual */
+size_t Module::serializedSize(const LinkData& linkData) const {
+ JS::BuildIdCharVector buildId;
+ {
+ AutoEnterOOMUnsafeRegion oom;
+ if (!GetOptimizedEncodingBuildId(&buildId)) {
+ oom.crash("getting build id");
+ }
+ }
+
+ return SerializedPodVectorSize(buildId) + linkData.serializedSize() +
+ SerializedVectorSize(imports_) + SerializedVectorSize(exports_) +
+ SerializedVectorSize(dataSegments_) +
+ SerializedVectorSize(elemSegments_) +
+ SerializedVectorSize(customSections_) + code_->serializedSize();
+}
+
+/* virtual */
+void Module::serialize(const LinkData& linkData, uint8_t* begin,
+ size_t size) const {
+ MOZ_RELEASE_ASSERT(!metadata().debugEnabled);
+ MOZ_RELEASE_ASSERT(code_->hasTier(Tier::Serialized));
+
+ JS::BuildIdCharVector buildId;
+ {
+ AutoEnterOOMUnsafeRegion oom;
+ if (!GetOptimizedEncodingBuildId(&buildId)) {
+ oom.crash("getting build id");
+ }
+ }
+
+ uint8_t* cursor = begin;
+ cursor = SerializePodVector(cursor, buildId);
+ cursor = linkData.serialize(cursor);
+ cursor = SerializeVector(cursor, imports_);
+ cursor = SerializeVector(cursor, exports_);
+ cursor = SerializeVector(cursor, dataSegments_);
+ cursor = SerializeVector(cursor, elemSegments_);
+ cursor = SerializeVector(cursor, customSections_);
+ cursor = code_->serialize(cursor, linkData);
+ MOZ_RELEASE_ASSERT(cursor == begin + size);
+}
+
+/* static */
+MutableModule Module::deserialize(const uint8_t* begin, size_t size,
+ Metadata* maybeMetadata) {
+ MutableMetadata metadata(maybeMetadata);
+ if (!metadata) {
+ metadata = js_new<Metadata>();
+ if (!metadata) {
+ return nullptr;
+ }
+ }
+
+ const uint8_t* cursor = begin;
+
+ JS::BuildIdCharVector currentBuildId;
+ if (!GetOptimizedEncodingBuildId(&currentBuildId)) {
+ return nullptr;
+ }
+
+ JS::BuildIdCharVector deserializedBuildId;
+ cursor = DeserializePodVector(cursor, &deserializedBuildId);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(EqualContainers(currentBuildId, deserializedBuildId));
+
+ LinkData linkData(Tier::Serialized);
+ cursor = linkData.deserialize(cursor);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ ImportVector imports;
+ cursor = DeserializeVector(cursor, &imports);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ ExportVector exports;
+ cursor = DeserializeVector(cursor, &exports);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ DataSegmentVector dataSegments;
+ cursor = DeserializeVector(cursor, &dataSegments);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ ElemSegmentVector elemSegments;
+ cursor = DeserializeVector(cursor, &elemSegments);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ CustomSectionVector customSections;
+ cursor = DeserializeVector(cursor, &customSections);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ SharedCode code;
+ cursor = Code::deserialize(cursor, linkData, *metadata, &code);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ MOZ_RELEASE_ASSERT(cursor == begin + size);
+ MOZ_RELEASE_ASSERT(!!maybeMetadata == code->metadata().isAsmJS());
+
+ if (metadata->nameCustomSectionIndex) {
+ metadata->namePayload =
+ customSections[*metadata->nameCustomSectionIndex].payload;
+ } else {
+ MOZ_RELEASE_ASSERT(!metadata->moduleName);
+ MOZ_RELEASE_ASSERT(metadata->funcNames.empty());
+ }
+
+ return js_new<Module>(*code, std::move(imports), std::move(exports),
+ std::move(dataSegments), std::move(elemSegments),
+ std::move(customSections), nullptr, nullptr, nullptr,
+ /* loggingDeserialized = */ true);
+}
+
+void Module::serialize(const LinkData& linkData,
+ JS::OptimizedEncodingListener& listener) const {
+ auto bytes = MakeUnique<JS::OptimizedEncodingBytes>();
+ if (!bytes || !bytes->resize(serializedSize(linkData))) {
+ return;
+ }
+
+ serialize(linkData, bytes->begin(), bytes->length());
+
+ listener.storeOptimizedEncoding(std::move(bytes));
+}
+
+/* virtual */
+JSObject* Module::createObject(JSContext* cx) const {
+ if (!GlobalObject::ensureConstructor(cx, cx->global(), JSProto_WebAssembly)) {
+ return nullptr;
+ }
+
+ RootedObject proto(
+ cx, &cx->global()->getPrototype(JSProto_WasmModule).toObject());
+ return WasmModuleObject::create(cx, *this, proto);
+}
+
+/* virtual */
+JSObject* Module::createObjectForAsmJS(JSContext* cx) const {
+ // Use nullptr to get the default object prototype. These objects are never
+ // exposed to script for asm.js.
+ return WasmModuleObject::create(cx, *this, nullptr);
+}
+
+bool wasm::GetOptimizedEncodingBuildId(JS::BuildIdCharVector* buildId) {
+ // From a JS API perspective, the "build id" covers everything that can
+ // cause machine code to become invalid, so include both the actual build-id
+ // and cpu-id.
+
+ if (!GetBuildId || !GetBuildId(buildId)) {
+ return false;
+ }
+
+ uint32_t cpu = ObservedCPUFeatures();
+
+ if (!buildId->reserve(buildId->length() +
+ 12 /* "()" + 8 nibbles + "m[+-]" */)) {
+ return false;
+ }
+
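+  // Append the CPU feature bits as ASCII nibbles, least significant first,
+  // wrapped in parentheses; e.g. cpu == 0x15 contributes "(51)".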
+ buildId->infallibleAppend('(');
+ while (cpu) {
+ buildId->infallibleAppend('0' + (cpu & 0xf));
+ cpu >>= 4;
+ }
+ buildId->infallibleAppend(')');
+
+ buildId->infallibleAppend('m');
+ buildId->infallibleAppend(wasm::IsHugeMemoryEnabled() ? '+' : '-');
+
+ return true;
+}
+
+/* virtual */
+void Module::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code,
+ size_t* data) const {
+ code_->addSizeOfMiscIfNotSeen(mallocSizeOf, seenMetadata, seenCode, code,
+ data);
+ *data += mallocSizeOf(this) +
+ SizeOfVectorExcludingThis(imports_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(exports_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(dataSegments_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(elemSegments_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(customSections_, mallocSizeOf);
+
+ if (debugUnlinkedCode_) {
+ *data += debugUnlinkedCode_->sizeOfExcludingThis(mallocSizeOf);
+ }
+}
+
+void Module::initGCMallocBytesExcludingCode() {
+ // The size doesn't have to be exact so use the serialization framework to
+ // calculate a value.
+ gcMallocBytesExcludingCode_ = sizeof(*this) + SerializedVectorSize(imports_) +
+ SerializedVectorSize(exports_) +
+ SerializedVectorSize(dataSegments_) +
+ SerializedVectorSize(elemSegments_) +
+ SerializedVectorSize(customSections_);
+}
+
+// Extract machine code as a JS object. The result has a "code" property, a
+// Uint8Array, and a "segments" property, an array of objects. Each object
+// contains offsets into the "code" array and basic information about a code
+// segment/function body.
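+//
+// Schematically, a successful result looks like (field values illustrative):
+//
+//   { code: Uint8Array,
+//     segments: [{ begin, end, kind,
+//                  funcIndex?, funcBodyBegin?, funcBodyEnd? }, ...] }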
+bool Module::extractCode(JSContext* cx, Tier tier,
+ MutableHandleValue vp) const {
+ RootedPlainObject result(cx, NewBuiltinClassInstance<PlainObject>(cx));
+ if (!result) {
+ return false;
+ }
+
+  // This function is only used for testing purposes, so we can simply
+  // block until tiered compilation completes.
+ testingBlockOnTier2Complete();
+
+ if (!code_->hasTier(tier)) {
+ vp.setNull();
+ return true;
+ }
+
+ const ModuleSegment& moduleSegment = code_->segment(tier);
+ RootedObject code(cx, JS_NewUint8Array(cx, moduleSegment.length()));
+ if (!code) {
+ return false;
+ }
+
+ memcpy(code->as<TypedArrayObject>().dataPointerUnshared(),
+ moduleSegment.base(), moduleSegment.length());
+
+ RootedValue value(cx, ObjectValue(*code));
+ if (!JS_DefineProperty(cx, result, "code", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ RootedObject segments(cx, NewDenseEmptyArray(cx));
+ if (!segments) {
+ return false;
+ }
+
+ for (const CodeRange& p : metadata(tier).codeRanges) {
+ RootedObject segment(cx, NewObjectWithGivenProto<PlainObject>(cx, nullptr));
+ if (!segment) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.begin());
+ if (!JS_DefineProperty(cx, segment, "begin", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.end());
+ if (!JS_DefineProperty(cx, segment, "end", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.kind());
+ if (!JS_DefineProperty(cx, segment, "kind", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ if (p.isFunction()) {
+ value.setNumber((uint32_t)p.funcIndex());
+ if (!JS_DefineProperty(cx, segment, "funcIndex", value,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.funcUncheckedCallEntry());
+ if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ value.setNumber((uint32_t)p.end());
+ if (!JS_DefineProperty(cx, segment, "funcBodyEnd", value,
+ JSPROP_ENUMERATE)) {
+ return false;
+ }
+ }
+
+ if (!NewbornArrayPush(cx, segments, ObjectValue(*segment))) {
+ return false;
+ }
+ }
+
+ value.setObject(*segments);
+ if (!JS_DefineProperty(cx, result, "segments", value, JSPROP_ENUMERATE)) {
+ return false;
+ }
+
+ vp.setObject(*result);
+ return true;
+}
+
+static uint32_t EvaluateOffsetInitExpr(const ValVector& globalImportValues,
+ InitExpr initExpr) {
+ switch (initExpr.kind()) {
+ case InitExpr::Kind::Constant:
+ return initExpr.val().i32();
+ case InitExpr::Kind::GetGlobal:
+ return globalImportValues[initExpr.globalIndex()].i32();
+ case InitExpr::Kind::RefFunc:
+ break;
+ }
+
+ MOZ_CRASH("bad initializer expression");
+}
+
+#ifdef DEBUG
+static bool AllSegmentsArePassive(const DataSegmentVector& vec) {
+ for (const DataSegment* seg : vec) {
+ if (seg->active()) {
+ return false;
+ }
+ }
+ return true;
+}
+#endif
+
+bool Module::initSegments(JSContext* cx, HandleWasmInstanceObject instanceObj,
+ HandleWasmMemoryObject memoryObj,
+ const ValVector& globalImportValues) const {
+ MOZ_ASSERT_IF(!memoryObj, AllSegmentsArePassive(dataSegments_));
+
+ Instance& instance = instanceObj->instance();
+ const SharedTableVector& tables = instance.tables();
+
+ // Write data/elem segments into memories/tables.
+
+ for (const ElemSegment* seg : elemSegments_) {
+ if (seg->active()) {
+ uint32_t offset =
+ EvaluateOffsetInitExpr(globalImportValues, seg->offset());
+ uint32_t count = seg->length();
+
+ uint32_t tableLength = tables[seg->tableIndex]->length();
+ if (offset > tableLength || tableLength - offset < count) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return false;
+ }
+
+ if (!instance.initElems(seg->tableIndex, *seg, offset, 0, count)) {
+ return false; // OOM
+ }
+ }
+ }
+
+ if (memoryObj) {
+ uint32_t memoryLength = memoryObj->volatileMemoryLength32();
+ uint8_t* memoryBase =
+ memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
+
+ for (const DataSegment* seg : dataSegments_) {
+ if (!seg->active()) {
+ continue;
+ }
+
+ uint32_t offset =
+ EvaluateOffsetInitExpr(globalImportValues, seg->offset());
+ uint32_t count = seg->bytes.length();
+
+ if (offset > memoryLength || memoryLength - offset < count) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_OUT_OF_BOUNDS);
+ return false;
+ }
+ memcpy(memoryBase + offset, seg->bytes.begin(), count);
+ }
+ }
+
+ return true;
+}
+
+static const Import& FindImportFunction(const ImportVector& imports,
+ uint32_t funcImportIndex) {
+ for (const Import& import : imports) {
+ if (import.kind != DefinitionKind::Function) {
+ continue;
+ }
+ if (funcImportIndex == 0) {
+ return import;
+ }
+ funcImportIndex--;
+ }
+ MOZ_CRASH("ran out of imports");
+}
+
+bool Module::instantiateFunctions(JSContext* cx,
+ const JSFunctionVector& funcImports) const {
+#ifdef DEBUG
+ for (auto t : code().tiers()) {
+ MOZ_ASSERT(funcImports.length() == metadata(t).funcImports.length());
+ }
+#endif
+
+ if (metadata().isAsmJS()) {
+ return true;
+ }
+
+ Tier tier = code().stableTier();
+
+ for (size_t i = 0; i < metadata(tier).funcImports.length(); i++) {
+ JSFunction* f = funcImports[i];
+ if (!IsWasmExportedFunction(f)) {
+ continue;
+ }
+
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(f);
+ Instance& instance = ExportedFunctionToInstance(f);
+ Tier otherTier = instance.code().stableTier();
+
+ const FuncExport& funcExport =
+ instance.metadata(otherTier).lookupFuncExport(funcIndex);
+
+ if (funcExport.funcType() != metadata(tier).funcImports[i].funcType()) {
+ const Import& import = FindImportFunction(imports_, i);
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMPORT_SIG, import.module.get(),
+ import.field.get());
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename T>
+static bool CheckLimits(JSContext* cx, T declaredMin,
+ const Maybe<T>& declaredMax, T actualLength,
+ const Maybe<T>& actualMax, bool isAsmJS,
+ const char* kind) {
+ if (isAsmJS) {
+ MOZ_ASSERT(actualLength >= declaredMin);
+ MOZ_ASSERT(!declaredMax);
+ MOZ_ASSERT(actualLength == actualMax.value());
+ return true;
+ }
+
+ if (actualLength < declaredMin ||
+ actualLength > declaredMax.valueOr(UINT32_MAX)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMP_SIZE, kind);
+ return false;
+ }
+
+ if ((actualMax && declaredMax && *actualMax > *declaredMax) ||
+ (!actualMax && declaredMax)) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_IMP_MAX, kind);
+ return false;
+ }
+
+ return true;
+}
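+
+// For example, importing a table of current length 3 with no maximum into a
+// module that declares limits {initial: 2, maximum: 4} passes the length check
+// but fails the maximum check above: a declared maximum requires the import to
+// carry a maximum that is no larger.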
+
+static bool CheckSharing(JSContext* cx, bool declaredShared, bool isShared) {
+ if (isShared &&
+ !cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_NO_SHMEM_LINK);
+ return false;
+ }
+
+ if (declaredShared && !isShared) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_IMP_SHARED_REQD);
+ return false;
+ }
+
+ if (!declaredShared && isShared) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_IMP_SHARED_BANNED);
+ return false;
+ }
+
+ return true;
+}
+
+// asm.js module instantiation supplies its own buffer, but for wasm we create
+// and initialize the buffer here if the module requires one. Either way, the
+// buffer is wrapped in a WebAssembly.Memory object, which is what the Instance
+// stores.
+bool Module::instantiateMemory(JSContext* cx,
+ MutableHandleWasmMemoryObject memory) const {
+ if (!metadata().usesMemory()) {
+ MOZ_ASSERT(!memory);
+ MOZ_ASSERT(AllSegmentsArePassive(dataSegments_));
+ return true;
+ }
+
+ uint64_t declaredMin = metadata().minMemoryLength;
+ Maybe<uint64_t> declaredMax = metadata().maxMemoryLength;
+ bool declaredShared = metadata().memoryUsage == MemoryUsage::Shared;
+
+ if (memory) {
+ MOZ_ASSERT_IF(metadata().isAsmJS(), memory->buffer().isPreparedForAsmJS());
+ MOZ_ASSERT_IF(!metadata().isAsmJS(), memory->buffer().isWasm());
+
+ if (!CheckLimits(cx, declaredMin, declaredMax,
+ uint64_t(memory->volatileMemoryLength32()),
+ memory->buffer().wasmMaxSize(), metadata().isAsmJS(),
+ "Memory")) {
+ return false;
+ }
+
+ if (!CheckSharing(cx, declaredShared, memory->isShared())) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(!metadata().isAsmJS());
+
+ if (declaredMin / PageSize > MaxMemory32Pages) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_MEM_IMP_LIMIT);
+ return false;
+ }
+
+ RootedArrayBufferObjectMaybeShared buffer(cx);
+ Limits l(declaredMin, declaredMax,
+ declaredShared ? Shareable::True : Shareable::False);
+ if (!CreateWasmBuffer(cx, MemoryKind::Memory32, l, &buffer)) {
+ return false;
+ }
+
+ RootedObject proto(
+ cx, &cx->global()->getPrototype(JSProto_WasmMemory).toObject());
+ memory.set(WasmMemoryObject::create(cx, buffer, proto));
+ if (!memory) {
+ return false;
+ }
+ }
+
+ MOZ_RELEASE_ASSERT(memory->isHuge() == metadata().omitsBoundsChecks);
+
+ return true;
+}
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+bool Module::instantiateImportedException(
+ JSContext* cx, Handle<WasmExceptionObject*> exnObj,
+ WasmExceptionObjectVector& exnObjs, SharedExceptionTagVector* tags) const {
+ MOZ_ASSERT(exnObj);
+ // The check whether the EventDesc signature matches the exnObj value types
+ // is done by js::wasm::GetImports().
+
+  // Collect the exception tag from the imported exception.
+ ExceptionTag& tag = exnObj->tag();
+
+ if (!tags->append(&tag)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool Module::instantiateLocalException(JSContext* cx, const EventDesc& ed,
+ WasmExceptionObjectVector& exnObjs,
+ SharedExceptionTagVector* tags,
+ uint32_t exnIndex) const {
+ SharedExceptionTag tag;
+ // Extend exnObjs in anticipation of an exported exception object.
+ if (exnObjs.length() <= exnIndex && !exnObjs.resize(exnIndex + 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (ed.isExport) {
+ // If the exception description is exported, create an export exception
+ // object for it.
+ RootedObject proto(
+ cx, &cx->global()->getPrototype(JSProto_WasmException).toObject());
+ RootedWasmExceptionObject exnObj(
+ cx, WasmExceptionObject::create(cx, ed.type, proto));
+ if (!exnObj) {
+ return false;
+ }
+ // Take the exception tag that was created inside the WasmExceptionObject.
+ tag = &exnObj->tag();
+ // Save the new export exception object.
+ exnObjs[exnIndex] = exnObj;
+ } else {
+    // Create a new tag for every non-exported exception.
+ tag = SharedExceptionTag(cx->new_<ExceptionTag>());
+ if (!tag) {
+ return false;
+ }
+ // The exnObj is null if the exception is neither exported nor imported.
+ }
+ // Collect a tag for every exception.
+ if (!tags->emplaceBack(tag)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool Module::instantiateExceptions(JSContext* cx,
+ WasmExceptionObjectVector& exnObjs,
+ SharedExceptionTagVector* tags) const {
+ uint32_t exnIndex = 0;
+ for (const EventDesc& ed : metadata().events) {
+ if (exnIndex < exnObjs.length()) {
+ Rooted<WasmExceptionObject*> exnObj(cx, exnObjs[exnIndex]);
+ if (!instantiateImportedException(cx, exnObj, exnObjs, tags)) {
+ return false;
+ }
+ } else {
+ if (!instantiateLocalException(cx, ed, exnObjs, tags, exnIndex)) {
+ return false;
+ }
+ }
+ exnIndex++;
+ }
+ return true;
+}
+#endif
+
+bool Module::instantiateImportedTable(JSContext* cx, const TableDesc& td,
+ Handle<WasmTableObject*> tableObj,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const {
+ MOZ_ASSERT(tableObj);
+ MOZ_ASSERT(!metadata().isAsmJS());
+
+ Table& table = tableObj->table();
+ if (!CheckLimits(cx, td.initialLength, td.maximumLength, table.length(),
+ table.maximum(), metadata().isAsmJS(), "Table")) {
+ return false;
+ }
+
+ if (!tables->append(&table)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!tableObjs->append(tableObj)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool Module::instantiateLocalTable(JSContext* cx, const TableDesc& td,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const {
+ if (td.initialLength > MaxTableLength) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_TABLE_IMP_LIMIT);
+ return false;
+ }
+
+ SharedTable table;
+ Rooted<WasmTableObject*> tableObj(cx);
+ if (td.importedOrExported) {
+ RootedObject proto(
+ cx, &cx->global()->getPrototype(JSProto_WasmTable).toObject());
+ tableObj.set(WasmTableObject::create(cx, td.initialLength, td.maximumLength,
+ td.elemType, proto));
+ if (!tableObj) {
+ return false;
+ }
+ table = &tableObj->table();
+ } else {
+ table = Table::create(cx, td, /* HandleWasmTableObject = */ nullptr);
+ if (!table) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+  // Note that we append a null pointer for non-exported local tables.
+ if (!tableObjs->append(tableObj.get())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!tables->emplaceBack(table)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool Module::instantiateTables(JSContext* cx,
+ const WasmTableObjectVector& tableImports,
+ MutableHandle<WasmTableObjectVector> tableObjs,
+ SharedTableVector* tables) const {
+ uint32_t tableIndex = 0;
+ for (const TableDesc& td : metadata().tables) {
+ if (tableIndex < tableImports.length()) {
+ Rooted<WasmTableObject*> tableObj(cx, tableImports[tableIndex]);
+ if (!instantiateImportedTable(cx, td, tableObj, &tableObjs.get(),
+ tables)) {
+ return false;
+ }
+ } else {
+ if (!instantiateLocalTable(cx, td, &tableObjs.get(), tables)) {
+ return false;
+ }
+ }
+ tableIndex++;
+ }
+ return true;
+}
+
+static bool EnsureExportedGlobalObject(JSContext* cx,
+ const ValVector& globalImportValues,
+ size_t globalIndex,
+ const GlobalDesc& global,
+ WasmGlobalObjectVector& globalObjs) {
+ if (globalIndex < globalObjs.length() && globalObjs[globalIndex]) {
+ return true;
+ }
+
+ RootedVal val(cx);
+ if (global.kind() == GlobalKind::Import) {
+ // If this is an import, then this must be a constant global that was
+ // provided without a global object. We must initialize it with the
+    // provided value while we can still distinguish this case.
+ MOZ_ASSERT(!global.isMutable());
+ val.set(Val(globalImportValues[globalIndex]));
+ } else {
+ // If this is not an import, then the initial value will be set by
+ // Instance::init() for indirect globals or else by CreateExportObject().
+ // In either case, we initialize with a default value here.
+ val.set(Val(global.type()));
+ }
+
+ RootedObject proto(
+ cx, &cx->global()->getPrototype(JSProto_WasmGlobal).toObject());
+ RootedWasmGlobalObject go(
+ cx, WasmGlobalObject::create(cx, val, global.isMutable(), proto));
+ if (!go) {
+ return false;
+ }
+
+ if (globalObjs.length() <= globalIndex &&
+ !globalObjs.resize(globalIndex + 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ globalObjs[globalIndex] = go;
+ return true;
+}
+
+bool Module::instantiateGlobals(JSContext* cx,
+ const ValVector& globalImportValues,
+ WasmGlobalObjectVector& globalObjs) const {
+  // Some exported globals may not be in globalObjs, either because they
+  // originate in this module or because they were immutable imports that came
+  // in as primitive values. We must create cells in globalObjs for them here,
+  // as WasmInstanceObject::create() and CreateExportObject() will need the
+  // cells to exist.
+
+ const GlobalDescVector& globals = metadata().globals;
+
+ for (const Export& exp : exports_) {
+ if (exp.kind() != DefinitionKind::Global) {
+ continue;
+ }
+ unsigned globalIndex = exp.globalIndex();
+ const GlobalDesc& global = globals[globalIndex];
+ if (!EnsureExportedGlobalObject(cx, globalImportValues, globalIndex, global,
+ globalObjs)) {
+ return false;
+ }
+ }
+
+ // Imported globals that are not re-exported may also have received only a
+ // primitive value; these globals are always immutable. Assert that we do
+ // not need to create any additional Global objects for such imports.
+
+#ifdef DEBUG
+ size_t numGlobalImports = 0;
+ for (const Import& import : imports_) {
+ if (import.kind != DefinitionKind::Global) {
+ continue;
+ }
+ size_t globalIndex = numGlobalImports++;
+ const GlobalDesc& global = globals[globalIndex];
+ MOZ_ASSERT(global.importIndex() == globalIndex);
+ MOZ_ASSERT_IF(global.isIndirect(),
+ globalIndex < globalObjs.length() || globalObjs[globalIndex]);
+ }
+ MOZ_ASSERT_IF(!metadata().isAsmJS(),
+ numGlobalImports == globals.length() ||
+ !globals[numGlobalImports].isImport());
+#endif
+ return true;
+}
+
+SharedCode Module::getDebugEnabledCode() const {
+ MOZ_ASSERT(metadata().debugEnabled);
+ MOZ_ASSERT(debugUnlinkedCode_);
+ MOZ_ASSERT(debugLinkData_);
+
+ // The first time through, use the pre-linked code in the module but
+ // mark it as having been claimed. Subsequently, instantiate the copy of the
+ // code bytes that we keep around for debugging instead, because the
+ // debugger may patch the pre-linked code at any time.
+ if (debugCodeClaimed_.compareExchange(false, true)) {
+ return code_;
+ }
+
+ Tier tier = Tier::Baseline;
+ auto segment =
+ ModuleSegment::create(tier, *debugUnlinkedCode_, *debugLinkData_);
+ if (!segment) {
+ return nullptr;
+ }
+
+ UniqueMetadataTier metadataTier = js::MakeUnique<MetadataTier>(tier);
+ if (!metadataTier || !metadataTier->clone(metadata(tier))) {
+ return nullptr;
+ }
+
+ auto codeTier =
+ js::MakeUnique<CodeTier>(std::move(metadataTier), std::move(segment));
+ if (!codeTier) {
+ return nullptr;
+ }
+
+ JumpTables jumpTables;
+ if (!jumpTables.init(CompileMode::Once, codeTier->segment(),
+ metadata(tier).codeRanges)) {
+ return nullptr;
+ }
+
+ MutableCode debugCode =
+ js_new<Code>(std::move(codeTier), metadata(), std::move(jumpTables));
+ if (!debugCode || !debugCode->initialize(*debugLinkData_)) {
+ return nullptr;
+ }
+
+ return debugCode;
+}
+
+static bool GetFunctionExport(JSContext* cx,
+ HandleWasmInstanceObject instanceObj,
+ const JSFunctionVector& funcImports,
+ uint32_t funcIndex, MutableHandleFunction func) {
+ if (funcIndex < funcImports.length() &&
+ IsWasmExportedFunction(funcImports[funcIndex])) {
+ func.set(funcImports[funcIndex]);
+ return true;
+ }
+
+ return instanceObj->getExportedFunction(cx, instanceObj, funcIndex, func);
+}
+
+static bool GetGlobalExport(JSContext* cx, HandleWasmInstanceObject instanceObj,
+ const JSFunctionVector& funcImports,
+ const GlobalDesc& global, uint32_t globalIndex,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ MutableHandleValue val) {
+ // A global object for this index is guaranteed to exist by
+ // instantiateGlobals.
+ RootedWasmGlobalObject globalObj(cx, globalObjs[globalIndex]);
+ val.setObject(*globalObj);
+
+  // We are responsible for setting the initial value of the global object here
+  // if it's not imported or indirect. Imported global objects have their
+  // initial value set by their defining module, or by
+  // EnsureExportedGlobalObject when a constant value is provided as an import.
+  // Indirect exported globals that are not imported are initialized in
+  // Instance::init.
+ if (global.isIndirect() || global.isImport()) {
+ return true;
+ }
+
+ // This must be an exported immutable global defined in this module. The
+ // instance either has compiled the value into the code or has its own copy
+ // in its global data area. Either way, we must initialize the global object
+ // with the same initial value.
+ MOZ_ASSERT(!global.isMutable());
+ MOZ_ASSERT(!global.isImport());
+ RootedVal globalVal(cx);
+ switch (global.kind()) {
+ case GlobalKind::Variable: {
+ const InitExpr& init = global.initExpr();
+ switch (init.kind()) {
+ case InitExpr::Kind::Constant:
+ globalVal.set(Val(init.val()));
+ break;
+ case InitExpr::Kind::GetGlobal:
+ globalVal.set(Val(globalImportValues[init.globalIndex()]));
+ break;
+ case InitExpr::Kind::RefFunc:
+ RootedFunction func(cx);
+ if (!GetFunctionExport(cx, instanceObj, funcImports,
+ init.refFuncIndex(), &func)) {
+ return false;
+ }
+ globalVal.set(
+ Val(ValType(RefType::func()), FuncRef::fromJSFunction(func)));
+ }
+ break;
+ }
+ case GlobalKind::Constant: {
+ globalVal.set(Val(global.constantValue()));
+ break;
+ }
+ case GlobalKind::Import: {
+ MOZ_CRASH();
+ }
+ }
+
+ globalObj->val() = globalVal;
+ return true;
+}
+
+static bool CreateExportObject(JSContext* cx,
+ HandleWasmInstanceObject instanceObj,
+ const JSFunctionVector& funcImports,
+ const WasmTableObjectVector& tableObjs,
+ HandleWasmMemoryObject memoryObj,
+ const WasmExceptionObjectVector& exceptionObjs,
+ const ValVector& globalImportValues,
+ const WasmGlobalObjectVector& globalObjs,
+ const ExportVector& exports) {
+ const Instance& instance = instanceObj->instance();
+ const Metadata& metadata = instance.metadata();
+ const GlobalDescVector& globals = metadata.globals;
+
+ if (metadata.isAsmJS() && exports.length() == 1 &&
+ strlen(exports[0].fieldName()) == 0) {
+ RootedFunction func(cx);
+ if (!GetFunctionExport(cx, instanceObj, funcImports, exports[0].funcIndex(),
+ &func)) {
+ return false;
+ }
+ instanceObj->initExportsObj(*func.get());
+ return true;
+ }
+
+ RootedObject exportObj(cx);
+ uint8_t propertyAttr = JSPROP_ENUMERATE;
+
+ if (metadata.isAsmJS()) {
+ exportObj = NewBuiltinClassInstance<PlainObject>(cx);
+ } else {
+ exportObj = NewObjectWithGivenProto<PlainObject>(cx, nullptr);
+ propertyAttr |= JSPROP_READONLY | JSPROP_PERMANENT;
+ }
+ if (!exportObj) {
+ return false;
+ }
+
+ for (const Export& exp : exports) {
+ JSAtom* atom =
+ AtomizeUTF8Chars(cx, exp.fieldName(), strlen(exp.fieldName()));
+ if (!atom) {
+ return false;
+ }
+
+ RootedId id(cx, AtomToId(atom));
+ RootedValue val(cx);
+ switch (exp.kind()) {
+ case DefinitionKind::Function: {
+ RootedFunction func(cx);
+ if (!GetFunctionExport(cx, instanceObj, funcImports, exp.funcIndex(),
+ &func)) {
+ return false;
+ }
+ val = ObjectValue(*func);
+ break;
+ }
+ case DefinitionKind::Table: {
+ val = ObjectValue(*tableObjs[exp.tableIndex()]);
+ break;
+ }
+ case DefinitionKind::Memory: {
+ val = ObjectValue(*memoryObj);
+ break;
+ }
+ case DefinitionKind::Global: {
+ const GlobalDesc& global = globals[exp.globalIndex()];
+ if (!GetGlobalExport(cx, instanceObj, funcImports, global,
+ exp.globalIndex(), globalImportValues, globalObjs,
+ &val)) {
+ return false;
+ }
+ break;
+ }
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case DefinitionKind::Event: {
+ val = ObjectValue(*exceptionObjs[exp.eventIndex()]);
+ break;
+ }
+#endif
+ }
+
+ if (!JS_DefinePropertyById(cx, exportObj, id, val, propertyAttr)) {
+ return false;
+ }
+ }
+
+ if (!metadata.isAsmJS()) {
+ if (!PreventExtensions(cx, exportObj)) {
+ return false;
+ }
+ }
+
+ instanceObj->initExportsObj(*exportObj);
+ return true;
+}
+
+bool Module::instantiate(JSContext* cx, ImportValues& imports,
+ HandleObject instanceProto,
+ MutableHandleWasmInstanceObject instance) const {
+ MOZ_RELEASE_ASSERT(cx->wasm().haveSignalHandlers);
+
+ if (!instantiateFunctions(cx, imports.funcs)) {
+ return false;
+ }
+
+ RootedWasmMemoryObject memory(cx, imports.memory);
+ if (!instantiateMemory(cx, &memory)) {
+ return false;
+ }
+
+  // Note that the following will extend imports.exceptionObjs with wrappers for
+  // the local (non-imported) exceptions of the module.
+  // The resulting vector is sparse, i.e., it will be null in slots for
+  // exceptions that are neither exported nor imported.
+  // In contrast, every slot of the tags vector will be filled with a unique
+  // tag.
+
+ SharedExceptionTagVector tags;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ if (!instantiateExceptions(cx, imports.exceptionObjs, &tags)) {
+ return false;
+ }
+#endif
+
+ // Note that tableObjs is sparse: it will be null in slots that contain
+ // tables that are neither exported nor imported.
+
+ Rooted<WasmTableObjectVector> tableObjs(cx);
+ SharedTableVector tables;
+ if (!instantiateTables(cx, imports.tables, &tableObjs, &tables)) {
+ return false;
+ }
+
+ if (!instantiateGlobals(cx, imports.globalValues, imports.globalObjs)) {
+ return false;
+ }
+
+ UniqueTlsData tlsData = CreateTlsData(metadata().globalDataLength);
+ if (!tlsData) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ SharedCode code;
+ UniqueDebugState maybeDebug;
+ if (metadata().debugEnabled) {
+ code = getDebugEnabledCode();
+ if (!code) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ maybeDebug = cx->make_unique<DebugState>(*code, *this);
+ if (!maybeDebug) {
+ return false;
+ }
+ } else {
+ code = code_;
+ }
+
+ instance.set(WasmInstanceObject::create(
+ cx, code, dataSegments_, elemSegments_, std::move(tlsData), memory,
+ std::move(tags), std::move(tables), imports.funcs, metadata().globals,
+ imports.globalValues, imports.globalObjs, instanceProto,
+ std::move(maybeDebug)));
+ if (!instance) {
+ return false;
+ }
+
+ if (!CreateExportObject(cx, instance, imports.funcs, tableObjs.get(), memory,
+ imports.exceptionObjs, imports.globalValues,
+ imports.globalObjs, exports_)) {
+ return false;
+ }
+
+ // Register the instance with the Realm so that it can find out about global
+ // events like profiling being enabled in the realm. Registration does not
+ // require a fully-initialized instance and must precede initSegments as the
+  // final prerequisite for a live instance.
+
+ if (!cx->realm()->wasm.registerInstance(cx, instance)) {
+ return false;
+ }
+
+ // Perform initialization as the final step after the instance is fully
+ // constructed since this can make the instance live to content (even if the
+ // start function fails).
+
+ if (!initSegments(cx, instance, memory, imports.globalValues)) {
+ return false;
+ }
+
+  // Now that the instance is fully live and initialized, call the start
+  // function.
+ // Note that failure may cause instantiation to throw, but the instance may
+ // still be live via edges created by initSegments or the start function.
+
+ if (metadata().startFuncIndex) {
+ FixedInvokeArgs<0> args(cx);
+ if (!instance->instance().callExport(cx, *metadata().startFuncIndex,
+ args)) {
+ return false;
+ }
+ }
+
+ JSUseCounter useCounter =
+ metadata().isAsmJS() ? JSUseCounter::ASMJS : JSUseCounter::WASM;
+ cx->runtime()->setUseCounter(instance, useCounter);
+
+ if (metadata().usesDuplicateImports) {
+ cx->runtime()->setUseCounter(instance,
+ JSUseCounter::WASM_DUPLICATE_IMPORTS);
+ }
+
+ if (cx->options().testWasmAwaitTier2()) {
+ testingBlockOnTier2Complete();
+ }
+
+ return true;
+}
diff --git a/js/src/wasm/WasmModule.h b/js/src/wasm/WasmModule.h
new file mode 100644
index 0000000000..f670029f13
--- /dev/null
+++ b/js/src/wasm/WasmModule.h
@@ -0,0 +1,257 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_module_h
+#define wasm_module_h
+
+#include "js/WasmModule.h"
+#include "js/BuildId.h"
+
+#include "wasm/WasmCode.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmTable.h"
+
+struct JSTelemetrySender;
+
+namespace js {
+namespace wasm {
+
+struct CompileArgs;
+
+// In the context of wasm, the OptimizedEncodingListener specifically is
+// listening for the completion of tier-2.
+
+using Tier2Listener = RefPtr<JS::OptimizedEncodingListener>;
+
+// A struct containing the typed, imported values that are harvested from the
+// import object and passed to Module::instantiate(). This struct must be
+// stored in a (Persistent)Rooted, not in the heap due to its use of TraceRoot()
+// and complete lack of barriers.
+
+struct ImportValues {
+ JSFunctionVector funcs;
+ WasmTableObjectVector tables;
+ WasmMemoryObject* memory;
+ WasmExceptionObjectVector exceptionObjs;
+ WasmGlobalObjectVector globalObjs;
+ ValVector globalValues;
+
+ ImportValues() : memory(nullptr) {}
+
+ void trace(JSTracer* trc) {
+ funcs.trace(trc);
+ tables.trace(trc);
+ if (memory) {
+ TraceRoot(trc, &memory, "import values memory");
+ }
+ exceptionObjs.trace(trc);
+ globalObjs.trace(trc);
+ globalValues.trace(trc);
+ }
+};
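+
+// A minimal instantiation sketch (hypothetical caller; prototype lookup,
+// import population, and error handling are elided):
+//
+//   // instanceProto: the WebAssembly.Instance prototype (lookup elided)
+//   Rooted<ImportValues> imports(cx);
+//   // ... fill imports.get().funcs/.tables/.memory/.globalValues ...
+//   RootedWasmInstanceObject instanceObj(cx);
+//   if (!module->instantiate(cx, imports.get(), instanceProto, &instanceObj)) {
+//     return false;
+//   }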
+
+// Module represents a compiled wasm module and primarily provides three
+// operations: instantiation, tiered compilation, serialization. A Module can be
+// instantiated any number of times to produce new Instance objects. A Module
+// can have a single tier-2 task initiated to augment a Module's code with a
+// higher tier. A Module can have its optimized code serialized at any point
+// where the LinkData is also available, which is primarily (1) at the end of
+// module generation, (2) at the end of tier-2 compilation.
+//
+// Fully linked-and-instantiated code (represented by Code and its owned
+// ModuleSegment) can be shared between instances, provided none of those
+// instances are being debugged. If patchable code is needed then each instance
+// must have its own Code. Module eagerly creates a new Code and gives it to the
+// first instance; it then instantiates new Code objects from a copy of the
+// unlinked code that it keeps around for that purpose.
+
+class Module : public JS::WasmModule {
+ const SharedCode code_;
+ const ImportVector imports_;
+ const ExportVector exports_;
+ const DataSegmentVector dataSegments_;
+ const ElemSegmentVector elemSegments_;
+ const CustomSectionVector customSections_;
+
+ // These fields are only meaningful when code_->metadata().debugEnabled.
+ // `debugCodeClaimed_` is set to false initially and then to true when
+ // `code_` is already being used for an instance and can't be shared because
+ // it may be patched by the debugger. Subsequent instances must then create
+ // copies by linking the `debugUnlinkedCode_` using `debugLinkData_`.
+ // This could all be removed if debugging didn't need to perform
+ // per-instance code patching.
+
+ mutable Atomic<bool> debugCodeClaimed_;
+ const UniqueConstBytes debugUnlinkedCode_;
+ const UniqueLinkData debugLinkData_;
+ const SharedBytes debugBytecode_;
+
+ // This field is set during tier-2 compilation and cleared on success or
+ // failure. These happen on different threads and are serialized by the
+ // control flow of helper tasks.
+
+ mutable Tier2Listener tier2Listener_;
+
+ // This flag is used for logging (and testing) purposes to indicate
+ // whether the module was deserialized (from a cache).
+
+ const bool loggingDeserialized_;
+
+ // This flag is only used for testing purposes and is cleared on success or
+ // failure. The field is racily polled from various threads.
+
+ mutable Atomic<bool> testingTier2Active_;
+
+ // Cached malloc allocation size for GC memory tracking.
+
+ size_t gcMallocBytesExcludingCode_;
+
+ bool instantiateFunctions(JSContext* cx,
+ const JSFunctionVector& funcImports) const;
+ bool instantiateMemory(JSContext* cx,
+ MutableHandleWasmMemoryObject memory) const;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ bool instantiateImportedException(JSContext* cx,
+ Handle<WasmExceptionObject*> exnObj,
+ WasmExceptionObjectVector& exnObjs,
+ SharedExceptionTagVector* tags) const;
+ bool instantiateLocalException(JSContext* cx, const EventDesc& ed,
+ WasmExceptionObjectVector& exnObjs,
+ SharedExceptionTagVector* tags,
+ uint32_t exnIndex) const;
+ bool instantiateExceptions(JSContext* cx, WasmExceptionObjectVector& exnObjs,
+ SharedExceptionTagVector* tags) const;
+#endif
+ bool instantiateImportedTable(JSContext* cx, const TableDesc& td,
+ Handle<WasmTableObject*> table,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const;
+ bool instantiateLocalTable(JSContext* cx, const TableDesc& td,
+ WasmTableObjectVector* tableObjs,
+ SharedTableVector* tables) const;
+ bool instantiateTables(JSContext* cx,
+ const WasmTableObjectVector& tableImports,
+ MutableHandle<WasmTableObjectVector> tableObjs,
+ SharedTableVector* tables) const;
+ bool instantiateGlobals(JSContext* cx, const ValVector& globalImportValues,
+ WasmGlobalObjectVector& globalObjs) const;
+ bool initSegments(JSContext* cx, HandleWasmInstanceObject instance,
+ HandleWasmMemoryObject memory,
+ const ValVector& globalImportValues) const;
+ SharedCode getDebugEnabledCode() const;
+
+ class Tier2GeneratorTaskImpl;
+
+ public:
+ Module(const Code& code, ImportVector&& imports, ExportVector&& exports,
+ DataSegmentVector&& dataSegments, ElemSegmentVector&& elemSegments,
+ CustomSectionVector&& customSections,
+ UniqueConstBytes debugUnlinkedCode = nullptr,
+ UniqueLinkData debugLinkData = nullptr,
+ const ShareableBytes* debugBytecode = nullptr,
+ bool loggingDeserialized = false)
+ : code_(&code),
+ imports_(std::move(imports)),
+ exports_(std::move(exports)),
+ dataSegments_(std::move(dataSegments)),
+ elemSegments_(std::move(elemSegments)),
+ customSections_(std::move(customSections)),
+ debugCodeClaimed_(false),
+ debugUnlinkedCode_(std::move(debugUnlinkedCode)),
+ debugLinkData_(std::move(debugLinkData)),
+ debugBytecode_(debugBytecode),
+ loggingDeserialized_(loggingDeserialized),
+ testingTier2Active_(false) {
+ MOZ_ASSERT_IF(metadata().debugEnabled,
+ debugUnlinkedCode_ && debugLinkData_);
+ initGCMallocBytesExcludingCode();
+ }
+ ~Module() override;
+
+ const Code& code() const { return *code_; }
+ const ModuleSegment& moduleSegment(Tier t) const { return code_->segment(t); }
+ const Metadata& metadata() const { return code_->metadata(); }
+ const MetadataTier& metadata(Tier t) const { return code_->metadata(t); }
+ const ImportVector& imports() const { return imports_; }
+ const ExportVector& exports() const { return exports_; }
+ const CustomSectionVector& customSections() const { return customSections_; }
+ const Bytes& debugBytecode() const { return debugBytecode_->bytes; }
+ uint32_t codeLength(Tier t) const { return code_->segment(t).length(); }
+
+ // Instantiate this module with the given imports:
+
+ bool instantiate(JSContext* cx, ImportValues& imports,
+ HandleObject instanceProto,
+ MutableHandleWasmInstanceObject instanceObj) const;
+
+ // Tier-2 compilation may be initiated after the Module is constructed at
+ // most once. When tier-2 compilation completes, ModuleGenerator calls
+ // finishTier2() from a helper thread, passing tier-variant data which will
+ // be installed and made visible.
+
+ void startTier2(const CompileArgs& args, const ShareableBytes& bytecode,
+ JS::OptimizedEncodingListener* listener,
+ JSTelemetrySender telemetrySender);
+ bool finishTier2(const LinkData& linkData2, UniqueCodeTier code2) const;
+
+ void testingBlockOnTier2Complete() const;
+ bool testingTier2Active() const { return testingTier2Active_; }
+
+ // Code caching support.
+
+ size_t serializedSize(const LinkData& linkData) const;
+ void serialize(const LinkData& linkData, uint8_t* begin, size_t size) const;
+ void serialize(const LinkData& linkData,
+ JS::OptimizedEncodingListener& listener) const;
+ static RefPtr<Module> deserialize(const uint8_t* begin, size_t size,
+ Metadata* maybeMetadata = nullptr);
+ bool loggingDeserialized() const { return loggingDeserialized_; }
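+
+  // A caching round trip is, schematically (OOM handling elided):
+  //
+  //   Bytes bytes;
+  //   if (!bytes.resize(module->serializedSize(linkData))) { return false; }
+  //   module->serialize(linkData, bytes.begin(), bytes.length());
+  //   RefPtr<Module> copy = Module::deserialize(bytes.begin(), bytes.length());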
+
+ // JS API and JS::WasmModule implementation:
+
+ JSObject* createObject(JSContext* cx) const override;
+ JSObject* createObjectForAsmJS(JSContext* cx) const override;
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf, Metadata::SeenSet* seenMetadata,
+ Code::SeenSet* seenCode, size_t* code, size_t* data) const;
+
+ // GC malloc memory tracking:
+
+ void initGCMallocBytesExcludingCode();
+ size_t gcMallocBytesExcludingCode() const {
+ return gcMallocBytesExcludingCode_;
+ }
+
+ // Generated code analysis support:
+
+ bool extractCode(JSContext* cx, Tier tier, MutableHandleValue vp) const;
+};
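
Editorial note (not part of the patch): a condensed sketch of the caching round-trip the serialization API above supports. The function and variable names are hypothetical, error reporting is elided, and the sketch assumes the js::wasm declarations above are in scope.

static bool RoundTripSketch(const Module& module, const LinkData& linkData) {
  // Size the buffer, serialize into it, then rebuild a Module from the bytes.
  Bytes bytes;
  if (!bytes.resize(module.serializedSize(linkData))) {
    return false;
  }
  module.serialize(linkData, bytes.begin(), bytes.length());
  RefPtr<Module> clone = Module::deserialize(bytes.begin(), bytes.length());
  return clone != nullptr;
}
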
+
+using MutableModule = RefPtr<Module>;
+using SharedModule = RefPtr<const Module>;
+
+// JS API implementations:
+
+[[nodiscard]] bool GetOptimizedEncodingBuildId(JS::BuildIdCharVector* buildId);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_module_h
diff --git a/js/src/wasm/WasmOpIter.cpp b/js/src/wasm/WasmOpIter.cpp
new file mode 100644
index 0000000000..ee1b5966d6
--- /dev/null
+++ b/js/src/wasm/WasmOpIter.cpp
@@ -0,0 +1,702 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmOpIter.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+#ifdef ENABLE_WASM_GC
+# ifndef ENABLE_WASM_REFTYPES
+# error "GC types require the reftypes feature"
+# endif
+#endif
+
+#ifdef DEBUG
+
+# ifdef ENABLE_WASM_REFTYPES
+# define WASM_REF_OP(code) return code
+# else
+# define WASM_REF_OP(code) break
+# endif
+# ifdef ENABLE_WASM_FUNCTION_REFERENCES
+# define WASM_FUNCTION_REFERENCES_OP(code) return code
+# else
+# define WASM_FUNCTION_REFERENCES_OP(code) break
+# endif
+# ifdef ENABLE_WASM_GC
+# define WASM_GC_OP(code) return code
+# else
+# define WASM_GC_OP(code) break
+# endif
+# ifdef ENABLE_WASM_SIMD
+# define WASM_SIMD_OP(code) return code
+# else
+# define WASM_SIMD_OP(code) break
+# endif
+# ifdef ENABLE_WASM_EXCEPTIONS
+# define WASM_EXN_OP(code) return code
+# else
+# define WASM_EXN_OP(code) break
+# endif
+
+OpKind wasm::Classify(OpBytes op) {
+ switch (Op(op.b0)) {
+ case Op::Block:
+ return OpKind::Block;
+ case Op::Loop:
+ return OpKind::Loop;
+ case Op::Unreachable:
+ return OpKind::Unreachable;
+ case Op::Drop:
+ return OpKind::Drop;
+ case Op::I32Const:
+ return OpKind::I32;
+ case Op::I64Const:
+ return OpKind::I64;
+ case Op::F32Const:
+ return OpKind::F32;
+ case Op::F64Const:
+ return OpKind::F64;
+ case Op::Br:
+ return OpKind::Br;
+ case Op::BrIf:
+ return OpKind::BrIf;
+ case Op::BrTable:
+ return OpKind::BrTable;
+ case Op::Nop:
+ return OpKind::Nop;
+ case Op::I32Clz:
+ case Op::I32Ctz:
+ case Op::I32Popcnt:
+ case Op::I64Clz:
+ case Op::I64Ctz:
+ case Op::I64Popcnt:
+ case Op::F32Abs:
+ case Op::F32Neg:
+ case Op::F32Ceil:
+ case Op::F32Floor:
+ case Op::F32Trunc:
+ case Op::F32Nearest:
+ case Op::F32Sqrt:
+ case Op::F64Abs:
+ case Op::F64Neg:
+ case Op::F64Ceil:
+ case Op::F64Floor:
+ case Op::F64Trunc:
+ case Op::F64Nearest:
+ case Op::F64Sqrt:
+ return OpKind::Unary;
+ case Op::I32Add:
+ case Op::I32Sub:
+ case Op::I32Mul:
+ case Op::I32DivS:
+ case Op::I32DivU:
+ case Op::I32RemS:
+ case Op::I32RemU:
+ case Op::I32And:
+ case Op::I32Or:
+ case Op::I32Xor:
+ case Op::I32Shl:
+ case Op::I32ShrS:
+ case Op::I32ShrU:
+ case Op::I32Rotl:
+ case Op::I32Rotr:
+ case Op::I64Add:
+ case Op::I64Sub:
+ case Op::I64Mul:
+ case Op::I64DivS:
+ case Op::I64DivU:
+ case Op::I64RemS:
+ case Op::I64RemU:
+ case Op::I64And:
+ case Op::I64Or:
+ case Op::I64Xor:
+ case Op::I64Shl:
+ case Op::I64ShrS:
+ case Op::I64ShrU:
+ case Op::I64Rotl:
+ case Op::I64Rotr:
+ case Op::F32Add:
+ case Op::F32Sub:
+ case Op::F32Mul:
+ case Op::F32Div:
+ case Op::F32Min:
+ case Op::F32Max:
+ case Op::F32CopySign:
+ case Op::F64Add:
+ case Op::F64Sub:
+ case Op::F64Mul:
+ case Op::F64Div:
+ case Op::F64Min:
+ case Op::F64Max:
+ case Op::F64CopySign:
+ return OpKind::Binary;
+ case Op::I32Eq:
+ case Op::I32Ne:
+ case Op::I32LtS:
+ case Op::I32LtU:
+ case Op::I32LeS:
+ case Op::I32LeU:
+ case Op::I32GtS:
+ case Op::I32GtU:
+ case Op::I32GeS:
+ case Op::I32GeU:
+ case Op::I64Eq:
+ case Op::I64Ne:
+ case Op::I64LtS:
+ case Op::I64LtU:
+ case Op::I64LeS:
+ case Op::I64LeU:
+ case Op::I64GtS:
+ case Op::I64GtU:
+ case Op::I64GeS:
+ case Op::I64GeU:
+ case Op::F32Eq:
+ case Op::F32Ne:
+ case Op::F32Lt:
+ case Op::F32Le:
+ case Op::F32Gt:
+ case Op::F32Ge:
+ case Op::F64Eq:
+ case Op::F64Ne:
+ case Op::F64Lt:
+ case Op::F64Le:
+ case Op::F64Gt:
+ case Op::F64Ge:
+ return OpKind::Comparison;
+ case Op::I32Eqz:
+ case Op::I32WrapI64:
+ case Op::I32TruncSF32:
+ case Op::I32TruncUF32:
+ case Op::I32ReinterpretF32:
+ case Op::I32TruncSF64:
+ case Op::I32TruncUF64:
+ case Op::I64ExtendSI32:
+ case Op::I64ExtendUI32:
+ case Op::I64TruncSF32:
+ case Op::I64TruncUF32:
+ case Op::I64TruncSF64:
+ case Op::I64TruncUF64:
+ case Op::I64ReinterpretF64:
+ case Op::I64Eqz:
+ case Op::F32ConvertSI32:
+ case Op::F32ConvertUI32:
+ case Op::F32ReinterpretI32:
+ case Op::F32ConvertSI64:
+ case Op::F32ConvertUI64:
+ case Op::F32DemoteF64:
+ case Op::F64ConvertSI32:
+ case Op::F64ConvertUI32:
+ case Op::F64ConvertSI64:
+ case Op::F64ConvertUI64:
+ case Op::F64ReinterpretI64:
+ case Op::F64PromoteF32:
+ case Op::I32Extend8S:
+ case Op::I32Extend16S:
+ case Op::I64Extend8S:
+ case Op::I64Extend16S:
+ case Op::I64Extend32S:
+ return OpKind::Conversion;
+ case Op::I32Load8S:
+ case Op::I32Load8U:
+ case Op::I32Load16S:
+ case Op::I32Load16U:
+ case Op::I64Load8S:
+ case Op::I64Load8U:
+ case Op::I64Load16S:
+ case Op::I64Load16U:
+ case Op::I64Load32S:
+ case Op::I64Load32U:
+ case Op::I32Load:
+ case Op::I64Load:
+ case Op::F32Load:
+ case Op::F64Load:
+ return OpKind::Load;
+ case Op::I32Store8:
+ case Op::I32Store16:
+ case Op::I64Store8:
+ case Op::I64Store16:
+ case Op::I64Store32:
+ case Op::I32Store:
+ case Op::I64Store:
+ case Op::F32Store:
+ case Op::F64Store:
+ return OpKind::Store;
+ case Op::SelectNumeric:
+ case Op::SelectTyped:
+ return OpKind::Select;
+ case Op::GetLocal:
+ return OpKind::GetLocal;
+ case Op::SetLocal:
+ return OpKind::SetLocal;
+ case Op::TeeLocal:
+ return OpKind::TeeLocal;
+ case Op::GetGlobal:
+ return OpKind::GetGlobal;
+ case Op::SetGlobal:
+ return OpKind::SetGlobal;
+ case Op::TableGet:
+ WASM_REF_OP(OpKind::TableGet);
+ case Op::TableSet:
+ WASM_REF_OP(OpKind::TableSet);
+ case Op::Call:
+ return OpKind::Call;
+ case Op::CallIndirect:
+ return OpKind::CallIndirect;
+ case Op::Return:
+ case Op::Limit:
+ // Accept Limit, for use in decoding the end of a function after the body.
+ return OpKind::Return;
+ case Op::If:
+ return OpKind::If;
+ case Op::Else:
+ return OpKind::Else;
+ case Op::End:
+ return OpKind::End;
+# ifdef ENABLE_WASM_EXCEPTIONS
+ case Op::Catch:
+ WASM_EXN_OP(OpKind::Catch);
+ case Op::Throw:
+ WASM_EXN_OP(OpKind::Throw);
+ case Op::Try:
+ WASM_EXN_OP(OpKind::Try);
+# endif
+ case Op::MemorySize:
+ return OpKind::MemorySize;
+ case Op::MemoryGrow:
+ return OpKind::MemoryGrow;
+ case Op::RefNull:
+ WASM_REF_OP(OpKind::RefNull);
+ case Op::RefIsNull:
+ WASM_REF_OP(OpKind::Conversion);
+ case Op::RefFunc:
+ WASM_REF_OP(OpKind::RefFunc);
+ case Op::RefAsNonNull:
+ WASM_FUNCTION_REFERENCES_OP(OpKind::RefAsNonNull);
+ case Op::BrOnNull:
+ WASM_FUNCTION_REFERENCES_OP(OpKind::BrOnNull);
+ case Op::RefEq:
+ WASM_GC_OP(OpKind::Comparison);
+ case Op::GcPrefix: {
+ switch (GcOp(op.b1)) {
+ case GcOp::Limit:
+ // Reject Limit for GcPrefix encoding
+ break;
+ case GcOp::StructNew:
+ WASM_GC_OP(OpKind::StructNew);
+ case GcOp::StructGet:
+ WASM_GC_OP(OpKind::StructGet);
+ case GcOp::StructSet:
+ WASM_GC_OP(OpKind::StructSet);
+ case GcOp::StructNarrow:
+ WASM_GC_OP(OpKind::StructNarrow);
+ }
+ break;
+ }
+ case Op::SimdPrefix: {
+ switch (SimdOp(op.b1)) {
+ case SimdOp::Limit:
+ // Reject Limit for SimdPrefix encoding
+ break;
+ case SimdOp::I8x16ExtractLaneS:
+ case SimdOp::I8x16ExtractLaneU:
+ case SimdOp::I16x8ExtractLaneS:
+ case SimdOp::I16x8ExtractLaneU:
+ case SimdOp::I32x4ExtractLane:
+ case SimdOp::I64x2ExtractLane:
+ case SimdOp::F32x4ExtractLane:
+ case SimdOp::F64x2ExtractLane:
+ WASM_SIMD_OP(OpKind::ExtractLane);
+ case SimdOp::I8x16Splat:
+ case SimdOp::I16x8Splat:
+ case SimdOp::I32x4Splat:
+ case SimdOp::I64x2Splat:
+ case SimdOp::F32x4Splat:
+ case SimdOp::F64x2Splat:
+ case SimdOp::I8x16AnyTrue:
+ case SimdOp::I8x16AllTrue:
+ case SimdOp::I16x8AnyTrue:
+ case SimdOp::I16x8AllTrue:
+ case SimdOp::I32x4AnyTrue:
+ case SimdOp::I32x4AllTrue:
+ case SimdOp::I8x16Bitmask:
+ case SimdOp::I16x8Bitmask:
+ case SimdOp::I32x4Bitmask:
+ WASM_SIMD_OP(OpKind::Conversion);
+ case SimdOp::I8x16ReplaceLane:
+ case SimdOp::I16x8ReplaceLane:
+ case SimdOp::I32x4ReplaceLane:
+ case SimdOp::I64x2ReplaceLane:
+ case SimdOp::F32x4ReplaceLane:
+ case SimdOp::F64x2ReplaceLane:
+ WASM_SIMD_OP(OpKind::ReplaceLane);
+ case SimdOp::I8x16Eq:
+ case SimdOp::I8x16Ne:
+ case SimdOp::I8x16LtS:
+ case SimdOp::I8x16LtU:
+ case SimdOp::I8x16GtS:
+ case SimdOp::I8x16GtU:
+ case SimdOp::I8x16LeS:
+ case SimdOp::I8x16LeU:
+ case SimdOp::I8x16GeS:
+ case SimdOp::I8x16GeU:
+ case SimdOp::I16x8Eq:
+ case SimdOp::I16x8Ne:
+ case SimdOp::I16x8LtS:
+ case SimdOp::I16x8LtU:
+ case SimdOp::I16x8GtS:
+ case SimdOp::I16x8GtU:
+ case SimdOp::I16x8LeS:
+ case SimdOp::I16x8LeU:
+ case SimdOp::I16x8GeS:
+ case SimdOp::I16x8GeU:
+ case SimdOp::I32x4Eq:
+ case SimdOp::I32x4Ne:
+ case SimdOp::I32x4LtS:
+ case SimdOp::I32x4LtU:
+ case SimdOp::I32x4GtS:
+ case SimdOp::I32x4GtU:
+ case SimdOp::I32x4LeS:
+ case SimdOp::I32x4LeU:
+ case SimdOp::I32x4GeS:
+ case SimdOp::I32x4GeU:
+ case SimdOp::F32x4Eq:
+ case SimdOp::F32x4Ne:
+ case SimdOp::F32x4Lt:
+ case SimdOp::F32x4Gt:
+ case SimdOp::F32x4Le:
+ case SimdOp::F32x4Ge:
+ case SimdOp::F64x2Eq:
+ case SimdOp::F64x2Ne:
+ case SimdOp::F64x2Lt:
+ case SimdOp::F64x2Gt:
+ case SimdOp::F64x2Le:
+ case SimdOp::F64x2Ge:
+ case SimdOp::V128And:
+ case SimdOp::V128Or:
+ case SimdOp::V128Xor:
+ case SimdOp::V128AndNot:
+ case SimdOp::I8x16AvgrU:
+ case SimdOp::I16x8AvgrU:
+ case SimdOp::I8x16Add:
+ case SimdOp::I8x16AddSaturateS:
+ case SimdOp::I8x16AddSaturateU:
+ case SimdOp::I8x16Sub:
+ case SimdOp::I8x16SubSaturateS:
+ case SimdOp::I8x16SubSaturateU:
+ case SimdOp::I8x16MinS:
+ case SimdOp::I8x16MaxS:
+ case SimdOp::I8x16MinU:
+ case SimdOp::I8x16MaxU:
+ case SimdOp::I16x8Add:
+ case SimdOp::I16x8AddSaturateS:
+ case SimdOp::I16x8AddSaturateU:
+ case SimdOp::I16x8Sub:
+ case SimdOp::I16x8SubSaturateS:
+ case SimdOp::I16x8SubSaturateU:
+ case SimdOp::I16x8Mul:
+ case SimdOp::I16x8MinS:
+ case SimdOp::I16x8MaxS:
+ case SimdOp::I16x8MinU:
+ case SimdOp::I16x8MaxU:
+ case SimdOp::I32x4Add:
+ case SimdOp::I32x4Sub:
+ case SimdOp::I32x4Mul:
+ case SimdOp::I32x4MinS:
+ case SimdOp::I32x4MaxS:
+ case SimdOp::I32x4MinU:
+ case SimdOp::I32x4MaxU:
+ case SimdOp::I64x2Add:
+ case SimdOp::I64x2Sub:
+ case SimdOp::I64x2Mul:
+ case SimdOp::F32x4Add:
+ case SimdOp::F32x4Sub:
+ case SimdOp::F32x4Mul:
+ case SimdOp::F32x4Div:
+ case SimdOp::F32x4Min:
+ case SimdOp::F32x4Max:
+ case SimdOp::F64x2Add:
+ case SimdOp::F64x2Sub:
+ case SimdOp::F64x2Mul:
+ case SimdOp::F64x2Div:
+ case SimdOp::F64x2Min:
+ case SimdOp::F64x2Max:
+ case SimdOp::I8x16NarrowSI16x8:
+ case SimdOp::I8x16NarrowUI16x8:
+ case SimdOp::I16x8NarrowSI32x4:
+ case SimdOp::I16x8NarrowUI32x4:
+ case SimdOp::V8x16Swizzle:
+ case SimdOp::F32x4PMin:
+ case SimdOp::F32x4PMax:
+ case SimdOp::F64x2PMin:
+ case SimdOp::F64x2PMax:
+ case SimdOp::I32x4DotSI16x8:
+ WASM_SIMD_OP(OpKind::Binary);
+ case SimdOp::I8x16Neg:
+ case SimdOp::I16x8Neg:
+ case SimdOp::I16x8WidenLowSI8x16:
+ case SimdOp::I16x8WidenHighSI8x16:
+ case SimdOp::I16x8WidenLowUI8x16:
+ case SimdOp::I16x8WidenHighUI8x16:
+ case SimdOp::I32x4Neg:
+ case SimdOp::I32x4WidenLowSI16x8:
+ case SimdOp::I32x4WidenHighSI16x8:
+ case SimdOp::I32x4WidenLowUI16x8:
+ case SimdOp::I32x4WidenHighUI16x8:
+ case SimdOp::I32x4TruncSSatF32x4:
+ case SimdOp::I32x4TruncUSatF32x4:
+ case SimdOp::I64x2Neg:
+ case SimdOp::F32x4Abs:
+ case SimdOp::F32x4Neg:
+ case SimdOp::F32x4Sqrt:
+ case SimdOp::F32x4ConvertSI32x4:
+ case SimdOp::F32x4ConvertUI32x4:
+ case SimdOp::F64x2Abs:
+ case SimdOp::F64x2Neg:
+ case SimdOp::F64x2Sqrt:
+ case SimdOp::V128Not:
+ case SimdOp::I8x16Abs:
+ case SimdOp::I16x8Abs:
+ case SimdOp::I32x4Abs:
+ case SimdOp::F32x4Ceil:
+ case SimdOp::F32x4Floor:
+ case SimdOp::F32x4Trunc:
+ case SimdOp::F32x4Nearest:
+ case SimdOp::F64x2Ceil:
+ case SimdOp::F64x2Floor:
+ case SimdOp::F64x2Trunc:
+ case SimdOp::F64x2Nearest:
+ WASM_SIMD_OP(OpKind::Unary);
+ case SimdOp::I8x16Shl:
+ case SimdOp::I8x16ShrS:
+ case SimdOp::I8x16ShrU:
+ case SimdOp::I16x8Shl:
+ case SimdOp::I16x8ShrS:
+ case SimdOp::I16x8ShrU:
+ case SimdOp::I32x4Shl:
+ case SimdOp::I32x4ShrS:
+ case SimdOp::I32x4ShrU:
+ case SimdOp::I64x2Shl:
+ case SimdOp::I64x2ShrS:
+ case SimdOp::I64x2ShrU:
+ WASM_SIMD_OP(OpKind::VectorShift);
+ case SimdOp::V128Bitselect:
+ WASM_SIMD_OP(OpKind::VectorSelect);
+ case SimdOp::V8x16Shuffle:
+ WASM_SIMD_OP(OpKind::VectorShuffle);
+ case SimdOp::V128Const:
+ WASM_SIMD_OP(OpKind::V128);
+ case SimdOp::V128Load:
+ case SimdOp::V8x16LoadSplat:
+ case SimdOp::V16x8LoadSplat:
+ case SimdOp::V32x4LoadSplat:
+ case SimdOp::V64x2LoadSplat:
+ case SimdOp::I16x8LoadS8x8:
+ case SimdOp::I16x8LoadU8x8:
+ case SimdOp::I32x4LoadS16x4:
+ case SimdOp::I32x4LoadU16x4:
+ case SimdOp::I64x2LoadS32x2:
+ case SimdOp::I64x2LoadU32x2:
+ case SimdOp::V128Load32Zero:
+ case SimdOp::V128Load64Zero:
+ WASM_SIMD_OP(OpKind::Load);
+ case SimdOp::V128Store:
+ WASM_SIMD_OP(OpKind::Store);
+# ifdef ENABLE_WASM_SIMD_WORMHOLE
+ case SimdOp::MozWHSELFTEST:
+ case SimdOp::MozWHPMADDUBSW:
+ case SimdOp::MozWHPMADDWD:
+ MOZ_CRASH("Should not be seen");
+# endif
+ }
+ break;
+ }
+ case Op::MiscPrefix: {
+ switch (MiscOp(op.b1)) {
+ case MiscOp::Limit:
+ // Reject Limit for MiscPrefix encoding
+ break;
+ case MiscOp::I32TruncSSatF32:
+ case MiscOp::I32TruncUSatF32:
+ case MiscOp::I32TruncSSatF64:
+ case MiscOp::I32TruncUSatF64:
+ case MiscOp::I64TruncSSatF32:
+ case MiscOp::I64TruncUSatF32:
+ case MiscOp::I64TruncSSatF64:
+ case MiscOp::I64TruncUSatF64:
+ return OpKind::Conversion;
+ case MiscOp::MemCopy:
+ case MiscOp::TableCopy:
+ return OpKind::MemOrTableCopy;
+ case MiscOp::DataDrop:
+ case MiscOp::ElemDrop:
+ return OpKind::DataOrElemDrop;
+ case MiscOp::MemFill:
+ return OpKind::MemFill;
+ case MiscOp::MemInit:
+ case MiscOp::TableInit:
+ return OpKind::MemOrTableInit;
+ case MiscOp::TableFill:
+ WASM_REF_OP(OpKind::TableFill);
+ case MiscOp::TableGrow:
+ WASM_REF_OP(OpKind::TableGrow);
+ case MiscOp::TableSize:
+ WASM_REF_OP(OpKind::TableSize);
+ }
+ break;
+ }
+ case Op::ThreadPrefix: {
+ switch (ThreadOp(op.b1)) {
+ case ThreadOp::Limit:
+ // Reject Limit for ThreadPrefix encoding
+ break;
+ case ThreadOp::Wake:
+ return OpKind::Wake;
+ case ThreadOp::I32Wait:
+ case ThreadOp::I64Wait:
+ return OpKind::Wait;
+ case ThreadOp::Fence:
+ return OpKind::Fence;
+ case ThreadOp::I32AtomicLoad:
+ case ThreadOp::I64AtomicLoad:
+ case ThreadOp::I32AtomicLoad8U:
+ case ThreadOp::I32AtomicLoad16U:
+ case ThreadOp::I64AtomicLoad8U:
+ case ThreadOp::I64AtomicLoad16U:
+ case ThreadOp::I64AtomicLoad32U:
+ return OpKind::AtomicLoad;
+ case ThreadOp::I32AtomicStore:
+ case ThreadOp::I64AtomicStore:
+ case ThreadOp::I32AtomicStore8U:
+ case ThreadOp::I32AtomicStore16U:
+ case ThreadOp::I64AtomicStore8U:
+ case ThreadOp::I64AtomicStore16U:
+ case ThreadOp::I64AtomicStore32U:
+ return OpKind::AtomicStore;
+ case ThreadOp::I32AtomicAdd:
+ case ThreadOp::I64AtomicAdd:
+ case ThreadOp::I32AtomicAdd8U:
+ case ThreadOp::I32AtomicAdd16U:
+ case ThreadOp::I64AtomicAdd8U:
+ case ThreadOp::I64AtomicAdd16U:
+ case ThreadOp::I64AtomicAdd32U:
+ case ThreadOp::I32AtomicSub:
+ case ThreadOp::I64AtomicSub:
+ case ThreadOp::I32AtomicSub8U:
+ case ThreadOp::I32AtomicSub16U:
+ case ThreadOp::I64AtomicSub8U:
+ case ThreadOp::I64AtomicSub16U:
+ case ThreadOp::I64AtomicSub32U:
+ case ThreadOp::I32AtomicAnd:
+ case ThreadOp::I64AtomicAnd:
+ case ThreadOp::I32AtomicAnd8U:
+ case ThreadOp::I32AtomicAnd16U:
+ case ThreadOp::I64AtomicAnd8U:
+ case ThreadOp::I64AtomicAnd16U:
+ case ThreadOp::I64AtomicAnd32U:
+ case ThreadOp::I32AtomicOr:
+ case ThreadOp::I64AtomicOr:
+ case ThreadOp::I32AtomicOr8U:
+ case ThreadOp::I32AtomicOr16U:
+ case ThreadOp::I64AtomicOr8U:
+ case ThreadOp::I64AtomicOr16U:
+ case ThreadOp::I64AtomicOr32U:
+ case ThreadOp::I32AtomicXor:
+ case ThreadOp::I64AtomicXor:
+ case ThreadOp::I32AtomicXor8U:
+ case ThreadOp::I32AtomicXor16U:
+ case ThreadOp::I64AtomicXor8U:
+ case ThreadOp::I64AtomicXor16U:
+ case ThreadOp::I64AtomicXor32U:
+ case ThreadOp::I32AtomicXchg:
+ case ThreadOp::I64AtomicXchg:
+ case ThreadOp::I32AtomicXchg8U:
+ case ThreadOp::I32AtomicXchg16U:
+ case ThreadOp::I64AtomicXchg8U:
+ case ThreadOp::I64AtomicXchg16U:
+ case ThreadOp::I64AtomicXchg32U:
+ return OpKind::AtomicBinOp;
+ case ThreadOp::I32AtomicCmpXchg:
+ case ThreadOp::I64AtomicCmpXchg:
+ case ThreadOp::I32AtomicCmpXchg8U:
+ case ThreadOp::I32AtomicCmpXchg16U:
+ case ThreadOp::I64AtomicCmpXchg8U:
+ case ThreadOp::I64AtomicCmpXchg16U:
+ case ThreadOp::I64AtomicCmpXchg32U:
+ return OpKind::AtomicCompareExchange;
+ default:
+ break;
+ }
+ break;
+ }
+ case Op::MozPrefix: {
+ switch (MozOp(op.b1)) {
+ case MozOp::Limit:
+ // Reject Limit for the MozPrefix encoding
+ break;
+ case MozOp::TeeGlobal:
+ return OpKind::TeeGlobal;
+ case MozOp::I32BitNot:
+ case MozOp::I32Abs:
+ case MozOp::I32Neg:
+ return OpKind::Unary;
+ case MozOp::I32Min:
+ case MozOp::I32Max:
+ case MozOp::F64Mod:
+ case MozOp::F64Pow:
+ case MozOp::F64Atan2:
+ return OpKind::Binary;
+ case MozOp::F64Sin:
+ case MozOp::F64Cos:
+ case MozOp::F64Tan:
+ case MozOp::F64Asin:
+ case MozOp::F64Acos:
+ case MozOp::F64Atan:
+ case MozOp::F64Exp:
+ case MozOp::F64Log:
+ return OpKind::Unary;
+ case MozOp::I32TeeStore8:
+ case MozOp::I32TeeStore16:
+ case MozOp::I64TeeStore8:
+ case MozOp::I64TeeStore16:
+ case MozOp::I64TeeStore32:
+ case MozOp::I32TeeStore:
+ case MozOp::I64TeeStore:
+ case MozOp::F32TeeStore:
+ case MozOp::F64TeeStore:
+ case MozOp::F32TeeStoreF64:
+ case MozOp::F64TeeStoreF32:
+ return OpKind::TeeStore;
+ case MozOp::OldCallDirect:
+ return OpKind::OldCallDirect;
+ case MozOp::OldCallIndirect:
+ return OpKind::OldCallIndirect;
+ }
+ break;
+ }
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unimplemented opcode");
+}
+
+# undef WASM_EXN_OP
+# undef WASM_GC_OP
+# undef WASM_REF_OP
+
+#endif
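
Editorial note (not part of the patch): Classify() and the WASM_*_OP macros above are compiled only in DEBUG builds; each feature macro returns the OpKind when its feature is enabled and otherwise falls through to the "unimplemented opcode" crash. The OpIter read methods declared in WasmOpIter.h below use Classify() to assert that callers paired each opcode with the matching reader. A minimal sketch of that pattern follows; the function name CheckReaderPairing is a hypothetical stand-in, and the assertion is the one readBlock() performs.

#ifdef DEBUG
static void CheckReaderPairing(OpBytes op) {
  // A reader for plain blocks asserts exactly this before decoding operands.
  MOZ_ASSERT(Classify(op) == OpKind::Block);
}
#endif
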
diff --git a/js/src/wasm/WasmOpIter.h b/js/src/wasm/WasmOpIter.h
new file mode 100644
index 0000000000..fde161c95f
--- /dev/null
+++ b/js/src/wasm/WasmOpIter.h
@@ -0,0 +1,2827 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_op_iter_h
+#define wasm_op_iter_h
+
+#include "mozilla/CompactPair.h"
+#include "mozilla/Poison.h"
+
+#include <type_traits>
+
+#include "jit/AtomicOp.h"
+#include "js/Printf.h"
+#include "wasm/WasmUtility.h"
+#include "wasm/WasmValidate.h"
+
+namespace js {
+namespace wasm {
+
+// The kind of a control-flow stack item.
+enum class LabelKind : uint8_t {
+ Body,
+ Block,
+ Loop,
+ Then,
+ Else,
+#ifdef ENABLE_WASM_EXCEPTIONS
+ Try,
+ Catch,
+#endif
+};
+
+// The type of values on the operand stack during validation. This is either a
+// ValType or the special type "Bottom".
+
+class StackType {
+ PackedTypeCode tc_;
+
+ explicit StackType(PackedTypeCode tc) : tc_(tc) {}
+
+ public:
+ StackType() : tc_(InvalidPackedTypeCode()) {}
+
+ explicit StackType(const ValType& t) : tc_(t.packed()) {
+ MOZ_ASSERT(IsValid(tc_));
+ MOZ_ASSERT(!isBottom());
+ }
+
+ static StackType bottom() { return StackType(PackTypeCode(TypeCode::Limit)); }
+
+ bool isBottom() const {
+ MOZ_ASSERT(IsValid(tc_));
+ return UnpackTypeCodeType(tc_) == TypeCode::Limit;
+ }
+
+ ValType valType() const {
+ MOZ_ASSERT(IsValid(tc_));
+ MOZ_ASSERT(!isBottom());
+ return ValType(tc_);
+ }
+
+ ValType asNonNullable() const {
+ MOZ_ASSERT(IsValid(tc_));
+ MOZ_ASSERT(!isBottom());
+ return ValType(RepackTypeCodeAsNonNullable(tc_));
+ }
+
+ bool isValidForUntypedSelect() const {
+ MOZ_ASSERT(IsValid(tc_));
+ if (isBottom()) {
+ return true;
+ }
+ switch (valType().kind()) {
+ case ValType::I32:
+ case ValType::F32:
+ case ValType::I64:
+ case ValType::F64:
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+#endif
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ bool operator==(const StackType& that) const {
+ MOZ_ASSERT(IsValid(tc_) && IsValid(that.tc_));
+ return tc_ == that.tc_;
+ }
+
+ bool operator!=(const StackType& that) const {
+ MOZ_ASSERT(IsValid(tc_) && IsValid(that.tc_));
+ return tc_ != that.tc_;
+ }
+};
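
Editorial note (not part of the patch): a tiny illustration of the bottom type's role. popStackType(), defined later in this header, yields StackType::bottom() when popping from a block whose stack base has become polymorphic, i.e. in unreachable code. The helper name below is hypothetical.

static void StackTypeBottomSketch() {
  StackType b = StackType::bottom();
  MOZ_ASSERT(b.isBottom());
  // Bottom stands in for any concrete type during validation of unreachable
  // code; for example, the untyped-select check above accepts it.
  MOZ_ASSERT(b.isValidForUntypedSelect());
}
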
+
+#ifdef DEBUG
+// Families of opcodes that share a signature and validation logic.
+enum class OpKind {
+ Block,
+ Loop,
+ Unreachable,
+ Drop,
+ I32,
+ I64,
+ F32,
+ F64,
+ V128,
+ Br,
+ BrIf,
+ BrTable,
+ Nop,
+ Unary,
+ Binary,
+ Comparison,
+ Conversion,
+ Load,
+ Store,
+ TeeStore,
+ MemorySize,
+ MemoryGrow,
+ Select,
+ GetLocal,
+ SetLocal,
+ TeeLocal,
+ GetGlobal,
+ SetGlobal,
+ TeeGlobal,
+ Call,
+ CallIndirect,
+ OldCallDirect,
+ OldCallIndirect,
+ Return,
+ If,
+ Else,
+ End,
+ Wait,
+ Wake,
+ Fence,
+ AtomicLoad,
+ AtomicStore,
+ AtomicBinOp,
+ AtomicCompareExchange,
+ OldAtomicLoad,
+ OldAtomicStore,
+ OldAtomicBinOp,
+ OldAtomicCompareExchange,
+ OldAtomicExchange,
+ MemOrTableCopy,
+ DataOrElemDrop,
+ MemFill,
+ MemOrTableInit,
+ TableFill,
+ TableGet,
+ TableGrow,
+ TableSet,
+ TableSize,
+ RefNull,
+ RefFunc,
+ RefAsNonNull,
+ BrOnNull,
+ StructNew,
+ StructGet,
+ StructSet,
+ StructNarrow,
+# ifdef ENABLE_WASM_SIMD
+ ExtractLane,
+ ReplaceLane,
+ VectorShift,
+ VectorSelect,
+ VectorShuffle,
+# endif
+# ifdef ENABLE_WASM_EXCEPTIONS
+ Catch,
+ Throw,
+ Try,
+# endif
+};
+
+// Return the OpKind for a given Op. This is used for sanity-checking that
+ // API users call the correct read function for each Op.
+OpKind Classify(OpBytes op);
+#endif
+
+// Common fields for linear memory access.
+template <typename Value>
+struct LinearMemoryAddress {
+ Value base;
+ uint32_t offset;
+ uint32_t align;
+
+ LinearMemoryAddress() : offset(0), align(0) {}
+ LinearMemoryAddress(Value base, uint32_t offset, uint32_t align)
+ : base(base), offset(offset), align(align) {}
+};
+
+template <typename ControlItem>
+class ControlStackEntry {
+ // Use a pair to optimize away empty ControlItem.
+ mozilla::CompactPair<BlockType, ControlItem> typeAndItem_;
+
+ // The "base" of a control stack entry is valueStack_.length() minus
+ // type().params().length(), i.e., the size of the value stack "below"
+ // this block.
+ uint32_t valueStackBase_;
+ bool polymorphicBase_;
+
+ LabelKind kind_;
+
+ public:
+ ControlStackEntry(LabelKind kind, BlockType type, uint32_t valueStackBase)
+ : typeAndItem_(type, ControlItem()),
+ valueStackBase_(valueStackBase),
+ polymorphicBase_(false),
+ kind_(kind) {
+ MOZ_ASSERT(type != BlockType());
+ }
+
+ LabelKind kind() const { return kind_; }
+ BlockType type() const { return typeAndItem_.first(); }
+ ResultType resultType() const { return type().results(); }
+ ResultType branchTargetType() const {
+ return kind_ == LabelKind::Loop ? type().params() : type().results();
+ }
+ uint32_t valueStackBase() const { return valueStackBase_; }
+ ControlItem& controlItem() { return typeAndItem_.second(); }
+ void setPolymorphicBase() { polymorphicBase_ = true; }
+ bool polymorphicBase() const { return polymorphicBase_; }
+
+ void switchToElse() {
+ MOZ_ASSERT(kind() == LabelKind::Then);
+ kind_ = LabelKind::Else;
+ polymorphicBase_ = false;
+ }
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+ void switchToCatch() {
+ MOZ_ASSERT(kind() == LabelKind::Try);
+ kind_ = LabelKind::Catch;
+ polymorphicBase_ = false;
+ }
+#endif
+};
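
Editorial note (not part of the patch): a short worked example of the valueStackBase_ bookkeeping described above; pushControl(), defined later in this header, performs the same subtraction. The function name is hypothetical.

static void ValueStackBaseSketch() {
  // Entering a block of type [i32, i32] -> [i64] while the value stack holds
  // five entries: the two parameters stay on the stack and now belong to the
  // new block, so its base is 5 - 2 == 3.
  uint32_t valueStackLength = 5;
  uint32_t numParams = 2;
  uint32_t valueStackBase = valueStackLength - numParams;
  MOZ_ASSERT(valueStackBase == 3);
  // Popping below this base from inside the block is a validation error
  // unless the base has become polymorphic (after an unconditional branch).
}
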
+
+template <typename Value>
+class TypeAndValueT {
+ // Use a pair to optimize away empty Value.
+ mozilla::CompactPair<StackType, Value> tv_;
+
+ public:
+ TypeAndValueT() : tv_(StackType::bottom(), Value()) {}
+ explicit TypeAndValueT(StackType type) : tv_(type, Value()) {}
+ explicit TypeAndValueT(ValType type) : tv_(StackType(type), Value()) {}
+ TypeAndValueT(StackType type, Value value) : tv_(type, value) {}
+ TypeAndValueT(ValType type, Value value) : tv_(StackType(type), value) {}
+ StackType type() const { return tv_.first(); }
+ StackType& typeRef() { return tv_.first(); }
+ Value value() const { return tv_.second(); }
+ void setValue(Value value) { tv_.second() = value; }
+};
+
+// An iterator over the bytes of a function body. It performs validation
+// and unpacks the data into a usable form.
+//
+// The MOZ_STACK_CLASS attribute here is because of the use of DebugOnly.
+// There's otherwise nothing inherent in this class which would require
+// it to be used on the stack.
+template <typename Policy>
+class MOZ_STACK_CLASS OpIter : private Policy {
+ public:
+ using Value = typename Policy::Value;
+ using ValueVector = typename Policy::ValueVector;
+ using TypeAndValue = TypeAndValueT<Value>;
+ typedef Vector<TypeAndValue, 8, SystemAllocPolicy> TypeAndValueStack;
+ using ControlItem = typename Policy::ControlItem;
+ using Control = ControlStackEntry<ControlItem>;
+ typedef Vector<Control, 8, SystemAllocPolicy> ControlStack;
+
+ private:
+ Decoder& d_;
+ const ModuleEnvironment& env_;
+
+ TypeAndValueStack valueStack_;
+ TypeAndValueStack elseParamStack_;
+ ControlStack controlStack_;
+
+#ifdef DEBUG
+ OpBytes op_;
+#endif
+ size_t offsetOfLastReadOp_;
+
+ [[nodiscard]] bool readFixedU8(uint8_t* out) { return d_.readFixedU8(out); }
+ [[nodiscard]] bool readFixedU32(uint32_t* out) {
+ return d_.readFixedU32(out);
+ }
+ [[nodiscard]] bool readVarS32(int32_t* out) { return d_.readVarS32(out); }
+ [[nodiscard]] bool readVarU32(uint32_t* out) { return d_.readVarU32(out); }
+ [[nodiscard]] bool readVarS64(int64_t* out) { return d_.readVarS64(out); }
+ [[nodiscard]] bool readVarU64(uint64_t* out) { return d_.readVarU64(out); }
+ [[nodiscard]] bool readFixedF32(float* out) { return d_.readFixedF32(out); }
+ [[nodiscard]] bool readFixedF64(double* out) { return d_.readFixedF64(out); }
+
+ [[nodiscard]] bool readMemOrTableIndex(bool isMem, uint32_t* index);
+ [[nodiscard]] bool readLinearMemoryAddress(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readLinearMemoryAddressAligned(
+ uint32_t byteSize, LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readBlockType(BlockType* type);
+ [[nodiscard]] bool readStructTypeIndex(uint32_t* typeIndex);
+ [[nodiscard]] bool readFieldIndex(uint32_t* fieldIndex,
+ const StructType& structType);
+
+ [[nodiscard]] bool popCallArgs(const ValTypeVector& expectedTypes,
+ ValueVector* values);
+
+ [[nodiscard]] bool failEmptyStack();
+ [[nodiscard]] bool popStackType(StackType* type, Value* value);
+ [[nodiscard]] bool popWithType(ValType expected, Value* value);
+ [[nodiscard]] bool popWithType(ResultType expected, ValueVector* values);
+ [[nodiscard]] bool popWithRefType(Value* value, StackType* type);
+ [[nodiscard]] bool popThenPushType(ResultType expected, ValueVector* values);
+ [[nodiscard]] bool topWithType(ResultType expected, ValueVector* values);
+
+ [[nodiscard]] bool pushControl(LabelKind kind, BlockType type);
+ [[nodiscard]] bool checkStackAtEndOfBlock(ResultType* type,
+ ValueVector* values);
+ [[nodiscard]] bool getControl(uint32_t relativeDepth, Control** controlEntry);
+ [[nodiscard]] bool checkBranchValue(uint32_t relativeDepth, ResultType* type,
+ ValueVector* values);
+ [[nodiscard]] bool checkBrTableEntry(uint32_t* relativeDepth,
+ ResultType prevBranchType,
+ ResultType* branchType,
+ ValueVector* branchValues);
+
+ [[nodiscard]] bool push(ValType t) { return valueStack_.emplaceBack(t); }
+ [[nodiscard]] bool push(TypeAndValue tv) { return valueStack_.append(tv); }
+ [[nodiscard]] bool push(ResultType t) {
+ for (size_t i = 0; i < t.length(); i++) {
+ if (!push(t[i])) {
+ return false;
+ }
+ }
+ return true;
+ }
+ void infalliblePush(StackType t) { valueStack_.infallibleEmplaceBack(t); }
+ void infalliblePush(ValType t) {
+ valueStack_.infallibleEmplaceBack(StackType(t));
+ }
+ void infalliblePush(TypeAndValue tv) { valueStack_.infallibleAppend(tv); }
+
+ void afterUnconditionalBranch() {
+ valueStack_.shrinkTo(controlStack_.back().valueStackBase());
+ controlStack_.back().setPolymorphicBase();
+ }
+
+ inline bool checkIsSubtypeOf(ValType lhs, ValType rhs);
+
+ public:
+#ifdef DEBUG
+ explicit OpIter(const ModuleEnvironment& env, Decoder& decoder)
+ : d_(decoder),
+ env_(env),
+ op_(OpBytes(Op::Limit)),
+ offsetOfLastReadOp_(0) {}
+#else
+ explicit OpIter(const ModuleEnvironment& env, Decoder& decoder)
+ : d_(decoder), env_(env), offsetOfLastReadOp_(0) {}
+#endif
+
+ // Return the decoding byte offset.
+ uint32_t currentOffset() const { return d_.currentOffset(); }
+
+ // Return the offset within the entire module of the last-read op.
+ size_t lastOpcodeOffset() const {
+ return offsetOfLastReadOp_ ? offsetOfLastReadOp_ : d_.currentOffset();
+ }
+
+ // Return a BytecodeOffset describing the location to which any trap or call
+ // performed by the current op should be attributed.
+ BytecodeOffset bytecodeOffset() const {
+ return BytecodeOffset(lastOpcodeOffset());
+ }
+
+ // Test whether the iterator has reached the end of the buffer.
+ bool done() const { return d_.done(); }
+
+ // Return a pointer to the end of the buffer being decoded by this iterator.
+ const uint8_t* end() const { return d_.end(); }
+
+ // Report a general failure.
+ [[nodiscard]] bool fail(const char* msg) MOZ_COLD;
+
+ // Report a general failure with a context.
+ [[nodiscard]] bool fail_ctx(const char* fmt, const char* context) MOZ_COLD;
+
+ // Report an unrecognized opcode.
+ [[nodiscard]] bool unrecognizedOpcode(const OpBytes* expr) MOZ_COLD;
+
+ // Return whether the innermost block has a polymorphic base of its stack.
+ // Ideally this accessor would be removed; consider using something else.
+ bool currentBlockHasPolymorphicBase() const {
+ return !controlStack_.empty() && controlStack_.back().polymorphicBase();
+ }
+
+ // ------------------------------------------------------------------------
+ // Decoding and validation interface.
+
+ [[nodiscard]] bool readOp(OpBytes* op);
+ [[nodiscard]] bool readFunctionStart(uint32_t funcIndex);
+ [[nodiscard]] bool readFunctionEnd(const uint8_t* bodyEnd);
+ [[nodiscard]] bool readReturn(ValueVector* values);
+ [[nodiscard]] bool readBlock(ResultType* paramType);
+ [[nodiscard]] bool readLoop(ResultType* paramType);
+ [[nodiscard]] bool readIf(ResultType* paramType, Value* condition);
+ [[nodiscard]] bool readElse(ResultType* paramType, ResultType* resultType,
+ ValueVector* thenResults);
+ [[nodiscard]] bool readEnd(LabelKind* kind, ResultType* type,
+ ValueVector* results,
+ ValueVector* resultsForEmptyElse);
+ void popEnd();
+ [[nodiscard]] bool readBr(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values);
+ [[nodiscard]] bool readBrIf(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values, Value* condition);
+ [[nodiscard]] bool readBrTable(Uint32Vector* depths, uint32_t* defaultDepth,
+ ResultType* defaultBranchValueType,
+ ValueVector* branchValues, Value* index);
+#ifdef ENABLE_WASM_EXCEPTIONS
+ [[nodiscard]] bool readTry(ResultType* type);
+ [[nodiscard]] bool readCatch(LabelKind* kind, uint32_t* eventIndex,
+ ResultType* paramType, ResultType* resultType,
+ ValueVector* tryResults);
+ [[nodiscard]] bool readThrow(uint32_t* eventIndex, ValueVector* argValues);
+#endif
+ [[nodiscard]] bool readUnreachable();
+ [[nodiscard]] bool readDrop();
+ [[nodiscard]] bool readUnary(ValType operandType, Value* input);
+ [[nodiscard]] bool readConversion(ValType operandType, ValType resultType,
+ Value* input);
+ [[nodiscard]] bool readBinary(ValType operandType, Value* lhs, Value* rhs);
+ [[nodiscard]] bool readComparison(ValType operandType, Value* lhs,
+ Value* rhs);
+ [[nodiscard]] bool readLoad(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr, Value* value);
+ [[nodiscard]] bool readTeeStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ Value* value);
+ [[nodiscard]] bool readNop();
+ [[nodiscard]] bool readMemorySize();
+ [[nodiscard]] bool readMemoryGrow(Value* input);
+ [[nodiscard]] bool readSelect(bool typed, StackType* type, Value* trueValue,
+ Value* falseValue, Value* condition);
+ [[nodiscard]] bool readGetLocal(const ValTypeVector& locals, uint32_t* id);
+ [[nodiscard]] bool readSetLocal(const ValTypeVector& locals, uint32_t* id,
+ Value* value);
+ [[nodiscard]] bool readTeeLocal(const ValTypeVector& locals, uint32_t* id,
+ Value* value);
+ [[nodiscard]] bool readGetGlobal(uint32_t* id);
+ [[nodiscard]] bool readSetGlobal(uint32_t* id, Value* value);
+ [[nodiscard]] bool readTeeGlobal(uint32_t* id, Value* value);
+ [[nodiscard]] bool readI32Const(int32_t* i32);
+ [[nodiscard]] bool readI64Const(int64_t* i64);
+ [[nodiscard]] bool readF32Const(float* f32);
+ [[nodiscard]] bool readF64Const(double* f64);
+ [[nodiscard]] bool readRefFunc(uint32_t* funcTypeIndex);
+ [[nodiscard]] bool readRefNull();
+ [[nodiscard]] bool readRefIsNull(Value* input);
+ [[nodiscard]] bool readRefAsNonNull(Value* input);
+ [[nodiscard]] bool readBrOnNull(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values, Value* condition);
+ [[nodiscard]] bool readCall(uint32_t* calleeIndex, ValueVector* argValues);
+ [[nodiscard]] bool readCallIndirect(uint32_t* funcTypeIndex,
+ uint32_t* tableIndex, Value* callee,
+ ValueVector* argValues);
+ [[nodiscard]] bool readOldCallDirect(uint32_t numFuncImports,
+ uint32_t* funcIndex,
+ ValueVector* argValues);
+ [[nodiscard]] bool readOldCallIndirect(uint32_t* funcTypeIndex, Value* callee,
+ ValueVector* argValues);
+ [[nodiscard]] bool readWake(LinearMemoryAddress<Value>* addr, Value* count);
+ [[nodiscard]] bool readWait(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* value, Value* timeout);
+ [[nodiscard]] bool readFence();
+ [[nodiscard]] bool readAtomicLoad(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize);
+ [[nodiscard]] bool readAtomicStore(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* value);
+ [[nodiscard]] bool readAtomicRMW(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* value);
+ [[nodiscard]] bool readAtomicCmpXchg(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* oldValue, Value* newValue);
+ [[nodiscard]] bool readMemOrTableCopy(bool isMem,
+ uint32_t* dstMemOrTableIndex,
+ Value* dst,
+ uint32_t* srcMemOrTableIndex,
+ Value* src, Value* len);
+ [[nodiscard]] bool readDataOrElemDrop(bool isData, uint32_t* segIndex);
+ [[nodiscard]] bool readMemFill(Value* start, Value* val, Value* len);
+ [[nodiscard]] bool readMemOrTableInit(bool isMem, uint32_t* segIndex,
+ uint32_t* dstTableIndex, Value* dst,
+ Value* src, Value* len);
+ [[nodiscard]] bool readTableFill(uint32_t* tableIndex, Value* start,
+ Value* val, Value* len);
+ [[nodiscard]] bool readTableGet(uint32_t* tableIndex, Value* index);
+ [[nodiscard]] bool readTableGrow(uint32_t* tableIndex, Value* initValue,
+ Value* delta);
+ [[nodiscard]] bool readTableSet(uint32_t* tableIndex, Value* index,
+ Value* value);
+ [[nodiscard]] bool readTableSize(uint32_t* tableIndex);
+ [[nodiscard]] bool readStructNew(uint32_t* typeIndex, ValueVector* argValues);
+ [[nodiscard]] bool readStructGet(uint32_t* typeIndex, uint32_t* fieldIndex,
+ Value* ptr);
+ [[nodiscard]] bool readStructSet(uint32_t* typeIndex, uint32_t* fieldIndex,
+ Value* ptr, Value* val);
+ [[nodiscard]] bool readStructNarrow(ValType* inputType, ValType* outputType,
+ Value* ptr);
+ [[nodiscard]] bool readValType(ValType* type);
+ [[nodiscard]] bool readHeapType(bool nullable, RefType* type);
+ [[nodiscard]] bool readReferenceType(ValType* type,
+ const char* const context);
+
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] bool readLaneIndex(uint32_t inputLanes, uint32_t* laneIndex);
+ [[nodiscard]] bool readExtractLane(ValType resultType, uint32_t inputLanes,
+ uint32_t* laneIndex, Value* input);
+ [[nodiscard]] bool readReplaceLane(ValType operandType, uint32_t inputLanes,
+ uint32_t* laneIndex, Value* baseValue,
+ Value* operand);
+ [[nodiscard]] bool readVectorShift(Value* baseValue, Value* shift);
+ [[nodiscard]] bool readVectorSelect(Value* v1, Value* v2, Value* controlMask);
+ [[nodiscard]] bool readVectorShuffle(Value* v1, Value* v2, V128* selectMask);
+ [[nodiscard]] bool readV128Const(V128* f64);
+ [[nodiscard]] bool readLoadSplat(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr);
+ [[nodiscard]] bool readLoadExtend(LinearMemoryAddress<Value>* addr);
+#endif
+
+ // At a location where readOp is allowed, peek at the next opcode
+ // without consuming it or updating any internal state.
+ // Never fails: returns uint16_t(Op::Limit) in op->b0 if it can't read.
+ void peekOp(OpBytes* op);
+
+ // ------------------------------------------------------------------------
+ // Stack management.
+
+ // Set the top N result values.
+ void setResults(size_t count, const ValueVector& values) {
+ MOZ_ASSERT(valueStack_.length() >= count);
+ size_t base = valueStack_.length() - count;
+ for (size_t i = 0; i < count; i++) {
+ valueStack_[base + i].setValue(values[i]);
+ }
+ }
+
+ bool getResults(size_t count, ValueVector* values) {
+ MOZ_ASSERT(valueStack_.length() >= count);
+ if (!values->resize(count)) {
+ return false;
+ }
+ size_t base = valueStack_.length() - count;
+ for (size_t i = 0; i < count; i++) {
+ (*values)[i] = valueStack_[base + i].value();
+ }
+ return true;
+ }
+
+ // Set the result value of the current top-of-value-stack expression.
+ void setResult(Value value) { valueStack_.back().setValue(value); }
+
+ // Return the result value of the current top-of-value-stack expression.
+ Value getResult() { return valueStack_.back().value(); }
+
+ // Return a reference to the top of the control stack.
+ ControlItem& controlItem() { return controlStack_.back().controlItem(); }
+
+ // Return a reference to an element in the control stack.
+ ControlItem& controlItem(uint32_t relativeDepth) {
+ return controlStack_[controlStack_.length() - 1 - relativeDepth]
+ .controlItem();
+ }
+
+ // Return a reference to the outermost element on the control stack.
+ ControlItem& controlOutermost() { return controlStack_[0].controlItem(); }
+
+ // Test whether the control-stack is empty, meaning we've consumed the final
+ // end of the function body.
+ bool controlStackEmpty() const { return controlStack_.empty(); }
+};
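
Editorial note (not part of the patch): a condensed sketch of how a consumer typically drives OpIter. "SketchPolicy", its trivial Value/ControlItem types, and the function name are hypothetical placeholders; the real validator and compilers supply richer policies and handle every opcode. Assumes the js::wasm declarations above are in scope.

struct SketchPolicy {
  using Value = int;                                      // no real values tracked
  using ValueVector = Vector<int, 8, SystemAllocPolicy>;  // shape required by OpIter
  using ControlItem = int;                                // no per-block compiler state
};

static bool SketchDecodeBody(const ModuleEnvironment& env, Decoder& d,
                             uint32_t funcIndex, const uint8_t* bodyEnd) {
  OpIter<SketchPolicy> iter(env, d);
  if (!iter.readFunctionStart(funcIndex)) {
    return false;
  }
  // Keep reading until the function body's own End has been popped.
  while (!iter.controlStackEmpty()) {
    OpBytes op;
    if (!iter.readOp(&op)) {
      return false;
    }
    switch (Op(op.b0)) {
      case Op::Nop:
        if (!iter.readNop()) {
          return false;
        }
        break;
      case Op::End: {
        LabelKind kind;
        ResultType type;
        SketchPolicy::ValueVector results, resultsForEmptyElse;
        if (!iter.readEnd(&kind, &type, &results, &resultsForEmptyElse)) {
          return false;
        }
        iter.popEnd();
        break;
      }
      default:
        // A real consumer dispatches on every opcode family here.
        return iter.unrecognizedOpcode(&op);
    }
  }
  return iter.readFunctionEnd(bodyEnd);
}
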
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkIsSubtypeOf(ValType actual, ValType expected) {
+ if (env_.types.isSubtypeOf(actual, expected)) {
+ return true;
+ }
+
+ UniqueChars actualText = ToString(actual);
+ if (!actualText) {
+ return false;
+ }
+
+ UniqueChars expectedText = ToString(expected);
+ if (!expectedText) {
+ return false;
+ }
+
+ UniqueChars error(
+ JS_smprintf("type mismatch: expression has type %s but expected %s",
+ actualText.get(), expectedText.get()));
+ if (!error) {
+ return false;
+ }
+
+ return fail(error.get());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::unrecognizedOpcode(const OpBytes* expr) {
+ UniqueChars error(JS_smprintf("unrecognized opcode: %x %x", expr->b0,
+ IsPrefixByte(expr->b0) ? expr->b1 : 0));
+ if (!error) {
+ return false;
+ }
+
+ return fail(error.get());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::fail(const char* msg) {
+ return d_.fail(lastOpcodeOffset(), msg);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::fail_ctx(const char* fmt, const char* context) {
+ UniqueChars error(JS_smprintf(fmt, context));
+ if (!error) {
+ return false;
+ }
+ return fail(error.get());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::failEmptyStack() {
+ return valueStack_.empty() ? fail("popping value from empty stack")
+ : fail("popping value from outside block");
+}
+
+// This function pops exactly one value from the stack, yielding Bottom types in
+// various cases and therefore making it the caller's responsibility to do the
+// right thing for StackType::Bottom. Prefer (pop|top)WithType. This is an
+// optimization for the super-common case where the caller is statically
+// expecting the resulttype `[valtype]`.
+template <typename Policy>
+inline bool OpIter<Policy>::popStackType(StackType* type, Value* value) {
+ Control& block = controlStack_.back();
+
+ MOZ_ASSERT(valueStack_.length() >= block.valueStackBase());
+ if (MOZ_UNLIKELY(valueStack_.length() == block.valueStackBase())) {
+ // If the base of this block's stack is polymorphic, then we can pop a
+ // dummy value of the bottom type; it won't be used since we're in
+ // unreachable code.
+ if (block.polymorphicBase()) {
+ *type = StackType::bottom();
+ *value = Value();
+
+ // Maintain the invariant that, after a pop, there is always memory
+ // reserved to push a value infallibly.
+ return valueStack_.reserve(valueStack_.length() + 1);
+ }
+
+ return failEmptyStack();
+ }
+
+ TypeAndValue& tv = valueStack_.back();
+ *type = tv.type();
+ *value = tv.value();
+ valueStack_.popBack();
+ return true;
+}
+
+// This function pops exactly one value from the stack, checking that it has the
+// expected type which can either be a specific value type or a type variable.
+template <typename Policy>
+inline bool OpIter<Policy>::popWithType(ValType expectedType, Value* value) {
+ StackType stackType;
+ if (!popStackType(&stackType, value)) {
+ return false;
+ }
+
+ return stackType.isBottom() ||
+ checkIsSubtypeOf(stackType.valType(), expectedType);
+}
+
+// Pops each of the given expected types (in reverse, because it's a stack).
+template <typename Policy>
+inline bool OpIter<Policy>::popWithType(ResultType expected,
+ ValueVector* values) {
+ size_t expectedLength = expected.length();
+ if (!values->resize(expectedLength)) {
+ return false;
+ }
+ for (size_t i = 0; i < expectedLength; i++) {
+ size_t reverseIndex = expectedLength - i - 1;
+ ValType expectedType = expected[reverseIndex];
+ Value* value = &(*values)[reverseIndex];
+ if (!popWithType(expectedType, value)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// This function pops exactly one value from the stack, checking that it is a
+// reference type.
+template <typename Policy>
+inline bool OpIter<Policy>::popWithRefType(Value* value, StackType* type) {
+ if (!popStackType(type, value)) {
+ return false;
+ }
+
+ if (type->isBottom() || type->valType().isReference()) {
+ return true;
+ }
+
+ UniqueChars actualText = ToString(type->valType());
+ if (!actualText) {
+ return false;
+ }
+
+ UniqueChars error(JS_smprintf(
+ "type mismatch: expression has type %s but expected a reference type",
+ actualText.get()));
+ if (!error) {
+ return false;
+ }
+
+ return fail(error.get());
+}
+
+// This function is an optimization of the sequence:
+// popWithType(ResultType, tmp)
+// push(ResultType, tmp)
+template <typename Policy>
+inline bool OpIter<Policy>::popThenPushType(ResultType expected,
+ ValueVector* values) {
+ if (expected.empty()) {
+ return true;
+ }
+
+ Control& block = controlStack_.back();
+
+ size_t expectedLength = expected.length();
+ if (values && !values->resize(expectedLength)) {
+ return false;
+ }
+
+ for (size_t i = 0; i != expectedLength; i++) {
+ // We're iterating as if we were popping each expected/actual type one by
+ // one, which means iterating the array of expected results backwards.
+ // The "current" value stack length refers to what the value stack length
+ // would have been if we were popping it.
+ size_t reverseIndex = expectedLength - i - 1;
+ ValType expectedType = expected[reverseIndex];
+ auto collectValue = [&](const Value& v) {
+ if (values) {
+ (*values)[reverseIndex] = v;
+ }
+ };
+
+ size_t currentValueStackLength = valueStack_.length() - i;
+
+ MOZ_ASSERT(currentValueStackLength >= block.valueStackBase());
+ if (currentValueStackLength == block.valueStackBase()) {
+ if (!block.polymorphicBase()) {
+ return failEmptyStack();
+ }
+
+ // If the base of this block's stack is polymorphic, then we can just
+ // pull out as many fake values as we need to validate; they won't be used
+ // since we're in unreachable code. We must, however, push these types on
+ // the operand stack since they are now fixed by this constraint.
+ if (!valueStack_.insert(valueStack_.begin() + currentValueStackLength,
+ TypeAndValue(expectedType))) {
+ return false;
+ }
+
+ collectValue(Value());
+ } else {
+ TypeAndValue& observed = valueStack_[currentValueStackLength - 1];
+
+ if (observed.type().isBottom()) {
+ observed.typeRef() = StackType(expectedType);
+ collectValue(Value());
+ } else {
+ if (!checkIsSubtypeOf(observed.type().valType(), expectedType)) {
+ return false;
+ }
+
+ collectValue(observed.value());
+ }
+ }
+ }
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::topWithType(ResultType expected,
+ ValueVector* values) {
+ if (expected.empty()) {
+ return true;
+ }
+
+ Control& block = controlStack_.back();
+
+ size_t expectedLength = expected.length();
+ if (values && !values->resize(expectedLength)) {
+ return false;
+ }
+
+ for (size_t i = 0; i != expectedLength; i++) {
+ // We're iterating as if we were popping each expected/actual type one by
+ // one, which means iterating the array of expected results backwards.
+ // The "current" value stack length refers to what the value stack length
+ // would have been if we were popping it.
+ size_t reverseIndex = expectedLength - i - 1;
+ ValType expectedType = expected[reverseIndex];
+ auto collectValue = [&](const Value& v) {
+ if (values) {
+ (*values)[reverseIndex] = v;
+ }
+ };
+
+ size_t currentValueStackLength = valueStack_.length() - i;
+
+ MOZ_ASSERT(currentValueStackLength >= block.valueStackBase());
+ if (currentValueStackLength == block.valueStackBase()) {
+ if (!block.polymorphicBase()) {
+ return failEmptyStack();
+ }
+
+ // If the base of this block's stack is polymorphic, then we can just
+ // pull out as many fake values as we need to validate; they won't be used
+ // since we're in unreachable code.
+ if (!valueStack_.insert(valueStack_.begin() + currentValueStackLength,
+ TypeAndValue())) {
+ return false;
+ }
+
+ collectValue(Value());
+ } else {
+ TypeAndValue& observed = valueStack_[currentValueStackLength - 1];
+
+ if (observed.type().isBottom()) {
+ collectValue(Value());
+ } else {
+ if (!checkIsSubtypeOf(observed.type().valType(), expectedType)) {
+ return false;
+ }
+
+ collectValue(observed.value());
+ }
+ }
+ }
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::pushControl(LabelKind kind, BlockType type) {
+ ResultType paramType = type.params();
+
+ ValueVector values;
+ if (!popThenPushType(paramType, &values)) {
+ return false;
+ }
+ MOZ_ASSERT(valueStack_.length() >= paramType.length());
+ uint32_t valueStackBase = valueStack_.length() - paramType.length();
+ return controlStack_.emplaceBack(kind, type, valueStackBase);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkStackAtEndOfBlock(ResultType* expectedType,
+ ValueVector* values) {
+ Control& block = controlStack_.back();
+ *expectedType = block.type().results();
+
+ MOZ_ASSERT(valueStack_.length() >= block.valueStackBase());
+ if (expectedType->length() < valueStack_.length() - block.valueStackBase()) {
+ return fail("unused values not explicitly dropped by end of block");
+ }
+
+ return popThenPushType(*expectedType, values);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::getControl(uint32_t relativeDepth,
+ Control** controlEntry) {
+ if (relativeDepth >= controlStack_.length()) {
+ return fail("branch depth exceeds current nesting level");
+ }
+
+ *controlEntry = &controlStack_[controlStack_.length() - 1 - relativeDepth];
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBlockType(BlockType* type) {
+ uint8_t nextByte;
+ if (!d_.peekByte(&nextByte)) {
+ return fail("unable to read block type");
+ }
+
+ if (nextByte == uint8_t(TypeCode::BlockVoid)) {
+ d_.uncheckedReadFixedU8();
+ *type = BlockType::VoidToVoid();
+ return true;
+ }
+
+ if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
+ ValType v;
+ if (!readValType(&v)) {
+ return false;
+ }
+ *type = BlockType::VoidToSingle(v);
+ return true;
+ }
+
+#ifdef ENABLE_WASM_MULTI_VALUE
+ if (!env_.multiValueEnabled()) {
+ return fail("invalid block type reference");
+ }
+
+ int32_t x;
+ if (!d_.readVarS32(&x) || x < 0 || uint32_t(x) >= env_.types.length()) {
+ return fail("invalid block type type index");
+ }
+
+ if (!env_.types.isFuncType(x)) {
+ return fail("block type type index must be func type");
+ }
+
+ *type = BlockType::Func(env_.types.funcType(x));
+
+ return true;
+#else
+ return fail("invalid block type reference");
+#endif
+}
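
Editorial note (not part of the patch): the three encodings readBlockType() accepts, shown as their leading bytes. The byte values are standard wasm binary-format encodings and are not defined in this patch.

// 0x40 : TypeCode::BlockVoid, the empty block type [] -> [].
// 0x7f : an SLEB128-negative value type (here i32), giving [] -> [i32].
// 0x02 : a non-negative SLEB128 type index selecting a function type from the
//        module's type section; only valid with multi-value support.
static const uint8_t kVoidBlockType[] = {0x40};
static const uint8_t kSingleResultBlockType[] = {0x7f};
static const uint8_t kIndexedBlockType[] = {0x02};
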
+
+template <typename Policy>
+inline bool OpIter<Policy>::readOp(OpBytes* op) {
+ MOZ_ASSERT(!controlStack_.empty());
+
+ offsetOfLastReadOp_ = d_.currentOffset();
+
+ if (MOZ_UNLIKELY(!d_.readOp(op))) {
+ return fail("unable to read opcode");
+ }
+
+#ifdef DEBUG
+ op_ = *op;
+#endif
+
+ return true;
+}
+
+template <typename Policy>
+inline void OpIter<Policy>::peekOp(OpBytes* op) {
+ const uint8_t* pos = d_.currentPosition();
+
+ if (MOZ_UNLIKELY(!d_.readOp(op))) {
+ op->b0 = uint16_t(Op::Limit);
+ }
+
+ d_.rollbackPosition(pos);
+}
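
Editorial note (not part of the patch): a small sketch of the peek-then-decide pattern peekOp() enables; the surrounding function is hypothetical.

template <typename Policy>
static bool PeekSketch(OpIter<Policy>& iter) {
  OpBytes next;
  iter.peekOp(&next);
  // peekOp never fails; an unreadable opcode is reported as Op::Limit, and the
  // decoder position is left untouched either way.
  if (Op(next.b0) == Op::Limit) {
    return false;
  }
  // The caller can now choose a reader based on `next` and only then call
  // readOp() to consume the opcode.
  return true;
}
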
+
+template <typename Policy>
+inline bool OpIter<Policy>::readFunctionStart(uint32_t funcIndex) {
+ MOZ_ASSERT(elseParamStack_.empty());
+ MOZ_ASSERT(valueStack_.empty());
+ MOZ_ASSERT(controlStack_.empty());
+ MOZ_ASSERT(op_.b0 == uint16_t(Op::Limit));
+ BlockType type = BlockType::FuncResults(*env_.funcs[funcIndex].type);
+ return pushControl(LabelKind::Body, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readFunctionEnd(const uint8_t* bodyEnd) {
+ if (d_.currentPosition() != bodyEnd) {
+ return fail("function body length mismatch");
+ }
+
+ if (!controlStack_.empty()) {
+ return fail("unbalanced function body control flow");
+ }
+ MOZ_ASSERT(elseParamStack_.empty());
+
+#ifdef DEBUG
+ op_ = OpBytes(Op::Limit);
+#endif
+ valueStack_.clear();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readReturn(ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Return);
+
+ Control& body = controlStack_[0];
+ MOZ_ASSERT(body.kind() == LabelKind::Body);
+
+ if (!popWithType(body.resultType(), values)) {
+ return false;
+ }
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBlock(ResultType* paramType) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Block);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ return pushControl(LabelKind::Block, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoop(ResultType* paramType) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Loop);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ return pushControl(LabelKind::Loop, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readIf(ResultType* paramType, Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::If);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+
+ if (!pushControl(LabelKind::Then, type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ size_t paramsLength = type.params().length();
+ return elseParamStack_.append(valueStack_.end() - paramsLength, paramsLength);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readElse(ResultType* paramType,
+ ResultType* resultType,
+ ValueVector* thenResults) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Else);
+
+ Control& block = controlStack_.back();
+ if (block.kind() != LabelKind::Then) {
+ return fail("else can only be used within an if");
+ }
+
+ *paramType = block.type().params();
+ if (!checkStackAtEndOfBlock(resultType, thenResults)) {
+ return false;
+ }
+
+ valueStack_.shrinkTo(block.valueStackBase());
+
+ size_t nparams = block.type().params().length();
+ MOZ_ASSERT(elseParamStack_.length() >= nparams);
+ valueStack_.infallibleAppend(elseParamStack_.end() - nparams, nparams);
+ elseParamStack_.shrinkBy(nparams);
+
+ block.switchToElse();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readEnd(LabelKind* kind, ResultType* type,
+ ValueVector* results,
+ ValueVector* resultsForEmptyElse) {
+ MOZ_ASSERT(Classify(op_) == OpKind::End);
+
+ if (!checkStackAtEndOfBlock(type, results)) {
+ return false;
+ }
+
+ Control& block = controlStack_.back();
+
+ if (block.kind() == LabelKind::Then) {
+ ResultType params = block.type().params();
+ // If an `if` block ends with `end` instead of `else`, then the `else` block
+ // implicitly passes the `if` parameters through as the `else` results. In
+ // that case, check that the `if`'s param type matches its result type.
+ if (params != block.type().results()) {
+ return fail("if without else with a result value");
+ }
+
+ size_t nparams = params.length();
+ MOZ_ASSERT(elseParamStack_.length() >= nparams);
+ if (!resultsForEmptyElse->resize(nparams)) {
+ return false;
+ }
+ const TypeAndValue* elseParams = elseParamStack_.end() - nparams;
+ for (size_t i = 0; i < nparams; i++) {
+ (*resultsForEmptyElse)[i] = elseParams[i].value();
+ }
+ elseParamStack_.shrinkBy(nparams);
+ }
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+ if (block.kind() == LabelKind::Try) {
+ return fail("try without catch or unwind not allowed");
+ }
+#endif
+
+ *kind = block.kind();
+ return true;
+}
+
+template <typename Policy>
+inline void OpIter<Policy>::popEnd() {
+ MOZ_ASSERT(Classify(op_) == OpKind::End);
+
+ controlStack_.popBack();
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkBranchValue(uint32_t relativeDepth,
+ ResultType* type,
+ ValueVector* values) {
+ Control* block = nullptr;
+ if (!getControl(relativeDepth, &block)) {
+ return false;
+ }
+
+ *type = block->branchTargetType();
+ return topWithType(*type, values);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBr(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Br);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br depth");
+ }
+
+ if (!checkBranchValue(*relativeDepth, type, values)) {
+ return false;
+ }
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrIf(uint32_t* relativeDepth, ResultType* type,
+ ValueVector* values, Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrIf);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br_if depth");
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+
+ return checkBranchValue(*relativeDepth, type, values);
+}
+
+#define UNKNOWN_ARITY UINT32_MAX
+
+template <typename Policy>
+inline bool OpIter<Policy>::checkBrTableEntry(uint32_t* relativeDepth,
+ ResultType prevType,
+ ResultType* type,
+ ValueVector* branchValues) {
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br_table depth");
+ }
+
+ Control* block = nullptr;
+ if (!getControl(*relativeDepth, &block)) {
+ return false;
+ }
+
+ *type = block->branchTargetType();
+
+ if (prevType != ResultType()) {
+ if (prevType.length() != type->length()) {
+ return fail("br_table targets must all have the same arity");
+ }
+
+ // Avoid re-collecting the same values for subsequent branch targets.
+ branchValues = nullptr;
+ }
+
+ return topWithType(*type, branchValues);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrTable(Uint32Vector* depths,
+ uint32_t* defaultDepth,
+ ResultType* defaultBranchType,
+ ValueVector* branchValues,
+ Value* index) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrTable);
+
+ uint32_t tableLength;
+ if (!readVarU32(&tableLength)) {
+ return fail("unable to read br_table table length");
+ }
+
+ if (tableLength > MaxBrTableElems) {
+ return fail("br_table too big");
+ }
+
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ if (!depths->resize(tableLength)) {
+ return false;
+ }
+
+ ResultType prevBranchType;
+ for (uint32_t i = 0; i < tableLength; i++) {
+ ResultType branchType;
+ if (!checkBrTableEntry(&(*depths)[i], prevBranchType, &branchType,
+ branchValues)) {
+ return false;
+ }
+ prevBranchType = branchType;
+ }
+
+ if (!checkBrTableEntry(defaultDepth, prevBranchType, defaultBranchType,
+ branchValues)) {
+ return false;
+ }
+
+ MOZ_ASSERT(*defaultBranchType != ResultType());
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+#undef UNKNOWN_ARITY
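+
+// For example (illustrative, not exhaustive): a br_table whose targets are a
+// block expecting [i32] and a block expecting [] fails here with "br_table
+// targets must all have the same arity"; the actual types of the branch
+// values are then checked by topWithType against each target in turn.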
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+template <typename Policy>
+inline bool OpIter<Policy>::readTry(ResultType* paramType) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Try);
+
+ BlockType type;
+ if (!readBlockType(&type)) {
+ return false;
+ }
+
+ *paramType = type.params();
+ return pushControl(LabelKind::Try, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readCatch(LabelKind* kind, uint32_t* eventIndex,
+ ResultType* paramType,
+ ResultType* resultType,
+ ValueVector* tryResults) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Catch);
+
+ if (!readVarU32(eventIndex)) {
+ return fail("expected event index");
+ }
+ if (*eventIndex >= env_.events.length()) {
+ return fail("event index out of range");
+ }
+
+ Control& block = controlStack_.back();
+ if (block.kind() != LabelKind::Try && block.kind() != LabelKind::Catch) {
+ return fail("catch can only be used within a try");
+ }
+ *kind = block.kind();
+ *paramType = block.type().params();
+
+ if (!checkStackAtEndOfBlock(resultType, tryResults)) {
+ return false;
+ }
+
+ valueStack_.shrinkTo(block.valueStackBase());
+ if (block.kind() == LabelKind::Try) {
+ block.switchToCatch();
+ }
+
+ return push(env_.events[*eventIndex].resultType());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readThrow(uint32_t* eventIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Throw);
+
+ if (!readVarU32(eventIndex)) {
+ return fail("expected event index");
+ }
+ if (*eventIndex >= env_.events.length()) {
+ return fail("event index out of range");
+ }
+
+ if (!popWithType(env_.events[*eventIndex].resultType(), argValues)) {
+ return false;
+ }
+
+ afterUnconditionalBranch();
+ return true;
+}
+#endif
+
+template <typename Policy>
+inline bool OpIter<Policy>::readUnreachable() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Unreachable);
+
+ afterUnconditionalBranch();
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readDrop() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Drop);
+ StackType type;
+ Value value;
+ return popStackType(&type, &value);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readUnary(ValType operandType, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Unary);
+
+ if (!popWithType(operandType, input)) {
+ return false;
+ }
+
+ infalliblePush(operandType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readConversion(ValType operandType,
+ ValType resultType, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Conversion);
+
+ if (!popWithType(operandType, input)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBinary(ValType operandType, Value* lhs,
+ Value* rhs) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Binary);
+
+ if (!popWithType(operandType, rhs)) {
+ return false;
+ }
+
+ if (!popWithType(operandType, lhs)) {
+ return false;
+ }
+
+ infalliblePush(operandType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readComparison(ValType operandType, Value* lhs,
+ Value* rhs) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Comparison);
+
+ if (!popWithType(operandType, rhs)) {
+ return false;
+ }
+
+ if (!popWithType(operandType, lhs)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+
+ return true;
+}
+
+// For memories, the index is currently always a placeholder zero byte.
+//
+// For tables, the index is a placeholder zero byte until we get multi-table
+// with the reftypes proposal.
+//
+// The zero-ness of the value must be checked by the caller.
+template <typename Policy>
+inline bool OpIter<Policy>::readMemOrTableIndex(bool isMem, uint32_t* index) {
+#ifdef ENABLE_WASM_REFTYPES
+ bool readByte = isMem;
+#else
+ bool readByte = true;
+#endif
+ if (readByte) {
+ uint8_t indexTmp;
+ if (!readFixedU8(&indexTmp)) {
+ return fail("unable to read memory or table index");
+ }
+ *index = indexTmp;
+ } else {
+ if (!readVarU32(index)) {
+ return fail("unable to read memory or table index");
+ }
+ }
+ return true;
+}
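+
+// For example (illustrative): readMemOrTableCopy below calls this helper
+// twice, first for the destination index and then for the source; for
+// memory.copy both are placeholder zero bytes, and the caller rejects any
+// non-zero value.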
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLinearMemoryAddress(
+ uint32_t byteSize, LinearMemoryAddress<Value>* addr) {
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t alignLog2;
+ if (!readFixedU8(&alignLog2)) {
+ return fail("unable to read load alignment");
+ }
+
+ if (!readVarU32(&addr->offset)) {
+ return fail("unable to read load offset");
+ }
+
+ if (alignLog2 >= 32 || (uint32_t(1) << alignLog2) > byteSize) {
+ return fail("greater than natural alignment");
+ }
+
+ if (!popWithType(ValType::I32, &addr->base)) {
+ return false;
+ }
+
+ addr->align = uint32_t(1) << alignLog2;
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLinearMemoryAddressAligned(
+ uint32_t byteSize, LinearMemoryAddress<Value>* addr) {
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ if (addr->align != byteSize) {
+ return fail("not natural alignment");
+ }
+
+ return true;
+}
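+
+// A worked example (illustrative): an i32 access has byteSize = 4, so
+// alignLog2 values 0, 1, and 2 (align 1, 2, or 4) are accepted by
+// readLinearMemoryAddress, while alignLog2 = 3 (align 8) fails as greater
+// than natural alignment. The atomic accesses below use the *Aligned variant
+// and therefore additionally require exactly alignLog2 = 2 for a 4-byte
+// access.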
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoad(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Load);
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Store);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTeeStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeStore);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(TypeAndValue(resultType, *value));
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readNop() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Nop);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemorySize() {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemorySize);
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t flags;
+ if (!readFixedU8(&flags)) {
+ return fail("failed to read memory flags");
+ }
+
+ if (flags != uint8_t(MemoryTableFlags::Default)) {
+ return fail("unexpected flags");
+ }
+
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemoryGrow(Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemoryGrow);
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t flags;
+ if (!readFixedU8(&flags)) {
+ return fail("failed to read memory flags");
+ }
+
+ if (flags != uint8_t(MemoryTableFlags::Default)) {
+ return fail("unexpected flags");
+ }
+
+ if (!popWithType(ValType::I32, input)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readSelect(bool typed, StackType* type,
+ Value* trueValue, Value* falseValue,
+ Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Select);
+
+ if (typed) {
+ uint32_t length;
+ if (!readVarU32(&length)) {
+ return fail("unable to read select result length");
+ }
+ if (length != 1) {
+ return fail("bad number of results");
+ }
+ ValType result;
+ if (!readValType(&result)) {
+ return fail("invalid result type for select");
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+ if (!popWithType(result, falseValue)) {
+ return false;
+ }
+ if (!popWithType(result, trueValue)) {
+ return false;
+ }
+
+ *type = StackType(result);
+ infalliblePush(*type);
+ return true;
+ }
+
+ if (!popWithType(ValType::I32, condition)) {
+ return false;
+ }
+
+ StackType falseType;
+ if (!popStackType(&falseType, falseValue)) {
+ return false;
+ }
+
+ StackType trueType;
+ if (!popStackType(&trueType, trueValue)) {
+ return false;
+ }
+
+ if (!falseType.isValidForUntypedSelect() ||
+ !trueType.isValidForUntypedSelect()) {
+ return fail("invalid types for untyped select");
+ }
+
+ if (falseType.isBottom()) {
+ *type = trueType;
+ } else if (trueType.isBottom() || falseType == trueType) {
+ *type = falseType;
+ } else {
+ return fail("select operand types must match");
+ }
+
+ infalliblePush(*type);
+ return true;
+}
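+
+// For example (illustrative): an untyped select over two i32 operands yields
+// i32; if one operand comes from unreachable code its stack type is bottom
+// and the other operand's type is used; operands of reference type are not
+// valid for untyped select and must use the typed form handled above.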
+
+template <typename Policy>
+inline bool OpIter<Policy>::readGetLocal(const ValTypeVector& locals,
+ uint32_t* id) {
+ MOZ_ASSERT(Classify(op_) == OpKind::GetLocal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read local index");
+ }
+
+ if (*id >= locals.length()) {
+ return fail("local.get index out of range");
+ }
+
+ return push(locals[*id]);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readSetLocal(const ValTypeVector& locals,
+ uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::SetLocal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read local index");
+ }
+
+ if (*id >= locals.length()) {
+ return fail("local.set index out of range");
+ }
+
+ return popWithType(locals[*id], value);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTeeLocal(const ValTypeVector& locals,
+ uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeLocal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read local index");
+ }
+
+ if (*id >= locals.length()) {
+ return fail("local.set index out of range");
+ }
+
+ ValueVector single;
+ if (!popThenPushType(ResultType::Single(locals[*id]), &single)) {
+ return false;
+ }
+
+ *value = single[0];
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readGetGlobal(uint32_t* id) {
+ MOZ_ASSERT(Classify(op_) == OpKind::GetGlobal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read global index");
+ }
+
+ if (*id >= env_.globals.length()) {
+ return fail("global.get index out of range");
+ }
+
+ return push(env_.globals[*id].type());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readSetGlobal(uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::SetGlobal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read global index");
+ }
+
+ if (*id >= env_.globals.length()) {
+ return fail("global.set index out of range");
+ }
+
+ if (!env_.globals[*id].isMutable()) {
+ return fail("can't write an immutable global");
+ }
+
+ return popWithType(env_.globals[*id].type(), value);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTeeGlobal(uint32_t* id, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeGlobal);
+
+ if (!readVarU32(id)) {
+ return fail("unable to read global index");
+ }
+
+ if (*id >= env_.globals.length()) {
+ return fail("global.set index out of range");
+ }
+
+ if (!env_.globals[*id].isMutable()) {
+ return fail("can't write an immutable global");
+ }
+
+ ValueVector single;
+ if (!popThenPushType(ResultType::Single(env_.globals[*id].type()), &single)) {
+ return false;
+ }
+
+ MOZ_ASSERT(single.length() == 1);
+ *value = single[0];
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readI32Const(int32_t* i32) {
+ MOZ_ASSERT(Classify(op_) == OpKind::I32);
+
+ if (!readVarS32(i32)) {
+ return fail("failed to read I32 constant");
+ }
+
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readI64Const(int64_t* i64) {
+ MOZ_ASSERT(Classify(op_) == OpKind::I64);
+
+ if (!readVarS64(i64)) {
+ return fail("failed to read I64 constant");
+ }
+
+ return push(ValType::I64);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readF32Const(float* f32) {
+ MOZ_ASSERT(Classify(op_) == OpKind::F32);
+
+ if (!readFixedF32(f32)) {
+ return fail("failed to read F32 constant");
+ }
+
+ return push(ValType::F32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readF64Const(double* f64) {
+ MOZ_ASSERT(Classify(op_) == OpKind::F64);
+
+ if (!readFixedF64(f64)) {
+ return fail("failed to read F64 constant");
+ }
+
+ return push(ValType::F64);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefFunc(uint32_t* funcTypeIndex) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefFunc);
+
+ if (!readVarU32(funcTypeIndex)) {
+ return fail("unable to read function index");
+ }
+ if (*funcTypeIndex >= env_.funcs.length()) {
+ return fail("function index out of range");
+ }
+ if (!env_.validForRefFunc.getBit(*funcTypeIndex)) {
+ return fail(
+ "function index is not declared in a section before the code section");
+ }
+ return push(RefType::func());
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefNull() {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefNull);
+
+ RefType type;
+ if (!readHeapType(true, &type)) {
+ return false;
+ }
+ return push(type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefIsNull(Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Conversion);
+
+ StackType type;
+ if (!popWithRefType(input, &type)) {
+ return false;
+ }
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readRefAsNonNull(Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::RefAsNonNull);
+
+ StackType type;
+ if (!popWithRefType(input, &type)) {
+ return false;
+ }
+
+ if (type.isBottom()) {
+ infalliblePush(type);
+ } else {
+ infalliblePush(type.asNonNullable());
+ }
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readBrOnNull(uint32_t* relativeDepth,
+ ResultType* type, ValueVector* values,
+ Value* condition) {
+ MOZ_ASSERT(Classify(op_) == OpKind::BrOnNull);
+
+ if (!readVarU32(relativeDepth)) {
+ return fail("unable to read br_on_null depth");
+ }
+
+ StackType refType;
+ if (!popWithRefType(condition, &refType)) {
+ return false;
+ }
+
+ if (!checkBranchValue(*relativeDepth, type, values)) {
+ return false;
+ }
+
+ if (refType.isBottom()) {
+ infalliblePush(refType);
+ } else {
+ infalliblePush(refType.asNonNullable());
+ }
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readValType(ValType* type) {
+ return d_.readValType(env_.types, env_.features, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readHeapType(bool nullable, RefType* type) {
+ return d_.readHeapType(env_.types, env_.features, nullable, type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readReferenceType(ValType* type,
+ const char* context) {
+ if (!readValType(type) || !type->isReference()) {
+ return fail_ctx("invalid reference type for %s", context);
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::popCallArgs(const ValTypeVector& expectedTypes,
+ ValueVector* values) {
+ // Iterate through the argument types backward so that pops occur in the
+ // right order.
+
+ if (!values->resize(expectedTypes.length())) {
+ return false;
+ }
+
+ for (int32_t i = expectedTypes.length() - 1; i >= 0; i--) {
+ if (!popWithType(expectedTypes[i], &(*values)[i])) {
+ return false;
+ }
+ }
+
+ return true;
+}
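+
+// For example (illustrative): for a callee taking (i32, f64), the f64
+// argument sits on top of the value stack and is popped first into
+// (*values)[1], then the i32 into (*values)[0], so the collected values
+// stay in argument order.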
+
+template <typename Policy>
+inline bool OpIter<Policy>::readCall(uint32_t* funcTypeIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Call);
+
+ if (!readVarU32(funcTypeIndex)) {
+ return fail("unable to read call function index");
+ }
+
+ if (*funcTypeIndex >= env_.funcs.length()) {
+ return fail("callee index out of range");
+ }
+
+ const FuncType& funcType = *env_.funcs[*funcTypeIndex].type;
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readCallIndirect(uint32_t* funcTypeIndex,
+ uint32_t* tableIndex,
+ Value* callee,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::CallIndirect);
+ MOZ_ASSERT(funcTypeIndex != tableIndex);
+
+ if (!readVarU32(funcTypeIndex)) {
+ return fail("unable to read call_indirect signature index");
+ }
+
+ if (*funcTypeIndex >= env_.numTypes()) {
+ return fail("signature index out of range");
+ }
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read call_indirect table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ // Special case this for improved user experience.
+ if (!env_.tables.length()) {
+ return fail("can't call_indirect without a table");
+ }
+ return fail("table index out of range for call_indirect");
+ }
+ if (!env_.tables[*tableIndex].elemType.isFunc()) {
+ return fail("indirect calls must go through a table of 'funcref'");
+ }
+
+ if (!popWithType(ValType::I32, callee)) {
+ return false;
+ }
+
+ if (!env_.types.isFuncType(*funcTypeIndex)) {
+ return fail("expected signature type");
+ }
+
+ const FuncType& funcType = env_.types.funcType(*funcTypeIndex);
+
+#ifdef WASM_PRIVATE_REFTYPES
+ if (env_.tables[*tableIndex].importedOrExported &&
+ funcType.exposesTypeIndex()) {
+ return fail("cannot expose indexed reference type");
+ }
+#endif
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readOldCallDirect(uint32_t numFuncImports,
+ uint32_t* funcTypeIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::OldCallDirect);
+
+ uint32_t funcDefIndex;
+ if (!readVarU32(&funcDefIndex)) {
+ return fail("unable to read call function index");
+ }
+
+ if (UINT32_MAX - funcDefIndex < numFuncImports) {
+ return fail("callee index out of range");
+ }
+
+ *funcTypeIndex = numFuncImports + funcDefIndex;
+
+ if (*funcTypeIndex >= env_.funcs.length()) {
+ return fail("callee index out of range");
+ }
+
+ const FuncType& funcType = *env_.funcs[*funcTypeIndex].type;
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readOldCallIndirect(uint32_t* funcTypeIndex,
+ Value* callee,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::OldCallIndirect);
+
+ if (!readVarU32(funcTypeIndex)) {
+ return fail("unable to read call_indirect signature index");
+ }
+
+ if (*funcTypeIndex >= env_.numTypes()) {
+ return fail("signature index out of range");
+ }
+
+ if (!env_.types.isFuncType(*funcTypeIndex)) {
+ return fail("expected signature type");
+ }
+
+ const FuncType& funcType = env_.types.funcType(*funcTypeIndex);
+
+ if (!popCallArgs(funcType.args(), argValues)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, callee)) {
+ return false;
+ }
+
+ return push(ResultType::Vector(funcType.results()));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readWake(LinearMemoryAddress<Value>* addr,
+ Value* count) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Wake);
+
+ if (!popWithType(ValType::I32, count)) {
+ return false;
+ }
+
+ uint32_t byteSize = 4; // Per spec; smallest WAIT is i32.
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readWait(LinearMemoryAddress<Value>* addr,
+ ValType valueType, uint32_t byteSize,
+ Value* value, Value* timeout) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Wait);
+
+ if (!popWithType(ValType::I64, timeout)) {
+ return false;
+ }
+
+ if (!popWithType(valueType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readFence() {
+ MOZ_ASSERT(Classify(op_) == OpKind::Fence);
+ uint8_t flags;
+ if (!readFixedU8(&flags)) {
+ return fail("expected memory order after fence");
+ }
+ if (flags != 0) {
+ return fail("non-zero memory order not supported yet");
+ }
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicLoad(LinearMemoryAddress<Value>* addr,
+ ValType resultType,
+ uint32_t byteSize) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicLoad);
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicStore(LinearMemoryAddress<Value>* addr,
+ ValType resultType,
+ uint32_t byteSize, Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicStore);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicRMW(LinearMemoryAddress<Value>* addr,
+ ValType resultType, uint32_t byteSize,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicBinOp);
+
+ if (!popWithType(resultType, value)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readAtomicCmpXchg(LinearMemoryAddress<Value>* addr,
+ ValType resultType,
+ uint32_t byteSize,
+ Value* oldValue,
+ Value* newValue) {
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicCompareExchange);
+
+ if (!popWithType(resultType, newValue)) {
+ return false;
+ }
+
+ if (!popWithType(resultType, oldValue)) {
+ return false;
+ }
+
+ if (!readLinearMemoryAddressAligned(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemOrTableCopy(bool isMem,
+ uint32_t* dstMemOrTableIndex,
+ Value* dst,
+ uint32_t* srcMemOrTableIndex,
+ Value* src, Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemOrTableCopy);
+ MOZ_ASSERT(dstMemOrTableIndex != srcMemOrTableIndex);
+
+ // Spec requires (dest, src) as of 2019-10-04.
+ if (!readMemOrTableIndex(isMem, dstMemOrTableIndex)) {
+ return false;
+ }
+ if (!readMemOrTableIndex(isMem, srcMemOrTableIndex)) {
+ return false;
+ }
+
+ if (isMem) {
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+ if (*srcMemOrTableIndex != 0 || *dstMemOrTableIndex != 0) {
+ return fail("memory index out of range for memory.copy");
+ }
+ } else {
+ if (*dstMemOrTableIndex >= env_.tables.length() ||
+ *srcMemOrTableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.copy");
+ }
+ ValType dstElemType = env_.tables[*dstMemOrTableIndex].elemType;
+ ValType srcElemType = env_.tables[*srcMemOrTableIndex].elemType;
+ if (!checkIsSubtypeOf(srcElemType, dstElemType)) {
+ return false;
+ }
+ }
+
+ if (!popWithType(ValType::I32, len)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, src)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, dst)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readDataOrElemDrop(bool isData,
+ uint32_t* segIndex) {
+ MOZ_ASSERT(Classify(op_) == OpKind::DataOrElemDrop);
+
+ if (!readVarU32(segIndex)) {
+ return fail("unable to read segment index");
+ }
+
+ if (isData) {
+ if (env_.dataCount.isNothing()) {
+ return fail("data.drop requires a DataCount section");
+ }
+ if (*segIndex >= *env_.dataCount) {
+ return fail("data.drop segment index out of range");
+ }
+ } else {
+ if (*segIndex >= env_.elemSegments.length()) {
+ return fail("element segment index out of range for elem.drop");
+ }
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemFill(Value* start, Value* val, Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemFill);
+
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+
+ uint8_t memoryIndex;
+ if (!readFixedU8(&memoryIndex)) {
+ return fail("failed to read memory index");
+ }
+ if (memoryIndex != 0) {
+ return fail("memory index must be zero");
+ }
+
+ if (!popWithType(ValType::I32, len)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, val)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, start)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readMemOrTableInit(bool isMem, uint32_t* segIndex,
+ uint32_t* dstTableIndex,
+ Value* dst, Value* src,
+ Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::MemOrTableInit);
+ MOZ_ASSERT(segIndex != dstTableIndex);
+
+ if (!popWithType(ValType::I32, len)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, src)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::I32, dst)) {
+ return false;
+ }
+
+ if (!readVarU32(segIndex)) {
+ return fail("unable to read segment index");
+ }
+
+ uint32_t memOrTableIndex = 0;
+ if (!readMemOrTableIndex(isMem, &memOrTableIndex)) {
+ return false;
+ }
+ if (isMem) {
+ if (!env_.usesMemory()) {
+ return fail("can't touch memory without memory");
+ }
+ if (memOrTableIndex != 0) {
+ return fail("memory index must be zero");
+ }
+ if (env_.dataCount.isNothing()) {
+ return fail("memory.init requires a DataCount section");
+ }
+ if (*segIndex >= *env_.dataCount) {
+ return fail("memory.init segment index out of range");
+ }
+ } else {
+ if (memOrTableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.init");
+ }
+ *dstTableIndex = memOrTableIndex;
+
+ if (*segIndex >= env_.elemSegments.length()) {
+ return fail("table.init segment index out of range");
+ }
+ if (!checkIsSubtypeOf(env_.elemSegments[*segIndex]->elemType,
+ env_.tables[*dstTableIndex].elemType)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableFill(uint32_t* tableIndex, Value* start,
+ Value* val, Value* len) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableFill);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.fill");
+ }
+
+ if (!popWithType(ValType::I32, len)) {
+ return false;
+ }
+ if (!popWithType(env_.tables[*tableIndex].elemType, val)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, start)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableGet(uint32_t* tableIndex, Value* index) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableGet);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.get");
+ }
+
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ infalliblePush(env_.tables[*tableIndex].elemType);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableGrow(uint32_t* tableIndex,
+ Value* initValue, Value* delta) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableGrow);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.grow");
+ }
+
+ if (!popWithType(ValType::I32, delta)) {
+ return false;
+ }
+ if (!popWithType(env_.tables[*tableIndex].elemType, initValue)) {
+ return false;
+ }
+
+ infalliblePush(ValType::I32);
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableSet(uint32_t* tableIndex, Value* index,
+ Value* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableSet);
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.set");
+ }
+
+ if (!popWithType(env_.tables[*tableIndex].elemType, value)) {
+ return false;
+ }
+ if (!popWithType(ValType::I32, index)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readTableSize(uint32_t* tableIndex) {
+ MOZ_ASSERT(Classify(op_) == OpKind::TableSize);
+
+ *tableIndex = 0;
+
+ if (!readVarU32(tableIndex)) {
+ return fail("unable to read table index");
+ }
+ if (*tableIndex >= env_.tables.length()) {
+ return fail("table index out of range for table.size");
+ }
+
+ return push(ValType::I32);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructTypeIndex(uint32_t* typeIndex) {
+ if (!readVarU32(typeIndex)) {
+ return fail("unable to read type index");
+ }
+
+ if (*typeIndex >= env_.types.length()) {
+ return fail("type index out of range");
+ }
+
+ if (!env_.types.isStructType(*typeIndex)) {
+ return fail("not a struct type");
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readFieldIndex(uint32_t* fieldIndex,
+ const StructType& structType) {
+ if (!readVarU32(fieldIndex)) {
+ return fail("unable to read field index");
+ }
+
+ if (structType.fields_.length() <= *fieldIndex) {
+ return fail("field index out of range");
+ }
+
+ return true;
+}
+
+// Semantics of struct.new, struct.get, struct.set, and struct.narrow are
+// documented (for now) at https://github.com/lars-t-hansen/moz-gc-experiments.
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructNew(uint32_t* typeIndex,
+ ValueVector* argValues) {
+ MOZ_ASSERT(Classify(op_) == OpKind::StructNew);
+
+ if (!readStructTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const StructType& str = env_.types.structType(*typeIndex);
+
+ if (!argValues->resize(str.fields_.length())) {
+ return false;
+ }
+
+ static_assert(MaxStructFields <= INT32_MAX, "Or we iloop below");
+
+ for (int32_t i = str.fields_.length() - 1; i >= 0; i--) {
+ if (!popWithType(str.fields_[i].type, &(*argValues)[i])) {
+ return false;
+ }
+ }
+
+ return push(RefType::fromTypeIndex(*typeIndex, false));
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructGet(uint32_t* typeIndex,
+ uint32_t* fieldIndex, Value* ptr) {
+ MOZ_ASSERT(typeIndex != fieldIndex);
+ MOZ_ASSERT(Classify(op_) == OpKind::StructGet);
+
+ if (!readStructTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const StructType& structType = env_.types.structType(*typeIndex);
+
+ if (!readFieldIndex(fieldIndex, structType)) {
+ return false;
+ }
+
+ if (!popWithType(RefType::fromTypeIndex(*typeIndex, true), ptr)) {
+ return false;
+ }
+
+ return push(structType.fields_[*fieldIndex].type);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructSet(uint32_t* typeIndex,
+ uint32_t* fieldIndex, Value* ptr,
+ Value* val) {
+ MOZ_ASSERT(typeIndex != fieldIndex);
+ MOZ_ASSERT(Classify(op_) == OpKind::StructSet);
+
+ if (!readStructTypeIndex(typeIndex)) {
+ return false;
+ }
+
+ const StructType& structType = env_.types.structType(*typeIndex);
+
+ if (!readFieldIndex(fieldIndex, structType)) {
+ return false;
+ }
+
+ if (!popWithType(structType.fields_[*fieldIndex].type, val)) {
+ return false;
+ }
+
+ if (!structType.fields_[*fieldIndex].isMutable) {
+ return fail("field is not mutable");
+ }
+
+ if (!popWithType(RefType::fromTypeIndex(*typeIndex, true), ptr)) {
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readStructNarrow(ValType* inputType,
+ ValType* outputType, Value* ptr) {
+ MOZ_ASSERT(inputType != outputType);
+ MOZ_ASSERT(Classify(op_) == OpKind::StructNarrow);
+
+ if (!readReferenceType(inputType, "struct.narrow")) {
+ return false;
+ }
+
+ if (!readReferenceType(outputType, "struct.narrow")) {
+ return false;
+ }
+
+ if (env_.types.isStructType(inputType->refType())) {
+ if (!env_.types.isStructType(outputType->refType())) {
+ return fail("invalid type combination in struct.narrow");
+ }
+
+ const StructType& inputStruct = env_.types.structType(inputType->refType());
+ const StructType& outputStruct =
+ env_.types.structType(outputType->refType());
+
+ if (!outputStruct.hasPrefix(inputStruct)) {
+ return fail("invalid narrowing operation");
+ }
+ } else if (outputType->isEqRef()) {
+ if (!inputType->isEqRef()) {
+ return fail("invalid type combination in struct.narrow");
+ }
+ }
+
+ if (!popWithType(*inputType, ptr)) {
+ return false;
+ }
+
+ return push(*outputType);
+}
+
+#ifdef ENABLE_WASM_SIMD
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLaneIndex(uint32_t inputLanes,
+ uint32_t* laneIndex) {
+ uint8_t tmp;
+ if (!readFixedU8(&tmp)) {
+ return false; // Caller signals error
+ }
+ if (tmp >= inputLanes) {
+ return false;
+ }
+ *laneIndex = tmp;
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readExtractLane(ValType resultType,
+ uint32_t inputLanes,
+ uint32_t* laneIndex, Value* input) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ExtractLane);
+
+ if (!readLaneIndex(inputLanes, laneIndex)) {
+ return fail("missing or invalid extract_lane lane index");
+ }
+
+ if (!popWithType(ValType::V128, input)) {
+ return false;
+ }
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readReplaceLane(ValType operandType,
+ uint32_t inputLanes,
+ uint32_t* laneIndex,
+ Value* baseValue, Value* operand) {
+ MOZ_ASSERT(Classify(op_) == OpKind::ReplaceLane);
+
+ if (!readLaneIndex(inputLanes, laneIndex)) {
+ return fail("missing or invalid replace_lane lane index");
+ }
+
+ if (!popWithType(operandType, operand)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, baseValue)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readVectorShift(Value* baseValue, Value* shift) {
+ MOZ_ASSERT(Classify(op_) == OpKind::VectorShift);
+
+ if (!popWithType(ValType::I32, shift)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, baseValue)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readVectorSelect(Value* v1, Value* v2,
+ Value* controlMask) {
+ MOZ_ASSERT(Classify(op_) == OpKind::VectorSelect);
+
+ if (!popWithType(ValType::V128, controlMask)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, v2)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, v1)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readVectorShuffle(Value* v1, Value* v2,
+ V128* selectMask) {
+ MOZ_ASSERT(Classify(op_) == OpKind::VectorShuffle);
+
+ for (unsigned i = 0; i < 16; i++) {
+ uint8_t tmp;
+ if (!readFixedU8(&tmp)) {
+ return fail("unable to read shuffle index");
+ }
+ if (tmp > 31) {
+ return fail("shuffle index out of range");
+ }
+ selectMask->bytes[i] = tmp;
+ }
+
+ if (!popWithType(ValType::V128, v2)) {
+ return false;
+ }
+
+ if (!popWithType(ValType::V128, v1)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readV128Const(V128* value) {
+ MOZ_ASSERT(Classify(op_) == OpKind::V128);
+
+ for (unsigned i = 0; i < 16; i++) {
+ if (!readFixedU8(&value->bytes[i])) {
+ return fail("unable to read V128 constant");
+ }
+ }
+
+ return push(ValType::V128);
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoadSplat(uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Load);
+
+ if (!readLinearMemoryAddress(byteSize, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool OpIter<Policy>::readLoadExtend(LinearMemoryAddress<Value>* addr) {
+ MOZ_ASSERT(Classify(op_) == OpKind::Load);
+
+ if (!readLinearMemoryAddress(/*byteSize=*/8, addr)) {
+ return false;
+ }
+
+ infalliblePush(ValType::V128);
+
+ return true;
+}
+
+#endif // ENABLE_WASM_SIMD
+
+} // namespace wasm
+} // namespace js
+
+namespace mozilla {
+
+// Specialize IsPod for the Nothing specializations.
+template <>
+struct IsPod<js::wasm::TypeAndValueT<Nothing>> : std::true_type {};
+template <>
+struct IsPod<js::wasm::ControlStackEntry<Nothing>> : std::true_type {};
+
+} // namespace mozilla
+
+#endif // wasm_op_iter_h
diff --git a/js/src/wasm/WasmProcess.cpp b/js/src/wasm/WasmProcess.cpp
new file mode 100644
index 0000000000..fcbd1d3418
--- /dev/null
+++ b/js/src/wasm/WasmProcess.cpp
@@ -0,0 +1,407 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmProcess.h"
+
+#include "mozilla/BinarySearch.h"
+#include "mozilla/ScopeExit.h"
+
+#include "gc/Memory.h"
+#include "threading/ExclusiveData.h"
+#include "vm/MutexIDs.h"
+#ifdef ENABLE_WASM_CRANELIFT
+# include "wasm/cranelift/clifapi.h"
+#endif
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmInstance.h"
+
+using namespace js;
+using namespace wasm;
+
+using mozilla::BinarySearchIf;
+
+// Per-process map from values of program-counter (pc) to CodeSegments.
+//
+// Whenever a new CodeSegment is ready to use, it has to be registered so that
+// we can have fast lookups from pc to CodeSegments in numerous places. Since
+// wasm compilation may be tiered, and the second tier doesn't have access to
+// any JSContext/JS::Compartment/etc lying around, we have to use a process-wide
+// map instead.
+
+typedef Vector<const CodeSegment*, 0, SystemAllocPolicy> CodeSegmentVector;
+
+Atomic<bool> wasm::CodeExists(false);
+
+// Because of profiling, the thread running wasm might need to know to which
+// CodeSegment the current PC belongs, during a call to lookup(). A lookup
+// is a read-only operation, and we don't want to take a lock then
+// (otherwise, we could have a deadlock situation if an async lookup
+// happened on a given thread that was holding mutatorsMutex_ while getting
+// sampled). Since the writer could be modifying the data that is getting
+// looked up, the writer functions spin-wait until there are no observers
+// (i.e. in-flight calls to lookup()) of that data before mutating it.
+
+static Atomic<size_t> sNumActiveLookups(0);
+
+class ProcessCodeSegmentMap {
+ // Since writes (insertions or removals) can happen on any background
+ // thread at the same time, we need a lock here.
+
+ Mutex mutatorsMutex_;
+
+ CodeSegmentVector segments1_;
+ CodeSegmentVector segments2_;
+
+ // Except during swapAndWait(), there are no lookup() observers of the
+  // vector pointed to by mutableCodeSegments_.
+
+ CodeSegmentVector* mutableCodeSegments_;
+ Atomic<const CodeSegmentVector*> readonlyCodeSegments_;
+
+ struct CodeSegmentPC {
+ const void* pc;
+ explicit CodeSegmentPC(const void* pc) : pc(pc) {}
+ int operator()(const CodeSegment* cs) const {
+ if (cs->containsCodePC(pc)) {
+ return 0;
+ }
+ if (pc < cs->base()) {
+ return -1;
+ }
+ return 1;
+ }
+ };
+
+ void swapAndWait() {
+ // Both vectors are consistent for lookup at this point although their
+ // contents are different: there is no way for the looked up PC to be
+ // in the code segment that is getting registered, because the code
+ // segment is not even fully created yet.
+
+ // If a lookup happens before this instruction, then the
+ // soon-to-become-former read-only pointer is used during the lookup,
+ // which is valid.
+
+ mutableCodeSegments_ = const_cast<CodeSegmentVector*>(
+ readonlyCodeSegments_.exchange(mutableCodeSegments_));
+
+ // If a lookup happens after this instruction, then the updated vector
+ // is used, which is valid:
+ // - in case of insertion, it means the new vector contains more data,
+ // but it's fine since the code segment is getting registered and thus
+ // isn't even fully created yet, so the code can't be running.
+ // - in case of removal, it means the new vector contains one less
+ // entry, but it's fine since unregistering means the code segment
+ // isn't used by any live instance anymore, thus PC can't be in the
+ // to-be-removed code segment's range.
+
+ // A lookup could have happened on any of the two vectors. Wait for
+ // observers to be done using any vector before mutating.
+
+ while (sNumActiveLookups > 0) {
+ }
+ }
+
+ public:
+ ProcessCodeSegmentMap()
+ : mutatorsMutex_(mutexid::WasmCodeSegmentMap),
+ mutableCodeSegments_(&segments1_),
+ readonlyCodeSegments_(&segments2_) {}
+
+ ~ProcessCodeSegmentMap() {
+ MOZ_RELEASE_ASSERT(sNumActiveLookups == 0);
+ MOZ_ASSERT(segments1_.empty());
+ MOZ_ASSERT(segments2_.empty());
+ segments1_.clearAndFree();
+ segments2_.clearAndFree();
+ }
+
+ bool insert(const CodeSegment* cs) {
+ LockGuard<Mutex> lock(mutatorsMutex_);
+
+ size_t index;
+ MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &index));
+
+ if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
+ cs)) {
+ return false;
+ }
+
+ CodeExists = true;
+
+ swapAndWait();
+
+#ifdef DEBUG
+ size_t otherIndex;
+ MOZ_ALWAYS_FALSE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &otherIndex));
+ MOZ_ASSERT(index == otherIndex);
+#endif
+
+ // Although we could simply revert the insertion in the read-only
+    // vector, it is simpler to just crash; given that each CodeSegment
+    // consumes multiple pages, it is unlikely this insert() would OOM in
+    // practice.
+ AutoEnterOOMUnsafeRegion oom;
+ if (!mutableCodeSegments_->insert(mutableCodeSegments_->begin() + index,
+ cs)) {
+ oom.crash("when inserting a CodeSegment in the process-wide map");
+ }
+
+ return true;
+ }
+
+ void remove(const CodeSegment* cs) {
+ LockGuard<Mutex> lock(mutatorsMutex_);
+
+ size_t index;
+ MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &index));
+
+ mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);
+
+ if (!mutableCodeSegments_->length()) {
+ CodeExists = false;
+ }
+
+ swapAndWait();
+
+#ifdef DEBUG
+ size_t otherIndex;
+ MOZ_ALWAYS_TRUE(BinarySearchIf(*mutableCodeSegments_, 0,
+ mutableCodeSegments_->length(),
+ CodeSegmentPC(cs->base()), &otherIndex));
+ MOZ_ASSERT(index == otherIndex);
+#endif
+
+ mutableCodeSegments_->erase(mutableCodeSegments_->begin() + index);
+ }
+
+ const CodeSegment* lookup(const void* pc) {
+ const CodeSegmentVector* readonly = readonlyCodeSegments_;
+
+ size_t index;
+ if (!BinarySearchIf(*readonly, 0, readonly->length(), CodeSegmentPC(pc),
+ &index)) {
+ return nullptr;
+ }
+
+ // It is fine returning a raw CodeSegment*, because we assume we are
+ // looking up a live PC in code which is on the stack, keeping the
+ // CodeSegment alive.
+
+ return (*readonly)[index];
+ }
+};
+
+// This field is only atomic to handle buggy scenarios where we crash during
+// startup or shutdown and thus racily perform wasm::LookupCodeSegment() from
+// the crashing thread.
+
+static Atomic<ProcessCodeSegmentMap*> sProcessCodeSegmentMap(nullptr);
+
+bool wasm::RegisterCodeSegment(const CodeSegment* cs) {
+ MOZ_ASSERT(cs->codeTier().code().initialized());
+
+ // This function cannot race with startup/shutdown.
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ MOZ_RELEASE_ASSERT(map);
+ return map->insert(cs);
+}
+
+void wasm::UnregisterCodeSegment(const CodeSegment* cs) {
+ // This function cannot race with startup/shutdown.
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ MOZ_RELEASE_ASSERT(map);
+ map->remove(cs);
+}
+
+const CodeSegment* wasm::LookupCodeSegment(
+ const void* pc, const CodeRange** codeRange /*= nullptr */) {
+ // Since wasm::LookupCodeSegment() can race with wasm::ShutDown(), we must
+ // additionally keep sNumActiveLookups above zero for the duration we're
+ // using the ProcessCodeSegmentMap. wasm::ShutDown() spin-waits on
+ // sNumActiveLookups getting to zero.
+
+ auto decObserver = mozilla::MakeScopeExit([&] {
+ MOZ_ASSERT(sNumActiveLookups > 0);
+ sNumActiveLookups--;
+ });
+ sNumActiveLookups++;
+
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ if (!map) {
+ return nullptr;
+ }
+
+ if (const CodeSegment* found = map->lookup(pc)) {
+ if (codeRange) {
+ *codeRange = found->isModule() ? found->asModule()->lookupRange(pc)
+ : found->asLazyStub()->lookupRange(pc);
+ }
+ return found;
+ }
+
+ if (codeRange) {
+ *codeRange = nullptr;
+ }
+
+ return nullptr;
+}
+
+const Code* wasm::LookupCode(const void* pc,
+ const CodeRange** codeRange /* = nullptr */) {
+ const CodeSegment* found = LookupCodeSegment(pc, codeRange);
+ MOZ_ASSERT_IF(!found && codeRange, !*codeRange);
+ return found ? &found->code() : nullptr;
+}
+
+bool wasm::InCompiledCode(void* pc) {
+ if (LookupCodeSegment(pc)) {
+ return true;
+ }
+
+ const CodeRange* codeRange;
+ uint8_t* codeBase;
+ return LookupBuiltinThunk(pc, &codeRange, &codeBase);
+}
+
+/**
+ * ReadLockFlag maintains a flag that can be mutated multiple times before it
+ * is read, at which point it maintains the same value.
+ */
+class ReadLockFlag {
+ private:
+ bool enabled_;
+ bool read_;
+
+ public:
+ ReadLockFlag() : enabled_(false), read_(false) {}
+
+ bool get() {
+ read_ = true;
+ return enabled_;
+ }
+
+ bool set(bool enabled) {
+ if (read_) {
+ return false;
+ }
+ enabled_ = enabled;
+ return true;
+ }
+};
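+
+// A minimal usage sketch (illustrative only):
+//
+//   ReadLockFlag flag;
+//   MOZ_ALWAYS_TRUE(flag.set(true));    // mutation allowed before any read
+//   bool enabled = flag.get();          // the first read latches the value
+//   MOZ_ALWAYS_FALSE(flag.set(false));  // later mutations are rejected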
+
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+/*
+ * Some 64 bit systems greatly limit the range of available virtual memory. We
+ * require about 6GiB for each wasm huge memory, which can exhaust the address
+ * spaces of these systems quickly. In order to avoid this, we only enable huge
+ * memory if we observe a large enough address space.
+ *
+ * This number is conservatively chosen to continue using huge memory on our
+ * smallest address space system, Android on ARM64 (39 bits), along with a bit
+ * for error in detecting the address space limit.
+ */
+static const size_t MinAddressBitsForHugeMemory = 38;
+
+/*
+ * In addition to the above, some systems impose an independent limit on the
+ * amount of virtual memory that may be used.
+ */
+static const size_t MinVirtualMemoryLimitForHugeMemory =
+ size_t(1) << MinAddressBitsForHugeMemory;
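+
+// Worked out (illustrative): 2^38 bytes is 256 GiB of virtual address space,
+// enough for many ~6 GiB huge-memory reservations, while the 39-bit
+// Android/ARM64 address space (512 GiB) still clears this threshold with a
+// margin for detection error.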
+#endif
+
+ExclusiveData<ReadLockFlag> sHugeMemoryEnabled(mutexid::WasmHugeMemoryEnabled);
+
+static bool IsHugeMemoryEnabledHelper() {
+ auto state = sHugeMemoryEnabled.lock();
+ return state->get();
+}
+
+bool wasm::IsHugeMemoryEnabled() {
+ static bool enabled = IsHugeMemoryEnabledHelper();
+ return enabled;
+}
+
+bool wasm::DisableHugeMemory() {
+ auto state = sHugeMemoryEnabled.lock();
+ return state->set(false);
+}
+
+void ConfigureHugeMemory() {
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+ if (gc::SystemAddressBits() < MinAddressBitsForHugeMemory) {
+ return;
+ }
+
+ if (gc::VirtualMemoryLimit() != size_t(-1) &&
+ gc::VirtualMemoryLimit() < MinVirtualMemoryLimitForHugeMemory) {
+ return;
+ }
+
+ auto state = sHugeMemoryEnabled.lock();
+ bool set = state->set(true);
+ MOZ_RELEASE_ASSERT(set);
+#endif
+}
+
+bool wasm::Init() {
+ MOZ_RELEASE_ASSERT(!sProcessCodeSegmentMap);
+
+ ConfigureHugeMemory();
+
+#ifdef ENABLE_WASM_CRANELIFT
+ cranelift_initialize();
+#endif
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ ProcessCodeSegmentMap* map = js_new<ProcessCodeSegmentMap>();
+ if (!map) {
+ oomUnsafe.crash("js::wasm::Init");
+ }
+
+ sProcessCodeSegmentMap = map;
+ return true;
+}
+
+void wasm::ShutDown() {
+ // If there are live runtimes then we are already pretty much leaking the
+ // world, so to avoid spurious assertions (which are valid and valuable when
+  // there are no live JSRuntimes), don't bother releasing anything here.
+ if (JSRuntime::hasLiveRuntimes()) {
+ return;
+ }
+
+ // After signalling shutdown by clearing sProcessCodeSegmentMap, wait for
+ // concurrent wasm::LookupCodeSegment()s to finish.
+ ProcessCodeSegmentMap* map = sProcessCodeSegmentMap;
+ MOZ_RELEASE_ASSERT(map);
+ sProcessCodeSegmentMap = nullptr;
+ while (sNumActiveLookups > 0) {
+ }
+
+ ReleaseBuiltinThunks();
+ js_delete(map);
+}
diff --git a/js/src/wasm/WasmProcess.h b/js/src/wasm/WasmProcess.h
new file mode 100644
index 0000000000..6fb7f48afa
--- /dev/null
+++ b/js/src/wasm/WasmProcess.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2017 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_process_h
+#define wasm_process_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+
+namespace js {
+namespace wasm {
+
+class Code;
+class CodeRange;
+class CodeSegment;
+
+// These methods return the wasm::CodeSegment (resp. wasm::Code) containing
+// the given pc, if any exist in the process. These methods do not take a lock,
+// and thus are safe to use in a profiling context.
+
+const CodeSegment* LookupCodeSegment(const void* pc,
+ const CodeRange** codeRange = nullptr);
+
+const Code* LookupCode(const void* pc, const CodeRange** codeRange = nullptr);
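+
+// A usage sketch (illustrative; local names are hypothetical):
+//
+//   const CodeRange* range = nullptr;
+//   if (const Code* code = LookupCode(samplePC, &range)) {
+//     // samplePC lies inside registered wasm code; when non-null, range
+//     // identifies the function or stub containing it.
+//   }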
+
+// Return whether the given PC is in any type of wasm code (module or builtin).
+
+bool InCompiledCode(void* pc);
+
+// A process-wide atomic bool that can be consulted very cheaply to know
+// whether any wasm code segment exists at all.
+
+extern mozilla::Atomic<bool> CodeExists;
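+
+// For example (an illustrative sketch, not actual handler code), a fault
+// handler can bail out early when no wasm code exists:
+//
+//   if (!CodeExists) {
+//     return false;  // the faulting pc cannot be in wasm code
+//   }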
+
+// These methods register and unregister CodeSegments so they can be looked up
+// via pc by the methods described above.
+
+bool RegisterCodeSegment(const CodeSegment* cs);
+
+void UnregisterCodeSegment(const CodeSegment* cs);
+
+// Whether this process is configured to use huge memory or not.
+
+bool IsHugeMemoryEnabled();
+
+[[nodiscard]] bool DisableHugeMemory();
+
+// Called once before the first and once after the last VM execution which
+// could execute or compile wasm.
+
+bool Init();
+
+void ShutDown();
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_process_h
diff --git a/js/src/wasm/WasmRealm.cpp b/js/src/wasm/WasmRealm.cpp
new file mode 100644
index 0000000000..c2ced6cf42
--- /dev/null
+++ b/js/src/wasm/WasmRealm.cpp
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmRealm.h"
+
+#include "vm/Realm.h"
+#include "wasm/WasmInstance.h"
+
+#include "debugger/DebugAPI-inl.h"
+
+using namespace js;
+using namespace wasm;
+
+wasm::Realm::Realm(JSRuntime* rt) : runtime_(rt) {}
+
+wasm::Realm::~Realm() { MOZ_ASSERT(instances_.empty()); }
+
+struct InstanceComparator {
+ const Instance& target;
+ explicit InstanceComparator(const Instance& target) : target(target) {}
+
+ int operator()(const Instance* instance) const {
+ if (instance == &target) {
+ return 0;
+ }
+
+ // Instances can share code, so the segments can be equal (though they
+ // can't partially overlap). If the codeBases are equal, we sort by
+ // Instance address. Thus a Code may map to many instances.
+
+ // Compare by the first tier, always.
+
+ Tier instanceTier = instance->code().stableTier();
+ Tier targetTier = target.code().stableTier();
+
+ if (instance->codeBase(instanceTier) == target.codeBase(targetTier)) {
+ return instance < &target ? -1 : 1;
+ }
+
+ return target.codeBase(targetTier) < instance->codeBase(instanceTier) ? -1
+ : 1;
+ }
+};
+
+bool wasm::Realm::registerInstance(JSContext* cx,
+ HandleWasmInstanceObject instanceObj) {
+ MOZ_ASSERT(runtime_ == cx->runtime());
+
+ Instance& instance = instanceObj->instance();
+ MOZ_ASSERT(this == &instance.realm()->wasm);
+
+ instance.ensureProfilingLabels(cx->runtime()->geckoProfiler().enabled());
+
+ if (instance.debugEnabled() &&
+ instance.realm()->debuggerObservesAllExecution()) {
+ instance.debug().ensureEnterFrameTrapsState(cx, true);
+ }
+
+ {
+ if (!instances_.reserve(instances_.length() + 1)) {
+ return false;
+ }
+
+ auto runtimeInstances = cx->runtime()->wasmInstances.lock();
+ if (!runtimeInstances->reserve(runtimeInstances->length() + 1)) {
+ return false;
+ }
+
+ // To avoid implementing rollback, do not fail after mutations start.
+
+ InstanceComparator cmp(instance);
+ size_t index;
+
+ MOZ_ALWAYS_FALSE(
+ BinarySearchIf(instances_, 0, instances_.length(), cmp, &index));
+ MOZ_ALWAYS_TRUE(instances_.insert(instances_.begin() + index, &instance));
+
+ MOZ_ALWAYS_FALSE(BinarySearchIf(runtimeInstances.get(), 0,
+ runtimeInstances->length(), cmp, &index));
+ MOZ_ALWAYS_TRUE(
+ runtimeInstances->insert(runtimeInstances->begin() + index, &instance));
+ }
+
+ // Notify the debugger after wasmInstances is unlocked.
+ DebugAPI::onNewWasmInstance(cx, instanceObj);
+ return true;
+}
+
+void wasm::Realm::unregisterInstance(Instance& instance) {
+ InstanceComparator cmp(instance);
+ size_t index;
+
+ if (BinarySearchIf(instances_, 0, instances_.length(), cmp, &index)) {
+ instances_.erase(instances_.begin() + index);
+ }
+
+ auto runtimeInstances = runtime_->wasmInstances.lock();
+ if (BinarySearchIf(runtimeInstances.get(), 0, runtimeInstances->length(), cmp,
+ &index)) {
+ runtimeInstances->erase(runtimeInstances->begin() + index);
+ }
+}
+
+void wasm::Realm::ensureProfilingLabels(bool profilingEnabled) {
+ for (Instance* instance : instances_) {
+ instance->ensureProfilingLabels(profilingEnabled);
+ }
+}
+
+void wasm::Realm::addSizeOfExcludingThis(MallocSizeOf mallocSizeOf,
+ size_t* realmTables) {
+ *realmTables += instances_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+void wasm::InterruptRunningCode(JSContext* cx) {
+ auto runtimeInstances = cx->runtime()->wasmInstances.lock();
+ for (Instance* instance : runtimeInstances.get()) {
+ instance->tlsData()->setInterrupt();
+ }
+}
+
+void wasm::ResetInterruptState(JSContext* cx) {
+ auto runtimeInstances = cx->runtime()->wasmInstances.lock();
+ for (Instance* instance : runtimeInstances.get()) {
+ instance->tlsData()->resetInterrupt(cx);
+ }
+}
diff --git a/js/src/wasm/WasmRealm.h b/js/src/wasm/WasmRealm.h
new file mode 100644
index 0000000000..0d8649535f
--- /dev/null
+++ b/js/src/wasm/WasmRealm.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_realm_h
+#define wasm_realm_h
+
+#include "wasm/WasmJS.h"
+
+namespace js {
+namespace wasm {
+
+// wasm::Realm lives in JS::Realm and contains the wasm-related per-realm state.
+// wasm::Realm tracks every live instance in the realm and must be notified, via
+// registerInstance(), of any new WasmInstanceObject.
+
+class Realm {
+ JSRuntime* runtime_;
+ InstanceVector instances_;
+
+ public:
+ explicit Realm(JSRuntime* rt);
+ ~Realm();
+
+ // Before a WasmInstanceObject can be considered fully constructed and
+ // valid, it must be registered with the Realm. If this method fails,
+ // an error has been reported and the instance object must be abandoned.
+ // After a successful registration, an Instance must call
+ // unregisterInstance() before being destroyed.
+
+ bool registerInstance(JSContext* cx, HandleWasmInstanceObject instanceObj);
+ void unregisterInstance(Instance& instance);
+
+ // Return a vector of all live instances in the realm. The lifetime of
+ // these Instances is determined by their owning WasmInstanceObject.
+ // Note that accessing instances()[i]->object() triggers a read barrier
+ // since instances() is effectively a weak list.
+
+ const InstanceVector& instances() const { return instances_; }
+
+ // Ensure all Instances in this Realm have profiling labels created.
+
+ void ensureProfilingLabels(bool profilingEnabled);
+
+ // about:memory reporting
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf,
+ size_t* realmTables);
+};
+
+// Interrupt all running wasm Instances that have been registered with
+// wasm::Realms in the given JSContext.
+
+extern void InterruptRunningCode(JSContext* cx);
+
+// After a wasm Instance sees an interrupt request and calls
+// CheckForInterrupt(), it should call ResetInterruptState() to clear the
+// interrupt request for all wasm Instances to avoid spurious trapping.
+
+void ResetInterruptState(JSContext* cx);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_realm_h
diff --git a/js/src/wasm/WasmSerialize.h b/js/src/wasm/WasmSerialize.h
new file mode 100644
index 0000000000..5c86f617a0
--- /dev/null
+++ b/js/src/wasm/WasmSerialize.h
@@ -0,0 +1,198 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_serialize_h
+#define wasm_serialize_h
+
+#include <type_traits>
+
+#include "js/Vector.h"
+
+namespace js {
+namespace wasm {
+
+// Factor out common serialization, cloning and about:memory size-computation
+// functions for reuse when serializing wasm and asm.js modules.
+
+static inline uint8_t* WriteBytes(uint8_t* dst, const void* src,
+ size_t nbytes) {
+ if (nbytes) {
+ memcpy(dst, src, nbytes);
+ }
+ return dst + nbytes;
+}
+
+static inline const uint8_t* ReadBytes(const uint8_t* src, void* dst,
+ size_t nbytes) {
+ if (nbytes) {
+ memcpy(dst, src, nbytes);
+ }
+ return src + nbytes;
+}
+
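+// The *Checked variants take a running count of the bytes remaining in the
+// input and return nullptr instead of reading past the end.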
+static inline const uint8_t* ReadBytesChecked(const uint8_t* src,
+ size_t* remain, void* dst,
+ size_t nbytes) {
+ if (*remain < nbytes) {
+ return nullptr;
+ }
+ memcpy(dst, src, nbytes);
+ *remain -= nbytes;
+ return src + nbytes;
+}
+
+template <class T>
+static inline uint8_t* WriteScalar(uint8_t* dst, T t) {
+ memcpy(dst, &t, sizeof(t));
+ return dst + sizeof(t);
+}
+
+template <class T>
+static inline const uint8_t* ReadScalar(const uint8_t* src, T* dst) {
+ memcpy(dst, src, sizeof(*dst));
+ return src + sizeof(*dst);
+}
+
+template <class T>
+static inline const uint8_t* ReadScalarChecked(const uint8_t* src,
+ size_t* remain, T* dst) {
+ if (*remain < sizeof(*dst)) {
+ return nullptr;
+ }
+ memcpy(dst, src, sizeof(*dst));
+ *remain -= sizeof(*dst);
+ return src + sizeof(*dst);
+}
+
+template <class T, size_t N>
+static inline size_t SerializedVectorSize(
+ const mozilla::Vector<T, N, SystemAllocPolicy>& vec) {
+ size_t size = sizeof(uint32_t);
+ for (size_t i = 0; i < vec.length(); i++) {
+ size += vec[i].serializedSize();
+ }
+ return size;
+}
+
+template <class T, size_t N>
+static inline uint8_t* SerializeVector(
+ uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec) {
+ cursor = WriteScalar<uint32_t>(cursor, vec.length());
+ for (size_t i = 0; i < vec.length(); i++) {
+ cursor = vec[i].serialize(cursor);
+ }
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t* DeserializeVector(
+ const uint8_t* cursor, mozilla::Vector<T, N, SystemAllocPolicy>* vec) {
+ uint32_t length;
+ cursor = ReadScalar<uint32_t>(cursor, &length);
+ if (!vec->resize(length)) {
+ return nullptr;
+ }
+ for (size_t i = 0; i < vec->length(); i++) {
+ if (!(cursor = (*vec)[i].deserialize(cursor))) {
+ return nullptr;
+ }
+ }
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline size_t SizeOfVectorExcludingThis(
+ const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
+ MallocSizeOf mallocSizeOf) {
+ size_t size = vec.sizeOfExcludingThis(mallocSizeOf);
+ for (const T& t : vec) {
+ size += t.sizeOfExcludingThis(mallocSizeOf);
+ }
+ return size;
+}
+
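+// The *PodVector variants copy the elements as raw bytes (a length prefix
+// followed by a memcpy of the payload), so they are only suitable for
+// trivially-copyable element types.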
+template <class T, size_t N>
+static inline size_t SerializedPodVectorSize(
+ const mozilla::Vector<T, N, SystemAllocPolicy>& vec) {
+ return sizeof(uint32_t) + vec.length() * sizeof(T);
+}
+
+template <class T, size_t N>
+static inline uint8_t* SerializePodVector(
+ uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec) {
+ // This binary format must not change without taking into consideration the
+ // constraints in Assumptions::serialize.
+
+ cursor = WriteScalar<uint32_t>(cursor, vec.length());
+ cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T));
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t* DeserializePodVector(
+ const uint8_t* cursor, mozilla::Vector<T, N, SystemAllocPolicy>* vec) {
+ uint32_t length;
+ cursor = ReadScalar<uint32_t>(cursor, &length);
+ if (!vec->initLengthUninitialized(length)) {
+ return nullptr;
+ }
+ cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t* DeserializePodVectorChecked(
+ const uint8_t* cursor, size_t* remain,
+ mozilla::Vector<T, N, SystemAllocPolicy>* vec) {
+ uint32_t length;
+ cursor = ReadScalarChecked<uint32_t>(cursor, remain, &length);
+ if (!cursor || !vec->initLengthUninitialized(length)) {
+ return nullptr;
+ }
+ cursor = ReadBytesChecked(cursor, remain, vec->begin(), length * sizeof(T));
+ return cursor;
+}
+
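+// SerializableRefPtr<T> (declared elsewhere) simply forwards serialization,
+// deserialization and size computation to the pointee; deserialize()
+// allocates a fresh T to deserialize into.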
+template <class T>
+inline size_t SerializableRefPtr<T>::serializedSize() const {
+ return (*this)->serializedSize();
+}
+
+template <class T>
+inline uint8_t* SerializableRefPtr<T>::serialize(uint8_t* cursor) const {
+ return (*this)->serialize(cursor);
+}
+
+template <class T>
+inline const uint8_t* SerializableRefPtr<T>::deserialize(
+ const uint8_t* cursor) {
+ auto* t = js_new<std::remove_const_t<T>>();
+ *this = t;
+ return t->deserialize(cursor);
+}
+
+template <class T>
+inline size_t SerializableRefPtr<T>::sizeOfExcludingThis(
+ mozilla::MallocSizeOf mallocSizeOf) const {
+ return (*this)->sizeOfExcludingThis(mallocSizeOf);
+}
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_serialize_h
diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp
new file mode 100644
index 0000000000..6ab1a0c717
--- /dev/null
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -0,0 +1,1221 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmSignalHandlers.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/ThreadLocal.h"
+
+#include "threading/Thread.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/Realm.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+
+#if defined(XP_WIN)
+# include <winternl.h> // must include before util/Windows.h's `#undef`s
+# include "util/Windows.h"
+#elif defined(XP_DARWIN)
+# include <mach/exc.h>
+# include <mach/mach.h>
+#else
+# include <signal.h>
+#endif
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+
+// =============================================================================
+// This following pile of macros and includes defines the ToRegisterState() and
+// the ContextTo{PC,FP,SP,LR}() functions from the (highly) platform-specific
+// CONTEXT struct which is provided to the signal handler.
+// =============================================================================
+
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# include <sys/ucontext.h> // for ucontext_t, mcontext_t
+#endif
+
+#if defined(__x86_64__)
+# if defined(__DragonFly__)
+# include <machine/npx.h> // for union savefpu
+# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
+# include <machine/fpu.h> // for struct savefpu/fxsave64
+# endif
+#endif
+
+#if defined(XP_WIN)
+# define EIP_sig(p) ((p)->Eip)
+# define EBP_sig(p) ((p)->Ebp)
+# define ESP_sig(p) ((p)->Esp)
+# define RIP_sig(p) ((p)->Rip)
+# define RSP_sig(p) ((p)->Rsp)
+# define RBP_sig(p) ((p)->Rbp)
+# define R11_sig(p) ((p)->R11)
+# define R13_sig(p) ((p)->R13)
+# define R14_sig(p) ((p)->R14)
+# define R15_sig(p) ((p)->R15)
+# define EPC_sig(p) ((p)->Pc)
+# define RFP_sig(p) ((p)->Fp)
+# define R31_sig(p) ((p)->Sp)
+# define RLR_sig(p) ((p)->Lr)
+#elif defined(__OpenBSD__)
+# define EIP_sig(p) ((p)->sc_eip)
+# define EBP_sig(p) ((p)->sc_ebp)
+# define ESP_sig(p) ((p)->sc_esp)
+# define RIP_sig(p) ((p)->sc_rip)
+# define RSP_sig(p) ((p)->sc_rsp)
+# define RBP_sig(p) ((p)->sc_rbp)
+# define R11_sig(p) ((p)->sc_r11)
+# if defined(__arm__)
+# define R13_sig(p) ((p)->sc_usr_sp)
+# define R14_sig(p) ((p)->sc_usr_lr)
+# define R15_sig(p) ((p)->sc_pc)
+# else
+# define R13_sig(p) ((p)->sc_r13)
+# define R14_sig(p) ((p)->sc_r14)
+# define R15_sig(p) ((p)->sc_r15)
+# endif
+# if defined(__aarch64__)
+# define EPC_sig(p) ((p)->sc_elr)
+# define RFP_sig(p) ((p)->sc_x[29])
+# define RLR_sig(p) ((p)->sc_lr)
+# define R31_sig(p) ((p)->sc_sp)
+# endif
+# if defined(__mips__)
+# define EPC_sig(p) ((p)->sc_pc)
+# define RFP_sig(p) ((p)->sc_regs[30])
+# endif
+# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define R01_sig(p) ((p)->sc_frame.fixreg[1])
+# define R32_sig(p) ((p)->sc_frame.srr0)
+# endif
+#elif defined(__linux__) || defined(__sun)
+# if defined(__linux__)
+# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
+# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
+# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
+# else
+# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
+# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
+# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
+# endif
+# define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
+# define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
+# define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
+# if defined(__linux__) && defined(__arm__)
+# define R11_sig(p) ((p)->uc_mcontext.arm_fp)
+# define R13_sig(p) ((p)->uc_mcontext.arm_sp)
+# define R14_sig(p) ((p)->uc_mcontext.arm_lr)
+# define R15_sig(p) ((p)->uc_mcontext.arm_pc)
+# else
+# define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
+# define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
+# define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
+# endif
+# if defined(__linux__) && defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.pc)
+# define RFP_sig(p) ((p)->uc_mcontext.regs[29])
+# define RLR_sig(p) ((p)->uc_mcontext.regs[30])
+# define R31_sig(p) ((p)->uc_mcontext.sp)
+# endif
+# if defined(__linux__) && defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.pc)
+# define RFP_sig(p) ((p)->uc_mcontext.gregs[30])
+# define RSP_sig(p) ((p)->uc_mcontext.gregs[29])
+# define R31_sig(p) ((p)->uc_mcontext.gregs[31])
+# endif
+# if defined(__linux__) && (defined(__sparc__) && defined(__arch64__))
+# define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC])
+# define FP_sig(p) ((p)->uc_mcontext.mc_fp)
+# define SP_sig(p) ((p)->uc_mcontext.mc_i7)
+# endif
+# if defined(__linux__) && (defined(__ppc64__) || defined(__PPC64__) || \
+ defined(__ppc64le__) || defined(__PPC64LE__))
+# define R01_sig(p) ((p)->uc_mcontext.gp_regs[1])
+# define R32_sig(p) ((p)->uc_mcontext.gp_regs[32])
+# endif
+#elif defined(__NetBSD__)
+# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
+# define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
+# define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
+# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
+# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
+# define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
+# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
+# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
+# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
+# if defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
+# define RLR_sig(p) ((p)->uc_mcontext.__gregs[_REG_X30])
+# define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_SP])
+# endif
+# if defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
+# endif
+# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define R01_sig(p) ((p)->uc_mcontext.__gregs[_REG_R1])
+# define R32_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
+# endif
+#elif defined(__DragonFly__) || defined(__FreeBSD__) || \
+ defined(__FreeBSD_kernel__)
+# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
+# define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
+# define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
+# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
+# define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
+# define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
+# if defined(__FreeBSD__) && defined(__arm__)
+# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
+# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
+# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
+# else
+# define R11_sig(p) ((p)->uc_mcontext.mc_r11)
+# define R13_sig(p) ((p)->uc_mcontext.mc_r13)
+# define R14_sig(p) ((p)->uc_mcontext.mc_r14)
+# define R15_sig(p) ((p)->uc_mcontext.mc_r15)
+# endif
+# if defined(__FreeBSD__) && defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_elr)
+# define RFP_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_x[29])
+# define RLR_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_lr)
+# define R31_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_sp)
+# endif
+# if defined(__FreeBSD__) && defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
+# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
+# endif
+# if defined(__FreeBSD__) && (defined(__ppc64__) || defined(__PPC64__) || \
+ defined(__ppc64le__) || defined(__PPC64LE__))
+# define R01_sig(p) ((p)->uc_mcontext.mc_gpr[1])
+# define R32_sig(p) ((p)->uc_mcontext.mc_srr0)
+# endif
+#elif defined(XP_DARWIN)
+# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
+# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
+# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
+# define RIP_sig(p) ((p)->thread.__rip)
+# define RBP_sig(p) ((p)->thread.__rbp)
+# define RSP_sig(p) ((p)->thread.__rsp)
+# define R11_sig(p) ((p)->thread.__r[11])
+# define R13_sig(p) ((p)->thread.__sp)
+# define R14_sig(p) ((p)->thread.__lr)
+# define R15_sig(p) ((p)->thread.__pc)
+# define EPC_sig(p) ((p)->thread.__pc)
+# define RFP_sig(p) ((p)->thread.__fp)
+# define R31_sig(p) ((p)->thread.__sp)
+# define RLR_sig(p) ((p)->thread.__lr)
+#else
+# error "Don't know how to read/write to the thread state via the mcontext_t."
+#endif
+
+// On ARM Linux, including Android, unaligned FP accesses that were not flagged
+// as unaligned will tend to trap (with SIGBUS) and will need to be emulated.
+//
+// We can only perform this emulation if the system header files provide access
+// to the FP registers. In particular, <sys/user.h> must have definitions of
+// `struct user_vfp` and `struct user_vfp_exc`, as it does on Android.
+//
+// Those definitions are however not present in the headers of every Linux
+// distro - Raspbian is known to be a problem, for example. However those
+// distros are tier-3 platforms.
+//
+// If you run into compile problems on a tier-3 platform, you can disable the
+// emulation here.
+
+#if defined(__linux__) && defined(__arm__)
+# define WASM_EMULATE_ARM_UNALIGNED_FP_ACCESS
+#endif
+
+#ifdef WASM_EMULATE_ARM_UNALIGNED_FP_ACCESS
+# include <sys/user.h>
+#endif
+
+#if defined(ANDROID)
+// Not all versions of the Android NDK define ucontext_t or mcontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See: https://chromiumcodereview.appspot.com/10829122/
+// See: http://code.google.com/p/android/issues/detail?id=34784
+# if !defined(__BIONIC_HAVE_UCONTEXT_T)
+# if defined(__arm__)
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+# if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+# include <asm/sigcontext.h>
+# endif
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used so don't define them here.
+} ucontext_t;
+
+# elif defined(__mips__)
+
+typedef struct {
+ uint32_t regmask;
+ uint32_t status;
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t acx;
+ uint32_t fpc_csr;
+ uint32_t fpc_eir;
+ uint32_t used_math;
+ uint32_t dsp;
+ uint64_t mdhi;
+ uint64_t mdlo;
+ uint32_t hi1;
+ uint32_t lo1;
+ uint32_t hi2;
+ uint32_t lo2;
+ uint32_t hi3;
+ uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used so don't define them here.
+} ucontext_t;
+
+# elif defined(__i386__)
+// x86 version for Android.
+typedef struct {
+ uint32_t gregs[19];
+ void* fpregs;
+ uint32_t oldmask;
+ uint32_t cr2;
+} mcontext_t;
+
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+  // Other fields are not used so don't define them here.
+} ucontext_t;
+enum { REG_EIP = 14 };
+# endif // defined(__i386__)
+# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
+#endif // defined(ANDROID)
+
+#if defined(XP_DARWIN)
+# if defined(__x86_64__)
+struct macos_x64_context {
+ x86_thread_state64_t thread;
+ x86_float_state64_t float_;
+};
+# define CONTEXT macos_x64_context
+# elif defined(__i386__)
+struct macos_x86_context {
+ x86_thread_state_t thread;
+ x86_float_state_t float_;
+};
+# define CONTEXT macos_x86_context
+# elif defined(__arm__)
+struct macos_arm_context {
+ arm_thread_state_t thread;
+ arm_neon_state_t float_;
+};
+# define CONTEXT macos_arm_context
+# elif defined(__aarch64__)
+struct macos_aarch64_context {
+ arm_thread_state64_t thread;
+ arm_neon_state64_t float_;
+};
+# define CONTEXT macos_aarch64_context
+# else
+# error Unsupported architecture
+# endif
+#elif !defined(XP_WIN)
+# define CONTEXT ucontext_t
+#endif
+
+#if defined(_M_X64) || defined(__x86_64__)
+# define PC_sig(p) RIP_sig(p)
+# define FP_sig(p) RBP_sig(p)
+# define SP_sig(p) RSP_sig(p)
+#elif defined(_M_IX86) || defined(__i386__)
+# define PC_sig(p) EIP_sig(p)
+# define FP_sig(p) EBP_sig(p)
+# define SP_sig(p) ESP_sig(p)
+#elif defined(__arm__)
+# define FP_sig(p) R11_sig(p)
+# define SP_sig(p) R13_sig(p)
+# define LR_sig(p) R14_sig(p)
+# define PC_sig(p) R15_sig(p)
+#elif defined(_M_ARM64) || defined(__aarch64__)
+# define PC_sig(p) EPC_sig(p)
+# define FP_sig(p) RFP_sig(p)
+# define SP_sig(p) R31_sig(p)
+# define LR_sig(p) RLR_sig(p)
+#elif defined(__mips__)
+# define PC_sig(p) EPC_sig(p)
+# define FP_sig(p) RFP_sig(p)
+# define SP_sig(p) RSP_sig(p)
+# define LR_sig(p) R31_sig(p)
+#elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define PC_sig(p) R32_sig(p)
+# define SP_sig(p) R01_sig(p)
+# define FP_sig(p) R01_sig(p)
+#endif
+
+static void SetContextPC(CONTEXT* context, uint8_t* pc) {
+#ifdef PC_sig
+ *reinterpret_cast<uint8_t**>(&PC_sig(context)) = pc;
+#else
+ MOZ_CRASH();
+#endif
+}
+
+static uint8_t* ContextToPC(CONTEXT* context) {
+#ifdef PC_sig
+ return reinterpret_cast<uint8_t*>(PC_sig(context));
+#else
+ MOZ_CRASH();
+#endif
+}
+
+static uint8_t* ContextToFP(CONTEXT* context) {
+#ifdef FP_sig
+ return reinterpret_cast<uint8_t*>(FP_sig(context));
+#else
+ MOZ_CRASH();
+#endif
+}
+
+static uint8_t* ContextToSP(CONTEXT* context) {
+#ifdef SP_sig
+ return reinterpret_cast<uint8_t*>(SP_sig(context));
+#else
+ MOZ_CRASH();
+#endif
+}
+
+#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+static uint8_t* ContextToLR(CONTEXT* context) {
+# ifdef LR_sig
+ return reinterpret_cast<uint8_t*>(LR_sig(context));
+# else
+ MOZ_CRASH();
+# endif
+}
+#endif
+
+static JS::ProfilingFrameIterator::RegisterState ToRegisterState(
+ CONTEXT* context) {
+ JS::ProfilingFrameIterator::RegisterState state;
+ state.fp = ContextToFP(context);
+ state.pc = ContextToPC(context);
+ state.sp = ContextToSP(context);
+#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+ state.lr = ContextToLR(context);
+#else
+ state.lr = (void*)UINTPTR_MAX;
+#endif
+ return state;
+}
+
+// =============================================================================
+// All signals/exceptions funnel down to this one trap-handling function which
+// tests whether the pc is in a wasm module and, if so, whether there is
+// actually a trap expected at this pc. These tests both avoid real bugs being
+// silently converted to wasm traps and provides the trapping wasm bytecode
+// offset we need to report in the error.
+//
+// Crashing inside wasm trap handling (due to a bug in trap handling or exposed
+// during trap handling) must be reported like a normal crash, not cause the
+// crash report to be lost. On Windows and non-Mach Unix, a crash during the
+// handler reenters the handler, possibly repeatedly until exhausting the stack,
+// and so we prevent recursion with the thread-local sAlreadyHandlingTrap. On
+// Mach, the wasm exception handler has its own thread and is installed only on
+// the thread-level debugging ports of JSRuntime threads, so a crash on the
+// exception handler thread will not recurse; it will bubble up to the
+// process-level debugging ports (where Breakpad is installed).
+// =============================================================================
+
+static MOZ_THREAD_LOCAL(bool) sAlreadyHandlingTrap;
+
+struct AutoHandlingTrap {
+ AutoHandlingTrap() {
+ MOZ_ASSERT(!sAlreadyHandlingTrap.get());
+ sAlreadyHandlingTrap.set(true);
+ }
+
+ ~AutoHandlingTrap() {
+ MOZ_ASSERT(sAlreadyHandlingTrap.get());
+ sAlreadyHandlingTrap.set(false);
+ }
+};
+
+#ifdef WASM_EMULATE_ARM_UNALIGNED_FP_ACCESS
+
+// Code to handle SIGBUS for unaligned floating point accesses on 32-bit ARM.
+
+static uintptr_t ReadGPR(CONTEXT* context, uint32_t rn) {
+ switch (rn) {
+ case 0:
+ return context->uc_mcontext.arm_r0;
+ case 1:
+ return context->uc_mcontext.arm_r1;
+ case 2:
+ return context->uc_mcontext.arm_r2;
+ case 3:
+ return context->uc_mcontext.arm_r3;
+ case 4:
+ return context->uc_mcontext.arm_r4;
+ case 5:
+ return context->uc_mcontext.arm_r5;
+ case 6:
+ return context->uc_mcontext.arm_r6;
+ case 7:
+ return context->uc_mcontext.arm_r7;
+ case 8:
+ return context->uc_mcontext.arm_r8;
+ case 9:
+ return context->uc_mcontext.arm_r9;
+ case 10:
+ return context->uc_mcontext.arm_r10;
+ case 11:
+ return context->uc_mcontext.arm_fp;
+ case 12:
+ return context->uc_mcontext.arm_ip;
+ case 13:
+ return context->uc_mcontext.arm_sp;
+ case 14:
+ return context->uc_mcontext.arm_lr;
+ case 15:
+ return context->uc_mcontext.arm_pc;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+// Linux kernel data structures.
+//
+// The vfp_sigframe is a kernel type overlaid on the uc_regspace field of the
+// ucontext_t if the first word of the uc_regspace is VFP_MAGIC. (user_vfp and
+// user_vfp_exc are defined in sys/user.h and are stable.)
+//
+// VFP_MAGIC appears to have been stable since a commit to Linux on 2010-04-11,
+// when it was changed from being 0x56465001 on ARMv6 and earlier and 0x56465002
+// on ARMv7 and later, to being 0x56465001 on all CPU versions. This was in
+// Kernel 2.6.34-rc5.
+//
+// My best interpretation of the Android commit history is that Android has had
+// vfp_sigframe and VFP_MAGIC in this form since at least Android 3.4 / 2012;
+// Firefox requires Android 4.0 at least and we're probably safe here.
+
+struct vfp_sigframe {
+ unsigned long magic;
+ unsigned long size;
+ struct user_vfp ufp;
+ struct user_vfp_exc ufp_exc;
+};
+
+# define VFP_MAGIC 0x56465001
+
+static vfp_sigframe* GetVFPFrame(CONTEXT* context) {
+ if (context->uc_regspace[0] != VFP_MAGIC) {
+ return nullptr;
+ }
+ return (vfp_sigframe*)&context->uc_regspace;
+}
+
+static bool ReadFPR64(CONTEXT* context, uint32_t vd, double* val) {
+ MOZ_ASSERT(vd < 32);
+ vfp_sigframe* frame = GetVFPFrame(context);
+ if (frame) {
+ *val = ((double*)frame->ufp.fpregs)[vd];
+ return true;
+ }
+ return false;
+}
+
+static bool WriteFPR64(CONTEXT* context, uint32_t vd, double val) {
+ MOZ_ASSERT(vd < 32);
+ vfp_sigframe* frame = GetVFPFrame(context);
+ if (frame) {
+ ((double*)frame->ufp.fpregs)[vd] = val;
+ return true;
+ }
+ return false;
+}
+
+static bool ReadFPR32(CONTEXT* context, uint32_t vd, float* val) {
+ MOZ_ASSERT(vd < 32);
+ vfp_sigframe* frame = GetVFPFrame(context);
+ if (frame) {
+ *val = ((float*)frame->ufp.fpregs)[vd];
+ return true;
+ }
+ return false;
+}
+
+static bool WriteFPR32(CONTEXT* context, uint32_t vd, float val) {
+ MOZ_ASSERT(vd < 32);
+ vfp_sigframe* frame = GetVFPFrame(context);
+ if (frame) {
+ ((float*)frame->ufp.fpregs)[vd] = val;
+ return true;
+ }
+ return false;
+}
+
+static bool HandleUnalignedTrap(CONTEXT* context, uint8_t* pc,
+ Instance* instance) {
+ // ARM only, no Thumb.
+ MOZ_RELEASE_ASSERT(uintptr_t(pc) % 4 == 0);
+
+ // wasmLoadImpl() and wasmStoreImpl() in MacroAssembler-arm.cpp emit plain,
+ // unconditional VLDR and VSTR instructions that do not use the PC as the base
+ // register.
+ uint32_t instr = *(uint32_t*)pc;
+ uint32_t masked = instr & 0x0F300E00;
+ bool isVLDR = masked == 0x0D100A00;
+ bool isVSTR = masked == 0x0D000A00;
+
+ if (!isVLDR && !isVSTR) {
+ // Three obvious cases if we don't get our expected instructions:
+ // - masm is generating other FP access instructions than it should
+ // - we're encountering a device that traps on new kinds of accesses,
+ // perhaps unaligned integer accesses
+ // - general code generation bugs that lead to SIGBUS
+# ifdef ANDROID
+ __android_log_print(ANDROID_LOG_ERROR, "WASM", "Bad SIGBUS instr %08x",
+ instr);
+# endif
+# ifdef DEBUG
+ MOZ_CRASH("Unexpected instruction");
+# endif
+ return false;
+ }
+
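+  // Decode the VLDR/VSTR fields: the condition is in bits 31:28, bit 8
+  // selects a double (F64) rather than a single (F32) transfer, the U bit
+  // (bit 23) selects adding or subtracting the offset, the D bit (bit 22)
+  // extends the FP register number, the 8-bit immediate is scaled by 4, and
+  // Rn (bits 19:16) is the base register.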
+ bool isUnconditional = (instr >> 28) == 0xE;
+ bool isDouble = (instr & 0x00000100) != 0;
+ bool isAdd = (instr & 0x00800000) != 0;
+ uint32_t dBit = (instr >> 22) & 1;
+ uint32_t offs = (instr & 0xFF) << 2;
+ uint32_t rn = (instr >> 16) & 0xF;
+
+ MOZ_RELEASE_ASSERT(isUnconditional);
+ MOZ_RELEASE_ASSERT(rn != 15);
+
+ uint8_t* p = (uint8_t*)ReadGPR(context, rn) + (isAdd ? offs : -offs);
+
+ if (!instance->memoryAccessInBounds(
+ p, isDouble ? sizeof(double) : sizeof(float))) {
+ return false;
+ }
+
+ if (isDouble) {
+ uint32_t vd = ((instr >> 12) & 0xF) | (dBit << 4);
+ double val;
+ if (isVLDR) {
+ memcpy(&val, p, sizeof(val));
+ if (WriteFPR64(context, vd, val)) {
+ SetContextPC(context, pc + 4);
+ return true;
+ }
+ } else {
+ if (ReadFPR64(context, vd, &val)) {
+ memcpy(p, &val, sizeof(val));
+ SetContextPC(context, pc + 4);
+ return true;
+ }
+ }
+ } else {
+ uint32_t vd = ((instr >> 11) & (0xF << 1)) | dBit;
+ float val;
+ if (isVLDR) {
+ memcpy(&val, p, sizeof(val));
+ if (WriteFPR32(context, vd, val)) {
+ SetContextPC(context, pc + 4);
+ return true;
+ }
+ } else {
+ if (ReadFPR32(context, vd, &val)) {
+ memcpy(p, &val, sizeof(val));
+ SetContextPC(context, pc + 4);
+ return true;
+ }
+ }
+ }
+
+# ifdef DEBUG
+ MOZ_CRASH(
+ "SIGBUS handler could not access FP register, incompatible kernel?");
+# endif
+ return false;
+}
+#else // WASM_EMULATE_ARM_UNALIGNED_FP_ACCESS
+static bool HandleUnalignedTrap(CONTEXT* context, uint8_t* pc,
+ Instance* instance) {
+ return false;
+}
+#endif // WASM_EMULATE_ARM_UNALIGNED_FP_ACCESS
+
+[[nodiscard]] static bool HandleTrap(CONTEXT* context,
+ bool isUnalignedSignal = false,
+ JSContext* assertCx = nullptr) {
+ MOZ_ASSERT(sAlreadyHandlingTrap.get());
+
+ uint8_t* pc = ContextToPC(context);
+ const CodeSegment* codeSegment = LookupCodeSegment(pc);
+ if (!codeSegment || !codeSegment->isModule()) {
+ return false;
+ }
+
+ const ModuleSegment& segment = *codeSegment->asModule();
+
+ Trap trap;
+ BytecodeOffset bytecode;
+ if (!segment.code().lookupTrap(pc, &trap, &bytecode)) {
+ return false;
+ }
+
+ // We have a safe, expected wasm trap, so fp is well-defined to be a Frame*.
+ // For the first sanity check, the Trap::IndirectCallBadSig special case is
+ // due to this trap occurring in the indirect call prologue, while fp points
+ // to the caller's Frame which can be in a different Module. In any case,
+ // though, the containing JSContext is the same.
+
+ auto* frame = reinterpret_cast<Frame*>(ContextToFP(context));
+ Instance* instance = GetNearestEffectiveTls(frame)->instance;
+ MOZ_RELEASE_ASSERT(&instance->code() == &segment.code() ||
+ trap == Trap::IndirectCallBadSig);
+
+ if (isUnalignedSignal) {
+ if (trap != Trap::OutOfBounds) {
+ return false;
+ }
+ if (HandleUnalignedTrap(context, pc, instance)) {
+ return true;
+ }
+ }
+
+ JSContext* cx =
+ instance->realm()->runtimeFromAnyThread()->mainContextFromAnyThread();
+ MOZ_RELEASE_ASSERT(!assertCx || cx == assertCx);
+
+ // JitActivation::startWasmTrap() stores enough register state from the
+ // point of the trap to allow stack unwinding or resumption, both of which
+ // will call finishWasmTrap().
+ jit::JitActivation* activation = cx->activation()->asJit();
+ activation->startWasmTrap(trap, bytecode.offset(), ToRegisterState(context));
+ SetContextPC(context, segment.trapCode());
+ return true;
+}
+
+// =============================================================================
+// The following platform-specific handlers funnel all signals/exceptions into
+// the shared HandleTrap() above.
+// =============================================================================
+
+#if defined(XP_WIN)
+// Obtained empirically from thread_local codegen on x86/x64/arm64.
+// Compiled in all user binaries, so should be stable over time.
+static const unsigned sThreadLocalArrayPointerIndex = 11;
+
+static LONG WINAPI WasmTrapHandler(LPEXCEPTION_POINTERS exception) {
+ // Make sure TLS is initialized before reading sAlreadyHandlingTrap.
+ if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ if (sAlreadyHandlingTrap.get()) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+ AutoHandlingTrap aht;
+
+ EXCEPTION_RECORD* record = exception->ExceptionRecord;
+ if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
+ record->ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ if (!HandleTrap(exception->ContextRecord, false, TlsContext.get())) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ return EXCEPTION_CONTINUE_EXECUTION;
+}
+
+#elif defined(XP_DARWIN)
+// On OSX we are forced to use the lower-level Mach exception mechanism instead
+// of Unix signals because breakpad uses Mach exceptions and would otherwise
+// report a crash before wasm gets a chance to handle the exception.
+
+// This definition was generated by mig (the Mach Interface Generator) for the
+// routine 'exception_raise' (exc.defs).
+# pragma pack(4)
+typedef struct {
+ mach_msg_header_t Head;
+ /* start of the kernel processed data */
+ mach_msg_body_t msgh_body;
+ mach_msg_port_descriptor_t thread;
+ mach_msg_port_descriptor_t task;
+ /* end of the kernel processed data */
+ NDR_record_t NDR;
+ exception_type_t exception;
+ mach_msg_type_number_t codeCnt;
+ int64_t code[2];
+} Request__mach_exception_raise_t;
+# pragma pack()
+
+// The full Mach message also includes a trailer.
+struct ExceptionRequest {
+ Request__mach_exception_raise_t body;
+ mach_msg_trailer_t trailer;
+};
+
+static bool HandleMachException(const ExceptionRequest& request) {
+ // Get the port of the JSContext's thread from the message.
+ mach_port_t cxThread = request.body.thread.name;
+
+ // Read out the JSRuntime thread's register state.
+ CONTEXT context;
+# if defined(__x86_64__)
+ unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
+ unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
+ int thread_state = x86_THREAD_STATE64;
+ int float_state = x86_FLOAT_STATE64;
+# elif defined(__i386__)
+ unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
+ unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
+ int thread_state = x86_THREAD_STATE;
+ int float_state = x86_FLOAT_STATE;
+# elif defined(__arm__)
+ unsigned int thread_state_count = ARM_THREAD_STATE_COUNT;
+ unsigned int float_state_count = ARM_NEON_STATE_COUNT;
+ int thread_state = ARM_THREAD_STATE;
+ int float_state = ARM_NEON_STATE;
+# elif defined(__aarch64__)
+ unsigned int thread_state_count = ARM_THREAD_STATE64_COUNT;
+ unsigned int float_state_count = ARM_NEON_STATE64_COUNT;
+ int thread_state = ARM_THREAD_STATE64;
+ int float_state = ARM_NEON_STATE64;
+# else
+# error Unsupported architecture
+# endif
+ kern_return_t kret;
+ kret = thread_get_state(cxThread, thread_state,
+ (thread_state_t)&context.thread, &thread_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+ kret = thread_get_state(cxThread, float_state,
+ (thread_state_t)&context.float_, &float_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+
+ if (request.body.exception != EXC_BAD_ACCESS &&
+ request.body.exception != EXC_BAD_INSTRUCTION) {
+ return false;
+ }
+
+ {
+ AutoNoteSingleThreadedRegion anstr;
+ AutoHandlingTrap aht;
+ if (!HandleTrap(&context)) {
+ return false;
+ }
+ }
+
+ // Update the thread state with the new pc and register values.
+ kret = thread_set_state(cxThread, float_state,
+ (thread_state_t)&context.float_, float_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+ kret = thread_set_state(cxThread, thread_state,
+ (thread_state_t)&context.thread, thread_state_count);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+
+ return true;
+}
+
+static mach_port_t sMachDebugPort = MACH_PORT_NULL;
+
+static void MachExceptionHandlerThread() {
+ // Taken from mach_exc in /usr/include/mach/mach_exc.defs.
+ static const unsigned EXCEPTION_MSG_ID = 2405;
+
+ while (true) {
+ ExceptionRequest request;
+ kern_return_t kret =
+ mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
+ sMachDebugPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+
+ // If we fail even receiving the message, we can't even send a reply!
+ // Rather than hanging the faulting thread (hanging the browser), crash.
+ if (kret != KERN_SUCCESS) {
+ fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n",
+ (int)kret);
+ MOZ_CRASH();
+ }
+
+ if (request.body.Head.msgh_id != EXCEPTION_MSG_ID) {
+ fprintf(stderr, "Unexpected msg header id %d\n",
+ (int)request.body.Head.msgh_bits);
+ MOZ_CRASH();
+ }
+
+    // Some thread just committed an EXC_BAD_ACCESS and has been suspended by
+ // the kernel. The kernel is waiting for us to reply with instructions.
+ // Our default is the "not handled" reply (by setting the RetCode field
+ // of the reply to KERN_FAILURE) which tells the kernel to continue
+ // searching at the process and system level. If this is an asm.js
+ // expected exception, we handle it and return KERN_SUCCESS.
+ bool handled = HandleMachException(request);
+ kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;
+
+ // This magic incantation to send a reply back to the kernel was
+ // derived from the exc_server generated by
+ // 'mig -v /usr/include/mach/mach_exc.defs'.
+ __Reply__exception_raise_t reply;
+ reply.Head.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
+ reply.Head.msgh_size = sizeof(reply);
+ reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
+ reply.Head.msgh_local_port = MACH_PORT_NULL;
+ reply.Head.msgh_id = request.body.Head.msgh_id + 100;
+ reply.NDR = NDR_record;
+ reply.RetCode = replyCode;
+ mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ }
+}
+
+#else // If not Windows or Mac, assume Unix
+
+# ifdef __mips__
+static const uint32_t kWasmTrapSignal = SIGFPE;
+# else
+static const uint32_t kWasmTrapSignal = SIGILL;
+# endif
+
+static struct sigaction sPrevSEGVHandler;
+static struct sigaction sPrevSIGBUSHandler;
+static struct sigaction sPrevWasmTrapHandler;
+
+static void WasmTrapHandler(int signum, siginfo_t* info, void* context) {
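+  // Only attempt wasm trap handling if we are not already handling a trap;
+  // otherwise fall through and forward the signal so a crash inside the
+  // handler is reported normally (see the comment above sAlreadyHandlingTrap).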
+ if (!sAlreadyHandlingTrap.get()) {
+ AutoHandlingTrap aht;
+ MOZ_RELEASE_ASSERT(signum == SIGSEGV || signum == SIGBUS ||
+ signum == kWasmTrapSignal);
+ if (HandleTrap((CONTEXT*)context, signum == SIGBUS, TlsContext.get())) {
+ return;
+ }
+ }
+
+ struct sigaction* previousSignal = nullptr;
+ switch (signum) {
+ case SIGSEGV:
+ previousSignal = &sPrevSEGVHandler;
+ break;
+ case SIGBUS:
+ previousSignal = &sPrevSIGBUSHandler;
+ break;
+ case kWasmTrapSignal:
+ previousSignal = &sPrevWasmTrapHandler;
+ break;
+ }
+ MOZ_ASSERT(previousSignal);
+
+ // This signal is not for any asm.js code we expect, so we need to forward
+ // the signal to the next handler. If there is no next handler (SIG_IGN or
+ // SIG_DFL), then it's time to crash. To do this, we set the signal back to
+ // its original disposition and return. This will cause the faulting op to
+  // be re-executed, which will crash in the normal way. The advantage of
+  // doing this over calling _exit() is that we remove ourselves from the
+  // crash stack, which improves crash reports. If there is a next handler,
+  // call it. It will either crash synchronously, fix up the instruction so
+  // that execution can continue and return, or trigger a crash by restoring
+  // the signal to its original disposition and returning.
+  //
+  // Note: the order of these tests matters.
+ if (previousSignal->sa_flags & SA_SIGINFO) {
+ previousSignal->sa_sigaction(signum, info, context);
+ } else if (previousSignal->sa_handler == SIG_DFL ||
+ previousSignal->sa_handler == SIG_IGN) {
+ sigaction(signum, previousSignal, nullptr);
+ } else {
+ previousSignal->sa_handler(signum);
+ }
+}
+#endif // XP_WIN || XP_DARWIN || assume unix
+
+#if defined(ANDROID) && defined(MOZ_LINKER)
+extern "C" MFBT_API bool IsSignalHandlingBroken();
+#endif
+
+struct InstallState {
+ bool tried;
+ bool success;
+ InstallState() : tried(false), success(false) {}
+};
+
+static ExclusiveData<InstallState> sEagerInstallState(
+ mutexid::WasmSignalInstallState);
+
+void wasm::EnsureEagerProcessSignalHandlers() {
+ auto eagerInstallState = sEagerInstallState.lock();
+ if (eagerInstallState->tried) {
+ return;
+ }
+
+ eagerInstallState->tried = true;
+ MOZ_RELEASE_ASSERT(eagerInstallState->success == false);
+
+#if defined(JS_CODEGEN_NONE)
+ // If there is no JIT, then there should be no Wasm signal handlers.
+ return;
+#endif
+
+#if defined(ANDROID) && defined(MOZ_LINKER)
+ // Signal handling is broken on some android systems.
+ if (IsSignalHandlingBroken()) {
+ return;
+ }
+#endif
+
+ sAlreadyHandlingTrap.infallibleInit();
+
+ // Install whatever exception/signal handler is appropriate for the OS.
+#if defined(XP_WIN)
+
+# if defined(MOZ_ASAN)
+ // Under ASan we need to let the ASan runtime's ShadowExceptionHandler stay
+ // in the first handler position. This requires some coordination with
+ // MemoryProtectionExceptionHandler::isDisabled().
+ const bool firstHandler = false;
+# else
+ // Otherwise, WasmTrapHandler needs to go first, so that we can recover
+ // from wasm faults and continue execution without triggering handlers
+ // such as MemoryProtectionExceptionHandler that assume we are crashing.
+ const bool firstHandler = true;
+# endif
+ if (!AddVectoredExceptionHandler(firstHandler, WasmTrapHandler)) {
+ // Windows has all sorts of random security knobs for disabling things
+ // so make this a dynamic failure that disables wasm, not a MOZ_CRASH().
+ return;
+ }
+
+#elif defined(XP_DARWIN)
+ // All the Mach setup in EnsureLazyProcessSignalHandlers.
+#else
+ // SA_NODEFER allows us to reenter the signal handler if we crash while
+ // handling the signal, and fall through to the Breakpad handler by testing
+  // sAlreadyHandlingTrap.
+
+ // Allow handling OOB with signals on all architectures
+ struct sigaction faultHandler;
+ faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ faultHandler.sa_sigaction = WasmTrapHandler;
+ sigemptyset(&faultHandler.sa_mask);
+ if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler)) {
+ MOZ_CRASH("unable to install segv handler");
+ }
+
+# if defined(JS_CODEGEN_ARM)
+ // On Arm Handle Unaligned Accesses
+ struct sigaction busHandler;
+ busHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ busHandler.sa_sigaction = WasmTrapHandler;
+ sigemptyset(&busHandler.sa_mask);
+ if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler)) {
+ MOZ_CRASH("unable to install sigbus handler");
+ }
+# endif
+
+ // Install a handler to handle the instructions that are emitted to implement
+ // wasm traps.
+ struct sigaction wasmTrapHandler;
+ wasmTrapHandler.sa_flags = SA_SIGINFO | SA_NODEFER | SA_ONSTACK;
+ wasmTrapHandler.sa_sigaction = WasmTrapHandler;
+ sigemptyset(&wasmTrapHandler.sa_mask);
+ if (sigaction(kWasmTrapSignal, &wasmTrapHandler, &sPrevWasmTrapHandler)) {
+ MOZ_CRASH("unable to install wasm trap handler");
+ }
+#endif
+
+ eagerInstallState->success = true;
+}
+
+static ExclusiveData<InstallState> sLazyInstallState(
+ mutexid::WasmSignalInstallState);
+
+static bool EnsureLazyProcessSignalHandlers() {
+ auto lazyInstallState = sLazyInstallState.lock();
+ if (lazyInstallState->tried) {
+ return lazyInstallState->success;
+ }
+
+ lazyInstallState->tried = true;
+ MOZ_RELEASE_ASSERT(lazyInstallState->success == false);
+
+#ifdef XP_DARWIN
+ // Create the port that all JSContext threads will redirect their traps to.
+ kern_return_t kret;
+ kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
+ &sMachDebugPort);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+ kret = mach_port_insert_right(mach_task_self(), sMachDebugPort,
+ sMachDebugPort, MACH_MSG_TYPE_MAKE_SEND);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+
+ // Create the thread that will wait on and service sMachDebugPort.
+ // It's not useful to destroy this thread on process shutdown so
+ // immediately detach on successful start.
+ Thread handlerThread;
+ if (!handlerThread.init(MachExceptionHandlerThread)) {
+ return false;
+ }
+ handlerThread.detach();
+#endif
+
+ lazyInstallState->success = true;
+ return true;
+}
+
+bool wasm::EnsureFullSignalHandlers(JSContext* cx) {
+ if (cx->wasm().triedToInstallSignalHandlers) {
+ return cx->wasm().haveSignalHandlers;
+ }
+
+ cx->wasm().triedToInstallSignalHandlers = true;
+ MOZ_RELEASE_ASSERT(!cx->wasm().haveSignalHandlers);
+
+ {
+ auto eagerInstallState = sEagerInstallState.lock();
+ MOZ_RELEASE_ASSERT(eagerInstallState->tried);
+ if (!eagerInstallState->success) {
+ return false;
+ }
+ }
+
+ if (!EnsureLazyProcessSignalHandlers()) {
+ return false;
+ }
+
+#ifdef XP_DARWIN
+ // In addition to the process-wide signal handler setup, OSX needs each
+ // thread configured to send its exceptions to sMachDebugPort. While there
+ // are also task-level (i.e. process-level) exception ports, those are
+ // "claimed" by breakpad and chaining Mach exceptions is dark magic that we
+ // avoid by instead intercepting exceptions at the thread level before they
+ // propagate to the process-level. This works because there are no other
+ // uses of thread-level exception ports.
+ MOZ_RELEASE_ASSERT(sMachDebugPort != MACH_PORT_NULL);
+ thread_port_t thisThread = mach_thread_self();
+ kern_return_t kret = thread_set_exception_ports(
+ thisThread, EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
+ sMachDebugPort, EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
+ THREAD_STATE_NONE);
+ mach_port_deallocate(mach_task_self(), thisThread);
+ if (kret != KERN_SUCCESS) {
+ return false;
+ }
+#endif
+
+ cx->wasm().haveSignalHandlers = true;
+ return true;
+}
+
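+// Simulator entry point (see WasmSignalHandlers.h): report whether an
+// out-of-bounds access at 'addr' is an expected wasm trap and, if so, start
+// the trap in the JitActivation and return the trap stub to resume at.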
+bool wasm::MemoryAccessTraps(const RegisterState& regs, uint8_t* addr,
+ uint32_t numBytes, uint8_t** newPC) {
+ const wasm::CodeSegment* codeSegment = wasm::LookupCodeSegment(regs.pc);
+ if (!codeSegment || !codeSegment->isModule()) {
+ return false;
+ }
+
+ const wasm::ModuleSegment& segment = *codeSegment->asModule();
+
+ Trap trap;
+ BytecodeOffset bytecode;
+ if (!segment.code().lookupTrap(regs.pc, &trap, &bytecode) ||
+ trap != Trap::OutOfBounds) {
+ return false;
+ }
+
+ Instance& instance =
+ *GetNearestEffectiveTls(Frame::fromUntaggedWasmExitFP(regs.fp))->instance;
+ MOZ_ASSERT(&instance.code() == &segment.code());
+
+ if (!instance.memoryAccessInGuardRegion((uint8_t*)addr, numBytes)) {
+ return false;
+ }
+
+ jit::JitActivation* activation = TlsContext.get()->activation()->asJit();
+ activation->startWasmTrap(Trap::OutOfBounds, bytecode.offset(), regs);
+ *newPC = segment.trapCode();
+ return true;
+}
+
+bool wasm::HandleIllegalInstruction(const RegisterState& regs,
+ uint8_t** newPC) {
+ const wasm::CodeSegment* codeSegment = wasm::LookupCodeSegment(regs.pc);
+ if (!codeSegment || !codeSegment->isModule()) {
+ return false;
+ }
+
+ const wasm::ModuleSegment& segment = *codeSegment->asModule();
+
+ Trap trap;
+ BytecodeOffset bytecode;
+ if (!segment.code().lookupTrap(regs.pc, &trap, &bytecode)) {
+ return false;
+ }
+
+ jit::JitActivation* activation = TlsContext.get()->activation()->asJit();
+ activation->startWasmTrap(trap, bytecode.offset(), regs);
+ *newPC = segment.trapCode();
+ return true;
+}
+
+#undef WASM_EMULATE_ARM_UNALIGNED_FP_ACCESS
diff --git a/js/src/wasm/WasmSignalHandlers.h b/js/src/wasm/WasmSignalHandlers.h
new file mode 100644
index 0000000000..0cd50f2adc
--- /dev/null
+++ b/js/src/wasm/WasmSignalHandlers.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_signal_handlers_h
+#define wasm_signal_handlers_h
+
+#include "mozilla/Attributes.h"
+
+#include "js/ProfilingFrameIterator.h"
+#include "wasm/WasmProcess.h"
+
+namespace js {
+namespace wasm {
+
+using RegisterState = JS::ProfilingFrameIterator::RegisterState;
+
+// This function performs the low-overhead signal handler initialization that we
+// want to do eagerly to ensure a more-deterministic global process state. This
+// is especially relevant for signal handlers since handler ordering depends on
+// installation order: the wasm signal handler must run *before* the other crash
+// handlers (ds/MemoryProtectionExceptionHandler.h and breakpad) and since POSIX
+// signal handlers work LIFO, this function needs to be called at the end of the
+// startup process, after the other two handlers have been installed. Currently,
+// this is achieved by having JSRuntime() call this function. There can be
+// multiple JSRuntimes per process so this function can thus be called multiple
+// times, having no effect after the first call.
+void EnsureEagerProcessSignalHandlers();
+
+// Assuming EnsureEagerProcessSignalHandlers() has already been called,
+// this function performs the full installation of signal handlers which must
+// be performed per-thread/JSContext. This operation may incur some overhead and
+// so should be done only when needed to use wasm. Currently, this is done in
+// wasm::HasPlatformSupport() which is called when deciding whether to expose
+// the 'WebAssembly' object on the global object.
+bool EnsureFullSignalHandlers(JSContext* cx);
+
+// Return whether, with the given simulator register state, a memory access to
+// 'addr' of size 'numBytes' needs to trap and, if so, where the simulator
+// should redirect pc to.
+bool MemoryAccessTraps(const RegisterState& regs, uint8_t* addr,
+ uint32_t numBytes, uint8_t** newPC);
+
+// Return whether, with the given simulator register state, an illegal
+// instruction fault is expected and, if so, the value of the next PC.
+bool HandleIllegalInstruction(const RegisterState& regs, uint8_t** newPC);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_signal_handlers_h
diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
new file mode 100644
index 0000000000..56d28292ef
--- /dev/null
+++ b/js/src/wasm/WasmStubs.cpp
@@ -0,0 +1,3037 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmStubs.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "jit/ABIFunctions.h"
+#include "jit/JitFrames.h"
+#include "jit/JitScript.h"
+#include "jit/RegisterAllocator.h"
+#include "js/Printf.h"
+#include "util/Memory.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmInstance.h"
+
+#include "jit/ABIFunctionList-inl.h"
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
+using ABIArgMIRTypeIter = jit::ABIArgIter<MIRTypeVector>;
+
+/*****************************************************************************/
+// ABIResultIter implementation
+
+static uint32_t ResultStackSize(ValType type) {
+ switch (type.kind()) {
+ case ValType::I32:
+ return ABIResult::StackSizeOfInt32;
+ case ValType::I64:
+ return ABIResult::StackSizeOfInt64;
+ case ValType::F32:
+ return ABIResult::StackSizeOfFloat;
+ case ValType::F64:
+ return ABIResult::StackSizeOfDouble;
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+ return ABIResult::StackSizeOfV128;
+#endif
+ case ValType::Ref:
+ return ABIResult::StackSizeOfPtr;
+ default:
+ MOZ_CRASH("Unexpected result type");
+ }
+}
+
+uint32_t ABIResult::size() const { return ResultStackSize(type()); }
+
+void ABIResultIter::settleRegister(ValType type) {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT_IF(direction_ == Next, index() < MaxRegisterResults);
+ MOZ_ASSERT_IF(direction_ == Prev, index() >= count_ - MaxRegisterResults);
+ static_assert(MaxRegisterResults == 1, "expected a single register result");
+
+ switch (type.kind()) {
+ case ValType::I32:
+ cur_ = ABIResult(type, ReturnReg);
+ break;
+ case ValType::I64:
+ cur_ = ABIResult(type, ReturnReg64);
+ break;
+ case ValType::F32:
+ cur_ = ABIResult(type, ReturnFloat32Reg);
+ break;
+ case ValType::F64:
+ cur_ = ABIResult(type, ReturnDoubleReg);
+ break;
+ case ValType::Ref:
+ cur_ = ABIResult(type, ReturnReg);
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case ValType::V128:
+ cur_ = ABIResult(type, ReturnSimd128Reg);
+ break;
+#endif
+ default:
+ MOZ_CRASH("Unexpected result type");
+ }
+}
+
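+// Settle on the ABI location for the result at the current iteration index:
+// the first MaxRegisterResults results visited land in registers, the rest at
+// successive stack offsets.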
+void ABIResultIter::settleNext() {
+ MOZ_ASSERT(direction_ == Next);
+ MOZ_ASSERT(!done());
+
+ uint32_t typeIndex = count_ - index_ - 1;
+ ValType type = type_[typeIndex];
+
+ if (index_ < MaxRegisterResults) {
+ settleRegister(type);
+ return;
+ }
+
+ cur_ = ABIResult(type, nextStackOffset_);
+ nextStackOffset_ += ResultStackSize(type);
+}
+
+void ABIResultIter::settlePrev() {
+ MOZ_ASSERT(direction_ == Prev);
+ MOZ_ASSERT(!done());
+ uint32_t typeIndex = index_;
+ ValType type = type_[typeIndex];
+
+ if (count_ - index_ - 1 < MaxRegisterResults) {
+ settleRegister(type);
+ return;
+ }
+
+ uint32_t size = ResultStackSize(type);
+ MOZ_ASSERT(nextStackOffset_ >= size);
+ nextStackOffset_ -= size;
+ cur_ = ABIResult(type, nextStackOffset_);
+}
+
+// Register save/restore.
+//
+// On ARM64, the register sets are not able to represent SIMD registers (see
+// lengthy comment in Architecture-arm64.h for information), and so we use a
+// hack to save and restore them: on this architecture, when we care about SIMD,
+// we call special routines that know about them.
+//
+// In a couple of cases it is not currently necessary to save and restore SIMD
+// registers, but the extra traffic is all along slow paths and not really worth
+// optimizing.
+static void PushRegsInMask(MacroAssembler& masm, const LiveRegisterSet& set) {
+#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
+ masm.PushRegsInMaskForWasmStubs(set);
+#else
+ masm.PushRegsInMask(set);
+#endif
+}
+
+static void PopRegsInMask(MacroAssembler& masm, const LiveRegisterSet& set) {
+#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
+ masm.PopRegsInMaskForWasmStubs(set, LiveRegisterSet());
+#else
+ masm.PopRegsInMask(set);
+#endif
+}
+
+static void PopRegsInMaskIgnore(MacroAssembler& masm,
+ const LiveRegisterSet& set,
+ const LiveRegisterSet& ignore) {
+#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
+ masm.PopRegsInMaskForWasmStubs(set, ignore);
+#else
+ masm.PopRegsInMaskIgnore(set, ignore);
+#endif
+}
+
+#ifdef WASM_CODEGEN_DEBUG
+template <class Closure>
+static void GenPrint(DebugChannel channel, MacroAssembler& masm,
+ const Maybe<Register>& taken, Closure passArgAndCall) {
+ if (!IsCodegenDebugEnabled(channel)) {
+ return;
+ }
+
+ AllocatableRegisterSet regs(RegisterSet::All());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(masm, save);
+
+ if (taken) {
+ regs.take(taken.value());
+ }
+ Register temp = regs.takeAnyGeneral();
+
+ {
+ MOZ_ASSERT(MaybeGetJitContext(),
+ "codegen debug checks require a jit context");
+ masm.setupUnalignedABICall(temp);
+ passArgAndCall(IsCompilingWasm(), temp);
+ }
+
+ PopRegsInMask(masm, save);
+}
+
+static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
+ const char* fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ UniqueChars str = JS_vsmprintf(fmt, ap);
+ va_end(ap);
+
+ GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
+ // If we've gone this far, it means we're actually using the debugging
+ // strings. In this case, we leak them! This is only for debugging, and
+    // doing the right thing is cumbersome (in Ion, it'd mean adding a vector
+    // of strings to the IonScript; in wasm, it'd mean adding it to the current
+    // Module and serializing it properly).
+ const char* text = str.release();
+
+ masm.movePtr(ImmPtr((void*)text, ImmPtr::NoCheckToken()), temp);
+ masm.passABIArg(temp);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintText);
+ } else {
+ using Fn = void (*)(const char* output);
+ masm.callWithABI<Fn, PrintText>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {
+ GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
+ masm.passABIArg(src);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintI32);
+ } else {
+ using Fn = void (*)(int32_t val);
+ masm.callWithABI<Fn, PrintI32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {
+ GenPrint(channel, masm, Some(src), [&](bool inWasm, Register _temp) {
+ masm.passABIArg(src);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintPtr);
+ } else {
+      using Fn = void (*)(uint8_t* val);
+ masm.callWithABI<Fn, PrintPtr>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
+ const Register64& src) {
+# if JS_BITS_PER_WORD == 64
+ GenPrintf(channel, masm, "i64 ");
+ GenPrintIsize(channel, masm, src.reg);
+# else
+ GenPrintf(channel, masm, "i64(");
+ GenPrintIsize(channel, masm, src.low);
+ GenPrintIsize(channel, masm, src.high);
+ GenPrintf(channel, masm, ") ");
+# endif
+}
+
+static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {
+ GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
+ masm.passABIArg(src, MoveOp::FLOAT32);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintF32);
+ } else {
+ using Fn = void (*)(float val);
+ masm.callWithABI<Fn, PrintF32>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {
+ GenPrint(channel, masm, Nothing(), [&](bool inWasm, Register temp) {
+ masm.passABIArg(src, MoveOp::DOUBLE);
+ if (inWasm) {
+ masm.callDebugWithABI(SymbolicAddress::PrintF64);
+ } else {
+ using Fn = void (*)(double val);
+ masm.callWithABI<Fn, PrintF64>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ });
+}
+
+# ifdef ENABLE_WASM_SIMD
+static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {
+ // TODO: We might try to do something meaningful here once SIMD data are
+ // aligned and hence C++-ABI compliant. For now, just make ourselves visible.
+ GenPrintf(channel, masm, "v128");
+}
+# endif
+#else
+static void GenPrintf(DebugChannel channel, MacroAssembler& masm,
+ const char* fmt, ...) {}
+static void GenPrintIsize(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {}
+static void GenPrintPtr(DebugChannel channel, MacroAssembler& masm,
+ const Register& src) {}
+static void GenPrintI64(DebugChannel channel, MacroAssembler& masm,
+ const Register64& src) {}
+static void GenPrintF32(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {}
+static void GenPrintF64(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {}
+# ifdef ENABLE_WASM_SIMD
+static void GenPrintV128(DebugChannel channel, MacroAssembler& masm,
+ const FloatRegister& src) {}
+# endif
+#endif
+
+static bool FinishOffsets(MacroAssembler& masm, Offsets* offsets) {
+ // On old ARM hardware, constant pools could be inserted and they need to
+ // be flushed before considering the size of the masm.
+ masm.flushBuffer();
+ offsets->end = masm.size();
+ return !masm.oom();
+}
+
+static void AssertStackAlignment(MacroAssembler& masm, uint32_t alignment,
+ uint32_t addBeforeAssert = 0) {
+ MOZ_ASSERT(
+ (sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
+ masm.assertStackAlignment(alignment, addBeforeAssert);
+}
+
+template <class VectorT, template <class VecT> class ABIArgIterT>
+static unsigned StackArgBytesHelper(const VectorT& args) {
+ ABIArgIterT<VectorT> iter(args);
+ while (!iter.done()) {
+ iter++;
+ }
+ return iter.stackBytesConsumedSoFar();
+}
+
+template <class VectorT>
+static unsigned StackArgBytesForNativeABI(const VectorT& args) {
+ return StackArgBytesHelper<VectorT, ABIArgIter>(args);
+}
+
+template <class VectorT>
+static unsigned StackArgBytesForWasmABI(const VectorT& args) {
+ return StackArgBytesHelper<VectorT, WasmABIArgIter>(args);
+}
+
+static unsigned StackArgBytesForWasmABI(const FuncType& funcType) {
+ ArgTypeVector args(funcType);
+ return StackArgBytesForWasmABI(args);
+}
+
+static void Move64(MacroAssembler& masm, const Address& src,
+ const Address& dest, Register scratch) {
+#if JS_BITS_PER_WORD == 32
+ masm.load32(LowWord(src), scratch);
+ masm.store32(scratch, LowWord(dest));
+ masm.load32(HighWord(src), scratch);
+ masm.store32(scratch, HighWord(dest));
+#else
+ Register64 scratch64(scratch);
+ masm.load64(src, scratch64);
+ masm.store64(scratch64, dest);
+#endif
+}
+
+static void SetupABIArguments(MacroAssembler& masm, const FuncExport& fe,
+ Register argv, Register scratch) {
+ // Copy parameters out of argv and into the registers/stack-slots specified by
+ // the wasm ABI.
+ //
+  // SetupABIArguments is only used for C++ -> wasm calls through callExport(),
+ // and V128 and Ref types (other than externref) are not currently allowed.
+ ArgTypeVector args(fe.funcType());
+ for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+ unsigned argOffset = iter.index() * sizeof(ExportArg);
+ Address src(argv, argOffset);
+ MIRType type = iter.mirType();
+ switch (iter->kind()) {
+ case ABIArg::GPR:
+ if (type == MIRType::Int32) {
+ masm.load32(src, iter->gpr());
+ } else if (type == MIRType::Int64) {
+ masm.load64(src, iter->gpr64());
+ } else if (type == MIRType::RefOrNull) {
+ masm.loadPtr(src, iter->gpr());
+ } else if (type == MIRType::StackResults) {
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
+ masm.loadPtr(src, iter->gpr());
+ } else {
+ MOZ_CRASH("unknown GPR type");
+ }
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ if (type == MIRType::Int64) {
+ masm.load64(src, iter->gpr64());
+ } else {
+ MOZ_CRASH("wasm uses hardfp for function calls.");
+ }
+ break;
+#endif
+ case ABIArg::FPU: {
+ static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
+ "ExportArg must be big enough to store SIMD values");
+ switch (type) {
+ case MIRType::Double:
+ masm.loadDouble(src, iter->fpu());
+ break;
+ case MIRType::Float32:
+ masm.loadFloat32(src, iter->fpu());
+ break;
+ case MIRType::Simd128:
+#ifdef ENABLE_WASM_SIMD
+ // We will reach this point when we generate interpreter entry stubs
+ // for exports that receive v128 values, but the code will never be
+ // executed because such exports cannot be called from JS.
+ masm.breakpoint();
+ break;
+#else
+ MOZ_CRASH("V128 not supported in SetupABIArguments");
+#endif
+ default:
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
+ break;
+ }
+ break;
+ }
+ case ABIArg::Stack:
+ switch (type) {
+ case MIRType::Int32:
+ masm.load32(src, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ case MIRType::Int64: {
+ RegisterOrSP sp = masm.getStackPointer();
+ Move64(masm, src, Address(sp, iter->offsetFromArgBase()), scratch);
+ break;
+ }
+ case MIRType::RefOrNull:
+ masm.loadPtr(src, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ case MIRType::Double: {
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadDouble(src, fpscratch);
+ masm.storeDouble(fpscratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ }
+ case MIRType::Float32: {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.loadFloat32(src, fpscratch);
+ masm.storeFloat32(fpscratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ }
+ case MIRType::Simd128: {
+#ifdef ENABLE_WASM_SIMD
+ // We will reach this point when we generate interpreter entry stubs
+ // for exports that receive v128 values, but the code will never be
+ // executed because such exports cannot be called from JS.
+ masm.breakpoint();
+ break;
+#else
+ MOZ_CRASH("V128 not supported in SetupABIArguments");
+#endif
+ }
+ case MIRType::StackResults: {
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
+ masm.loadPtr(src, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(),
+ iter->offsetFromArgBase()));
+ break;
+ }
+ default:
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE(
+ "unexpected stack arg type");
+ }
+ break;
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ }
+}
+
+static void StoreRegisterResult(MacroAssembler& masm, const FuncExport& fe,
+ Register loc) {
+ ResultType results = ResultType::Vector(fe.funcType().results());
+ DebugOnly<bool> sawRegisterResult = false;
+ for (ABIResultIter iter(results); !iter.done(); iter.next()) {
+ const ABIResult& result = iter.cur();
+ if (result.inRegister()) {
+ MOZ_ASSERT(!sawRegisterResult);
+ sawRegisterResult = true;
+ switch (result.type().kind()) {
+ case ValType::I32:
+ masm.store32(result.gpr(), Address(loc, 0));
+ break;
+ case ValType::I64:
+ masm.store64(result.gpr64(), Address(loc, 0));
+ break;
+ case ValType::V128:
+#ifdef ENABLE_WASM_SIMD
+ masm.storeUnalignedSimd128(result.fpr(), Address(loc, 0));
+ break;
+#else
+ MOZ_CRASH("V128 not supported in StoreABIReturn");
+#endif
+ case ValType::F32:
+ masm.canonicalizeFloat(result.fpr());
+ masm.storeFloat32(result.fpr(), Address(loc, 0));
+ break;
+ case ValType::F64:
+ masm.canonicalizeDouble(result.fpr());
+ masm.storeDouble(result.fpr(), Address(loc, 0));
+ break;
+ case ValType::Ref:
+ masm.storePtr(result.gpr(), Address(loc, 0));
+ break;
+ }
+ }
+ }
+ MOZ_ASSERT(sawRegisterResult == (results.length() > 0));
+}
+
+#if defined(JS_CODEGEN_ARM)
+// The ARM system ABI also includes d15 & s31 in the non-volatile float
+// registers. Also exclude lr (a.k.a. r14) as we preserve it manually.
+static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
+ GeneralRegisterSet(Registers::NonVolatileMask &
+ ~(Registers::SetType(1) << Registers::lr)),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask |
+ (FloatRegisters::SetType(1) << FloatRegisters::d15) |
+ (FloatRegisters::SetType(1) << FloatRegisters::s31)));
+#elif defined(JS_CODEGEN_ARM64)
+// Exclude the Link Register (x30) because it is preserved manually.
+//
+// Include x16 (scratch) to make a 16-byte aligned amount of integer registers.
+// Include d31 (scratch) to make a 16-byte aligned amount of floating registers.
+static const LiveRegisterSet NonVolatileRegs = LiveRegisterSet(
+ GeneralRegisterSet((Registers::NonVolatileMask &
+ ~(Registers::SetType(1) << Registers::lr)) |
+ (Registers::SetType(1) << Registers::x16)),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask |
+ FloatRegisters::NonAllocatableMask));
+#else
+static const LiveRegisterSet NonVolatileRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask));
+#endif
+
+#if defined(JS_CODEGEN_NONE)
+static const unsigned NonVolatileRegsPushSize = 0;
+#elif defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
+static const unsigned NonVolatileRegsPushSize =
+ NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
+ FloatRegister::GetPushSizeInBytesForWasmStubs(NonVolatileRegs.fpus());
+#else
+static const unsigned NonVolatileRegsPushSize =
+ NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
+ NonVolatileRegs.fpus().getPushSizeInBytes();
+#endif
+
+#ifdef ENABLE_WASM_REFTYPES
+static const unsigned NumExtraPushed = 2; // tls and argv
+#else
+static const unsigned NumExtraPushed = 1; // argv
+#endif
+
+#ifdef JS_CODEGEN_ARM64
+static const unsigned WasmPushSize = 16;
+#else
+static const unsigned WasmPushSize = sizeof(void*);
+#endif
+
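+// Rough stack picture for GenerateInterpEntry below (stack grows downwards);
+// FramePushedBeforeAlign counts everything pushed after setFramePushed(0) and
+// before the dynamic realignment (the saved return address, where pushed,
+// sits above these and is not counted):
+//
+//   [non-volatile registers]   (NonVolatileRegsPushSize bytes)
+//   [WasmTlsReg]               (only with ENABLE_WASM_REFTYPES)
+//   [argv]
+//   --- FramePushedBeforeAlign ---
+//   [saved unaligned SP]       (non-ARM64 only)
+//   [wasm ABI argument area]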
+static const unsigned FramePushedBeforeAlign =
+ NonVolatileRegsPushSize + NumExtraPushed * WasmPushSize;
+
+static void AssertExpectedSP(const MacroAssembler& masm) {
+#ifdef JS_CODEGEN_ARM64
+ MOZ_ASSERT(sp.Is(masm.GetStackPointer64()));
+#endif
+}
+
+template <class Operand>
+static void WasmPush(MacroAssembler& masm, const Operand& op) {
+#ifdef JS_CODEGEN_ARM64
+ // Allocate a pad word so that SP can remain properly aligned. |op| will be
+  // written at the lower address of the two words pushed here.
+ masm.reserveStack(WasmPushSize);
+ masm.storePtr(op, Address(masm.getStackPointer(), 0));
+#else
+ masm.Push(op);
+#endif
+}
+
+static void WasmPop(MacroAssembler& masm, Register r) {
+#ifdef JS_CODEGEN_ARM64
+ // Also pop the pad word allocated by WasmPush.
+ masm.loadPtr(Address(masm.getStackPointer(), 0), r);
+ masm.freeStack(WasmPushSize);
+#else
+ masm.Pop(r);
+#endif
+}
+
+static void MoveSPForJitABI(MacroAssembler& masm) {
+#ifdef JS_CODEGEN_ARM64
+ masm.moveStackPtrTo(PseudoStackPointer);
+#endif
+}
+
+static void CallFuncExport(MacroAssembler& masm, const FuncExport& fe,
+ const Maybe<ImmPtr>& funcPtr) {
+ MOZ_ASSERT(fe.hasEagerStubs() == !funcPtr);
+ if (funcPtr) {
+ masm.call(*funcPtr);
+ } else {
+ masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
+ }
+}
+
+STATIC_ASSERT_ANYREF_IS_JSOBJECT; // Strings are currently boxed
+
+// Unboxing is branchy and contorted because of Spectre mitigations: we don't
+// have enough scratch registers. Were it not for the Spectre mitigations in
+// branchTestObjClass, the branch nest below would be restructured significantly
+// by inverting branches and using fewer registers.
+
+// Unbox an anyref in src (clobbering src in the process) and then re-box it as
+// a Value in *dst. See the definition of AnyRef for a discussion of pointer
+// representation.
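+//
+// In outline, as implied by the code below (not a full restatement of the
+// AnyRef encoding): a null pointer becomes the JS null value; a pointer whose
+// class matches TlsData::valueBoxClass is a WasmValueBox holding an arbitrary
+// Value, which is loaded from WasmValueBox::offsetOfValue(); any other
+// pointer is a plain JSObject* and is re-boxed as an object Value.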
+static void UnboxAnyrefIntoValue(MacroAssembler& masm, Register tls,
+ Register src, const Address& dst,
+ Register scratch) {
+ MOZ_ASSERT(src != scratch);
+
+ // Not actually the value we're passing, but we've no way of
+ // decoding anything better.
+ GenPrintPtr(DebugChannel::Import, masm, src);
+
+ Label notNull, mustUnbox, done;
+ masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
+ masm.storeValue(NullValue(), dst);
+ masm.jump(&done);
+
+ masm.bind(&notNull);
+ // The type test will clear src if the test fails, so store early.
+ masm.storeValue(JSVAL_TYPE_OBJECT, src, dst);
+ // Spectre mitigations: see comment above about efficiency.
+ masm.branchTestObjClass(Assembler::Equal, src,
+ Address(tls, offsetof(TlsData, valueBoxClass)),
+ scratch, src, &mustUnbox);
+ masm.jump(&done);
+
+ masm.bind(&mustUnbox);
+ Move64(masm, Address(src, WasmValueBox::offsetOfValue()), dst, scratch);
+
+ masm.bind(&done);
+}
+
+// Unbox an anyref in src and then re-box it as a Value in dst.
+// See the definition of AnyRef for a discussion of pointer representation.
+static void UnboxAnyrefIntoValueReg(MacroAssembler& masm, Register tls,
+ Register src, ValueOperand dst,
+ Register scratch) {
+ MOZ_ASSERT(src != scratch);
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(dst.typeReg() != scratch);
+ MOZ_ASSERT(dst.payloadReg() != scratch);
+#else
+ MOZ_ASSERT(dst.valueReg() != scratch);
+#endif
+
+ // Not actually the value we're passing, but we've no way of
+ // decoding anything better.
+ GenPrintPtr(DebugChannel::Import, masm, src);
+
+ Label notNull, mustUnbox, done;
+ masm.branchTestPtr(Assembler::NonZero, src, src, &notNull);
+ masm.moveValue(NullValue(), dst);
+ masm.jump(&done);
+
+ masm.bind(&notNull);
+ // The type test will clear src if the test fails, so store early.
+ masm.moveValue(TypedOrValueRegister(MIRType::Object, AnyRegister(src)), dst);
+ // Spectre mitigations: see comment above about efficiency.
+ masm.branchTestObjClass(Assembler::Equal, src,
+ Address(tls, offsetof(TlsData, valueBoxClass)),
+ scratch, src, &mustUnbox);
+ masm.jump(&done);
+
+ masm.bind(&mustUnbox);
+ masm.loadValue(Address(src, WasmValueBox::offsetOfValue()), dst);
+
+ masm.bind(&done);
+}
+
+// Box the Value in src as an anyref in dest. src and dest must not overlap.
+// See the definition of AnyRef for a discussion of pointer representation.
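+//
+// As implemented below: a null Value becomes a null (zero) anyref, an object
+// Value becomes its unboxed JSObject*, and every other Value is sent to the
+// out-of-line conversion path via oolConvert.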
+static void BoxValueIntoAnyref(MacroAssembler& masm, ValueOperand src,
+ Register dest, Label* oolConvert) {
+ Label nullValue, objectValue, done;
+ {
+ ScratchTagScope tag(masm, src);
+ masm.splitTagForTest(src, tag);
+ masm.branchTestObject(Assembler::Equal, tag, &objectValue);
+ masm.branchTestNull(Assembler::Equal, tag, &nullValue);
+ masm.jump(oolConvert);
+ }
+
+ masm.bind(&nullValue);
+ masm.xorPtr(dest, dest);
+ masm.jump(&done);
+
+ masm.bind(&objectValue);
+ masm.unboxObject(src, dest);
+
+ masm.bind(&done);
+}
+
+// Generate a stub that enters wasm from a C++ caller via the native ABI. The
+// signature of the entry point is Module::ExportFuncPtr. The exported wasm
+// function has an ABI derived from its specific signature, so this function
+// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
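+//
+// From the argument handling below: the entry point receives two native-ABI
+// arguments, an ExportArg* array and a TlsData*, and reports success or
+// failure as a boolean in ReturnReg; argv carries the arguments on entry and
+// receives the register result in argv[0] on return.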
+static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
+ const Maybe<ImmPtr>& funcPtr,
+ Offsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+
+ offsets->begin = masm.currentOffset();
+
+ // Save the return address if it wasn't already saved by the call insn.
+#ifdef JS_USE_LINK_REGISTER
+# if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ masm.pushReturnAddress();
+# elif defined(JS_CODEGEN_ARM64)
+ // WasmPush updates framePushed() unlike pushReturnAddress(), but that's
+ // cancelled by the setFramePushed() below.
+ WasmPush(masm, lr);
+# else
+ MOZ_CRASH("Implement this");
+# endif
+#endif
+
+ // Save all caller non-volatile registers before we clobber them here and in
+ // the wasm callee (which does not preserve non-volatile registers).
+ masm.setFramePushed(0);
+ PushRegsInMask(masm, NonVolatileRegs);
+ MOZ_ASSERT(masm.framePushed() == NonVolatileRegsPushSize);
+
+ // Put the 'argv' argument into a non-argument/return/TLS register so that
+ // we can use 'argv' while we fill in the arguments for the wasm callee.
+ // Use a second non-argument/return register as temporary scratch.
+ Register argv = ABINonArgReturnReg0;
+ Register scratch = ABINonArgReturnReg1;
+
+ // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
+ // The entry stub's frame is 1 word.
+ const unsigned argBase = sizeof(void*) + masm.framePushed();
+ ABIArgGenerator abi;
+ ABIArg arg;
+
+ // arg 1: ExportArg*
+ arg = abi.next(MIRType::Pointer);
+ if (arg.kind() == ABIArg::GPR) {
+ masm.movePtr(arg.gpr(), argv);
+ } else {
+ masm.loadPtr(
+ Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
+ argv);
+ }
+
+ // Arg 2: TlsData*
+ arg = abi.next(MIRType::Pointer);
+ if (arg.kind() == ABIArg::GPR) {
+ masm.movePtr(arg.gpr(), WasmTlsReg);
+ } else {
+ masm.loadPtr(
+ Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()),
+ WasmTlsReg);
+ }
+
+#ifdef ENABLE_WASM_REFTYPES
+ WasmPush(masm, WasmTlsReg);
+#endif
+
+ // Save 'argv' on the stack so that we can recover it after the call.
+ WasmPush(masm, argv);
+
+ // Since we're about to dynamically align the stack, reset the frame depth
+ // so we can still assert static stack depth balancing.
+ MOZ_ASSERT(masm.framePushed() == FramePushedBeforeAlign);
+ masm.setFramePushed(0);
+
+ // Dynamically align the stack since ABIStackAlignment is not necessarily
+ // WasmStackAlignment. Preserve SP so it can be restored after the call.
+#ifdef JS_CODEGEN_ARM64
+ static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
+#else
+ masm.moveStackPtrTo(scratch);
+ masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
+ masm.Push(scratch);
+#endif
+
+ // Reserve stack space for the wasm call.
+ unsigned argDecrement =
+ StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
+ StackArgBytesForWasmABI(fe.funcType()));
+ masm.reserveStack(argDecrement);
+
+ // Copy parameters out of argv and into the wasm ABI registers/stack-slots.
+ SetupABIArguments(masm, fe, argv, scratch);
+
+ // Setup wasm register state. The nullness of the frame pointer is used to
+ // determine whether the call ended in success or failure.
+ masm.movePtr(ImmWord(0), FramePointer);
+ masm.loadWasmPinnedRegsFromTls();
+
+ masm.storePtr(WasmTlsReg,
+ Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
+
+ // Call into the real function. Note that, due to the throw stub, fp, tls
+ // and pinned registers may be clobbered.
+ masm.assertStackAlignment(WasmStackAlignment);
+ CallFuncExport(masm, fe, funcPtr);
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ // Pop the arguments pushed after the dynamic alignment.
+ masm.freeStack(argDecrement);
+
+ // Pop the stack pointer to its value right before dynamic alignment.
+#ifdef JS_CODEGEN_ARM64
+ static_assert(WasmStackAlignment == 16, "ARM64 SP alignment");
+#else
+ masm.PopStackPtr();
+#endif
+ MOZ_ASSERT(masm.framePushed() == 0);
+ masm.setFramePushed(FramePushedBeforeAlign);
+
+ // Recover the 'argv' pointer which was saved before aligning the stack.
+ WasmPop(masm, argv);
+
+#ifdef ENABLE_WASM_REFTYPES
+ WasmPop(masm, WasmTlsReg);
+#endif
+
+ // Store the register result, if any, in argv[0].
+ // No spectre.index_masking is required, as the value leaves ReturnReg.
+ StoreRegisterResult(masm, fe, argv);
+
+ // After the ReturnReg is stored into argv[0] but before fp is clobbered by
+ // the PopRegsInMask(NonVolatileRegs) below, set the return value based on
+ // whether fp is null (which is the case for successful returns) or the
+  // FailFP magic value (set by the throw stub).
+ Label success, join;
+ masm.branchTestPtr(Assembler::Zero, FramePointer, FramePointer, &success);
+#ifdef DEBUG
+ Label ok;
+ masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+#endif
+ masm.move32(Imm32(false), ReturnReg);
+ masm.jump(&join);
+ masm.bind(&success);
+ masm.move32(Imm32(true), ReturnReg);
+ masm.bind(&join);
+
+ // Restore clobbered non-volatile registers of the caller.
+ PopRegsInMask(masm, NonVolatileRegs);
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+#if defined(JS_CODEGEN_ARM64)
+ masm.setFramePushed(WasmPushSize);
+ WasmPop(masm, lr);
+ masm.abiret();
+#else
+ masm.ret();
+#endif
+
+ return FinishOffsets(masm, offsets);
+}
+
+#ifdef JS_PUNBOX64
+static const ValueOperand ScratchValIonEntry = ValueOperand(ABINonArgReg0);
+#else
+static const ValueOperand ScratchValIonEntry =
+ ValueOperand(ABINonArgReg0, ABINonArgReg1);
+#endif
+static const Register ScratchIonEntry = ABINonArgReg2;
+
+static void CallSymbolicAddress(MacroAssembler& masm, bool isAbsolute,
+ SymbolicAddress sym) {
+ if (isAbsolute) {
+ masm.call(ImmPtr(SymbolicAddressTarget(sym), ImmPtr::NoCheckToken()));
+ } else {
+ masm.call(sym);
+ }
+}
+
+// Load instance's TLS from the callee.
+static void GenerateJitEntryLoadTls(MacroAssembler& masm, unsigned frameSize) {
+ AssertExpectedSP(masm);
+
+ // ScratchIonEntry := callee => JSFunction*
+ unsigned offset = frameSize + JitFrameLayout::offsetOfCalleeToken();
+ masm.loadFunctionFromCalleeToken(Address(masm.getStackPointer(), offset),
+ ScratchIonEntry);
+
+ // ScratchIonEntry := callee->getExtendedSlot(WASM_TLSDATA_SLOT)->toPrivate()
+ // => TlsData*
+ offset = FunctionExtended::offsetOfExtendedSlot(
+ FunctionExtended::WASM_TLSDATA_SLOT);
+ masm.loadPrivate(Address(ScratchIonEntry, offset), WasmTlsReg);
+}
+
+// Creates a JS fake exit frame for wasm, so the frame iterators just use
+// JSJit frame iteration.
+static void GenerateJitEntryThrow(MacroAssembler& masm, unsigned frameSize) {
+ AssertExpectedSP(masm);
+
+ MOZ_ASSERT(masm.framePushed() == frameSize);
+
+ GenerateJitEntryLoadTls(masm, frameSize);
+
+ masm.freeStack(frameSize);
+ MoveSPForJitABI(masm);
+
+ masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, cx)), ScratchIonEntry);
+ masm.enterFakeExitFrameForWasm(ScratchIonEntry, ScratchIonEntry,
+ ExitFrameType::WasmGenericJitEntry);
+
+ masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)),
+ ScratchIonEntry);
+ masm.loadPtr(
+ Address(ScratchIonEntry, Instance::offsetOfJSJitExceptionHandler()),
+ ScratchIonEntry);
+ masm.jump(ScratchIonEntry);
+}
+
+// Helper function for allocating a BigInt and initializing it from an I64
+// in GenerateJitEntry and GenerateImportInterpExit. The return result is
+// written to scratch.
+static void GenerateBigIntInitialization(MacroAssembler& masm,
+ unsigned bytesPushedByPrologue,
+ Register64 input, Register scratch,
+ const FuncExport* fe, Label* fail) {
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(input.low != scratch);
+ MOZ_ASSERT(input.high != scratch);
+#else
+ MOZ_ASSERT(input.reg != scratch);
+#endif
+
+ // We need to avoid clobbering other argument registers and the input.
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ LiveRegisterSet save(regs.asLiveSet());
+ PushRegsInMask(masm, save);
+
+ unsigned frameSize = StackDecrementForCall(
+ ABIStackAlignment, masm.framePushed() + bytesPushedByPrologue, 0);
+ masm.reserveStack(frameSize);
+ masm.assertStackAlignment(ABIStackAlignment);
+
+  // Needs to use a different call type depending on the stub it's used from.
+ if (fe) {
+ CallSymbolicAddress(masm, !fe->hasEagerStubs(),
+ SymbolicAddress::AllocateBigInt);
+ } else {
+ masm.call(SymbolicAddress::AllocateBigInt);
+ }
+ masm.storeCallPointerResult(scratch);
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.freeStack(frameSize);
+
+ LiveRegisterSet ignore;
+ ignore.add(scratch);
+ PopRegsInMaskIgnore(masm, save, ignore);
+
+ masm.branchTest32(Assembler::Zero, scratch, scratch, fail);
+ masm.initializeBigInt64(Scalar::BigInt64, scratch, input);
+}
+
+// Generate a stub that enters wasm from a jit code caller via the jit ABI.
+//
+// ARM64 note: This does not save the PseudoStackPointer so we must be sure to
+// recompute it on every return path, be it normal return or exception return.
+// The JIT code we return to assumes it is correct.
+
+static bool GenerateJitEntry(MacroAssembler& masm, size_t funcExportIndex,
+ const FuncExport& fe, const Maybe<ImmPtr>& funcPtr,
+ Offsets* offsets) {
+ AssertExpectedSP(masm);
+
+ RegisterOrSP sp = masm.getStackPointer();
+
+ GenerateJitEntryPrologue(masm, offsets);
+
+ // The jit caller has set up the following stack layout (sp grows to the
+ // left):
+ // <-- retAddr | descriptor | callee | argc | this | arg1..N
+
+ unsigned normalBytesNeeded = StackArgBytesForWasmABI(fe.funcType());
+
+ MIRTypeVector coerceArgTypes;
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Int32));
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
+ unsigned oolBytesNeeded = StackArgBytesForWasmABI(coerceArgTypes);
+
+ unsigned bytesNeeded = std::max(normalBytesNeeded, oolBytesNeeded);
+
+ // Note the jit caller ensures the stack is aligned *after* the call
+ // instruction.
+ unsigned frameSize = StackDecrementForCall(WasmStackAlignment,
+ masm.framePushed(), bytesNeeded);
+
+ // Reserve stack space for wasm ABI arguments, set up like this:
+ // <-- ABI args | padding
+ masm.reserveStack(frameSize);
+
+ GenerateJitEntryLoadTls(masm, frameSize);
+
+ if (fe.funcType().hasUnexposableArgOrRet()) {
+ CallSymbolicAddress(masm, !fe.hasEagerStubs(),
+ SymbolicAddress::ReportV128JSCall);
+ GenerateJitEntryThrow(masm, frameSize);
+ return FinishOffsets(masm, offsets);
+ }
+
+ FloatRegister scratchF = ABINonArgDoubleReg;
+ Register scratchG = ScratchIonEntry;
+ ValueOperand scratchV = ScratchValIonEntry;
+
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
+ fe.funcIndex());
+
+ // We do two loops:
+ // - one loop up-front will make sure that all the Value tags fit the
+ // expected signature argument types. If at least one inline conversion
+ // fails, we just jump to the OOL path which will call into C++. Inline
+ // conversions are ordered in the way we expect them to happen the most.
+ // - the second loop will unbox the arguments into the right registers.
+ Label oolCall;
+ for (size_t i = 0; i < fe.funcType().args().length(); i++) {
+ unsigned jitArgOffset = frameSize + JitFrameLayout::offsetOfActualArg(i);
+ Address jitArgAddr(sp, jitArgOffset);
+ masm.loadValue(jitArgAddr, scratchV);
+
+ Label next;
+ switch (fe.funcType().args()[i].kind()) {
+ case ValType::I32: {
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // For int32 inputs, just skip.
+ masm.branchTestInt32(Assembler::Equal, tag, &next);
+
+ // For double inputs, unbox, truncate and store back.
+ Label storeBack, notDouble;
+ masm.branchTestDouble(Assembler::NotEqual, tag, &notDouble);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.unboxDouble(scratchV, scratchF);
+ masm.branchTruncateDoubleMaybeModUint32(scratchF, scratchG, &oolCall);
+ masm.jump(&storeBack);
+ }
+ masm.bind(&notDouble);
+
+ // For null or undefined, store 0.
+ Label nullOrUndefined, notNullOrUndefined;
+ masm.branchTestUndefined(Assembler::Equal, tag, &nullOrUndefined);
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNullOrUndefined);
+ masm.bind(&nullOrUndefined);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(Int32Value(0), jitArgAddr);
+ }
+ masm.jump(&next);
+ masm.bind(&notNullOrUndefined);
+
+ // For booleans, store the number value back. Other types (symbol,
+ // object, strings) go to the C++ call.
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
+ masm.unboxBoolean(scratchV, scratchG);
+ // fallthrough:
+
+ masm.bind(&storeBack);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(JSVAL_TYPE_INT32, scratchG, jitArgAddr);
+ }
+ break;
+ }
+ case ValType::I64: {
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // For BigInt inputs, just skip. Otherwise go to C++ for other
+ // types that require creating a new BigInt or erroring.
+ masm.branchTestBigInt(Assembler::NotEqual, tag, &oolCall);
+ masm.jump(&next);
+ break;
+ }
+ case ValType::F32:
+ case ValType::F64: {
+ // Note we can reuse the same code for f32/f64 here, since for the
+ // case of f32, the conversion of f64 to f32 will happen in the
+ // second loop.
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // For double inputs, just skip.
+ masm.branchTestDouble(Assembler::Equal, tag, &next);
+
+ // For int32 inputs, convert and rebox.
+ Label storeBack, notInt32;
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.branchTestInt32(Assembler::NotEqual, scratchV, &notInt32);
+ masm.int32ValueToDouble(scratchV, scratchF);
+ masm.jump(&storeBack);
+ }
+ masm.bind(&notInt32);
+
+ // For undefined (missing argument), store NaN.
+ Label notUndefined;
+ masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(DoubleValue(JS::GenericNaN()), jitArgAddr);
+ masm.jump(&next);
+ }
+ masm.bind(&notUndefined);
+
+ // +null is 0.
+ Label notNull;
+ masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.storeValue(DoubleValue(0.), jitArgAddr);
+ }
+ masm.jump(&next);
+ masm.bind(&notNull);
+
+ // For booleans, store the number value back. Other types (symbol,
+ // object, strings) go to the C++ call.
+ masm.branchTestBoolean(Assembler::NotEqual, tag, &oolCall);
+ masm.boolValueToDouble(scratchV, scratchF);
+ // fallthrough:
+
+ masm.bind(&storeBack);
+ {
+ ScratchTagScopeRelease _(&tag);
+ masm.boxDouble(scratchF, jitArgAddr);
+ }
+ break;
+ }
+ case ValType::Ref: {
+ switch (fe.funcType().args()[i].refTypeKind()) {
+ case RefType::Extern: {
+ ScratchTagScope tag(masm, scratchV);
+ masm.splitTagForTest(scratchV, tag);
+
+ // For object inputs, we handle object and null inline, everything
+ // else requires an actual box and we go out of line to allocate
+ // that.
+ masm.branchTestObject(Assembler::Equal, tag, &next);
+ masm.branchTestNull(Assembler::Equal, tag, &next);
+ masm.jump(&oolCall);
+ break;
+ }
+ case RefType::Func:
+ case RefType::Eq:
+ case RefType::TypeIndex: {
+ // Guarded against by temporarilyUnsupportedReftypeForEntry()
+ MOZ_CRASH("unexpected argument type when calling from the jit");
+ }
+ }
+ break;
+ }
+ case ValType::V128: {
+ // Guarded against by hasUnexposableArgOrRet()
+ MOZ_CRASH("unexpected argument type when calling from the jit");
+ }
+ default: {
+ MOZ_CRASH("unexpected argument type when calling from the jit");
+ }
+ }
+ masm.nopAlign(CodeAlignment);
+ masm.bind(&next);
+ }
+
+ Label rejoinBeforeCall;
+ masm.bind(&rejoinBeforeCall);
+
+ // Convert all the expected values to unboxed values on the stack.
+ ArgTypeVector args(fe.funcType());
+ for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+ unsigned jitArgOffset =
+ frameSize + JitFrameLayout::offsetOfActualArg(iter.index());
+ Address argv(sp, jitArgOffset);
+ bool isStackArg = iter->kind() == ABIArg::Stack;
+ switch (iter.mirType()) {
+ case MIRType::Int32: {
+ Register target = isStackArg ? ScratchIonEntry : iter->gpr();
+ masm.unboxInt32(argv, target);
+ GenPrintIsize(DebugChannel::Function, masm, target);
+ if (isStackArg) {
+ masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ case MIRType::Int64: {
+ // The coercion has provided a BigInt value by this point, which
+ // we need to convert to an I64 here.
+ if (isStackArg) {
+ Address dst(sp, iter->offsetFromArgBase());
+ Register src = scratchV.payloadOrValueReg();
+#if JS_BITS_PER_WORD == 64
+ Register64 scratch64(scratchG);
+#else
+ Register64 scratch64(scratchG, ABINonArgReg3);
+#endif
+ masm.unboxBigInt(argv, src);
+ masm.loadBigInt64(src, scratch64);
+ GenPrintI64(DebugChannel::Function, masm, scratch64);
+ masm.store64(scratch64, dst);
+ } else {
+ Register src = scratchG;
+ Register64 target = iter->gpr64();
+ masm.unboxBigInt(argv, src);
+ masm.loadBigInt64(src, target);
+ GenPrintI64(DebugChannel::Function, masm, target);
+ }
+ break;
+ }
+ case MIRType::Float32: {
+ FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
+ masm.unboxDouble(argv, ABINonArgDoubleReg);
+ masm.convertDoubleToFloat32(ABINonArgDoubleReg, target);
+ GenPrintF32(DebugChannel::Function, masm, target.asSingle());
+ if (isStackArg) {
+ masm.storeFloat32(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ case MIRType::Double: {
+ FloatRegister target = isStackArg ? ABINonArgDoubleReg : iter->fpu();
+ masm.unboxDouble(argv, target);
+ GenPrintF64(DebugChannel::Function, masm, target);
+ if (isStackArg) {
+ masm.storeDouble(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ case MIRType::RefOrNull: {
+ Register target = isStackArg ? ScratchIonEntry : iter->gpr();
+ masm.unboxObjectOrNull(argv, target);
+ GenPrintPtr(DebugChannel::Function, masm, target);
+ if (isStackArg) {
+ masm.storePtr(target, Address(sp, iter->offsetFromArgBase()));
+ }
+ break;
+ }
+ default: {
+ MOZ_CRASH("unexpected input argument when calling from jit");
+ }
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ // Setup wasm register state.
+ masm.loadWasmPinnedRegsFromTls();
+
+ masm.storePtr(WasmTlsReg,
+ Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
+
+ // Call into the real function. Note that, due to the throw stub, fp, tls
+ // and pinned registers may be clobbered.
+ masm.assertStackAlignment(WasmStackAlignment);
+ CallFuncExport(masm, fe, funcPtr);
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ // If fp is equal to the FailFP magic value (set by the throw stub), then
+ // report the exception to the JIT caller by jumping into the exception
+ // stub; otherwise the FP value is still set to the parent ion frame value.
+ Label exception;
+ masm.branchPtr(Assembler::Equal, FramePointer, Imm32(FailFP), &exception);
+
+ // Pop arguments.
+ masm.freeStack(frameSize);
+
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
+ fe.funcIndex());
+
+ // Store the return value in the JSReturnOperand.
+ const ValTypeVector& results = fe.funcType().results();
+ if (results.length() == 0) {
+ GenPrintf(DebugChannel::Function, masm, "void");
+ masm.moveValue(UndefinedValue(), JSReturnOperand);
+ } else {
+ MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
+ switch (results[0].kind()) {
+ case ValType::I32:
+ GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
+ // No spectre.index_masking is required, as the value is boxed.
+ masm.boxNonDouble(JSVAL_TYPE_INT32, ReturnReg, JSReturnOperand);
+ break;
+ case ValType::F32: {
+ masm.canonicalizeFloat(ReturnFloat32Reg);
+ masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
+ GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
+ ScratchDoubleScope fpscratch(masm);
+ masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
+ break;
+ }
+ case ValType::F64: {
+ masm.canonicalizeDouble(ReturnDoubleReg);
+ GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
+ ScratchDoubleScope fpscratch(masm);
+ masm.boxDouble(ReturnDoubleReg, JSReturnOperand, fpscratch);
+ break;
+ }
+ case ValType::I64: {
+ Label fail, done;
+ GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
+ GenerateBigIntInitialization(masm, 0, ReturnReg64, scratchG, &fe,
+ &fail);
+ masm.boxNonDouble(JSVAL_TYPE_BIGINT, scratchG, JSReturnOperand);
+ masm.jump(&done);
+ masm.bind(&fail);
+ // Fixup the stack for the exception tail so that we can share it.
+ masm.reserveStack(frameSize);
+ masm.jump(&exception);
+ masm.bind(&done);
+ // Un-fixup the stack for the benefit of the assertion below.
+ masm.setFramePushed(0);
+ break;
+ }
+ case ValType::V128: {
+ MOZ_CRASH("unexpected return type when calling from ion to wasm");
+ }
+ case ValType::Ref: {
+ switch (results[0].refTypeKind()) {
+ case RefType::Func:
+ case RefType::Eq:
+ // For FuncRef and EqRef use the AnyRef path for now, since that
+ // will work.
+ case RefType::Extern:
+ // Per comment above, the call may have clobbered the Tls register,
+ // so reload since unboxing will need it.
+ GenerateJitEntryLoadTls(masm, /* frameSize */ 0);
+ UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
+ JSReturnOperand, WasmJitEntryReturnScratch);
+ break;
+ case RefType::TypeIndex:
+ MOZ_CRASH("unexpected return type when calling from ion to wasm");
+ }
+ break;
+ }
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+#ifdef JS_CODEGEN_ARM64
+ masm.loadPtr(Address(sp, 0), lr);
+ masm.addToStackPtr(Imm32(8));
+ masm.moveStackPtrTo(PseudoStackPointer);
+ masm.abiret();
+#else
+ masm.ret();
+#endif
+
+ // Generate an OOL call to the C++ conversion path.
+ if (fe.funcType().args().length()) {
+ masm.bind(&oolCall);
+ masm.setFramePushed(frameSize);
+
+    // Baseline and Ion call the C++ runtime via BuiltinThunk using the wasm
+    // ABI, so to keep BuiltinThunk's interface uniform we call it here with
+    // the wasm ABI as well.
+ jit::WasmABIArgIter<MIRTypeVector> argsIter(coerceArgTypes);
+
+ // argument 0: function export index.
+ if (argsIter->kind() == ABIArg::GPR) {
+ masm.movePtr(ImmWord(funcExportIndex), argsIter->gpr());
+ } else {
+ masm.storePtr(ImmWord(funcExportIndex),
+ Address(sp, argsIter->offsetFromArgBase()));
+ }
+ argsIter++;
+
+ // argument 1: tlsData
+ if (argsIter->kind() == ABIArg::GPR) {
+ masm.movePtr(WasmTlsReg, argsIter->gpr());
+ } else {
+ masm.storePtr(WasmTlsReg, Address(sp, argsIter->offsetFromArgBase()));
+ }
+ argsIter++;
+
+ // argument 2: effective address of start of argv
+ Address argv(sp, masm.framePushed() + JitFrameLayout::offsetOfActualArg(0));
+ if (argsIter->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, argsIter->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, ScratchIonEntry);
+ masm.storePtr(ScratchIonEntry,
+ Address(sp, argsIter->offsetFromArgBase()));
+ }
+ argsIter++;
+ MOZ_ASSERT(argsIter.done());
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ CallSymbolicAddress(masm, !fe.hasEagerStubs(),
+ SymbolicAddress::CoerceInPlace_JitEntry);
+ masm.assertStackAlignment(ABIStackAlignment);
+
+ // No spectre.index_masking is required, as the return value is used as a
+ // bool.
+ masm.branchTest32(Assembler::NonZero, ReturnReg, ReturnReg,
+ &rejoinBeforeCall);
+ }
+
+ // Prepare to throw: reload WasmTlsReg from the frame.
+ masm.bind(&exception);
+ masm.setFramePushed(frameSize);
+ GenerateJitEntryThrow(masm, frameSize);
+
+ return FinishOffsets(masm, offsets);
+}
+
+void wasm::GenerateDirectCallFromJit(MacroAssembler& masm, const FuncExport& fe,
+ const Instance& inst,
+ const JitCallStackArgVector& stackArgs,
+ bool profilingEnabled, Register scratch,
+ uint32_t* callOffset) {
+ MOZ_ASSERT(!IsCompilingWasm());
+
+ size_t framePushedAtStart = masm.framePushed();
+
+ if (profilingEnabled) {
+ // FramePointer isn't volatile, manually preserve it because it will be
+ // clobbered below.
+ masm.Push(FramePointer);
+ } else {
+#ifdef DEBUG
+ // Ensure that the FramePointer is actually Ion-volatile. This might
+ // assert when bug 1426134 lands.
+ AllocatableRegisterSet set(RegisterSet::All());
+ TakeJitRegisters(/* profiling */ false, &set);
+ MOZ_ASSERT(set.has(FramePointer),
+ "replace the whole if branch by the then body when this fails");
+#endif
+ }
+
+ // Note, if code here pushes a reference value into the frame for its own
+ // purposes (and not just as an argument to the callee) then the frame must be
+ // traced in TraceJitExitFrame, see the case there for DirectWasmJitCall. The
+ // callee will trace values that are pushed as arguments, however.
+
+ // Push a special frame descriptor that indicates the frame size so we can
+ // directly iterate from the current JIT frame without an extra call.
+ *callOffset = masm.buildFakeExitFrame(scratch);
+ masm.loadJSContext(scratch);
+
+ masm.moveStackPtrTo(FramePointer);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::DirectWasmJitCall);
+ masm.orPtr(Imm32(ExitOrJitEntryFPTag), FramePointer);
+
+ // Move stack arguments to their final locations.
+ unsigned bytesNeeded = StackArgBytesForWasmABI(fe.funcType());
+ bytesNeeded = StackDecrementForCall(WasmStackAlignment, masm.framePushed(),
+ bytesNeeded);
+ if (bytesNeeded) {
+ masm.reserveStack(bytesNeeded);
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; arguments ",
+ fe.funcIndex());
+
+ ArgTypeVector args(fe.funcType());
+ for (WasmABIArgIter iter(args); !iter.done(); iter++) {
+ MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != scratch);
+ MOZ_ASSERT_IF(iter->kind() == ABIArg::GPR, iter->gpr() != FramePointer);
+ if (iter->kind() != ABIArg::Stack) {
+ switch (iter.mirType()) {
+ case MIRType::Int32:
+ GenPrintIsize(DebugChannel::Function, masm, iter->gpr());
+ break;
+ case MIRType::Int64:
+ GenPrintI64(DebugChannel::Function, masm, iter->gpr64());
+ break;
+ case MIRType::Float32:
+ GenPrintF32(DebugChannel::Function, masm, iter->fpu());
+ break;
+ case MIRType::Double:
+ GenPrintF64(DebugChannel::Function, masm, iter->fpu());
+ break;
+ case MIRType::RefOrNull:
+ GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
+ break;
+ case MIRType::StackResults:
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(iter.index()));
+ GenPrintPtr(DebugChannel::Function, masm, iter->gpr());
+ break;
+ default:
+ MOZ_CRASH("ion to wasm fast path can only handle i32/f32/f64");
+ }
+ continue;
+ }
+
+ Address dst(masm.getStackPointer(), iter->offsetFromArgBase());
+
+ const JitCallStackArg& stackArg = stackArgs[iter.index()];
+ switch (stackArg.tag()) {
+ case JitCallStackArg::Tag::Imm32:
+ GenPrintf(DebugChannel::Function, masm, "%d ", stackArg.imm32());
+ masm.storePtr(ImmWord(stackArg.imm32()), dst);
+ break;
+ case JitCallStackArg::Tag::GPR:
+ MOZ_ASSERT(stackArg.gpr() != scratch);
+ MOZ_ASSERT(stackArg.gpr() != FramePointer);
+ GenPrintIsize(DebugChannel::Function, masm, stackArg.gpr());
+ masm.storePtr(stackArg.gpr(), dst);
+ break;
+ case JitCallStackArg::Tag::FPU:
+ switch (iter.mirType()) {
+ case MIRType::Double:
+ GenPrintF64(DebugChannel::Function, masm, stackArg.fpu());
+ masm.storeDouble(stackArg.fpu(), dst);
+ break;
+ case MIRType::Float32:
+ GenPrintF32(DebugChannel::Function, masm, stackArg.fpu());
+ masm.storeFloat32(stackArg.fpu(), dst);
+ break;
+ default:
+ MOZ_CRASH(
+ "unexpected MIR type for a float register in wasm fast call");
+ }
+ break;
+ case JitCallStackArg::Tag::Address: {
+ // The address offsets were valid *before* we pushed our frame.
+ Address src = stackArg.addr();
+ src.offset += masm.framePushed() - framePushedAtStart;
+ switch (iter.mirType()) {
+ case MIRType::Double: {
+ ScratchDoubleScope fpscratch(masm);
+            masm.loadDouble(src, fpscratch);
+            GenPrintF64(DebugChannel::Function, masm, fpscratch);
+ masm.storeDouble(fpscratch, dst);
+ break;
+ }
+ case MIRType::Float32: {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.loadFloat32(src, fpscratch);
+ GenPrintF32(DebugChannel::Function, masm, fpscratch);
+ masm.storeFloat32(fpscratch, dst);
+ break;
+ }
+ case MIRType::Int32: {
+ masm.loadPtr(src, scratch);
+ GenPrintIsize(DebugChannel::Function, masm, scratch);
+ masm.storePtr(scratch, dst);
+ break;
+ }
+ case MIRType::RefOrNull: {
+ masm.loadPtr(src, scratch);
+ GenPrintPtr(DebugChannel::Function, masm, scratch);
+ masm.storePtr(scratch, dst);
+ break;
+ }
+ case MIRType::StackResults: {
+ MOZ_CRASH("multi-value in ion to wasm fast path unimplemented");
+ }
+ default: {
+ MOZ_CRASH("unexpected MIR type for a stack slot in wasm fast call");
+ }
+ }
+ break;
+ }
+ case JitCallStackArg::Tag::Undefined: {
+ MOZ_CRASH("can't happen because of arg.kind() check");
+ }
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ // Load tls; from now on, WasmTlsReg is live.
+ masm.movePtr(ImmPtr(inst.tlsData()), WasmTlsReg);
+ masm.storePtr(WasmTlsReg,
+ Address(masm.getStackPointer(), WasmCalleeTLSOffsetBeforeCall));
+ masm.loadWasmPinnedRegsFromTls();
+
+ // Actual call.
+ const CodeTier& codeTier = inst.code().codeTier(inst.code().bestTier());
+ const MetadataTier& metadata = codeTier.metadata();
+ const CodeRange& codeRange = metadata.codeRange(fe);
+ void* callee = codeTier.segment().base() + codeRange.funcUncheckedCallEntry();
+
+ masm.assertStackAlignment(WasmStackAlignment);
+ masm.callJit(ImmPtr(callee));
+#ifdef JS_CODEGEN_ARM64
+  // Wasm does not use the emulated stack pointer, so reinitialize it as it
+  // might be clobbered either by wasm or by any C++ calls within.
+ masm.initPseudoStackPtr();
+#endif
+ masm.assertStackAlignment(WasmStackAlignment);
+
+ masm.branchPtr(Assembler::Equal, FramePointer, Imm32(wasm::FailFP),
+ masm.exceptionLabel());
+
+ // Store the return value in the appropriate place.
+ GenPrintf(DebugChannel::Function, masm, "wasm-function[%d]; returns ",
+ fe.funcIndex());
+ const ValTypeVector& results = fe.funcType().results();
+ if (results.length() == 0) {
+ masm.moveValue(UndefinedValue(), JSReturnOperand);
+ GenPrintf(DebugChannel::Function, masm, "void");
+ } else {
+ MOZ_ASSERT(results.length() == 1, "multi-value return to JS unimplemented");
+ switch (results[0].kind()) {
+ case wasm::ValType::I32:
+ // The return value is in ReturnReg, which is what Ion expects.
+ GenPrintIsize(DebugChannel::Function, masm, ReturnReg);
+#if defined(JS_CODEGEN_X64)
+ if (JitOptions.spectreIndexMasking) {
+ masm.movl(ReturnReg, ReturnReg);
+ }
+#endif
+ break;
+ case wasm::ValType::I64:
+ // The return value is in ReturnReg64, which is what Ion expects.
+ GenPrintI64(DebugChannel::Function, masm, ReturnReg64);
+ break;
+ case wasm::ValType::F32:
+ masm.canonicalizeFloat(ReturnFloat32Reg);
+ GenPrintF32(DebugChannel::Function, masm, ReturnFloat32Reg);
+ break;
+ case wasm::ValType::F64:
+ masm.canonicalizeDouble(ReturnDoubleReg);
+ GenPrintF64(DebugChannel::Function, masm, ReturnDoubleReg);
+ break;
+ case wasm::ValType::Ref:
+ switch (results[0].refTypeKind()) {
+ case wasm::RefType::Func:
+ case wasm::RefType::Eq:
+ // For FuncRef and EqRef, use the AnyRef path for now, since that
+ // will work.
+ case wasm::RefType::Extern:
+ // The call to wasm above preserves the WasmTlsReg, we don't need to
+ // reload it here.
+ UnboxAnyrefIntoValueReg(masm, WasmTlsReg, ReturnReg,
+ JSReturnOperand, WasmJitEntryReturnScratch);
+ break;
+ case wasm::RefType::TypeIndex:
+ MOZ_CRASH("unexpected return type when calling from ion to wasm");
+ }
+ break;
+ case wasm::ValType::V128:
+ MOZ_CRASH("unexpected return type when calling from ion to wasm");
+ }
+ }
+
+ GenPrintf(DebugChannel::Function, masm, "\n");
+
+ // Free args + frame descriptor.
+ masm.leaveExitFrame(bytesNeeded + ExitFrameLayout::Size());
+
+ // If we pushed it, free FramePointer.
+ if (profilingEnabled) {
+ masm.Pop(FramePointer);
+ }
+
+ MOZ_ASSERT(framePushedAtStart == masm.framePushed());
+}
+
+static void StackCopy(MacroAssembler& masm, MIRType type, Register scratch,
+ Address src, Address dst) {
+ if (type == MIRType::Int32) {
+ masm.load32(src, scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.store32(scratch, dst);
+ } else if (type == MIRType::Int64) {
+#if JS_BITS_PER_WORD == 32
+ GenPrintf(DebugChannel::Import, masm, "i64(");
+ masm.load32(LowWord(src), scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.store32(scratch, LowWord(dst));
+ masm.load32(HighWord(src), scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.store32(scratch, HighWord(dst));
+ GenPrintf(DebugChannel::Import, masm, ") ");
+#else
+ Register64 scratch64(scratch);
+ masm.load64(src, scratch64);
+    GenPrintI64(DebugChannel::Import, masm, scratch64);
+ masm.store64(scratch64, dst);
+#endif
+ } else if (type == MIRType::RefOrNull || type == MIRType::Pointer ||
+ type == MIRType::StackResults) {
+ masm.loadPtr(src, scratch);
+ GenPrintPtr(DebugChannel::Import, masm, scratch);
+ masm.storePtr(scratch, dst);
+ } else if (type == MIRType::Float32) {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.loadFloat32(src, fpscratch);
+ GenPrintF32(DebugChannel::Import, masm, fpscratch);
+ masm.storeFloat32(fpscratch, dst);
+ } else if (type == MIRType::Double) {
+ ScratchDoubleScope fpscratch(masm);
+ masm.loadDouble(src, fpscratch);
+ GenPrintF64(DebugChannel::Import, masm, fpscratch);
+ masm.storeDouble(fpscratch, dst);
+#ifdef ENABLE_WASM_SIMD
+ } else if (type == MIRType::Simd128) {
+ ScratchSimd128Scope fpscratch(masm);
+ masm.loadUnalignedSimd128(src, fpscratch);
+ GenPrintV128(DebugChannel::Import, masm, fpscratch);
+ masm.storeUnalignedSimd128(fpscratch, dst);
+#endif
+ } else {
+ MOZ_CRASH("StackCopy: unexpected type");
+ }
+}
+
+using ToValue = bool;
+
+// Note that when toValue is true, this may destroy the values in incoming
+// argument registers as a result of Spectre mitigation.
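+//
+// When toValue is true, each argument is boxed as a JS Value in the output
+// array (as the JS exit paths expect); when it is false, the raw wasm
+// representation is copied instead.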
+static void FillArgumentArrayForExit(
+ MacroAssembler& masm, Register tls, unsigned funcImportIndex,
+ const FuncType& funcType, unsigned argOffset,
+ unsigned offsetFromFPToCallerStackArgs, Register scratch, Register scratch2,
+ Register scratch3, ToValue toValue, Label* throwLabel) {
+ MOZ_ASSERT(scratch != scratch2);
+ MOZ_ASSERT(scratch != scratch3);
+ MOZ_ASSERT(scratch2 != scratch3);
+
+ // This loop does not root the values that are being constructed in
+ // for the arguments. Allocations that are generated by code either
+ // in the loop or called from it should be NoGC allocations.
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; arguments ",
+ funcImportIndex);
+
+ ArgTypeVector args(funcType);
+ for (ABIArgIter i(args); !i.done(); i++) {
+ Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
+
+ MIRType type = i.mirType();
+ MOZ_ASSERT(args.isSyntheticStackResultPointerArg(i.index()) ==
+ (type == MIRType::StackResults));
+ switch (i->kind()) {
+ case ABIArg::GPR:
+ if (type == MIRType::Int32) {
+ GenPrintIsize(DebugChannel::Import, masm, i->gpr());
+ if (toValue) {
+ masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
+ } else {
+ masm.store32(i->gpr(), dst);
+ }
+ } else if (type == MIRType::Int64) {
+ GenPrintI64(DebugChannel::Import, masm, i->gpr64());
+
+ if (toValue) {
+ GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
+ i->gpr64(), scratch, nullptr,
+ throwLabel);
+ masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
+ } else {
+ masm.store64(i->gpr64(), dst);
+ }
+ } else if (type == MIRType::RefOrNull) {
+ if (toValue) {
+ // This works also for FuncRef because it is distinguishable from
+ // a boxed AnyRef.
+ masm.movePtr(i->gpr(), scratch2);
+ UnboxAnyrefIntoValue(masm, tls, scratch2, dst, scratch);
+ } else {
+ GenPrintPtr(DebugChannel::Import, masm, i->gpr());
+ masm.storePtr(i->gpr(), dst);
+ }
+ } else if (type == MIRType::StackResults) {
+ MOZ_ASSERT(!toValue, "Multi-result exit to JIT unimplemented");
+ GenPrintPtr(DebugChannel::Import, masm, i->gpr());
+ masm.storePtr(i->gpr(), dst);
+ } else {
+ MOZ_CRASH("FillArgumentArrayForExit, ABIArg::GPR: unexpected type");
+ }
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ if (type == MIRType::Int64) {
+ GenPrintI64(DebugChannel::Import, masm, i->gpr64());
+
+ if (toValue) {
+ GenerateBigIntInitialization(masm, offsetFromFPToCallerStackArgs,
+ i->gpr64(), scratch, nullptr,
+ throwLabel);
+ masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
+ } else {
+ masm.store64(i->gpr64(), dst);
+ }
+ } else {
+ MOZ_CRASH("wasm uses hardfp for function calls.");
+ }
+ break;
+#endif
+ case ABIArg::FPU: {
+ FloatRegister srcReg = i->fpu();
+ if (type == MIRType::Double) {
+ if (toValue) {
+ // Preserve the NaN pattern in the input.
+ ScratchDoubleScope fpscratch(masm);
+ masm.moveDouble(srcReg, fpscratch);
+ masm.canonicalizeDouble(fpscratch);
+ GenPrintF64(DebugChannel::Import, masm, fpscratch);
+ masm.boxDouble(fpscratch, dst);
+ } else {
+ GenPrintF64(DebugChannel::Import, masm, srcReg);
+ masm.storeDouble(srcReg, dst);
+ }
+ } else if (type == MIRType::Float32) {
+ if (toValue) {
+ // JS::Values can't store Float32, so convert to a Double.
+ ScratchDoubleScope fpscratch(masm);
+ masm.convertFloat32ToDouble(srcReg, fpscratch);
+ masm.canonicalizeDouble(fpscratch);
+ GenPrintF64(DebugChannel::Import, masm, fpscratch);
+ masm.boxDouble(fpscratch, dst);
+ } else {
+ // Preserve the NaN pattern in the input.
+ GenPrintF32(DebugChannel::Import, masm, srcReg);
+ masm.storeFloat32(srcReg, dst);
+ }
+ } else if (type == MIRType::Simd128) {
+ // The value should never escape; the call will be stopped later as
+ // the import is being called. But we should generate something sane
+ // here for the boxed case since a debugger or the stack walker may
+ // observe something.
+ ScratchDoubleScope dscratch(masm);
+ masm.loadConstantDouble(0, dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ if (toValue) {
+ masm.boxDouble(dscratch, dst);
+ } else {
+ masm.storeDouble(dscratch, dst);
+ }
+ } else {
+ MOZ_CRASH("Unknown MIRType in wasm exit stub");
+ }
+ break;
+ }
+ case ABIArg::Stack: {
+ Address src(FramePointer,
+ offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
+ if (toValue) {
+ if (type == MIRType::Int32) {
+ masm.load32(src, scratch);
+ GenPrintIsize(DebugChannel::Import, masm, scratch);
+ masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
+ } else if (type == MIRType::Int64) {
+#if JS_BITS_PER_WORD == 64
+ Register64 scratch64(scratch2);
+#else
+ Register64 scratch64(scratch2, scratch3);
+#endif
+ masm.load64(src, scratch64);
+ GenPrintI64(DebugChannel::Import, masm, scratch64);
+ GenerateBigIntInitialization(masm, sizeof(Frame), scratch64,
+ scratch, nullptr, throwLabel);
+ masm.storeValue(JSVAL_TYPE_BIGINT, scratch, dst);
+ } else if (type == MIRType::RefOrNull) {
+        // This also works for FuncRef because it is distinguishable from a
+        // boxed AnyRef.
+ masm.loadPtr(src, scratch);
+ UnboxAnyrefIntoValue(masm, tls, scratch, dst, scratch2);
+ } else if (IsFloatingPointType(type)) {
+ ScratchDoubleScope dscratch(masm);
+ FloatRegister fscratch = dscratch.asSingle();
+ if (type == MIRType::Float32) {
+ masm.loadFloat32(src, fscratch);
+ masm.convertFloat32ToDouble(fscratch, dscratch);
+ } else {
+ masm.loadDouble(src, dscratch);
+ }
+ masm.canonicalizeDouble(dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.boxDouble(dscratch, dst);
+ } else if (type == MIRType::Simd128) {
+ // The value should never escape; the call will be stopped later as
+ // the import is being called. But we should generate something
+ // sane here for the boxed case since a debugger or the stack walker
+ // may observe something.
+ ScratchDoubleScope dscratch(masm);
+ masm.loadConstantDouble(0, dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.boxDouble(dscratch, dst);
+ } else {
+ MOZ_CRASH(
+ "FillArgumentArrayForExit, ABIArg::Stack: unexpected type");
+ }
+ } else {
+ if (type == MIRType::Simd128) {
+ // As above. StackCopy does not know this trick.
+ ScratchDoubleScope dscratch(masm);
+ masm.loadConstantDouble(0, dscratch);
+ GenPrintF64(DebugChannel::Import, masm, dscratch);
+ masm.storeDouble(dscratch, dst);
+ } else {
+ StackCopy(masm, type, scratch, src, dst);
+ }
+ }
+ break;
+ }
+ case ABIArg::Uninitialized:
+ MOZ_CRASH("Uninitialized ABIArg kind");
+ }
+ }
+ GenPrintf(DebugChannel::Import, masm, "\n");
+}
+
+// Generate a wrapper function with the standard intra-wasm call ABI which
+// simply calls an import. This wrapper function allows any import to be treated
+// like a normal wasm function for the purposes of exports and table calls. In
+// particular, the wrapper function provides:
+// - a table entry, so JS imports can be put into tables
+// - normal entries, so that, if the import is re-exported, an entry stub can
+// be generated and called without any special cases
+static bool GenerateImportFunction(jit::MacroAssembler& masm,
+ const FuncImport& fi, TypeIdDesc funcTypeId,
+ FuncOffsets* offsets) {
+ AssertExpectedSP(masm);
+
+ GenerateFunctionPrologue(masm, funcTypeId, Nothing(), offsets);
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+ const unsigned sizeOfTlsSlot = sizeof(void*);
+ unsigned framePushed = StackDecrementForCall(
+ WasmStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ StackArgBytesForWasmABI(fi.funcType()) + sizeOfTlsSlot);
+ masm.wasmReserveStackChecked(framePushed, BytecodeOffset(0));
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+
+ masm.storePtr(WasmTlsReg,
+ Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot));
+
+  // The argument register state is already set up by our caller. We just need
+  // to be sure not to clobber it before the call.
+ Register scratch = ABINonArgReg0;
+
+  // Copy our frame's stack arguments to the callee frame's stack arguments.
+ unsigned offsetFromFPToCallerStackArgs = sizeof(Frame);
+ ArgTypeVector args(fi.funcType());
+ for (WasmABIArgIter i(args); !i.done(); i++) {
+ if (i->kind() != ABIArg::Stack) {
+ continue;
+ }
+
+ Address src(FramePointer,
+ offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
+ Address dst(masm.getStackPointer(), i->offsetFromArgBase());
+ GenPrintf(DebugChannel::Import, masm,
+ "calling exotic import function with arguments: ");
+ StackCopy(masm, i.mirType(), scratch, src, dst);
+ GenPrintf(DebugChannel::Import, masm, "\n");
+ }
+
+ // Call the import exit stub.
+ CallSiteDesc desc(CallSiteDesc::Dynamic);
+ MoveSPForJitABI(masm);
+ masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
+
+ // Restore the TLS register and pinned regs, per wasm function ABI.
+ masm.loadPtr(Address(masm.getStackPointer(), framePushed - sizeOfTlsSlot),
+ WasmTlsReg);
+ masm.loadWasmPinnedRegsFromTls();
+
+ // Restore cx->realm.
+ masm.switchToWasmTlsRealm(ABINonArgReturnReg0, ABINonArgReturnReg1);
+
+ GenerateFunctionEpilogue(masm, framePushed, offsets);
+ return FinishOffsets(masm, offsets);
+}
+
+static const unsigned STUBS_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+
+bool wasm::GenerateImportFunctions(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ CompiledCode* code) {
+ LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
+ TempAllocator alloc(&lifo);
+ WasmMacroAssembler masm(alloc, env);
+
+ for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
+ const FuncImport& fi = imports[funcIndex];
+
+ FuncOffsets offsets;
+ if (!GenerateImportFunction(masm, fi, *env.funcs[funcIndex].typeId,
+ &offsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0,
+ offsets)) {
+ return false;
+ }
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
+
+// Generate a stub that is called via the internal ABI derived from the
+// signature of the import and calls into an appropriate callImport C++
+// function, having boxed all the ABI arguments into a homogeneous Value array.
+static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
+ uint32_t funcImportIndex,
+ Label* throwLabel,
+ CallableOffsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+
+ // Argument types for Instance::callImport_*:
+ static const MIRType typeArray[] = {MIRType::Pointer, // Instance*
+ MIRType::Pointer, // funcImportIndex
+ MIRType::Int32, // argc
+ MIRType::Pointer}; // argv
+ MIRTypeVector invokeArgTypes;
+ MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, std::size(typeArray)));
+
+ // At the point of the call, the stack layout shall be (sp grows to the left):
+ // | stack args | padding | argv[] | padding | retaddr | caller stack args |
+ // The padding between stack args and argv ensures that argv is aligned. The
+ // padding between argv and retaddr ensures that sp is aligned.
+ unsigned argOffset =
+ AlignBytes(StackArgBytesForNativeABI(invokeArgTypes), sizeof(double));
+ // The abiArgCount includes a stack result pointer argument if needed.
+ unsigned abiArgCount = ArgTypeVector(fi.funcType()).lengthWithStackResults();
+ unsigned argBytes = std::max<size_t>(1, abiArgCount) * sizeof(Value);
+ unsigned framePushed =
+ StackDecrementForCall(ABIStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ argOffset + argBytes);
+
+ GenerateExitPrologue(masm, framePushed, ExitReason::Fixed::ImportInterp,
+ offsets);
+
+ // Fill the argument array.
+ unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
+ Register scratch = ABINonArgReturnReg0;
+ Register scratch2 = ABINonArgReturnReg1;
+ // The scratch3 reg does not need to be non-volatile, but has to be
+ // distinct from scratch & scratch2.
+ Register scratch3 = ABINonVolatileReg;
+ FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
+ argOffset, offsetFromFPToCallerStackArgs, scratch,
+ scratch2, scratch3, ToValue(false), throwLabel);
+
+ // Prepare the arguments for the call to Instance::callImport_*.
+ ABIArgMIRTypeIter i(invokeArgTypes);
+
+ // argument 0: Instance*
+ Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
+ if (i->kind() == ABIArg::GPR) {
+ masm.loadPtr(instancePtr, i->gpr());
+ } else {
+ masm.loadPtr(instancePtr, scratch);
+ masm.storePtr(scratch,
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+
+ // argument 1: funcImportIndex
+ if (i->kind() == ABIArg::GPR) {
+ masm.mov(ImmWord(funcImportIndex), i->gpr());
+ } else {
+ masm.store32(Imm32(funcImportIndex),
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+
+ // argument 2: argc
+ unsigned argc = abiArgCount;
+ if (i->kind() == ABIArg::GPR) {
+ masm.mov(ImmWord(argc), i->gpr());
+ } else {
+ masm.store32(Imm32(argc),
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+
+ // argument 3: argv
+ Address argv(masm.getStackPointer(), argOffset);
+ if (i->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, i->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, scratch);
+ masm.storePtr(scratch,
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+ MOZ_ASSERT(i.done());
+
+ // Make the call, test whether it succeeded, and extract the return value.
+ AssertStackAlignment(masm, ABIStackAlignment);
+ masm.call(SymbolicAddress::CallImport_General);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+ ResultType resultType = ResultType::Vector(fi.funcType().results());
+ ValType registerResultType;
+ for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
+ if (iter.cur().inRegister()) {
+ MOZ_ASSERT(!registerResultType.isValid());
+ registerResultType = iter.cur().type();
+ }
+ }
+ if (!registerResultType.isValid()) {
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintf(DebugChannel::Import, masm, "void");
+ } else {
+ switch (registerResultType.kind()) {
+ case ValType::I32:
+ masm.load32(argv, ReturnReg);
+ // No spectre.index_masking is required, as we know the value comes from
+ // an i32 load.
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
+ break;
+ case ValType::I64:
+ masm.load64(argv, ReturnReg64);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintI64(DebugChannel::Import, masm, ReturnReg64);
+ break;
+ case ValType::V128:
+ // Note, CallImport_V128 currently always throws, so we should never
+ // reach this point.
+ masm.breakpoint();
+ break;
+ case ValType::F32:
+ masm.loadFloat32(argv, ReturnFloat32Reg);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
+ break;
+ case ValType::F64:
+ masm.loadDouble(argv, ReturnDoubleReg);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
+ break;
+ case ValType::Ref:
+ switch (registerResultType.refTypeKind()) {
+ case RefType::Func:
+ masm.loadPtr(argv, ReturnReg);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
+ break;
+ case RefType::Extern:
+ case RefType::Eq:
+ masm.loadPtr(argv, ReturnReg);
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+ GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
+ break;
+ case RefType::TypeIndex:
+ MOZ_CRASH("No Ref support here yet");
+ }
+ break;
+ }
+ }
+
+ GenPrintf(DebugChannel::Import, masm, "\n");
+
+ // The native ABI preserves the TLS, heap and global registers since they
+ // are non-volatile.
+ MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
+ defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
+#endif
+
+ GenerateExitEpilogue(masm, framePushed, ExitReason::Fixed::ImportInterp,
+ offsets);
+
+ return FinishOffsets(masm, offsets);
+}
+
+// Generate a stub that is called via the internal ABI derived from the
+// signature of the import and calls into a compatible JIT function,
+// having boxed all the ABI arguments into the JIT stack frame layout.
+static bool GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi,
+ unsigned funcImportIndex, Label* throwLabel,
+ JitExitOffsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+
+ // JIT calls use the following stack layout (sp grows to the left):
+ // | WasmToJSJitFrameLayout | this | arg1..N | saved Tls |
+ // Unlike most ABIs, the JIT ABI requires that sp be JitStackAlignment-
+ // aligned *after* pushing the return address.
+ static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
+ const unsigned sizeOfTlsSlot = sizeof(void*);
+ const unsigned sizeOfRetAddr = sizeof(void*);
+ const unsigned sizeOfPreFrame =
+ WasmToJSJitFrameLayout::Size() - sizeOfRetAddr;
+ const unsigned sizeOfThisAndArgs =
+ (1 + fi.funcType().args().length()) * sizeof(Value);
+ const unsigned totalJitFrameBytes =
+ sizeOfRetAddr + sizeOfPreFrame + sizeOfThisAndArgs + sizeOfTlsSlot;
+ const unsigned jitFramePushed =
+ StackDecrementForCall(JitStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ totalJitFrameBytes) -
+ sizeOfRetAddr;
+ const unsigned sizeOfThisAndArgsAndPadding = jitFramePushed - sizeOfPreFrame;
+
+ // On ARM64 we must align the SP to a 16-byte boundary.
+#ifdef JS_CODEGEN_ARM64
+ const unsigned frameAlignExtra = sizeof(void*);
+#else
+ const unsigned frameAlignExtra = 0;
+#endif
+
+ GenerateJitExitPrologue(masm, jitFramePushed + frameAlignExtra, offsets);
+
+ // 1. Descriptor.
+ size_t argOffset = frameAlignExtra;
+ uint32_t descriptor =
+ MakeFrameDescriptor(sizeOfThisAndArgsAndPadding, FrameType::WasmToJSJit,
+ WasmToJSJitFrameLayout::Size());
+ masm.storePtr(ImmWord(uintptr_t(descriptor)),
+ Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(size_t);
+
+ // 2. Callee, part 1 -- need the callee register for argument filling, so
+ // record offset here and set up callee later.
+ size_t calleeArgOffset = argOffset;
+ argOffset += sizeof(size_t);
+
+ // 3. Argc.
+ unsigned argc = fi.funcType().args().length();
+ masm.storePtr(ImmWord(uintptr_t(argc)),
+ Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(size_t);
+ MOZ_ASSERT(argOffset == sizeOfPreFrame + frameAlignExtra);
+
+ // 4. |this| value.
+ masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(Value);
+
+ // 5. Fill the arguments.
+ const uint32_t offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
+ Register scratch = ABINonArgReturnReg1; // Repeatedly clobbered
+ Register scratch2 = ABINonArgReturnReg0; // Reused as callee below
+ // The scratch3 reg does not need to be non-volatile, but has to be
+ // distinct from scratch & scratch2.
+ Register scratch3 = ABINonVolatileReg;
+ FillArgumentArrayForExit(masm, WasmTlsReg, funcImportIndex, fi.funcType(),
+ argOffset, offsetFromFPToCallerStackArgs, scratch,
+ scratch2, scratch3, ToValue(true), throwLabel);
+ argOffset += fi.funcType().args().length() * sizeof(Value);
+ MOZ_ASSERT(argOffset == sizeOfThisAndArgs + sizeOfPreFrame + frameAlignExtra);
+
+ // Preserve Tls because the JIT callee clobbers it.
+ const size_t savedTlsOffset = argOffset;
+ masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), savedTlsOffset));
+
+ // 2. Callee, part 2 -- now that the register is free, set up the callee.
+ Register callee = ABINonArgReturnReg0; // Live until call
+
+ // 2.1. Get JSFunction callee.
+ masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, fun),
+ callee);
+
+ // 2.2. Save callee.
+ masm.storePtr(callee, Address(masm.getStackPointer(), calleeArgOffset));
+
+ // 6. Check if we need to rectify arguments.
+ masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
+
+ Label rectify;
+ masm.branch32(Assembler::Above, scratch, Imm32(fi.funcType().args().length()),
+ &rectify);
+
+ // 7. If we haven't rectified arguments, load callee executable entry point.
+
+ masm.loadJitCodeRaw(callee, callee);
+
+ Label rejoinBeforeCall;
+ masm.bind(&rejoinBeforeCall);
+
+ AssertStackAlignment(masm, JitStackAlignment,
+ sizeOfRetAddr + frameAlignExtra);
+#ifdef JS_CODEGEN_ARM64
+ // Conform to JIT ABI.
+ masm.addToStackPtr(Imm32(8));
+#endif
+ MoveSPForJitABI(masm);
+ masm.callJitNoProfiler(callee);
+#ifdef JS_CODEGEN_ARM64
+ // Conform to platform conventions - align the SP.
+ masm.subFromStackPtr(Imm32(8));
+#endif
+
+ // Note that there might be a GC thing in the JSReturnOperand now.
+ // In all the code paths from here:
+ // - either the value is unboxed because it was a primitive and we don't
+ // need to worry about rooting anymore.
+ // - or the value needs to be rooted, but nothing can cause a GC between
+ // here and CoerceInPlace, which roots before coercing to a primitive.
+
+ // The JIT callee clobbers all registers, including WasmTlsReg and
+ // FramePointer, so restore those here. During this sequence of
+ // instructions, FP can't be trusted by the profiling frame iterator.
+ offsets->untrustedFPStart = masm.currentOffset();
+ AssertStackAlignment(masm, JitStackAlignment,
+ sizeOfRetAddr + frameAlignExtra);
+
+ masm.loadPtr(Address(masm.getStackPointer(), savedTlsOffset), WasmTlsReg);
+ masm.moveStackPtrTo(FramePointer);
+ masm.addPtr(Imm32(masm.framePushed()), FramePointer);
+ offsets->untrustedFPEnd = masm.currentOffset();
+
+ // As explained above, the frame was aligned for the JIT ABI such that
+ // (sp + sizeof(void*)) % JitStackAlignment == 0
+ // But now we possibly want to call one of several different C++ functions,
+ // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
+ static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
+#ifdef JS_CODEGEN_ARM64
+ // We've already allocated the extra space for frame alignment.
+ static_assert(sizeOfRetAddr == frameAlignExtra, "ARM64 SP alignment");
+#else
+ masm.reserveStack(sizeOfRetAddr);
+#endif
+ unsigned nativeFramePushed = masm.framePushed();
+ AssertStackAlignment(masm, ABIStackAlignment);
+
+#ifdef DEBUG
+ {
+ Label ok;
+ masm.branchTestMagic(Assembler::NotEqual, JSReturnOperand, &ok);
+ masm.breakpoint();
+ masm.bind(&ok);
+ }
+#endif
+
+ GenPrintf(DebugChannel::Import, masm, "wasm-import[%u]; returns ",
+ funcImportIndex);
+
+ Label oolConvert;
+ const ValTypeVector& results = fi.funcType().results();
+ if (results.length() == 0) {
+ GenPrintf(DebugChannel::Import, masm, "void");
+ } else {
+ MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
+ switch (results[0].kind()) {
+ case ValType::I32:
+ // No spectre.index_masking required, as the return value does not come
+ // to us in ReturnReg.
+ masm.truncateValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg,
+ &oolConvert);
+ GenPrintIsize(DebugChannel::Import, masm, ReturnReg);
+ break;
+ case ValType::I64:
+ // No fastpath for now, go immediately to ool case
+ masm.jump(&oolConvert);
+ break;
+ case ValType::V128:
+ // Unreachable as callImport should not call the stub.
+ masm.breakpoint();
+ break;
+ case ValType::F32:
+ masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg,
+ &oolConvert);
+ GenPrintF32(DebugChannel::Import, masm, ReturnFloat32Reg);
+ break;
+ case ValType::F64:
+ masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg,
+ &oolConvert);
+ GenPrintF64(DebugChannel::Import, masm, ReturnDoubleReg);
+ break;
+ case ValType::Ref:
+ switch (results[0].refTypeKind()) {
+ case RefType::Extern:
+ BoxValueIntoAnyref(masm, JSReturnOperand, ReturnReg, &oolConvert);
+ GenPrintPtr(DebugChannel::Import, masm, ReturnReg);
+ break;
+ case RefType::Func:
+ case RefType::Eq:
+ case RefType::TypeIndex:
+ MOZ_CRASH("typed reference returned by import (jit exit) NYI");
+ }
+ break;
+ }
+ }
+
+ GenPrintf(DebugChannel::Import, masm, "\n");
+
+ Label done;
+ masm.bind(&done);
+
+ GenerateJitExitEpilogue(masm, masm.framePushed(), offsets);
+
+ {
+ // Call the arguments rectifier.
+ masm.bind(&rectify);
+ masm.loadPtr(Address(WasmTlsReg, offsetof(TlsData, instance)), callee);
+ masm.loadPtr(Address(callee, Instance::offsetOfJSJitArgsRectifier()),
+ callee);
+ masm.jump(&rejoinBeforeCall);
+ }
+
+ if (oolConvert.used()) {
+ masm.bind(&oolConvert);
+ masm.setFramePushed(nativeFramePushed);
+
+ // Coercion calls use the following stack layout (sp grows to the left):
+ // | args | padding | Value argv[1] | padding | exit Frame |
+ MIRTypeVector coerceArgTypes;
+ MOZ_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
+ unsigned offsetToCoerceArgv =
+ AlignBytes(StackArgBytesForNativeABI(coerceArgTypes), sizeof(Value));
+ MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
+ AssertStackAlignment(masm, ABIStackAlignment);
+
+ // Store return value into argv[0].
+ masm.storeValue(JSReturnOperand,
+ Address(masm.getStackPointer(), offsetToCoerceArgv));
+
+ // From this point, it's safe to reuse the scratch register (which
+ // might be part of the JSReturnOperand).
+
+ // The JIT might have clobbered exitFP at this point. Since there's
+ // going to be a CoerceInPlace call, pretend we're still doing the JIT
+ // call by restoring our tagged exitFP.
+ SetExitFP(masm, ExitReason::Fixed::ImportJit, scratch);
+
+ // argument 0: argv
+ ABIArgMIRTypeIter i(coerceArgTypes);
+ Address argv(masm.getStackPointer(), offsetToCoerceArgv);
+ if (i->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, i->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, scratch);
+ masm.storePtr(scratch,
+ Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+ MOZ_ASSERT(i.done());
+
+ // Call coercion function. Note that right after the call, the value of
+ // FP is correct because FP is non-volatile in the native ABI.
+ AssertStackAlignment(masm, ABIStackAlignment);
+ const ValTypeVector& results = fi.funcType().results();
+ if (results.length() > 0) {
+ // NOTE that once there can be more than one result and we can box some of
+ // the results (as we must for AnyRef), pointer and already-boxed results
+ // must be rooted while subsequent results are boxed.
+ MOZ_ASSERT(results.length() == 1, "multi-value return unimplemented");
+ switch (results[0].kind()) {
+ case ValType::I32:
+ masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv),
+ ReturnReg);
+ // No spectre.index_masking required, as we generate a known-good
+ // value in a safe way here.
+ break;
+ case ValType::I64: {
+ masm.call(SymbolicAddress::CoerceInPlace_ToBigInt);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ Address argv(masm.getStackPointer(), offsetToCoerceArgv);
+ masm.unboxBigInt(argv, scratch);
+ masm.loadBigInt64(scratch, ReturnReg64);
+ break;
+ }
+ case ValType::F64:
+ case ValType::F32:
+ masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.unboxDouble(Address(masm.getStackPointer(), offsetToCoerceArgv),
+ ReturnDoubleReg);
+ if (results[0].kind() == ValType::F32) {
+ masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
+ }
+ break;
+ case ValType::Ref:
+ switch (results[0].refTypeKind()) {
+ case RefType::Extern:
+ masm.call(SymbolicAddress::BoxValue_Anyref);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg,
+ throwLabel);
+ break;
+ case RefType::Func:
+ case RefType::Eq:
+ case RefType::TypeIndex:
+ MOZ_CRASH("Unsupported convert type");
+ }
+ break;
+ default:
+ MOZ_CRASH("Unsupported convert type");
+ }
+ }
+
+ // Maintain the invariant that exitFP is either unset or not set to a
+ // wasm tagged exitFP, per the jit exit contract.
+ ClearExitFP(masm, scratch);
+
+ masm.jump(&done);
+ masm.setFramePushed(0);
+ }
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ return FinishOffsets(masm, offsets);
+}
+
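+// Adapter that exposes the argument types packed into an ABIFunctionType as an
+// indexable sequence of MIRTypes, suitable for ABIArgIter. The constructor
+// strips off the return type held in the lowest ArgType_Shift bits; each
+// remaining group of bits encodes one argument, with argument 0 in the lowest
+// remaining group.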
+struct ABIFunctionArgs {
+ ABIFunctionType abiType;
+ size_t len;
+
+ explicit ABIFunctionArgs(ABIFunctionType sig)
+ : abiType(ABIFunctionType(sig >> ArgType_Shift)) {
+ len = 0;
+ uint32_t i = uint32_t(abiType);
+ while (i) {
+ i = i >> ArgType_Shift;
+ len++;
+ }
+ }
+
+ size_t length() const { return len; }
+
+ MIRType operator[](size_t i) const {
+ MOZ_ASSERT(i < len);
+ uint32_t abi = uint32_t(abiType);
+ while (i--) {
+ abi = abi >> ArgType_Shift;
+ }
+ return ToMIRType(ABIArgType(abi & ArgType_Mask));
+ }
+};
+
+bool wasm::GenerateBuiltinThunk(MacroAssembler& masm, ABIFunctionType abiType,
+ ExitReason exitReason, void* funcPtr,
+ CallableOffsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+
+ ABIFunctionArgs args(abiType);
+ uint32_t framePushed =
+ StackDecrementForCall(ABIStackAlignment,
+ sizeof(Frame), // pushed by prologue
+ StackArgBytesForNativeABI(args));
+
+ GenerateExitPrologue(masm, framePushed, exitReason, offsets);
+
+ // Copy out and convert caller arguments, if needed.
+ unsigned offsetFromFPToCallerStackArgs = sizeof(FrameWithTls);
+ Register scratch = ABINonArgReturnReg0;
+ for (ABIArgIter i(args); !i.done(); i++) {
+ if (i->argInRegister()) {
+#ifdef JS_CODEGEN_ARM
+ // Non hard-fp passes the args values in GPRs.
+ if (!UseHardFpABI() && IsFloatingPointType(i.mirType())) {
+ FloatRegister input = i->fpu();
+ if (i.mirType() == MIRType::Float32) {
+ masm.ma_vxfer(input, Register::FromCode(input.id()));
+ } else if (i.mirType() == MIRType::Double) {
+ uint32_t regId = input.singleOverlay().id();
+ masm.ma_vxfer(input, Register::FromCode(regId),
+ Register::FromCode(regId + 1));
+ }
+ }
+#endif
+ continue;
+ }
+
+ Address src(FramePointer,
+ offsetFromFPToCallerStackArgs + i->offsetFromArgBase());
+ Address dst(masm.getStackPointer(), i->offsetFromArgBase());
+ StackCopy(masm, i.mirType(), scratch, src, dst);
+ }
+
+ AssertStackAlignment(masm, ABIStackAlignment);
+ MoveSPForJitABI(masm);
+ masm.call(ImmPtr(funcPtr, ImmPtr::NoCheckToken()));
+
+#if defined(JS_CODEGEN_X64)
+ // No spectre.index_masking is required, as the caller will mask.
+#elif defined(JS_CODEGEN_X86)
+ // x86 passes the return value on the x87 FP stack.
+ Operand op(esp, 0);
+ MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
+ if (retType == MIRType::Float32) {
+ masm.fstp32(op);
+ masm.loadFloat32(op, ReturnFloat32Reg);
+ } else if (retType == MIRType::Double) {
+ masm.fstp(op);
+ masm.loadDouble(op, ReturnDoubleReg);
+ }
+#elif defined(JS_CODEGEN_ARM)
+ // Non hard-fp passes the return values in GPRs.
+ MIRType retType = ToMIRType(ABIArgType(abiType & ArgType_Mask));
+ if (!UseHardFpABI() && IsFloatingPointType(retType)) {
+ masm.ma_vxfer(r0, r1, d0);
+ }
+#endif
+
+ GenerateExitEpilogue(masm, framePushed, exitReason, offsets);
+ return FinishOffsets(masm, offsets);
+}
+
+#if defined(JS_CODEGEN_ARM)
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((Registers::SetType(1) << Registers::sp) |
+ (Registers::SetType(1) << Registers::pc))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "high lanes of SIMD registers need to be saved too."
+# endif
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((Registers::SetType(1) << Registers::k0) |
+ (Registers::SetType(1) << Registers::k1) |
+ (Registers::SetType(1) << Registers::sp) |
+ (Registers::SetType(1) << Registers::zero))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "high lanes of SIMD registers need to be saved too."
+# endif
+#elif defined(JS_CODEGEN_ARM64)
+// We assume that traps do not happen while lr is live. This both ensures that
+// the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
+// and gives us a register to clobber in the return path.
+//
+// Note there are no SIMD registers in the set; the doubles in the set stand in
+// for SIMD registers, which are pushed as appropriate. See comments above at
+// PushRegsInMask and the lengthy comment in Architecture-arm64.h.
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~((Registers::SetType(1) << Registers::StackPointer) |
+ (Registers::SetType(1) << Registers::lr))),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask));
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+// It's correct to use FloatRegisters::AllMask even when SIMD is not enabled;
+// PushRegsInMask strips out the high lanes of the XMM registers in this case,
+// while the singles will be stripped as they are aliased by the larger doubles.
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask &
+ ~(Registers::SetType(1) << Registers::StackPointer)),
+ FloatRegisterSet(FloatRegisters::AllMask));
+#else
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
+# ifdef ENABLE_WASM_SIMD
+# error "no SIMD support"
+# endif
+#endif
+
+// Generate a MachineState which describes the locations of the GPRs as saved
+// by GenerateTrapExit. FP registers are ignored. Note that the values
+// stored in the MachineState are offsets in words downwards from the top of
+// the save area. That is, a higher value implies a lower address.
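+// Concretely, the dummy return-address word(s) pushed by WasmPush() occupy the
+// first WasmPushSize / sizeof(void*) offsets, and each GPR visited by the
+// backward register iterator takes the next offset, i.e. the next lower
+// address.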
+void wasm::GenerateTrapExitMachineState(MachineState* machine,
+ size_t* numWords) {
+ // This is the number of words pushed by the initial WasmPush().
+ *numWords = WasmPushSize / sizeof(void*);
+ MOZ_ASSERT(*numWords == TrapExitDummyValueOffsetFromTop + 1);
+
+ // And these correspond to the PushRegsInMask() that immediately follows.
+ for (GeneralRegisterBackwardIterator iter(RegsToPreserve.gprs()); iter.more();
+ ++iter) {
+ machine->setRegisterLocation(*iter,
+ reinterpret_cast<uintptr_t*>(*numWords));
+ (*numWords)++;
+ }
+}
+
+// Generate a stub which calls WasmHandleTrap() and can be executed by having
+// the signal handler redirect PC from any trapping instruction.
+static bool GenerateTrapExit(MacroAssembler& masm, Label* throwLabel,
+ Offsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+
+ masm.setFramePushed(0);
+
+ offsets->begin = masm.currentOffset();
+
+ // Traps can only happen at well-defined program points. However, since
+ // traps may resume and the optimal assumption for the surrounding code is
+ // that registers are not clobbered, we need to preserve all registers in
+ // the trap exit. One simplifying assumption is that flags may be clobbered.
+ // Push a dummy word to use as return address below.
+ WasmPush(masm, ImmWord(TrapExitDummyValue));
+ unsigned framePushedBeforePreserve = masm.framePushed();
+ PushRegsInMask(masm, RegsToPreserve);
+ unsigned offsetOfReturnWord = masm.framePushed() - framePushedBeforePreserve;
+
+ // We know that StackPointer is word-aligned, but not necessarily
+ // stack-aligned, so we need to align it dynamically.
+ Register preAlignStackPointer = ABINonVolatileReg;
+ masm.moveStackPtrTo(preAlignStackPointer);
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ if (ShadowStackSpace) {
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+ }
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::HandleTrap);
+
+ // WasmHandleTrap returns null if control should transfer to the throw stub.
+ masm.branchTestPtr(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+
+ // Otherwise, the return value is the TrapData::resumePC we must jump to.
+ // We must restore register state before jumping, which will clobber
+ // ReturnReg, so store ReturnReg in the above-reserved stack slot which we
+ // use to jump to via ret.
+ masm.moveToStackPtr(preAlignStackPointer);
+ masm.storePtr(ReturnReg, Address(masm.getStackPointer(), offsetOfReturnWord));
+ PopRegsInMask(masm, RegsToPreserve);
+#ifdef JS_CODEGEN_ARM64
+ WasmPop(masm, lr);
+ masm.abiret();
+#else
+ masm.ret();
+#endif
+
+ return FinishOffsets(masm, offsets);
+}
+
+// Generate a stub that restores the stack pointer to what it was on entry to
+// the wasm activation, sets the return register to 'false' and then executes a
+// return which will return from this wasm activation to the caller. This stub
+// should only be called after the caller has reported an error.
+static bool GenerateThrowStub(MacroAssembler& masm, Label* throwLabel,
+ Offsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+
+ masm.bind(throwLabel);
+
+ offsets->begin = masm.currentOffset();
+
+ // Conservatively, the stack pointer can be unaligned and we must align it
+ // dynamically.
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ if (ShadowStackSpace) {
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+ }
+
+ // WasmHandleThrow unwinds JitActivation::wasmExitFP() and returns the
+ // address of the return address on the stack this stub should return to.
+ // Set the FramePointer to a magic value to indicate a return by throw.
+ masm.call(SymbolicAddress::HandleThrow);
+ masm.moveToStackPtr(ReturnReg);
+ masm.move32(Imm32(FailFP), FramePointer);
+#ifdef JS_CODEGEN_ARM64
+ masm.loadPtr(Address(ReturnReg, 0), lr);
+ masm.addToStackPtr(Imm32(8));
+ masm.abiret();
+#else
+ masm.ret();
+#endif
+
+ return FinishOffsets(masm, offsets);
+}
+
+static const LiveRegisterSet AllAllocatableRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllocatableMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+// Generate a stub that handles toggleable enter/leave frame traps or
+// breakpoints. The trap records the frame pointer (via GenerateExitPrologue)
+// and saves most registers so as not to affect the code generated by
+// WasmBaselineCompile.
+static bool GenerateDebugTrapStub(MacroAssembler& masm, Label* throwLabel,
+ CallableOffsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.haltingAlign(CodeAlignment);
+ masm.setFramePushed(0);
+
+ GenerateExitPrologue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);
+
+ // Save all registers used between baseline compiler operations.
+ PushRegsInMask(masm, AllAllocatableRegs);
+
+ uint32_t framePushed = masm.framePushed();
+
+  // This method might be called with an unaligned stack -- align it and save
+  // the old stack pointer at the top.
+#ifdef JS_CODEGEN_ARM64
+ // On ARM64 however the stack is always aligned.
+ static_assert(ABIStackAlignment == 16, "ARM64 SP alignment");
+#else
+ Register scratch = ABINonArgReturnReg0;
+ masm.moveStackPtrTo(scratch);
+ masm.subFromStackPtr(Imm32(sizeof(intptr_t)));
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ masm.storePtr(scratch, Address(masm.getStackPointer(), 0));
+#endif
+
+ if (ShadowStackSpace) {
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+ }
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::HandleDebugTrap);
+
+ masm.branchIfFalseBool(ReturnReg, throwLabel);
+
+ if (ShadowStackSpace) {
+ masm.addToStackPtr(Imm32(ShadowStackSpace));
+ }
+#ifndef JS_CODEGEN_ARM64
+ masm.Pop(scratch);
+ masm.moveToStackPtr(scratch);
+#endif
+
+ masm.setFramePushed(framePushed);
+ PopRegsInMask(masm, AllAllocatableRegs);
+
+ GenerateExitEpilogue(masm, 0, ExitReason::Fixed::DebugTrap, offsets);
+
+ return FinishOffsets(masm, offsets);
+}
+
+bool wasm::GenerateEntryStubs(MacroAssembler& masm, size_t funcExportIndex,
+ const FuncExport& fe, const Maybe<ImmPtr>& callee,
+ bool isAsmJS, CodeRangeVector* codeRanges) {
+ MOZ_ASSERT(!callee == fe.hasEagerStubs());
+ MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());
+
+ Offsets offsets;
+ if (!GenerateInterpEntry(masm, fe, callee, &offsets)) {
+ return false;
+ }
+ if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(),
+ offsets)) {
+ return false;
+ }
+
+ if (isAsmJS || fe.funcType().temporarilyUnsupportedReftypeForEntry()) {
+ return true;
+ }
+
+ // SIMD spec requires JS calls to exports with V128 in the signature to throw.
+ if (fe.funcType().hasUnexposableArgOrRet()) {
+ return true;
+ }
+
+ // Returning multiple values to JS JIT code not yet implemented (see
+ // bug 1595031).
+ if (fe.funcType().temporarilyUnsupportedResultCountForJitEntry()) {
+ return true;
+ }
+
+ if (!GenerateJitEntry(masm, funcExportIndex, fe, callee, &offsets)) {
+ return false;
+ }
+ if (!codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(), offsets)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool wasm::GenerateProvisionalJitEntryStub(MacroAssembler& masm,
+ Offsets* offsets) {
+ AssertExpectedSP(masm);
+ masm.setFramePushed(0);
+ offsets->begin = masm.currentOffset();
+
+#ifdef JS_CODEGEN_ARM64
+ // Unaligned ABI calls require SP+PSP, but our mode here is SP-only
+ masm.SetStackPointer64(PseudoStackPointer64);
+ masm.Mov(PseudoStackPointer64, sp);
+#endif
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::Volatile());
+ Register temp = regs.takeAny();
+
+ using Fn = void* (*)();
+ masm.setupUnalignedABICall(temp);
+ masm.callWithABI<Fn, GetContextSensitiveInterpreterStub>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.popReturnAddress();
+#endif
+
+ masm.jump(ReturnReg);
+
+#ifdef JS_CODEGEN_ARM64
+ // Undo the SP+PSP mode
+ masm.SetStackPointer64(sp);
+#endif
+
+ if (!FinishOffsets(masm, offsets)) {
+ return false;
+ }
+ return true;
+}
+
+bool wasm::GenerateStubs(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ const FuncExportVector& exports, CompiledCode* code) {
+ LifoAlloc lifo(STUBS_LIFO_DEFAULT_CHUNK_SIZE);
+ TempAllocator alloc(&lifo);
+ WasmMacroAssembler masm(alloc, env);
+
+ // Swap in already-allocated empty vectors to avoid malloc/free.
+ if (!code->swap(masm)) {
+ return false;
+ }
+
+ Label throwLabel;
+
+ JitSpew(JitSpew_Codegen, "# Emitting wasm import stubs");
+
+ for (uint32_t funcIndex = 0; funcIndex < imports.length(); funcIndex++) {
+ const FuncImport& fi = imports[funcIndex];
+
+ CallableOffsets interpOffsets;
+ if (!GenerateImportInterpExit(masm, fi, funcIndex, &throwLabel,
+ &interpOffsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex,
+ interpOffsets)) {
+ return false;
+ }
+
+ // SIMD spec requires calls to JS functions with V128 in the signature to
+ // throw.
+ if (fi.funcType().hasUnexposableArgOrRet()) {
+ continue;
+ }
+
+ if (fi.funcType().temporarilyUnsupportedReftypeForExit()) {
+ continue;
+ }
+
+ // Exit to JS JIT code returning multiple values not yet implemented
+ // (see bug 1595031).
+ if (fi.funcType().temporarilyUnsupportedResultCountForJitExit()) {
+ continue;
+ }
+
+ JitExitOffsets jitOffsets;
+ if (!GenerateImportJitExit(masm, fi, funcIndex, &throwLabel, &jitOffsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(funcIndex, jitOffsets)) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");
+
+ Maybe<ImmPtr> noAbsolute;
+ for (size_t i = 0; i < exports.length(); i++) {
+ const FuncExport& fe = exports[i];
+ if (!fe.hasEagerStubs()) {
+ continue;
+ }
+ if (!GenerateEntryStubs(masm, i, fe, noAbsolute, env.isAsmJS(),
+ &code->codeRanges)) {
+ return false;
+ }
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting wasm exit stubs");
+
+ Offsets offsets;
+
+ if (!GenerateTrapExit(masm, &throwLabel, &offsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::TrapExit, offsets)) {
+ return false;
+ }
+
+ CallableOffsets callableOffsets;
+ if (!GenerateDebugTrapStub(masm, &throwLabel, &callableOffsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::DebugTrap, callableOffsets)) {
+ return false;
+ }
+
+ if (!GenerateThrowStub(masm, &throwLabel, &offsets)) {
+ return false;
+ }
+ if (!code->codeRanges.emplaceBack(CodeRange::Throw, offsets)) {
+ return false;
+ }
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ return code->swap(masm);
+}
diff --git a/js/src/wasm/WasmStubs.h b/js/src/wasm/WasmStubs.h
new file mode 100644
index 0000000000..a096ea0ab3
--- /dev/null
+++ b/js/src/wasm/WasmStubs.h
@@ -0,0 +1,364 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_stubs_h
+#define wasm_stubs_h
+
+#include "wasm/WasmFrameIter.h" // js::wasm::ExitReason
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmOpIter.h"
+
+namespace js {
+namespace wasm {
+
+// ValType and location for a single result: either in a register or on the
+// stack.
+
+class ABIResult {
+ ValType type_;
+ enum class Location { Gpr, Gpr64, Fpr, Stack } loc_;
+ union {
+ Register gpr_;
+ Register64 gpr64_;
+ FloatRegister fpr_;
+ uint32_t stackOffset_;
+ };
+
+ void validate() {
+#ifdef DEBUG
+ if (onStack()) {
+ return;
+ }
+ MOZ_ASSERT(inRegister());
+ switch (type_.kind()) {
+ case ValType::I32:
+ MOZ_ASSERT(loc_ == Location::Gpr);
+ break;
+ case ValType::I64:
+ MOZ_ASSERT(loc_ == Location::Gpr64);
+ break;
+ case ValType::F32:
+ case ValType::F64:
+ MOZ_ASSERT(loc_ == Location::Fpr);
+ break;
+ case ValType::Ref:
+ MOZ_ASSERT(loc_ == Location::Gpr);
+ break;
+ case ValType::V128:
+ MOZ_ASSERT(loc_ == Location::Fpr);
+ break;
+ }
+#endif
+ }
+
+ friend class ABIResultIter;
+ ABIResult() {}
+
+ public:
+ // Sizes of items in the stack area.
+ //
+ // The size values come from the implementations of Push() in
+ // MacroAssembler-x86-shared.cpp and MacroAssembler-arm-shared.cpp, and from
+ // VFPRegister::size() in Architecture-arm.h.
+ //
+  // On ARM, unlike on x86, we push a single for a float.
+
+ static constexpr size_t StackSizeOfPtr = sizeof(intptr_t);
+ static constexpr size_t StackSizeOfInt32 = StackSizeOfPtr;
+ static constexpr size_t StackSizeOfInt64 = sizeof(int64_t);
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
+ static constexpr size_t StackSizeOfFloat = sizeof(float);
+#else
+ static constexpr size_t StackSizeOfFloat = sizeof(double);
+#endif
+ static constexpr size_t StackSizeOfDouble = sizeof(double);
+#ifdef ENABLE_WASM_SIMD
+ static constexpr size_t StackSizeOfV128 = sizeof(V128);
+#endif
+
+ ABIResult(ValType type, Register gpr)
+ : type_(type), loc_(Location::Gpr), gpr_(gpr) {
+ validate();
+ }
+ ABIResult(ValType type, Register64 gpr64)
+ : type_(type), loc_(Location::Gpr64), gpr64_(gpr64) {
+ validate();
+ }
+ ABIResult(ValType type, FloatRegister fpr)
+ : type_(type), loc_(Location::Fpr), fpr_(fpr) {
+ validate();
+ }
+ ABIResult(ValType type, uint32_t stackOffset)
+ : type_(type), loc_(Location::Stack), stackOffset_(stackOffset) {
+ validate();
+ }
+
+ ValType type() const { return type_; }
+ bool onStack() const { return loc_ == Location::Stack; }
+ bool inRegister() const { return !onStack(); }
+ Register gpr() const {
+ MOZ_ASSERT(loc_ == Location::Gpr);
+ return gpr_;
+ }
+ Register64 gpr64() const {
+ MOZ_ASSERT(loc_ == Location::Gpr64);
+ return gpr64_;
+ }
+ FloatRegister fpr() const {
+ MOZ_ASSERT(loc_ == Location::Fpr);
+ return fpr_;
+ }
+ // Offset from SP.
+ uint32_t stackOffset() const {
+ MOZ_ASSERT(loc_ == Location::Stack);
+ return stackOffset_;
+ }
+ uint32_t size() const;
+};
+
+// Just as WebAssembly functions can take multiple arguments, they can also
+// return multiple results. As with a call, a limited number of results will be
+// located in registers, and the rest will be stored in a stack area. The
+// |ABIResultIter| computes result locations, given a |ResultType|.
+//
+// Recall that a |ResultType| represents a sequence of value types t1..tN,
+// indexed from 1 to N. In principle it doesn't matter how we decide which
+// results get to be in registers and which go to the stack. To better
+// harmonize with WebAssembly's abstract stack machine, whose properties are
+// taken advantage of by the baseline compiler, our strategy is to start
+// allocating result locations in "reverse" order: from result N down to 1.
+//
+// If a result with index I is in a register, then all results with index J > I
+// are also in registers. If a result I is on the stack, then all results with
+// index K < I are also on the stack, farther away from the stack pointer than
+// result I.
+//
+// Currently only a single result is ever stored in a register, though this may
+// change in the future on register-rich platforms.
+//
+// NB: The baseline compiler also uses this ABI for locations of block
+// parameters and return values, within individual WebAssembly functions.
+
+class ABIResultIter {
+ ResultType type_;
+ uint32_t count_;
+ uint32_t index_;
+ uint32_t nextStackOffset_;
+ enum { Next, Prev } direction_;
+ ABIResult cur_;
+
+ void settleRegister(ValType type);
+ void settleNext();
+ void settlePrev();
+
+ public:
+ explicit ABIResultIter(const ResultType& type)
+ : type_(type), count_(type.length()) {
+ reset();
+ }
+
+ void reset() {
+ index_ = nextStackOffset_ = 0;
+ direction_ = Next;
+ if (!done()) {
+ settleNext();
+ }
+ }
+ bool done() const { return index_ == count_; }
+ uint32_t index() const { return index_; }
+ uint32_t count() const { return count_; }
+ uint32_t remaining() const { return count_ - index_; }
+ void switchToNext() {
+ MOZ_ASSERT(direction_ == Prev);
+ if (!done() && cur().onStack()) {
+ nextStackOffset_ += cur().size();
+ }
+ index_ = count_ - index_;
+ direction_ = Next;
+ if (!done()) {
+ settleNext();
+ }
+ }
+ void switchToPrev() {
+ MOZ_ASSERT(direction_ == Next);
+ if (!done() && cur().onStack()) {
+ nextStackOffset_ -= cur().size();
+ }
+ index_ = count_ - index_;
+ direction_ = Prev;
+ if (!done()) settlePrev();
+ }
+ void next() {
+ MOZ_ASSERT(direction_ == Next);
+ MOZ_ASSERT(!done());
+ index_++;
+ if (!done()) {
+ settleNext();
+ }
+ }
+ void prev() {
+ MOZ_ASSERT(direction_ == Prev);
+ MOZ_ASSERT(!done());
+ index_++;
+ if (!done()) {
+ settlePrev();
+ }
+ }
+ const ABIResult& cur() const {
+ MOZ_ASSERT(!done());
+ return cur_;
+ }
+
+ uint32_t stackBytesConsumedSoFar() const { return nextStackOffset_; }
+
+ static inline bool HasStackResults(const ResultType& type) {
+ return type.length() > MaxRegisterResults;
+ }
+
+ static uint32_t MeasureStackBytes(const ResultType& type) {
+ if (!HasStackResults(type)) {
+ return 0;
+ }
+ ABIResultIter iter(type);
+ while (!iter.done()) {
+ iter.next();
+ }
+ return iter.stackBytesConsumedSoFar();
+ }
+};
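+
+// A minimal usage sketch, assuming a ResultType |resultType| built by the
+// caller; it mirrors the iteration in MeasureStackBytes above:
+//
+//   ABIResultIter iter(resultType);
+//   for (; !iter.done(); iter.next()) {
+//     const ABIResult& result = iter.cur();
+//     if (result.inRegister()) {
+//       // At most one result is in a register at present; see above.
+//     } else {
+//       uint32_t offsetFromSP = result.stackOffset();
+//       // ... load or store the value at this offset ...
+//     }
+//   }
+//   uint32_t stackBytes = iter.stackBytesConsumedSoFar();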
+
+extern bool GenerateBuiltinThunk(jit::MacroAssembler& masm,
+ jit::ABIFunctionType abiType,
+ ExitReason exitReason, void* funcPtr,
+ CallableOffsets* offsets);
+
+extern bool GenerateImportFunctions(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ CompiledCode* code);
+
+extern bool GenerateStubs(const ModuleEnvironment& env,
+ const FuncImportVector& imports,
+ const FuncExportVector& exports, CompiledCode* code);
+
+extern bool GenerateEntryStubs(jit::MacroAssembler& masm,
+ size_t funcExportIndex,
+ const FuncExport& funcExport,
+ const Maybe<jit::ImmPtr>& callee, bool isAsmJS,
+ CodeRangeVector* codeRanges);
+
+extern void GenerateTrapExitMachineState(jit::MachineState* machine,
+ size_t* numWords);
+
+extern bool GenerateProvisionalJitEntryStub(MacroAssembler& masm,
+ Offsets* offsets);
+
+// A value that is written into the trap exit frame, which is useful for
+// cross-checking during garbage collection.
+static constexpr uintptr_t TrapExitDummyValue = 1337;
+
+// And its offset, in words, down from the highest-addressed word of the trap
+// exit frame. The value is written into the frame using WasmPush. In the
+// case where WasmPush allocates more than one word, the value will therefore
+// be written at the lowest-addressed word.
+#ifdef JS_CODEGEN_ARM64
+static constexpr size_t TrapExitDummyValueOffsetFromTop = 1;
+#else
+static constexpr size_t TrapExitDummyValueOffsetFromTop = 0;
+#endif
+
+// An argument that will end up on the stack according to the system ABI, to be
+// passed to GenerateDirectCallFromJit. Since the direct JIT call creates its
+// own frame, it is its responsibility to put stack arguments at their expected
+// locations; so the caller of GenerateDirectCallFromJit can put them anywhere.
+
+class JitCallStackArg {
+ public:
+ enum class Tag {
+ Imm32,
+ GPR,
+ FPU,
+ Address,
+ Undefined,
+ };
+
+ private:
+ Tag tag_;
+ union U {
+ int32_t imm32_;
+ jit::Register gpr_;
+ jit::FloatRegister fpu_;
+ jit::Address addr_;
+ U() {}
+ } arg;
+
+ public:
+ JitCallStackArg() : tag_(Tag::Undefined) {}
+ explicit JitCallStackArg(int32_t imm32) : tag_(Tag::Imm32) {
+ arg.imm32_ = imm32;
+ }
+ explicit JitCallStackArg(jit::Register gpr) : tag_(Tag::GPR) {
+ arg.gpr_ = gpr;
+ }
+ explicit JitCallStackArg(jit::FloatRegister fpu) : tag_(Tag::FPU) {
+ new (&arg) jit::FloatRegister(fpu);
+ }
+ explicit JitCallStackArg(const jit::Address& addr) : tag_(Tag::Address) {
+ new (&arg) jit::Address(addr);
+ }
+
+ Tag tag() const { return tag_; }
+ int32_t imm32() const {
+ MOZ_ASSERT(tag_ == Tag::Imm32);
+ return arg.imm32_;
+ }
+ jit::Register gpr() const {
+ MOZ_ASSERT(tag_ == Tag::GPR);
+ return arg.gpr_;
+ }
+ jit::FloatRegister fpu() const {
+ MOZ_ASSERT(tag_ == Tag::FPU);
+ return arg.fpu_;
+ }
+ const jit::Address& addr() const {
+ MOZ_ASSERT(tag_ == Tag::Address);
+ return arg.addr_;
+ }
+};
+
+using JitCallStackArgVector = Vector<JitCallStackArg, 4, SystemAllocPolicy>;
+
+// Generates an inline wasm call (during jit compilation) to a specific wasm
+// function (as specified by the given FuncExport).
+// This call doesn't go through a wasm entry, but rather creates its own
+// inlined exit frame.
+// Assumes:
+// - all the registers have been preserved by the caller,
+// - all arguments passed in registers have been set up at the expected
+// locations,
+// - all arguments passed in stack slots are alive as defined by a
+//   corresponding JitCallStackArg.
+
+extern void GenerateDirectCallFromJit(
+ jit::MacroAssembler& masm, const FuncExport& fe, const Instance& inst,
+ const JitCallStackArgVector& stackArgs, bool profilingEnabled,
+ jit::Register scratch, uint32_t* callOffset);
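+
+// A minimal sketch of a call site, assuming |masm|, |funcExport|, |instance|,
+// |someGpr| and |scratchReg| are provided by the JIT caller:
+//
+//   JitCallStackArgVector stackArgs;
+//   if (!stackArgs.append(JitCallStackArg(someGpr)) ||
+//       !stackArgs.append(JitCallStackArg(int32_t(0)))) {
+//     return false;
+//   }
+//   uint32_t callOffset;
+//   GenerateDirectCallFromJit(masm, funcExport, instance, stackArgs,
+//                             /* profilingEnabled = */ false, scratchReg,
+//                             &callOffset);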
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_stubs_h
diff --git a/js/src/wasm/WasmTable.cpp b/js/src/wasm/WasmTable.cpp
new file mode 100644
index 0000000000..e910140496
--- /dev/null
+++ b/js/src/wasm/WasmTable.cpp
@@ -0,0 +1,401 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTable.h"
+
+#include "mozilla/CheckedInt.h"
+
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmJS.h"
+
+using namespace js;
+using namespace js::wasm;
+using mozilla::CheckedInt;
+
+Table::Table(JSContext* cx, const TableDesc& desc,
+ HandleWasmTableObject maybeObject, UniqueFuncRefArray functions)
+ : maybeObject_(maybeObject),
+ observers_(cx->zone()),
+ functions_(std::move(functions)),
+ elemType_(desc.elemType),
+ isAsmJS_(desc.isAsmJS),
+ length_(desc.initialLength),
+ maximum_(desc.maximumLength) {
+ MOZ_ASSERT(repr() == TableRepr::Func);
+}
+
+Table::Table(JSContext* cx, const TableDesc& desc,
+ HandleWasmTableObject maybeObject, TableAnyRefVector&& objects)
+ : maybeObject_(maybeObject),
+ observers_(cx->zone()),
+ objects_(std::move(objects)),
+ elemType_(desc.elemType),
+ isAsmJS_(desc.isAsmJS),
+ length_(desc.initialLength),
+ maximum_(desc.maximumLength) {
+ MOZ_ASSERT(repr() == TableRepr::Ref);
+}
+
+/* static */
+SharedTable Table::create(JSContext* cx, const TableDesc& desc,
+ HandleWasmTableObject maybeObject) {
+ // We don't support non-nullable references in tables yet.
+ MOZ_RELEASE_ASSERT(desc.elemType.isNullable());
+
+ switch (desc.elemType.tableRepr()) {
+ case TableRepr::Func: {
+ UniqueFuncRefArray functions(
+ cx->pod_calloc<FunctionTableElem>(desc.initialLength));
+ if (!functions) {
+ return nullptr;
+ }
+ return SharedTable(
+ cx->new_<Table>(cx, desc, maybeObject, std::move(functions)));
+ }
+ case TableRepr::Ref: {
+ TableAnyRefVector objects;
+ if (!objects.resize(desc.initialLength)) {
+ return nullptr;
+ }
+ return SharedTable(
+ cx->new_<Table>(cx, desc, maybeObject, std::move(objects)));
+ }
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("switch is exhaustive");
+}
+
+void Table::tracePrivate(JSTracer* trc) {
+ // If this table has a WasmTableObject, then this method is only called by
+ // WasmTableObject's trace hook so maybeObject_ must already be marked.
+ // TraceEdge is called so that the pointer can be updated during a moving
+ // GC.
+ if (maybeObject_) {
+ MOZ_ASSERT(!gc::IsAboutToBeFinalized(&maybeObject_));
+ TraceEdge(trc, &maybeObject_, "wasm table object");
+ }
+
+ switch (repr()) {
+ case TableRepr::Func: {
+ if (isAsmJS_) {
+#ifdef DEBUG
+ for (uint32_t i = 0; i < length_; i++) {
+ MOZ_ASSERT(!functions_[i].tls);
+ }
+#endif
+ break;
+ }
+
+ for (uint32_t i = 0; i < length_; i++) {
+ if (functions_[i].tls) {
+ functions_[i].tls->instance->trace(trc);
+ } else {
+ MOZ_ASSERT(!functions_[i].code);
+ }
+ }
+ break;
+ }
+ case TableRepr::Ref: {
+ objects_.trace(trc);
+ break;
+ }
+ }
+}
+
+void Table::trace(JSTracer* trc) {
+ // The trace hook of WasmTableObject will call Table::tracePrivate at
+ // which point we can mark the rest of the children. If there is no
+ // WasmTableObject, call Table::tracePrivate directly. Redirecting through
+ // the WasmTableObject avoids marking the entire Table on each incoming
+ // edge (once per dependent Instance).
+ if (maybeObject_) {
+ TraceEdge(trc, &maybeObject_, "wasm table object");
+ } else {
+ tracePrivate(trc);
+ }
+}
+
+uint8_t* Table::functionBase() const {
+ if (repr() == TableRepr::Ref) {
+ return nullptr;
+ }
+ return (uint8_t*)functions_.get();
+}
+
+const FunctionTableElem& Table::getFuncRef(uint32_t index) const {
+ MOZ_ASSERT(isFunction());
+ return functions_[index];
+}
+
+bool Table::getFuncRef(JSContext* cx, uint32_t index,
+ MutableHandleFunction fun) const {
+ MOZ_ASSERT(isFunction());
+
+ const FunctionTableElem& elem = getFuncRef(index);
+ if (!elem.code) {
+ fun.set(nullptr);
+ return true;
+ }
+
+ Instance& instance = *elem.tls->instance;
+ const CodeRange& codeRange = *instance.code().lookupFuncRange(elem.code);
+
+ RootedWasmInstanceObject instanceObj(cx, instance.object());
+ return instanceObj->getExportedFunction(cx, instanceObj,
+ codeRange.funcIndex(), fun);
+}
+
+void Table::setFuncRef(uint32_t index, void* code, const Instance* instance) {
+ MOZ_ASSERT(isFunction());
+
+ FunctionTableElem& elem = functions_[index];
+ if (elem.tls) {
+ gc::PreWriteBarrier(elem.tls->instance->objectUnbarriered());
+ }
+
+ if (!isAsmJS_) {
+ elem.code = code;
+ elem.tls = instance->tlsData();
+ MOZ_ASSERT(elem.tls->instance->objectUnbarriered()->isTenured(),
+ "no postWriteBarrier (Table::set)");
+ } else {
+ elem.code = code;
+ elem.tls = nullptr;
+ }
+}
+
+void Table::fillFuncRef(uint32_t index, uint32_t fillCount, FuncRef ref,
+ JSContext* cx) {
+ MOZ_ASSERT(isFunction());
+
+ if (ref.isNull()) {
+ for (uint32_t i = index, end = index + fillCount; i != end; i++) {
+ setNull(i);
+ }
+ return;
+ }
+
+ RootedFunction fun(cx, ref.asJSFunction());
+ MOZ_RELEASE_ASSERT(IsWasmExportedFunction(fun));
+
+ RootedWasmInstanceObject instanceObj(cx,
+ ExportedFunctionToInstanceObject(fun));
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(fun);
+
+#ifdef DEBUG
+ RootedFunction f(cx);
+ MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcIndex, &f));
+ MOZ_ASSERT(fun == f);
+#endif
+
+ Instance& instance = instanceObj->instance();
+ Tier tier = instance.code().bestTier();
+ const MetadataTier& metadata = instance.metadata(tier);
+ const CodeRange& codeRange =
+ metadata.codeRange(metadata.lookupFuncExport(funcIndex));
+ void* code = instance.codeBase(tier) + codeRange.funcCheckedCallEntry();
+ for (uint32_t i = index, end = index + fillCount; i != end; i++) {
+ setFuncRef(i, code, &instance);
+ }
+}
+
+AnyRef Table::getAnyRef(uint32_t index) const {
+ MOZ_ASSERT(!isFunction());
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write barrier
+ // is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ return AnyRef::fromJSObject(objects_[index]);
+}
+
+void Table::fillAnyRef(uint32_t index, uint32_t fillCount, AnyRef ref) {
+ MOZ_ASSERT(!isFunction());
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write barrier
+ // is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ for (uint32_t i = index, end = index + fillCount; i != end; i++) {
+ objects_[i] = ref.asJSObject();
+ }
+}
+
+void Table::setNull(uint32_t index) {
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!isAsmJS_);
+ FunctionTableElem& elem = functions_[index];
+ if (elem.tls) {
+ gc::PreWriteBarrier(elem.tls->instance->objectUnbarriered());
+ }
+
+ elem.code = nullptr;
+ elem.tls = nullptr;
+ break;
+ }
+ case TableRepr::Ref: {
+ fillAnyRef(index, 1, AnyRef::null());
+ break;
+ }
+ }
+}
+
+bool Table::copy(const Table& srcTable, uint32_t dstIndex, uint32_t srcIndex) {
+ MOZ_RELEASE_ASSERT(!srcTable.isAsmJS_);
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(elemType().isFunc() && srcTable.elemType().isFunc());
+ FunctionTableElem& dst = functions_[dstIndex];
+ if (dst.tls) {
+ gc::PreWriteBarrier(dst.tls->instance->objectUnbarriered());
+ }
+
+ FunctionTableElem& src = srcTable.functions_[srcIndex];
+ dst.code = src.code;
+ dst.tls = src.tls;
+
+ if (dst.tls) {
+ MOZ_ASSERT(dst.code);
+ MOZ_ASSERT(dst.tls->instance->objectUnbarriered()->isTenured(),
+ "no postWriteBarrier (Table::copy)");
+ } else {
+ MOZ_ASSERT(!dst.code);
+ }
+ break;
+ }
+ case TableRepr::Ref: {
+ switch (srcTable.repr()) {
+ case TableRepr::Ref: {
+ fillAnyRef(dstIndex, 1, srcTable.getAnyRef(srcIndex));
+ break;
+ }
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(srcTable.elemType().isFunc());
+ // Upcast. Possibly suboptimal to grab the cx here for every iteration
+ // of the outer copy loop.
+ JSContext* cx = TlsContext.get();
+ RootedFunction fun(cx);
+ if (!srcTable.getFuncRef(cx, srcIndex, &fun)) {
+ // OOM, so just pass it on.
+ return false;
+ }
+ fillAnyRef(dstIndex, 1, AnyRef::fromJSObject(fun));
+ break;
+ }
+ }
+ break;
+ }
+ }
+ return true;
+}
+
+uint32_t Table::grow(uint32_t delta) {
+ // This isn't just an optimization: movingGrowable() assumes that
+ // onMovingGrowTable does not fire when length == maximum.
+ if (!delta) {
+ return length_;
+ }
+
+ uint32_t oldLength = length_;
+
+ CheckedInt<uint32_t> newLength = oldLength;
+ newLength += delta;
+ if (!newLength.isValid() || newLength.value() > MaxTableLength) {
+ return -1;
+ }
+
+ if (maximum_ && newLength.value() > maximum_.value()) {
+ return -1;
+ }
+
+ MOZ_ASSERT(movingGrowable());
+
+ switch (repr()) {
+ case TableRepr::Func: {
+ MOZ_RELEASE_ASSERT(!isAsmJS_);
+      // Note that realloc does not release functions_'s pointee on failure,
+      // which is exactly what we need here.
+ FunctionTableElem* newFunctions = js_pod_realloc<FunctionTableElem>(
+ functions_.get(), length_, newLength.value());
+ if (!newFunctions) {
+ return -1;
+ }
+ Unused << functions_.release();
+ functions_.reset(newFunctions);
+
+ // Realloc does not zero the delta for us.
+ PodZero(newFunctions + length_, delta);
+ break;
+ }
+ case TableRepr::Ref: {
+ if (!objects_.resize(newLength.value())) {
+ return -1;
+ }
+ break;
+ }
+ }
+
+ if (auto object = maybeObject_.unbarrieredGet()) {
+ RemoveCellMemory(object, gcMallocBytes(), MemoryUse::WasmTableTable);
+ }
+
+ length_ = newLength.value();
+
+ if (auto object = maybeObject_.unbarrieredGet()) {
+ AddCellMemory(object, gcMallocBytes(), MemoryUse::WasmTableTable);
+ }
+
+ for (InstanceSet::Range r = observers_.all(); !r.empty(); r.popFront()) {
+ r.front()->instance().onMovingGrowTable(this);
+ }
+
+ return oldLength;
+}
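+
+// Illustrative caller sketch (assumed usage, not taken from this file): the
+// (uint32_t)-1 failure sentinel returned by grow() is typically checked as
+// follows, for some SharedTable `table` and requested `delta`:
+//
+//   uint32_t oldLength = table->grow(delta);
+//   if (oldLength == uint32_t(-1)) {
+//     return false;  // overflow, above the declared maximum, or OOM
+//   }
+//   // entries [oldLength, oldLength + delta) start out null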
+
+bool Table::movingGrowable() const {
+ return !maximum_ || length_ < maximum_.value();
+}
+
+bool Table::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance) {
+ MOZ_ASSERT(movingGrowable());
+
+ // A table can be imported multiple times into an instance, but we only
+ // register the instance as an observer once.
+
+ if (!observers_.put(instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+size_t Table::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ if (isFunction()) {
+ return mallocSizeOf(functions_.get());
+ }
+ return objects_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t Table::gcMallocBytes() const {
+ size_t size = sizeof(*this);
+ if (isFunction()) {
+ size += length() * sizeof(FunctionTableElem);
+ } else {
+ size += length() * sizeof(TableAnyRefVector::ElementType);
+ }
+ return size;
+}
diff --git a/js/src/wasm/WasmTable.h b/js/src/wasm/WasmTable.h
new file mode 100644
index 0000000000..ef1eafccbe
--- /dev/null
+++ b/js/src/wasm/WasmTable.h
@@ -0,0 +1,126 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_table_h
+#define wasm_table_h
+
+#include "gc/Policy.h"
+#include "wasm/WasmCode.h"
+
+namespace js {
+namespace wasm {
+
+// A Table is an indexable array of opaque values. Tables are first-class
+// stateful objects exposed to WebAssembly. asm.js also uses Tables to represent
+// its homogeneous function-pointer tables.
+//
+// A table of FuncRef holds FunctionTableElems, which are (code*,tls*) pairs,
+// where the tls must be traced.
+//
+// A table of AnyRef holds JSObject pointers, which must be traced.
+
+// TODO/AnyRef-boxing: With boxed immediates and strings, JSObject* is no longer
+// the most appropriate representation for Cell::anyref.
+STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+
+typedef GCVector<HeapPtr<JSObject*>, 0, SystemAllocPolicy> TableAnyRefVector;
+
+class Table : public ShareableBase<Table> {
+ using InstanceSet =
+ JS::WeakCache<GCHashSet<WeakHeapPtrWasmInstanceObject,
+ MovableCellHasher<WeakHeapPtrWasmInstanceObject>,
+ SystemAllocPolicy>>;
+ using UniqueFuncRefArray = UniquePtr<FunctionTableElem[], JS::FreePolicy>;
+
+ WeakHeapPtrWasmTableObject maybeObject_;
+ InstanceSet observers_;
+ UniqueFuncRefArray functions_; // either functions_ has data
+ TableAnyRefVector objects_; // or objects_, but not both
+ const RefType elemType_;
+ const bool isAsmJS_;
+ uint32_t length_;
+ const Maybe<uint32_t> maximum_;
+
+ template <class>
+ friend struct js::MallocProvider;
+ Table(JSContext* cx, const TableDesc& td, HandleWasmTableObject maybeObject,
+ UniqueFuncRefArray functions);
+ Table(JSContext* cx, const TableDesc& td, HandleWasmTableObject maybeObject,
+ TableAnyRefVector&& objects);
+
+ void tracePrivate(JSTracer* trc);
+ friend class js::WasmTableObject;
+
+ public:
+ static RefPtr<Table> create(JSContext* cx, const TableDesc& desc,
+ HandleWasmTableObject maybeObject);
+ void trace(JSTracer* trc);
+
+ RefType elemType() const { return elemType_; }
+ TableRepr repr() const { return elemType_.tableRepr(); }
+
+ bool isAsmJS() const {
+ MOZ_ASSERT(elemType_.isFunc());
+ return isAsmJS_;
+ }
+ bool isFunction() const { return elemType().isFunc(); }
+ uint32_t length() const { return length_; }
+ Maybe<uint32_t> maximum() const { return maximum_; }
+
+ // Only for function values. Raw pointer to the table.
+ uint8_t* functionBase() const;
+
+ // set/get/fillFuncRef is allowed only on table-of-funcref.
+ // get/fillAnyRef is allowed only on table-of-anyref.
+ // setNull is allowed on either.
+
+ const FunctionTableElem& getFuncRef(uint32_t index) const;
+ bool getFuncRef(JSContext* cx, uint32_t index,
+ MutableHandleFunction fun) const;
+ void setFuncRef(uint32_t index, void* code, const Instance* instance);
+ void fillFuncRef(uint32_t index, uint32_t fillCount, FuncRef ref,
+ JSContext* cx);
+
+ AnyRef getAnyRef(uint32_t index) const;
+ void fillAnyRef(uint32_t index, uint32_t fillCount, AnyRef ref);
+
+ void setNull(uint32_t index);
+
+ // Copy entry from |srcTable| at |srcIndex| to this table at |dstIndex|. Used
+ // by table.copy. May OOM if it needs to box up a function during an upcast.
+ bool copy(const Table& srcTable, uint32_t dstIndex, uint32_t srcIndex);
+
+ // grow() returns (uint32_t)-1 if it could not grow.
+ uint32_t grow(uint32_t delta);
+ bool movingGrowable() const;
+ bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
+
+ // about:memory reporting:
+
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const;
+
+ size_t gcMallocBytes() const;
+};
+
+using SharedTable = RefPtr<Table>;
+typedef Vector<SharedTable, 0, SystemAllocPolicy> SharedTableVector;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_table_h
diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
new file mode 100644
index 0000000000..75963a1a0e
--- /dev/null
+++ b/js/src/wasm/WasmTypes.cpp
@@ -0,0 +1,1554 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTypes.h"
+
+#include <algorithm>
+
+#include "jsmath.h"
+#include "js/friend/ErrorMessages.h" // JSMSG_*
+#include "js/Printf.h"
+#include "util/Memory.h"
+#include "vm/ArrayBufferObject.h"
+#include "vm/Warnings.h" // js:WarnNumberASCII
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmStubs.h"
+
+#include "vm/JSObject-inl.h"
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt32;
+using mozilla::IsPowerOfTwo;
+using mozilla::MakeEnumeratedRange;
+
+// We have only tested huge memory on x64 and arm64.
+
+#if defined(WASM_SUPPORTS_HUGE_MEMORY)
+# if !(defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64))
+# error "Not an expected configuration"
+# endif
+#endif
+
+// All plausible targets must be able to do at least IEEE754 double
+// loads/stores, hence the lower limit of 8. Some Intel processors support
+// AVX-512 loads/stores, hence the upper limit of 64.
+static_assert(MaxMemoryAccessSize >= 8, "MaxMemoryAccessSize too low");
+static_assert(MaxMemoryAccessSize <= 64, "MaxMemoryAccessSize too high");
+static_assert((MaxMemoryAccessSize & (MaxMemoryAccessSize - 1)) == 0,
+ "MaxMemoryAccessSize is not a power of two");
+
+#if defined(WASM_SUPPORTS_HUGE_MEMORY)
+static_assert(HugeMappedSize > MaxMemory32Bytes,
+ "Normal array buffer could be confused with huge memory");
+#endif
+
+Val::Val(const LitVal& val) {
+ type_ = val.type();
+ switch (type_.kind()) {
+ case ValType::I32:
+ cell_.i32_ = val.i32();
+ return;
+ case ValType::F32:
+ cell_.f32_ = val.f32();
+ return;
+ case ValType::I64:
+ cell_.i64_ = val.i64();
+ return;
+ case ValType::F64:
+ cell_.f64_ = val.f64();
+ return;
+ case ValType::V128:
+ cell_.v128_ = val.v128();
+ return;
+ case ValType::Ref:
+ cell_.ref_ = val.ref();
+ return;
+ }
+ MOZ_CRASH();
+}
+
+bool Val::fromJSValue(JSContext* cx, ValType targetType, HandleValue val,
+ MutableHandleVal rval) {
+ rval.get().type_ = targetType;
+ // No pre/post barrier needed as rval is rooted
+ return ToWebAssemblyValue(cx, val, targetType, &rval.get().cell_,
+ targetType.size() == 8);
+}
+
+bool Val::toJSValue(JSContext* cx, MutableHandleValue rval) const {
+ return ToJSValue(cx, &cell_, type_, rval);
+}
+
+void Val::trace(JSTracer* trc) const {
+ if (isJSObject()) {
+ // TODO/AnyRef-boxing: With boxed immediates and strings, the write
+ // barrier is going to have to be more complicated.
+ ASSERT_ANYREF_IS_JSOBJECT;
+ TraceManuallyBarrieredEdge(trc, asJSObjectAddress(), "wasm val");
+ }
+}
+
+bool wasm::CheckRefType(JSContext* cx, RefType targetType, HandleValue v,
+ MutableHandleFunction fnval,
+ MutableHandleAnyRef refval) {
+ if (!targetType.isNullable() && v.isNull()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_REF_NONNULLABLE_VALUE);
+ return false;
+ }
+ switch (targetType.kind()) {
+ case RefType::Func:
+ if (!CheckFuncRefValue(cx, v, fnval)) {
+ return false;
+ }
+ break;
+ case RefType::Extern:
+ if (!BoxAnyRef(cx, v, refval)) {
+ return false;
+ }
+ break;
+ case RefType::Eq:
+ if (!CheckEqRefValue(cx, v, refval)) {
+ return false;
+ }
+ break;
+ case RefType::TypeIndex:
+ MOZ_CRASH("temporarily unsupported Ref type");
+ }
+ return true;
+}
+
+bool wasm::CheckFuncRefValue(JSContext* cx, HandleValue v,
+ MutableHandleFunction fun) {
+ if (v.isNull()) {
+ MOZ_ASSERT(!fun);
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<JSFunction>()) {
+ JSFunction* f = &obj.as<JSFunction>();
+ if (IsWasmExportedFunction(f)) {
+ fun.set(f);
+ return true;
+ }
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_FUNCREF_VALUE);
+ return false;
+}
+
+bool wasm::CheckEqRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp) {
+ if (v.isNull()) {
+ vp.set(AnyRef::null());
+ return true;
+ }
+
+ if (v.isObject()) {
+ JSObject& obj = v.toObject();
+ if (obj.is<TypedObject>()) {
+ vp.set(AnyRef::fromJSObject(&obj.as<TypedObject>()));
+ return true;
+ }
+ }
+
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_EQREF_VALUE);
+ return false;
+}
+
+class wasm::NoDebug {
+ public:
+ template <typename T>
+ static void print(T v) {}
+};
+
+class wasm::DebugCodegenVal {
+ template <typename T>
+ static void print(const char* fmt, T v) {
+ DebugCodegen(DebugChannel::Function, fmt, v);
+ }
+
+ public:
+ static void print(int32_t v) { print(" i32(%d)", v); }
+ static void print(int64_t v) { print(" i64(%" PRId64 ")", v); }
+ static void print(float v) { print(" f32(%f)", v); }
+ static void print(double v) { print(" f64(%lf)", v); }
+ static void print(void* v) { print(" ptr(%p)", v); }
+};
+
+template bool wasm::ToWebAssemblyValue<NoDebug>(JSContext* cx, HandleValue val,
+ ValType type, void* loc,
+ bool mustWrite64);
+template bool wasm::ToWebAssemblyValue<DebugCodegenVal>(JSContext* cx,
+ HandleValue val,
+ ValType type, void* loc,
+ bool mustWrite64);
+template bool wasm::ToJSValue<NoDebug>(JSContext* cx, const void* src,
+ ValType type, MutableHandleValue dst);
+template bool wasm::ToJSValue<DebugCodegenVal>(JSContext* cx, const void* src,
+ ValType type,
+ MutableHandleValue dst);
+
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_i32(JSContext* cx, HandleValue val, int32_t* loc,
+ bool mustWrite64) {
+ bool ok = ToInt32(cx, val, loc);
+ if (ok && mustWrite64) {
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ loc[1] = loc[0] >> 31;
+#else
+ loc[1] = 0;
+#endif
+ }
+ Debug::print(*loc);
+ return ok;
+}
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_i64(JSContext* cx, HandleValue val, int64_t* loc,
+ bool mustWrite64) {
+ MOZ_ASSERT(mustWrite64);
+ JS_TRY_VAR_OR_RETURN_FALSE(cx, *loc, ToBigInt64(cx, val));
+ Debug::print(*loc);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_f32(JSContext* cx, HandleValue val, float* loc,
+ bool mustWrite64) {
+ bool ok = RoundFloat32(cx, val, loc);
+ if (ok && mustWrite64) {
+ loc[1] = 0.0;
+ }
+ Debug::print(*loc);
+ return ok;
+}
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_f64(JSContext* cx, HandleValue val, double* loc,
+ bool mustWrite64) {
+ MOZ_ASSERT(mustWrite64);
+ bool ok = ToNumber(cx, val, loc);
+ Debug::print(*loc);
+ return ok;
+}
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_externref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!BoxAnyRef(cx, val, &result)) {
+ return false;
+ }
+ *loc = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_eqref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedAnyRef result(cx, AnyRef::null());
+ if (!CheckEqRefValue(cx, val, &result)) {
+ return false;
+ }
+ *loc = result.get().forCompiledCode();
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToWebAssemblyValue_funcref(JSContext* cx, HandleValue val, void** loc,
+ bool mustWrite64) {
+ RootedFunction fun(cx);
+ if (!CheckFuncRefValue(cx, val, &fun)) {
+ return false;
+ }
+ *loc = fun;
+#ifndef JS_64BIT
+ if (mustWrite64) {
+ loc[1] = nullptr;
+ }
+#endif
+ Debug::print(*loc);
+ return true;
+}
+
+template <typename Debug>
+bool wasm::ToWebAssemblyValue(JSContext* cx, HandleValue val, ValType type,
+ void* loc, bool mustWrite64) {
+ switch (type.kind()) {
+ case ValType::I32:
+ return ToWebAssemblyValue_i32<Debug>(cx, val, (int32_t*)loc, mustWrite64);
+ case ValType::I64:
+ return ToWebAssemblyValue_i64<Debug>(cx, val, (int64_t*)loc, mustWrite64);
+ case ValType::F32:
+ return ToWebAssemblyValue_f32<Debug>(cx, val, (float*)loc, mustWrite64);
+ case ValType::F64:
+ return ToWebAssemblyValue_f64<Debug>(cx, val, (double*)loc, mustWrite64);
+ case ValType::V128:
+ break;
+ case ValType::Ref:
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ if (!type.isNullable() && val.isNull()) {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_REF_NONNULLABLE_VALUE);
+ return false;
+ }
+#else
+ MOZ_ASSERT(type.isNullable());
+#endif
+ switch (type.refTypeKind()) {
+ case RefType::Func:
+ return ToWebAssemblyValue_funcref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::Extern:
+ return ToWebAssemblyValue_externref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::Eq:
+ return ToWebAssemblyValue_eqref<Debug>(cx, val, (void**)loc,
+ mustWrite64);
+ case RefType::TypeIndex:
+ break;
+ }
+ }
+ MOZ_ASSERT(!type.isExposable());
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_WASM_BAD_VAL_TYPE);
+ return false;
+}
+
+template <typename Debug = NoDebug>
+bool ToJSValue_i32(JSContext* cx, int32_t src, MutableHandleValue dst) {
+ dst.set(Int32Value(src));
+ Debug::print(src);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToJSValue_i64(JSContext* cx, int64_t src, MutableHandleValue dst) {
+  // If bi were used for anything other than testing and storing, it would
+  // need to be rooted here.
+ BigInt* bi = BigInt::createFromInt64(cx, src);
+ if (!bi) {
+ return false;
+ }
+ dst.set(BigIntValue(bi));
+ Debug::print(src);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToJSValue_f32(JSContext* cx, float src, MutableHandleValue dst) {
+ dst.set(JS::CanonicalizedDoubleValue(src));
+ Debug::print(src);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToJSValue_f64(JSContext* cx, double src, MutableHandleValue dst) {
+ dst.set(JS::CanonicalizedDoubleValue(src));
+ Debug::print(src);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToJSValue_funcref(JSContext* cx, void* src, MutableHandleValue dst) {
+ dst.set(UnboxFuncRef(FuncRef::fromCompiledCode(src)));
+ Debug::print(src);
+ return true;
+}
+template <typename Debug = NoDebug>
+bool ToJSValue_anyref(JSContext* cx, void* src, MutableHandleValue dst) {
+ dst.set(UnboxAnyRef(AnyRef::fromCompiledCode(src)));
+ Debug::print(src);
+ return true;
+}
+
+template <typename Debug>
+bool wasm::ToJSValue(JSContext* cx, const void* src, ValType type,
+ MutableHandleValue dst) {
+ switch (type.kind()) {
+ case ValType::I32:
+ return ToJSValue_i32<Debug>(cx, *reinterpret_cast<const int32_t*>(src),
+ dst);
+ case ValType::I64:
+ return ToJSValue_i64<Debug>(cx, *reinterpret_cast<const int64_t*>(src),
+ dst);
+ case ValType::F32:
+ return ToJSValue_f32<Debug>(cx, *reinterpret_cast<const float*>(src),
+ dst);
+ case ValType::F64:
+ return ToJSValue_f64<Debug>(cx, *reinterpret_cast<const double*>(src),
+ dst);
+ case ValType::V128:
+ break;
+ case ValType::Ref:
+ switch (type.refTypeKind()) {
+ case RefType::Func:
+ return ToJSValue_funcref<Debug>(
+ cx, *reinterpret_cast<void* const*>(src), dst);
+ case RefType::Extern:
+ return ToJSValue_anyref<Debug>(
+ cx, *reinterpret_cast<void* const*>(src), dst);
+ case RefType::Eq:
+ return ToJSValue_anyref<Debug>(
+ cx, *reinterpret_cast<void* const*>(src), dst);
+ case RefType::TypeIndex:
+ break;
+ }
+ }
+ MOZ_ASSERT(!type.isExposable());
+ Debug::print(nullptr);
+ dst.setUndefined();
+ return true;
+}
+
+void AnyRef::trace(JSTracer* trc) {
+ if (value_) {
+ TraceManuallyBarrieredEdge(trc, &value_, "wasm anyref referent");
+ }
+}
+
+const JSClass WasmValueBox::class_ = {
+ "WasmValueBox", JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS)};
+
+WasmValueBox* WasmValueBox::create(JSContext* cx, HandleValue val) {
+ WasmValueBox* obj = NewObjectWithGivenProto<WasmValueBox>(cx, nullptr);
+ if (!obj) {
+ return nullptr;
+ }
+ obj->setFixedSlot(VALUE_SLOT, val);
+ return obj;
+}
+
+bool wasm::BoxAnyRef(JSContext* cx, HandleValue val, MutableHandleAnyRef addr) {
+ if (val.isNull()) {
+ addr.set(AnyRef::null());
+ return true;
+ }
+
+ if (val.isObject()) {
+ JSObject* obj = &val.toObject();
+ MOZ_ASSERT(!obj->is<WasmValueBox>());
+ MOZ_ASSERT(obj->compartment() == cx->compartment());
+ addr.set(AnyRef::fromJSObject(obj));
+ return true;
+ }
+
+ WasmValueBox* box = WasmValueBox::create(cx, val);
+  if (!box) {
+    return false;
+  }
+ addr.set(AnyRef::fromJSObject(box));
+ return true;
+}
+
+JSObject* wasm::BoxBoxableValue(JSContext* cx, HandleValue val) {
+ MOZ_ASSERT(!val.isNull() && !val.isObject());
+ return WasmValueBox::create(cx, val);
+}
+
+Value wasm::UnboxAnyRef(AnyRef val) {
+ // If UnboxAnyRef needs to allocate then we need a more complicated API, and
+  // we need to root the value in the callers; see comments in callExport().
+ JSObject* obj = val.asJSObject();
+ Value result;
+ if (obj == nullptr) {
+ result.setNull();
+ } else if (obj->is<WasmValueBox>()) {
+ result = obj->as<WasmValueBox>().value();
+ } else {
+ result.setObjectOrNull(obj);
+ }
+ return result;
+}
+
+/* static */
+wasm::FuncRef wasm::FuncRef::fromAnyRefUnchecked(AnyRef p) {
+#ifdef DEBUG
+ Value v = UnboxAnyRef(p);
+ if (v.isNull()) {
+ return FuncRef(nullptr);
+ }
+ if (v.toObject().is<JSFunction>()) {
+ return FuncRef(&v.toObject().as<JSFunction>());
+ }
+ MOZ_CRASH("Bad value");
+#else
+ return FuncRef(&p.asJSObject()->as<JSFunction>());
+#endif
+}
+
+Value wasm::UnboxFuncRef(FuncRef val) {
+ JSFunction* fn = val.asJSFunction();
+ Value result;
+ MOZ_ASSERT_IF(fn, fn->is<JSFunction>());
+ result.setObjectOrNull(fn);
+ return result;
+}
+
+bool wasm::IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode) {
+ switch (callee) {
+ case SymbolicAddress::FloorD:
+ case SymbolicAddress::FloorF:
+ *mode = jit::RoundingMode::Down;
+ return true;
+ case SymbolicAddress::CeilD:
+ case SymbolicAddress::CeilF:
+ *mode = jit::RoundingMode::Up;
+ return true;
+ case SymbolicAddress::TruncD:
+ case SymbolicAddress::TruncF:
+ *mode = jit::RoundingMode::TowardsZero;
+ return true;
+ case SymbolicAddress::NearbyIntD:
+ case SymbolicAddress::NearbyIntF:
+ *mode = jit::RoundingMode::NearestTiesToEven;
+ return true;
+ default:
+ return false;
+ }
+}
+
+size_t FuncType::serializedSize() const {
+ return SerializedPodVectorSize(results_) + SerializedPodVectorSize(args_);
+}
+
+uint8_t* FuncType::serialize(uint8_t* cursor) const {
+ cursor = SerializePodVector(cursor, results_);
+ cursor = SerializePodVector(cursor, args_);
+ return cursor;
+}
+
+const uint8_t* FuncType::deserialize(const uint8_t* cursor) {
+ cursor = DeserializePodVector(cursor, &results_);
+ if (!cursor) {
+ return nullptr;
+ }
+ return DeserializePodVector(cursor, &args_);
+}
+
+size_t FuncType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return args_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+using ImmediateType = uint32_t; // for 32/64 consistency
+static const unsigned sTotalBits = sizeof(ImmediateType) * 8;
+static const unsigned sTagBits = 1;
+static const unsigned sReturnBit = 1;
+static const unsigned sLengthBits = 4;
+static const unsigned sTypeBits = 3;
+static const unsigned sMaxTypes =
+ (sTotalBits - sTagBits - sReturnBit - sLengthBits) / sTypeBits;
+
+static bool IsImmediateType(ValType vt) {
+ switch (vt.kind()) {
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+ case ValType::V128:
+ return true;
+ case ValType::Ref:
+ switch (vt.refTypeKind()) {
+ case RefType::Func:
+ case RefType::Extern:
+ case RefType::Eq:
+ return true;
+ case RefType::TypeIndex:
+ return false;
+ }
+ break;
+ }
+ MOZ_CRASH("bad ValType");
+}
+
+static unsigned EncodeImmediateType(ValType vt) {
+ static_assert(4 < (1 << sTypeBits), "fits");
+ switch (vt.kind()) {
+ case ValType::I32:
+ return 0;
+ case ValType::I64:
+ return 1;
+ case ValType::F32:
+ return 2;
+ case ValType::F64:
+ return 3;
+ case ValType::V128:
+ return 4;
+ case ValType::Ref:
+ switch (vt.refTypeKind()) {
+ case RefType::Func:
+ return 5;
+ case RefType::Extern:
+ return 6;
+ case RefType::Eq:
+ return 7;
+ case RefType::TypeIndex:
+ break;
+ }
+ break;
+ }
+ MOZ_CRASH("bad ValType");
+}
+
+/* static */
+bool TypeIdDesc::isGlobal(const TypeDef& type) {
+ if (!type.isFuncType()) {
+ return true;
+ }
+ const FuncType& funcType = type.funcType();
+ const ValTypeVector& results = funcType.results();
+ const ValTypeVector& args = funcType.args();
+ if (results.length() + args.length() > sMaxTypes) {
+ return true;
+ }
+
+ if (results.length() > 1) {
+ return true;
+ }
+
+ for (ValType v : results) {
+ if (!IsImmediateType(v)) {
+ return true;
+ }
+ }
+
+ for (ValType v : args) {
+ if (!IsImmediateType(v)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* static */
+TypeIdDesc TypeIdDesc::global(const TypeDef& type, uint32_t globalDataOffset) {
+ MOZ_ASSERT(isGlobal(type));
+ return TypeIdDesc(TypeIdDescKind::Global, globalDataOffset);
+}
+
+static ImmediateType LengthToBits(uint32_t length) {
+ static_assert(sMaxTypes <= ((1 << sLengthBits) - 1), "fits");
+ MOZ_ASSERT(length <= sMaxTypes);
+ return length;
+}
+
+/* static */
+TypeIdDesc TypeIdDesc::immediate(const TypeDef& type) {
+ const FuncType& funcType = type.funcType();
+
+ ImmediateType immediate = ImmediateBit;
+ uint32_t shift = sTagBits;
+
+ if (funcType.results().length() > 0) {
+ MOZ_ASSERT(funcType.results().length() == 1);
+ immediate |= (1 << shift);
+ shift += sReturnBit;
+
+ immediate |= EncodeImmediateType(funcType.results()[0]) << shift;
+ shift += sTypeBits;
+ } else {
+ shift += sReturnBit;
+ }
+
+ immediate |= LengthToBits(funcType.args().length()) << shift;
+ shift += sLengthBits;
+
+ for (ValType argType : funcType.args()) {
+ immediate |= EncodeImmediateType(argType) << shift;
+ shift += sTypeBits;
+ }
+
+ MOZ_ASSERT(shift <= sTotalBits);
+ return TypeIdDesc(TypeIdDescKind::Immediate, immediate);
+}
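+
+// Worked example (illustrative, assuming ImmediateBit is the low tag bit
+// 0x1): for the signature i32 -> i32 the packing above yields
+//
+//   bit 0      : 1  (immediate tag)
+//   bit 1      : 1  (has a result)
+//   bits 2..4  : 0  (result type i32, EncodeImmediateType == 0)
+//   bits 5..8  : 1  (argument count)
+//   bits 9..11 : 0  (argument type i32)
+//
+// i.e. immediate == 0x23, small enough to be compared directly at indirect
+// call sites instead of going through a type id stored in global data.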
+
+size_t TypeDef::serializedSize() const {
+ size_t size = sizeof(tag_);
+ switch (tag_) {
+ case TypeDef::IsStructType: {
+ size += sizeof(structType_);
+ break;
+ }
+ case TypeDef::IsFuncType: {
+ size += sizeof(funcType_);
+ break;
+ }
+ case TypeDef::IsNone: {
+ break;
+ }
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ return size;
+}
+
+uint8_t* TypeDef::serialize(uint8_t* cursor) const {
+ cursor = WriteBytes(cursor, &tag_, sizeof(tag_));
+ switch (tag_) {
+ case TypeDef::IsStructType: {
+ cursor = structType_.serialize(cursor);
+ break;
+ }
+ case TypeDef::IsFuncType: {
+ cursor = funcType_.serialize(cursor);
+ break;
+ }
+ case TypeDef::IsNone: {
+ break;
+ }
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ return cursor;
+}
+
+const uint8_t* TypeDef::deserialize(const uint8_t* cursor) {
+ cursor = ReadBytes(cursor, &tag_, sizeof(tag_));
+ switch (tag_) {
+ case TypeDef::IsStructType: {
+ cursor = structType_.deserialize(cursor);
+ break;
+ }
+ case TypeDef::IsFuncType: {
+ cursor = funcType_.deserialize(cursor);
+ break;
+ }
+ case TypeDef::IsNone: {
+ break;
+ }
+ default:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ return cursor;
+}
+
+size_t TypeDef::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ switch (tag_) {
+ case TypeDef::IsStructType: {
+ return structType_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ case TypeDef::IsFuncType: {
+ return funcType_.sizeOfExcludingThis(mallocSizeOf);
+ }
+ case TypeDef::IsNone: {
+ return 0;
+ }
+ default:
+ break;
+ }
+ MOZ_ASSERT_UNREACHABLE();
+ return 0;
+}
+
+size_t TypeDefWithId::serializedSize() const {
+ return TypeDef::serializedSize() + sizeof(TypeIdDesc);
+}
+
+uint8_t* TypeDefWithId::serialize(uint8_t* cursor) const {
+ cursor = TypeDef::serialize(cursor);
+ cursor = WriteBytes(cursor, &id, sizeof(id));
+ return cursor;
+}
+
+const uint8_t* TypeDefWithId::deserialize(const uint8_t* cursor) {
+ cursor = TypeDef::deserialize(cursor);
+ cursor = ReadBytes(cursor, &id, sizeof(id));
+ return cursor;
+}
+
+size_t TypeDefWithId::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return TypeDef::sizeOfExcludingThis(mallocSizeOf);
+}
+
+ArgTypeVector::ArgTypeVector(const FuncType& funcType)
+ : args_(funcType.args()),
+ hasStackResults_(ABIResultIter::HasStackResults(
+ ResultType::Vector(funcType.results()))) {}
+
+static inline CheckedInt32 RoundUpToAlignment(CheckedInt32 address,
+ uint32_t align) {
+ MOZ_ASSERT(IsPowerOfTwo(align));
+
+ // Note: Be careful to order operators such that we first make the
+ // value smaller and then larger, so that we don't get false
+ // overflow errors due to (e.g.) adding `align` and then
+ // subtracting `1` afterwards when merely adding `align-1` would
+ // not have overflowed. Note that due to the nature of two's
+ // complement representation, if `address` is already aligned,
+ // then adding `align-1` cannot itself cause an overflow.
+
+ return ((address + (align - 1)) / align) * align;
+}
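+
+// Worked example (illustrative): with address = 13 and align = 8 this
+// computes ((13 + 7) / 8) * 8 == 16. Because the division happens before
+// the multiplication, CheckedInt32 only flags an overflow when the
+// rounded-up value itself does not fit in an int32, which is the point of
+// the note above.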
+
+class StructLayout {
+ CheckedInt32 sizeSoFar = 0;
+ uint32_t structAlignment = 1;
+
+ public:
+  // The field adders return the offset of the field.
+ CheckedInt32 addField(ValType type) {
+ uint32_t fieldSize = type.size();
+ uint32_t fieldAlignment = type.alignmentInStruct();
+
+ // Alignment of the struct is the max of the alignment of its fields.
+ structAlignment = std::max(structAlignment, fieldAlignment);
+
+ // Align the pointer.
+ CheckedInt32 offset = RoundUpToAlignment(sizeSoFar, fieldAlignment);
+ if (!offset.isValid()) {
+ return offset;
+ }
+
+ // Allocate space.
+ sizeSoFar = offset + fieldSize;
+ if (!sizeSoFar.isValid()) {
+ return sizeSoFar;
+ }
+
+ return offset;
+ }
+
+ // The close method rounds up the structure size to the appropriate
+ // alignment and returns that size.
+ CheckedInt32 close() {
+ return RoundUpToAlignment(sizeSoFar, structAlignment);
+ }
+};
+
+bool StructType::computeLayout() {
+ StructLayout layout;
+ for (StructField& field : fields_) {
+ CheckedInt32 offset = layout.addField(field.type);
+ if (!offset.isValid()) {
+ return false;
+ }
+ field.offset = offset.value();
+ }
+
+ CheckedInt32 size = layout.close();
+ if (!size.isValid()) {
+ return false;
+ }
+ size_ = size.value();
+ isInline_ = InlineTypedObject::canAccommodateSize(size_);
+
+ return true;
+}
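+
+// Worked example (illustrative, assuming the usual 4-byte size/alignment of
+// i32 and 8-byte size/alignment of f64): a struct with fields (i32, f64,
+// i32) is laid out as
+//
+//   field 0 (i32) at offset 0
+//   field 1 (f64) at offset 8    (padded up from 4 for 8-byte alignment)
+//   field 2 (i32) at offset 16
+//
+// so sizeSoFar is 20, the struct alignment is 8, and close() rounds the
+// total size up to 24.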
+
+uint32_t StructType::objectBaseFieldOffset(uint32_t fieldIndex) const {
+ return fields_[fieldIndex].offset +
+ (isInline_ ? InlineTypedObject::offsetOfDataStart() : 0);
+}
+
+// A simple notion of prefix: types and mutability must match exactly.
+
+bool StructType::hasPrefix(const StructType& other) const {
+ if (fields_.length() < other.fields_.length()) {
+ return false;
+ }
+ uint32_t limit = other.fields_.length();
+ for (uint32_t i = 0; i < limit; i++) {
+ if (fields_[i].type != other.fields_[i].type ||
+ fields_[i].isMutable != other.fields_[i].isMutable) {
+ return false;
+ }
+ }
+ return true;
+}
+
+size_t StructType::serializedSize() const {
+ return SerializedPodVectorSize(fields_) + sizeof(size_) + sizeof(isInline_);
+}
+
+uint8_t* StructType::serialize(uint8_t* cursor) const {
+ cursor = SerializePodVector(cursor, fields_);
+ cursor = WriteBytes(cursor, &size_, sizeof(size_));
+ cursor = WriteBytes(cursor, &isInline_, sizeof(isInline_));
+ return cursor;
+}
+
+const uint8_t* StructType::deserialize(const uint8_t* cursor) {
+ (cursor = DeserializePodVector(cursor, &fields_)) &&
+ (cursor = ReadBytes(cursor, &size_, sizeof(size_))) &&
+ (cursor = ReadBytes(cursor, &isInline_, sizeof(isInline_)));
+ return cursor;
+}
+
+size_t StructType::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return fields_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t Import::serializedSize() const {
+ return module.serializedSize() + field.serializedSize() + sizeof(kind);
+}
+
+uint8_t* Import::serialize(uint8_t* cursor) const {
+ cursor = module.serialize(cursor);
+ cursor = field.serialize(cursor);
+ cursor = WriteScalar<DefinitionKind>(cursor, kind);
+ return cursor;
+}
+
+const uint8_t* Import::deserialize(const uint8_t* cursor) {
+ (cursor = module.deserialize(cursor)) &&
+ (cursor = field.deserialize(cursor)) &&
+ (cursor = ReadScalar<DefinitionKind>(cursor, &kind));
+ return cursor;
+}
+
+size_t Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return module.sizeOfExcludingThis(mallocSizeOf) +
+ field.sizeOfExcludingThis(mallocSizeOf);
+}
+
+Export::Export(UniqueChars fieldName, uint32_t index, DefinitionKind kind)
+ : fieldName_(std::move(fieldName)) {
+ pod.kind_ = kind;
+ pod.index_ = index;
+}
+
+Export::Export(UniqueChars fieldName, DefinitionKind kind)
+ : fieldName_(std::move(fieldName)) {
+ pod.kind_ = kind;
+ pod.index_ = 0;
+}
+
+uint32_t Export::funcIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Function);
+ return pod.index_;
+}
+
+uint32_t Export::globalIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Global);
+ return pod.index_;
+}
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+uint32_t Export::eventIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Event);
+ return pod.index_;
+}
+#endif
+
+uint32_t Export::tableIndex() const {
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Table);
+ return pod.index_;
+}
+
+size_t Export::serializedSize() const {
+ return fieldName_.serializedSize() + sizeof(pod);
+}
+
+uint8_t* Export::serialize(uint8_t* cursor) const {
+ cursor = fieldName_.serialize(cursor);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t* Export::deserialize(const uint8_t* cursor) {
+ (cursor = fieldName_.deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+size_t Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return fieldName_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t ElemSegment::serializedSize() const {
+ return sizeof(kind) + sizeof(tableIndex) + sizeof(elemType) +
+ sizeof(offsetIfActive) + SerializedPodVectorSize(elemFuncIndices);
+}
+
+uint8_t* ElemSegment::serialize(uint8_t* cursor) const {
+ cursor = WriteBytes(cursor, &kind, sizeof(kind));
+ cursor = WriteBytes(cursor, &tableIndex, sizeof(tableIndex));
+ cursor = WriteBytes(cursor, &elemType, sizeof(elemType));
+ cursor = WriteBytes(cursor, &offsetIfActive, sizeof(offsetIfActive));
+ cursor = SerializePodVector(cursor, elemFuncIndices);
+ return cursor;
+}
+
+const uint8_t* ElemSegment::deserialize(const uint8_t* cursor) {
+ (cursor = ReadBytes(cursor, &kind, sizeof(kind))) &&
+ (cursor = ReadBytes(cursor, &tableIndex, sizeof(tableIndex))) &&
+ (cursor = ReadBytes(cursor, &elemType, sizeof(elemType))) &&
+ (cursor = ReadBytes(cursor, &offsetIfActive, sizeof(offsetIfActive))) &&
+ (cursor = DeserializePodVector(cursor, &elemFuncIndices));
+ return cursor;
+}
+
+size_t ElemSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return elemFuncIndices.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t DataSegment::serializedSize() const {
+ return sizeof(offsetIfActive) + SerializedPodVectorSize(bytes);
+}
+
+uint8_t* DataSegment::serialize(uint8_t* cursor) const {
+ cursor = WriteBytes(cursor, &offsetIfActive, sizeof(offsetIfActive));
+ cursor = SerializePodVector(cursor, bytes);
+ return cursor;
+}
+
+const uint8_t* DataSegment::deserialize(const uint8_t* cursor) {
+ (cursor = ReadBytes(cursor, &offsetIfActive, sizeof(offsetIfActive))) &&
+ (cursor = DeserializePodVector(cursor, &bytes));
+ return cursor;
+}
+
+size_t DataSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return bytes.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t CustomSection::serializedSize() const {
+ return SerializedPodVectorSize(name) +
+ SerializedPodVectorSize(payload->bytes);
+}
+
+uint8_t* CustomSection::serialize(uint8_t* cursor) const {
+ cursor = SerializePodVector(cursor, name);
+ cursor = SerializePodVector(cursor, payload->bytes);
+ return cursor;
+}
+
+const uint8_t* CustomSection::deserialize(const uint8_t* cursor) {
+ cursor = DeserializePodVector(cursor, &name);
+ if (!cursor) {
+ return nullptr;
+ }
+
+ Bytes bytes;
+ cursor = DeserializePodVector(cursor, &bytes);
+ if (!cursor) {
+ return nullptr;
+ }
+ payload = js_new<ShareableBytes>(std::move(bytes));
+ if (!payload) {
+ return nullptr;
+ }
+
+ return cursor;
+}
+
+size_t CustomSection::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return name.sizeOfExcludingThis(mallocSizeOf) + sizeof(*payload) +
+ payload->sizeOfExcludingThis(mallocSizeOf);
+}
+
+// Heap length on ARM should fit in an ARM immediate. We approximate the set
+// of valid ARM immediates with the predicate:
+// 2^n for n in [16, 24)
+// or
+// 2^24 * n for n >= 1.
+bool wasm::IsValidARMImmediate(uint32_t i) {
+ bool valid = (IsPowerOfTwo(i) || (i & 0x00ffffff) == 0);
+
+ MOZ_ASSERT_IF(valid, i % PageSize == 0);
+
+ return valid;
+}
+
+uint64_t wasm::RoundUpToNextValidARMImmediate(uint64_t i) {
+ MOZ_ASSERT(i <= HighestValidARMImmediate);
+ static_assert(HighestValidARMImmediate == 0xff000000,
+ "algorithm relies on specific constant");
+
+ if (i <= 16 * 1024 * 1024) {
+ i = i ? mozilla::RoundUpPow2(i) : 0;
+ } else {
+ i = (i + 0x00ffffff) & ~0x00ffffff;
+ }
+
+ MOZ_ASSERT(IsValidARMImmediate(i));
+
+ return i;
+}
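+
+// Worked examples (illustrative): 3 MiB rounds up to the power of two 4 MiB,
+// while 20 MiB (above 2^24) rounds up to the next multiple of 2^24, that is
+// 32 MiB; both results satisfy IsValidARMImmediate.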
+
+bool wasm::IsValidBoundsCheckImmediate(uint32_t i) {
+#ifdef JS_CODEGEN_ARM
+ return IsValidARMImmediate(i);
+#else
+ return true;
+#endif
+}
+
+size_t wasm::ComputeMappedSize(uint64_t maxSize) {
+ MOZ_ASSERT(maxSize % PageSize == 0);
+
+ // It is the bounds-check limit, not the mapped size, that gets baked into
+ // code. Thus round up the maxSize to the next valid immediate value
+ // *before* adding in the guard page.
+
+#ifdef JS_CODEGEN_ARM
+ uint64_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize);
+#else
+ uint64_t boundsCheckLimit = maxSize;
+#endif
+ MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));
+
+ MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
+ return boundsCheckLimit + GuardSize;
+}
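+
+// Illustrative numbers (assuming the ARM path above): a 20 MiB maxSize is
+// first rounded up to a 32 MiB bounds-check limit, so the mapped size is
+// 32 MiB plus GuardSize; on non-ARM targets the bounds-check limit is just
+// maxSize itself.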
+
+/* static */
+DebugFrame* DebugFrame::from(Frame* fp) {
+ MOZ_ASSERT(
+ GetNearestEffectiveTls(fp)->instance->code().metadata().debugEnabled);
+ auto* df =
+ reinterpret_cast<DebugFrame*>((uint8_t*)fp - DebugFrame::offsetOfFrame());
+ MOZ_ASSERT(GetNearestEffectiveTls(fp)->instance == df->instance());
+ return df;
+}
+
+void DebugFrame::alignmentStaticAsserts() {
+ // VS2017 doesn't consider offsetOfFrame() to be a constexpr, so we have
+ // to use offsetof directly. These asserts can't be at class-level
+ // because the type is incomplete.
+
+ static_assert(WasmStackAlignment >= Alignment,
+ "Aligned by ABI before pushing DebugFrame");
+ static_assert((offsetof(DebugFrame, frame_) + sizeof(Frame)) % Alignment == 0,
+ "Aligned after pushing DebugFrame");
+#ifdef JS_CODEGEN_ARM64
+ // This constraint may or may not be necessary. If you hit this because
+ // you've changed the frame size then feel free to remove it, but be extra
+ // aware of possible problems.
+ static_assert(sizeof(DebugFrame) % 16 == 0, "ARM64 SP alignment");
+#endif
+}
+
+Instance* DebugFrame::instance() const {
+ return GetNearestEffectiveTls(&frame_)->instance;
+}
+
+GlobalObject* DebugFrame::global() const {
+ return &instance()->object()->global();
+}
+
+bool DebugFrame::hasGlobal(const GlobalObject* global) const {
+ return global == &instance()->objectUnbarriered()->global();
+}
+
+JSObject* DebugFrame::environmentChain() const {
+ return &global()->lexicalEnvironment();
+}
+
+bool DebugFrame::getLocal(uint32_t localIndex, MutableHandleValue vp) {
+ ValTypeVector locals;
+ size_t argsLength;
+ StackResults stackResults;
+ if (!instance()->debug().debugGetLocalTypes(funcIndex(), &locals, &argsLength,
+ &stackResults)) {
+ return false;
+ }
+
+ ValTypeVector args;
+ MOZ_ASSERT(argsLength <= locals.length());
+ if (!args.append(locals.begin(), argsLength)) {
+ return false;
+ }
+ ArgTypeVector abiArgs(args, stackResults);
+
+ BaseLocalIter iter(locals, abiArgs, /* debugEnabled = */ true);
+ while (!iter.done() && iter.index() < localIndex) {
+ iter++;
+ }
+ MOZ_ALWAYS_TRUE(!iter.done());
+
+ uint8_t* frame = static_cast<uint8_t*>((void*)this) + offsetOfFrame();
+ void* dataPtr = frame - iter.frameOffset();
+ switch (iter.mirType()) {
+ case jit::MIRType::Int32:
+ vp.set(Int32Value(*static_cast<int32_t*>(dataPtr)));
+ break;
+ case jit::MIRType::Int64:
+ // Just display as a Number; it's ok if we lose some precision
+ vp.set(NumberValue((double)*static_cast<int64_t*>(dataPtr)));
+ break;
+ case jit::MIRType::Float32:
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<float*>(dataPtr))));
+ break;
+ case jit::MIRType::Double:
+ vp.set(NumberValue(JS::CanonicalizeNaN(*static_cast<double*>(dataPtr))));
+ break;
+ case jit::MIRType::RefOrNull:
+ vp.set(ObjectOrNullValue(*(JSObject**)dataPtr));
+ break;
+#ifdef ENABLE_WASM_SIMD
+ case jit::MIRType::Simd128:
+ vp.set(NumberValue(0));
+ break;
+#endif
+ default:
+ MOZ_CRASH("local type");
+ }
+ return true;
+}
+
+bool DebugFrame::updateReturnJSValue(JSContext* cx) {
+ MutableHandleValue rval =
+ MutableHandleValue::fromMarkedLocation(&cachedReturnJSValue_);
+ rval.setUndefined();
+ flags_.hasCachedReturnJSValue = true;
+ ResultType resultType = instance()->debug().debugGetResultType(funcIndex());
+ Maybe<char*> stackResultsLoc;
+ if (ABIResultIter::HasStackResults(resultType)) {
+ stackResultsLoc = Some(static_cast<char*>(stackResultsPointer_));
+ }
+ DebugCodegen(DebugChannel::Function,
+ "wasm-function[%d] updateReturnJSValue [", funcIndex());
+ bool ok =
+ ResultsToJSValue(cx, resultType, registerResults_, stackResultsLoc, rval);
+ DebugCodegen(DebugChannel::Function, "]\n");
+ return ok;
+}
+
+HandleValue DebugFrame::returnValue() const {
+ MOZ_ASSERT(flags_.hasCachedReturnJSValue);
+ return HandleValue::fromMarkedLocation(&cachedReturnJSValue_);
+}
+
+void DebugFrame::clearReturnJSValue() {
+ flags_.hasCachedReturnJSValue = true;
+ cachedReturnJSValue_.setUndefined();
+}
+
+void DebugFrame::observe(JSContext* cx) {
+ if (!flags_.observing) {
+ instance()->debug().adjustEnterAndLeaveFrameTrapsState(
+ cx, /* enabled = */ true);
+ flags_.observing = true;
+ }
+}
+
+void DebugFrame::leave(JSContext* cx) {
+ if (flags_.observing) {
+ instance()->debug().adjustEnterAndLeaveFrameTrapsState(
+ cx, /* enabled = */ false);
+ flags_.observing = false;
+ }
+}
+
+bool TrapSiteVectorArray::empty() const {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ if (!(*this)[trap].empty()) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+void TrapSiteVectorArray::clear() {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ (*this)[trap].clear();
+ }
+}
+
+void TrapSiteVectorArray::swap(TrapSiteVectorArray& rhs) {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ (*this)[trap].swap(rhs[trap]);
+ }
+}
+
+void TrapSiteVectorArray::shrinkStorageToFit() {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ (*this)[trap].shrinkStorageToFit();
+ }
+}
+
+size_t TrapSiteVectorArray::serializedSize() const {
+ size_t ret = 0;
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ ret += SerializedPodVectorSize((*this)[trap]);
+ }
+ return ret;
+}
+
+uint8_t* TrapSiteVectorArray::serialize(uint8_t* cursor) const {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ cursor = SerializePodVector(cursor, (*this)[trap]);
+ }
+ return cursor;
+}
+
+const uint8_t* TrapSiteVectorArray::deserialize(const uint8_t* cursor) {
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ cursor = DeserializePodVector(cursor, &(*this)[trap]);
+ if (!cursor) {
+ return nullptr;
+ }
+ }
+ return cursor;
+}
+
+size_t TrapSiteVectorArray::sizeOfExcludingThis(
+ MallocSizeOf mallocSizeOf) const {
+ size_t ret = 0;
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ ret += (*this)[trap].sizeOfExcludingThis(mallocSizeOf);
+ }
+ return ret;
+}
+
+CodeRange::CodeRange(Kind kind, Offsets offsets)
+ : begin_(offsets.begin), ret_(0), end_(offsets.end), kind_(kind) {
+ MOZ_ASSERT(begin_ <= end_);
+ PodZero(&u);
+#ifdef DEBUG
+ switch (kind_) {
+ case FarJumpIsland:
+ case TrapExit:
+ case Throw:
+ break;
+ default:
+ MOZ_CRASH("should use more specific constructor");
+ }
+#endif
+}
+
+CodeRange::CodeRange(Kind kind, uint32_t funcIndex, Offsets offsets)
+ : begin_(offsets.begin), ret_(0), end_(offsets.end), kind_(kind) {
+ u.funcIndex_ = funcIndex;
+ u.func.lineOrBytecode_ = 0;
+ u.func.beginToUncheckedCallEntry_ = 0;
+ u.func.beginToTierEntry_ = 0;
+ MOZ_ASSERT(isEntry());
+ MOZ_ASSERT(begin_ <= end_);
+}
+
+CodeRange::CodeRange(Kind kind, CallableOffsets offsets)
+ : begin_(offsets.begin), ret_(offsets.ret), end_(offsets.end), kind_(kind) {
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ PodZero(&u);
+#ifdef DEBUG
+ switch (kind_) {
+ case DebugTrap:
+ case BuiltinThunk:
+ break;
+ default:
+ MOZ_CRASH("should use more specific constructor");
+ }
+#endif
+}
+
+CodeRange::CodeRange(Kind kind, uint32_t funcIndex, CallableOffsets offsets)
+ : begin_(offsets.begin), ret_(offsets.ret), end_(offsets.end), kind_(kind) {
+ MOZ_ASSERT(isImportExit() && !isImportJitExit());
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ u.funcIndex_ = funcIndex;
+ u.func.lineOrBytecode_ = 0;
+ u.func.beginToUncheckedCallEntry_ = 0;
+ u.func.beginToTierEntry_ = 0;
+}
+
+CodeRange::CodeRange(uint32_t funcIndex, JitExitOffsets offsets)
+ : begin_(offsets.begin),
+ ret_(offsets.ret),
+ end_(offsets.end),
+ kind_(ImportJitExit) {
+ MOZ_ASSERT(isImportJitExit());
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ u.funcIndex_ = funcIndex;
+ u.jitExit.beginToUntrustedFPStart_ = offsets.untrustedFPStart - begin_;
+ u.jitExit.beginToUntrustedFPEnd_ = offsets.untrustedFPEnd - begin_;
+ MOZ_ASSERT(jitExitUntrustedFPStart() == offsets.untrustedFPStart);
+ MOZ_ASSERT(jitExitUntrustedFPEnd() == offsets.untrustedFPEnd);
+}
+
+CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode,
+ FuncOffsets offsets)
+ : begin_(offsets.begin),
+ ret_(offsets.ret),
+ end_(offsets.end),
+ kind_(Function) {
+ MOZ_ASSERT(begin_ < ret_);
+ MOZ_ASSERT(ret_ < end_);
+ MOZ_ASSERT(offsets.uncheckedCallEntry - begin_ <= UINT8_MAX);
+ MOZ_ASSERT(offsets.tierEntry - begin_ <= UINT8_MAX);
+ u.funcIndex_ = funcIndex;
+ u.func.lineOrBytecode_ = funcLineOrBytecode;
+ u.func.beginToUncheckedCallEntry_ = offsets.uncheckedCallEntry - begin_;
+ u.func.beginToTierEntry_ = offsets.tierEntry - begin_;
+}
+
+const CodeRange* wasm::LookupInSorted(const CodeRangeVector& codeRanges,
+ CodeRange::OffsetInCode target) {
+ size_t lowerBound = 0;
+ size_t upperBound = codeRanges.length();
+
+ size_t match;
+ if (!BinarySearch(codeRanges, lowerBound, upperBound, target, &match)) {
+ return nullptr;
+ }
+
+ return &codeRanges[match];
+}
+
+UniqueTlsData wasm::CreateTlsData(uint32_t globalDataLength) {
+ void* allocatedBase = js_calloc(TlsDataAlign + offsetof(TlsData, globalArea) +
+ globalDataLength);
+ if (!allocatedBase) {
+ return nullptr;
+ }
+
+ auto* tlsData = reinterpret_cast<TlsData*>(
+ AlignBytes(uintptr_t(allocatedBase), TlsDataAlign));
+ tlsData->allocatedBase = allocatedBase;
+
+ return UniqueTlsData(tlsData);
+}
+
+void TlsData::setInterrupt() {
+ interrupt = true;
+ stackLimit = UINTPTR_MAX;
+}
+
+bool TlsData::isInterrupted() const {
+ return interrupt || stackLimit == UINTPTR_MAX;
+}
+
+void TlsData::resetInterrupt(JSContext* cx) {
+ interrupt = false;
+ stackLimit = cx->stackLimitForJitCode(JS::StackForUntrustedScript);
+}
+
+void wasm::Log(JSContext* cx, const char* fmt, ...) {
+ MOZ_ASSERT(!cx->isExceptionPending());
+
+ if (!cx->options().wasmVerbose()) {
+ return;
+ }
+
+ va_list args;
+ va_start(args, fmt);
+
+ if (UniqueChars chars = JS_vsmprintf(fmt, args)) {
+ WarnNumberASCII(cx, JSMSG_WASM_VERBOSE, chars.get());
+ if (cx->isExceptionPending()) {
+ cx->clearPendingException();
+ }
+ }
+
+ va_end(args);
+}
+
+#ifdef WASM_CODEGEN_DEBUG
+bool wasm::IsCodegenDebugEnabled(DebugChannel channel) {
+ switch (channel) {
+ case DebugChannel::Function:
+ return JitOptions.enableWasmFuncCallSpew;
+ case DebugChannel::Import:
+ return JitOptions.enableWasmImportCallSpew;
+ }
+ return false;
+}
+#endif
+
+void wasm::DebugCodegen(DebugChannel channel, const char* fmt, ...) {
+#ifdef WASM_CODEGEN_DEBUG
+ if (!IsCodegenDebugEnabled(channel)) {
+ return;
+ }
+ va_list ap;
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+#endif
+}
+
+UniqueChars wasm::ToString(ValType type) {
+ const char* literal = nullptr;
+ switch (type.kind()) {
+ case ValType::I32:
+ literal = "i32";
+ break;
+ case ValType::I64:
+ literal = "i64";
+ break;
+ case ValType::V128:
+ literal = "v128";
+ break;
+ case ValType::F32:
+ literal = "f32";
+ break;
+ case ValType::F64:
+ literal = "f64";
+ break;
+ case ValType::Ref:
+ if (type.isNullable() && !type.isTypeIndex()) {
+ switch (type.refTypeKind()) {
+ case RefType::Func:
+ literal = "funcref";
+ break;
+ case RefType::Extern:
+ literal = "externref";
+ break;
+ case RefType::Eq:
+ literal = "eqref";
+ break;
+ case RefType::TypeIndex:
+ MOZ_ASSERT_UNREACHABLE();
+ }
+ } else {
+ const char* heapType = nullptr;
+ switch (type.refTypeKind()) {
+ case RefType::Func:
+ heapType = "func";
+ break;
+ case RefType::Extern:
+ heapType = "extern";
+ break;
+ case RefType::Eq:
+ heapType = "eq";
+ break;
+ case RefType::TypeIndex:
+ return JS_smprintf("(ref %s%d)", type.isNullable() ? "null " : "",
+ type.refType().typeIndex());
+ }
+ return JS_smprintf("(ref %s%s)", type.isNullable() ? "null " : "",
+ heapType);
+ }
+ break;
+ }
+ return JS_smprintf("%s", literal);
+}
+
+UniqueChars wasm::ToString(const Maybe<ValType>& type) {
+ return type ? ToString(type.ref()) : JS_smprintf("%s", "void");
+}
diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
new file mode 100644
index 0000000000..4d3070018f
--- /dev/null
+++ b/js/src/wasm/WasmTypes.h
@@ -0,0 +1,4000 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_types_h
+#define wasm_types_h
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/BinarySearch.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/Unused.h"
+
+#include <type_traits>
+
+#include "NamespaceImports.h"
+
+#include "ds/LifoAlloc.h"
+#include "jit/IonTypes.h"
+#include "js/RefCounted.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+#include "vm/MallocProvider.h"
+#include "vm/NativeObject.h"
+#include "wasm/WasmConstants.h"
+#include "wasm/WasmUtility.h"
+
+namespace js {
+
+namespace jit {
+enum class RoundingMode;
+template <class VecT, class ABIArgGeneratorT>
+class ABIArgIterBase;
+} // namespace jit
+
+// This is a widespread header, so let's keep the core wasm impl types out.
+
+typedef GCVector<JSFunction*, 0, SystemAllocPolicy> JSFunctionVector;
+
+class WasmMemoryObject;
+using GCPtrWasmMemoryObject = GCPtr<WasmMemoryObject*>;
+using RootedWasmMemoryObject = Rooted<WasmMemoryObject*>;
+using HandleWasmMemoryObject = Handle<WasmMemoryObject*>;
+using MutableHandleWasmMemoryObject = MutableHandle<WasmMemoryObject*>;
+
+class WasmModuleObject;
+using RootedWasmModuleObject = Rooted<WasmModuleObject*>;
+using HandleWasmModuleObject = Handle<WasmModuleObject*>;
+using MutableHandleWasmModuleObject = MutableHandle<WasmModuleObject*>;
+
+class WasmInstanceObject;
+using WasmInstanceObjectVector = GCVector<WasmInstanceObject*>;
+using RootedWasmInstanceObject = Rooted<WasmInstanceObject*>;
+using HandleWasmInstanceObject = Handle<WasmInstanceObject*>;
+using MutableHandleWasmInstanceObject = MutableHandle<WasmInstanceObject*>;
+
+class WasmTableObject;
+typedef GCVector<WasmTableObject*, 0, SystemAllocPolicy> WasmTableObjectVector;
+using RootedWasmTableObject = Rooted<WasmTableObject*>;
+using HandleWasmTableObject = Handle<WasmTableObject*>;
+using MutableHandleWasmTableObject = MutableHandle<WasmTableObject*>;
+
+class WasmGlobalObject;
+typedef GCVector<WasmGlobalObject*, 0, SystemAllocPolicy>
+ WasmGlobalObjectVector;
+using RootedWasmGlobalObject = Rooted<WasmGlobalObject*>;
+
+class WasmExceptionObject;
+typedef GCVector<WasmExceptionObject*, 0, SystemAllocPolicy>
+ WasmExceptionObjectVector;
+using RootedWasmExceptionObject = Rooted<WasmExceptionObject*>;
+
+namespace wasm {
+
+using mozilla::Atomic;
+using mozilla::DebugOnly;
+using mozilla::EnumeratedArray;
+using mozilla::MallocSizeOf;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::PodCopy;
+using mozilla::PodZero;
+using mozilla::Some;
+using mozilla::Unused;
+
+class Code;
+class DebugState;
+class GeneratedSourceMap;
+class Memory;
+class Module;
+class Instance;
+class Table;
+
+// Uint32Vector has initial size 8 on the basis that the dominant use cases
+// (line numbers and control stacks) tend to have a small but nonzero number
+// of elements.
+typedef Vector<uint32_t, 8, SystemAllocPolicy> Uint32Vector;
+
+typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
+using UniqueBytes = UniquePtr<Bytes>;
+using UniqueConstBytes = UniquePtr<const Bytes>;
+typedef Vector<char, 0, SystemAllocPolicy> UTF8Bytes;
+typedef Vector<Instance*, 0, SystemAllocPolicy> InstanceVector;
+typedef Vector<UniqueChars, 0, SystemAllocPolicy> UniqueCharsVector;
+typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>
+ RenumberMap;
+
+// Bit set as the lowest bit of a frame pointer, used in two different mutually
+// exclusive situations:
+// - either it's a low bit tag in a FramePointer value read from the
+// Frame::callerFP of an inner wasm frame. This indicates the previous call
+// frame has been set up by a JIT caller that directly called into a wasm
+// function's body. This is only stored in Frame::callerFP for a wasm frame
+//   called from JIT code, and thus it cannot appear in a JitActivation's
+//   exitFP.
+// - or it's the low bit tag set when exiting wasm code in JitActivation's
+// exitFP.
+
+constexpr uintptr_t ExitOrJitEntryFPTag = 0x1;
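+
+// Illustrative sketch (assumed usage, not part of this header): for a
+// hypothetical callerFP value read from a Frame, the tag can be tested and
+// stripped with plain bit operations, e.g.
+//
+//   bool taggedCallerFP = (uintptr_t(callerFP) & ExitOrJitEntryFPTag) != 0;
+//   uint8_t* fp = (uint8_t*)(uintptr_t(callerFP) & ~ExitOrJitEntryFPTag);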
+
+// To call Vector::shrinkStorageToFit, a type must specialize mozilla::IsPod
+// which is pretty verbose to do within js::wasm, so factor that process out
+// into a macro.
+
+#define WASM_DECLARE_POD_VECTOR(Type, VectorName) \
+ } \
+ } \
+ namespace mozilla { \
+ template <> \
+ struct IsPod<js::wasm::Type> : std::true_type {}; \
+ } \
+ namespace js { \
+ namespace wasm { \
+ typedef Vector<Type, 0, SystemAllocPolicy> VectorName;
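+
+// Illustrative expansion (for a hypothetical POD struct Foo):
+// WASM_DECLARE_POD_VECTOR(Foo, FooVector) temporarily closes the js::wasm
+// namespaces, specializes mozilla::IsPod<js::wasm::Foo>, reopens them, and
+// leaves behind `typedef Vector<Foo, 0, SystemAllocPolicy> FooVector;`, so
+// that FooVector::shrinkStorageToFit() compiles for the element type.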
+
+// A wasm Module and everything it contains must support serialization and
+// deserialization. Some data can be simply copied as raw bytes and,
+// as a convention, is stored in an inline CacheablePod struct. Everything else
+// should implement the methods below, which are called recursively by the
+// containing Module.
+
+#define WASM_DECLARE_SERIALIZABLE(Type) \
+ size_t serializedSize() const; \
+ uint8_t* serialize(uint8_t* cursor) const; \
+ const uint8_t* deserialize(const uint8_t* cursor); \
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+template <class T>
+struct SerializableRefPtr : RefPtr<T> {
+ using RefPtr<T>::operator=;
+
+ SerializableRefPtr() = default;
+
+ template <class U>
+ MOZ_IMPLICIT SerializableRefPtr(U&& u) : RefPtr<T>(std::forward<U>(u)) {}
+
+ WASM_DECLARE_SERIALIZABLE(SerializableRefPtr)
+};
+
+// This reusable base class factors out the logic for a resource that is shared
+// by multiple instances/modules but should only be counted once when computing
+// about:memory stats.
+
+template <class T>
+struct ShareableBase : AtomicRefCounted<T> {
+ using SeenSet = HashSet<const T*, DefaultHasher<const T*>, SystemAllocPolicy>;
+
+ size_t sizeOfIncludingThisIfNotSeen(MallocSizeOf mallocSizeOf,
+ SeenSet* seen) const {
+ const T* self = static_cast<const T*>(this);
+ typename SeenSet::AddPtr p = seen->lookupForAdd(self);
+ if (p) {
+ return 0;
+ }
+ bool ok = seen->add(p, self);
+ (void)ok; // oh well
+ return mallocSizeOf(self) + self->sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+// ShareableBytes is a reference-counted Vector of bytes.
+
+struct ShareableBytes : ShareableBase<ShareableBytes> {
+ // Vector is 'final', so instead make Vector a member and add boilerplate.
+ Bytes bytes;
+
+ ShareableBytes() = default;
+ explicit ShareableBytes(Bytes&& bytes) : bytes(std::move(bytes)) {}
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return bytes.sizeOfExcludingThis(mallocSizeOf);
+ }
+ const uint8_t* begin() const { return bytes.begin(); }
+ const uint8_t* end() const { return bytes.end(); }
+ size_t length() const { return bytes.length(); }
+ bool append(const uint8_t* start, size_t len) {
+ return bytes.append(start, len);
+ }
+};
+
+using MutableBytes = RefPtr<ShareableBytes>;
+using SharedBytes = RefPtr<const ShareableBytes>;
+
+// The Opcode compactly and safely represents the primary opcode plus any
+// extension, with convenient predicates and accessors.
+
+class Opcode {
+ uint32_t bits_;
+
+ public:
+ MOZ_IMPLICIT Opcode(Op op) : bits_(uint32_t(op)) {
+ static_assert(size_t(Op::Limit) == 256, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
+ }
+ MOZ_IMPLICIT Opcode(MiscOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::MiscPrefix)) {
+ static_assert(size_t(MiscOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(MiscOp::Limit));
+ }
+ MOZ_IMPLICIT Opcode(ThreadOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::ThreadPrefix)) {
+ static_assert(size_t(ThreadOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(ThreadOp::Limit));
+ }
+ MOZ_IMPLICIT Opcode(MozOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::MozPrefix)) {
+ static_assert(size_t(MozOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(MozOp::Limit));
+ }
+ MOZ_IMPLICIT Opcode(SimdOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::SimdPrefix)) {
+ static_assert(size_t(SimdOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(SimdOp::Limit));
+ }
+
+ bool isOp() const { return bits_ < uint32_t(Op::FirstPrefix); }
+ bool isMisc() const { return (bits_ & 255) == uint32_t(Op::MiscPrefix); }
+ bool isThread() const { return (bits_ & 255) == uint32_t(Op::ThreadPrefix); }
+ bool isMoz() const { return (bits_ & 255) == uint32_t(Op::MozPrefix); }
+ bool isSimd() const { return (bits_ & 255) == uint32_t(Op::SimdPrefix); }
+
+ Op asOp() const {
+ MOZ_ASSERT(isOp());
+ return Op(bits_);
+ }
+ MiscOp asMisc() const {
+ MOZ_ASSERT(isMisc());
+ return MiscOp(bits_ >> 8);
+ }
+ ThreadOp asThread() const {
+ MOZ_ASSERT(isThread());
+ return ThreadOp(bits_ >> 8);
+ }
+ MozOp asMoz() const {
+ MOZ_ASSERT(isMoz());
+ return MozOp(bits_ >> 8);
+ }
+ SimdOp asSimd() const {
+ MOZ_ASSERT(isSimd());
+ return SimdOp(bits_ >> 8);
+ }
+
+ uint32_t bits() const { return bits_; }
+
+ bool operator==(const Opcode& that) const { return bits_ == that.bits_; }
+ bool operator!=(const Opcode& that) const { return bits_ != that.bits_; }
+};
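+
+// Usage sketch (illustrative; the handleXxx helpers are hypothetical): code
+// that consumes an Opcode typically tests the prefix predicates and then
+// unpacks with the matching accessor.
+//
+//   void dispatch(Opcode opcode) {
+//     if (opcode.isOp()) {
+//       handleBaseOp(opcode.asOp());
+//     } else if (opcode.isMisc()) {
+//       handleMiscOp(opcode.asMisc());
+//     } else if (opcode.isSimd()) {
+//       handleSimdOp(opcode.asSimd());
+//     }  // isThread() and isMoz() are handled analogously.
+//   }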
+
+// A PackedTypeCode represents a TypeCode paired with a refTypeIndex (valid only
+// for AbstractReferenceTypeIndexCode). PackedTypeCode is guaranteed to be POD.
+// The TypeCode spans the full range of type codes, including the specialized
+// ExternRef and FuncRef codes.
+//
+// PackedTypeCode is an enum class, as opposed to the more natural
+// struct-with-bitfields, because bitfields would make it non-POD.
+//
+// DO NOT use PackedTypeCode as a cast. ALWAYS go via PackTypeCode().
+
+enum class PackedTypeCode : uint32_t {};
+
+static_assert(std::is_pod_v<PackedTypeCode>,
+ "must be POD to be simply serialized/deserialized");
+
+// A PackedTypeCode should be representable in a single word, so in the
+// smallest case, 32 bits. However, sometimes 2 bits of the word may be taken
+// by a pointer tag; for that reason, limit ourselves to 30 bits. After the
+// 8-bit type code and the 1-bit nullable flag, 21 bits are left for the type
+// index.
+constexpr uint32_t PointerTagBits = 2;
+constexpr uint32_t TypeCodeBits = 8;
+constexpr uint32_t NullableBits = 1;
+constexpr uint32_t TypeIndexBits =
+ 32 - PointerTagBits - TypeCodeBits - NullableBits;
+static_assert(MaxTypes < (1 << TypeIndexBits), "enough bits");
+
+constexpr uint32_t PackedTypeCodeMask = (1 << TypeCodeBits) - 1;
+constexpr uint32_t PackedTypeIndexShift = TypeCodeBits;
+constexpr uint32_t PackedTypeIndexMask = (1 << TypeIndexBits) - 1;
+constexpr uint32_t PackedTypeNullableShift = TypeCodeBits + TypeIndexBits;
+
+// Only use these with PackedTypeCode
+constexpr uint32_t NoTypeCode = PackedTypeCodeMask;
+constexpr uint32_t NoRefTypeIndex = PackedTypeIndexMask;
+
+static inline PackedTypeCode PackTypeCode(TypeCode tc, uint32_t refTypeIndex,
+ bool isNullable) {
+ MOZ_ASSERT(uint32_t(tc) <= PackedTypeCodeMask);
+ MOZ_ASSERT_IF(tc != AbstractReferenceTypeIndexCode,
+ refTypeIndex == NoRefTypeIndex);
+ MOZ_ASSERT_IF(tc == AbstractReferenceTypeIndexCode, refTypeIndex <= MaxTypes);
+ uint32_t shiftedTypeIndex = refTypeIndex << PackedTypeIndexShift;
+ uint32_t shiftedNullable = uint32_t(isNullable) << PackedTypeNullableShift;
+ return PackedTypeCode(shiftedNullable | shiftedTypeIndex | uint32_t(tc));
+}
+
+static inline PackedTypeCode PackTypeCode(TypeCode tc, bool nullable) {
+ return PackTypeCode(tc, NoRefTypeIndex, nullable);
+}
+
+static inline PackedTypeCode PackTypeCode(TypeCode tc) {
+ return PackTypeCode(tc, NoRefTypeIndex, false);
+}
+
+static inline PackedTypeCode InvalidPackedTypeCode() {
+ return PackedTypeCode(NoTypeCode);
+}
+
+static inline PackedTypeCode PackedTypeCodeFromBits(uint32_t bits) {
+ return PackTypeCode(TypeCode(bits & PackedTypeCodeMask),
+ (bits >> PackedTypeIndexShift) & PackedTypeIndexMask,
+ bits >> PackedTypeNullableShift);
+}
+
+static inline bool IsValid(PackedTypeCode ptc) {
+ return (uint32_t(ptc) & PackedTypeCodeMask) != NoTypeCode;
+}
+
+static inline uint32_t PackedTypeCodeToBits(PackedTypeCode ptc) {
+ return uint32_t(ptc);
+}
+
+static inline TypeCode UnpackTypeCodeType(PackedTypeCode ptc) {
+ MOZ_ASSERT(IsValid(ptc));
+ return TypeCode(uint32_t(ptc) & PackedTypeCodeMask);
+}
+
+static inline uint32_t UnpackTypeCodeIndex(PackedTypeCode ptc) {
+ MOZ_ASSERT(UnpackTypeCodeType(ptc) == AbstractReferenceTypeIndexCode);
+ return (uint32_t(ptc) >> PackedTypeIndexShift) & PackedTypeIndexMask;
+}
+
+static inline uint32_t UnpackTypeCodeIndexUnchecked(PackedTypeCode ptc) {
+ return (uint32_t(ptc) >> PackedTypeIndexShift) & PackedTypeIndexMask;
+}
+
+static inline bool UnpackTypeCodeNullable(PackedTypeCode ptc) {
+ return (uint32_t(ptc) >> PackedTypeNullableShift) == 1;
+}
+
+// Return the TypeCode, but return AbstractReferenceTypeCode for any reference
+// type.
+//
+// This function is very, very hot, hence what would normally be a switch on the
+// value `c` to map the reference types to AbstractReferenceTypeCode has been
+// distilled into a simple comparison; this is fastest. Should type codes
+// become too complicated for this to work, a lookup table would still have
+// better performance than a switch.
+//
+// An alternative is for the PackedTypeCode to represent something closer to
+// what ValType needs, so that this decoding step is not necessary, but that
+// moves complexity elsewhere, and the perf gain here would be only about 1% for
+// baseline compilation throughput.
+
+static inline TypeCode UnpackTypeCodeTypeAbstracted(PackedTypeCode ptc) {
+ TypeCode c = UnpackTypeCodeType(ptc);
+ return c < LowestPrimitiveTypeCode ? AbstractReferenceTypeCode : c;
+}
+
+static inline bool IsReferenceType(PackedTypeCode ptc) {
+ return UnpackTypeCodeTypeAbstracted(ptc) == AbstractReferenceTypeCode;
+}
+
+// Return a TypeCode with the nullable bit cleared, must only be used on
+// reference types.
+
+static inline PackedTypeCode RepackTypeCodeAsNonNullable(PackedTypeCode ptc) {
+ MOZ_ASSERT(IsReferenceType(ptc));
+ constexpr uint32_t NonNullableMask = ~(1 << PackedTypeNullableShift);
+ return PackedTypeCode(uint32_t(ptc) & NonNullableMask);
+}
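+
+// Packing sketch (illustrative): a nullable reference to type index 7
+// round-trips through the helpers above as follows.
+//
+//   PackedTypeCode ptc =
+//       PackTypeCode(AbstractReferenceTypeIndexCode, /* refTypeIndex= */ 7,
+//                    /* isNullable= */ true);
+//   MOZ_ASSERT(UnpackTypeCodeType(ptc) == AbstractReferenceTypeIndexCode);
+//   MOZ_ASSERT(UnpackTypeCodeIndex(ptc) == 7);
+//   MOZ_ASSERT(UnpackTypeCodeNullable(ptc));
+//   MOZ_ASSERT(!UnpackTypeCodeNullable(RepackTypeCodeAsNonNullable(ptc)));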
+
+// An enum that describes the representation classes for tables; the table
+// element type is mapped into this by Table::repr().
+
+enum class TableRepr { Ref, Func };
+
+// The RefType carries more information about types t for which t.isReference()
+// is true.
+
+class RefType {
+ public:
+ enum Kind {
+ Func = uint8_t(TypeCode::FuncRef),
+ Extern = uint8_t(TypeCode::ExternRef),
+ Eq = uint8_t(TypeCode::EqRef),
+ TypeIndex = uint8_t(AbstractReferenceTypeIndexCode)
+ };
+
+ private:
+ PackedTypeCode ptc_;
+
+#ifdef DEBUG
+ bool isValid() const {
+ switch (UnpackTypeCodeType(ptc_)) {
+ case TypeCode::FuncRef:
+ case TypeCode::ExternRef:
+ case TypeCode::EqRef:
+ MOZ_ASSERT(UnpackTypeCodeIndexUnchecked(ptc_) == NoRefTypeIndex);
+ return true;
+ case AbstractReferenceTypeIndexCode:
+ MOZ_ASSERT(UnpackTypeCodeIndexUnchecked(ptc_) != NoRefTypeIndex);
+ return true;
+ default:
+ return false;
+ }
+ }
+#endif
+ RefType(Kind kind, bool nullable)
+ : ptc_(PackTypeCode(TypeCode(kind), nullable)) {
+ MOZ_ASSERT(isValid());
+ }
+
+ RefType(uint32_t refTypeIndex, bool nullable)
+ : ptc_(PackTypeCode(AbstractReferenceTypeIndexCode, refTypeIndex,
+ nullable)) {
+ MOZ_ASSERT(isValid());
+ }
+
+ public:
+ RefType() : ptc_(InvalidPackedTypeCode()) {}
+ explicit RefType(PackedTypeCode ptc) : ptc_(ptc) { MOZ_ASSERT(isValid()); }
+
+ static RefType fromTypeCode(TypeCode tc, bool nullable) {
+ MOZ_ASSERT(tc != AbstractReferenceTypeIndexCode);
+ return RefType(Kind(tc), nullable);
+ }
+
+ static RefType fromTypeIndex(uint32_t refTypeIndex, bool nullable) {
+ return RefType(refTypeIndex, nullable);
+ }
+
+ Kind kind() const { return Kind(UnpackTypeCodeType(ptc_)); }
+
+ uint32_t typeIndex() const { return UnpackTypeCodeIndex(ptc_); }
+
+ PackedTypeCode packed() const { return ptc_; }
+
+ static RefType func() { return RefType(Func, true); }
+ static RefType extern_() { return RefType(Extern, true); }
+ static RefType eq() { return RefType(Eq, true); }
+
+ bool isFunc() const { return kind() == RefType::Func; }
+ bool isExtern() const { return kind() == RefType::Extern; }
+ bool isEq() const { return kind() == RefType::Eq; }
+ bool isTypeIndex() const { return kind() == RefType::TypeIndex; }
+
+ bool isNullable() const { return UnpackTypeCodeNullable(ptc_); }
+
+ TableRepr tableRepr() const {
+ switch (kind()) {
+ case RefType::Func:
+ return TableRepr::Func;
+ case RefType::Extern:
+ case RefType::Eq:
+ return TableRepr::Ref;
+ case RefType::TypeIndex:
+ MOZ_CRASH("NYI");
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("switch is exhaustive");
+ }
+
+ bool operator==(const RefType& that) const { return ptc_ == that.ptc_; }
+ bool operator!=(const RefType& that) const { return ptc_ != that.ptc_; }
+};
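+
+// Example (illustrative): the canonical nullable external reference type and
+// its table representation.
+//
+//   RefType t = RefType::extern_();
+//   MOZ_ASSERT(t.isExtern() && t.isNullable());
+//   MOZ_ASSERT(t.tableRepr() == TableRepr::Ref);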
+
+// The ValType represents the storage type of a WebAssembly location, whether
+// parameter, local, or global.
+
+class ValType {
+ PackedTypeCode tc_;
+
+#ifdef DEBUG
+ bool isValidTypeCode() {
+ MOZ_ASSERT(isValid());
+ switch (UnpackTypeCodeType(tc_)) {
+ case TypeCode::I32:
+ case TypeCode::I64:
+ case TypeCode::F32:
+ case TypeCode::F64:
+ case TypeCode::V128:
+ case TypeCode::FuncRef:
+ case TypeCode::ExternRef:
+ case TypeCode::EqRef:
+ case AbstractReferenceTypeIndexCode:
+ return true;
+ default:
+ return false;
+ }
+ }
+#endif
+
+ public:
+ enum Kind {
+ I32 = uint8_t(TypeCode::I32),
+ I64 = uint8_t(TypeCode::I64),
+ F32 = uint8_t(TypeCode::F32),
+ F64 = uint8_t(TypeCode::F64),
+ V128 = uint8_t(TypeCode::V128),
+ Ref = uint8_t(AbstractReferenceTypeCode),
+ };
+
+ private:
+ explicit ValType(TypeCode c) : tc_(PackTypeCode(c)) {
+ MOZ_ASSERT(c != AbstractReferenceTypeIndexCode);
+ MOZ_ASSERT(isValid());
+ }
+
+ TypeCode typeCode() const {
+ MOZ_ASSERT(isValid());
+ return UnpackTypeCodeType(tc_);
+ }
+
+ public:
+ ValType() : tc_(InvalidPackedTypeCode()) {}
+
+ MOZ_IMPLICIT ValType(Kind c) : tc_(PackTypeCode(TypeCode(c))) {
+ MOZ_ASSERT(c != Ref);
+ MOZ_ASSERT(isValidTypeCode());
+ }
+
+ MOZ_IMPLICIT ValType(RefType rt) : tc_(rt.packed()) {
+ MOZ_ASSERT(isValidTypeCode());
+ }
+
+ explicit ValType(PackedTypeCode ptc) : tc_(ptc) {
+ MOZ_ASSERT(isValidTypeCode());
+ }
+
+ explicit ValType(jit::MIRType mty) {
+ switch (mty) {
+ case jit::MIRType::Int32:
+ tc_ = PackTypeCode(TypeCode::I32);
+ break;
+ case jit::MIRType::Int64:
+ tc_ = PackTypeCode(TypeCode::I64);
+ break;
+ case jit::MIRType::Float32:
+ tc_ = PackTypeCode(TypeCode::F32);
+ break;
+ case jit::MIRType::Double:
+ tc_ = PackTypeCode(TypeCode::F64);
+ break;
+ case jit::MIRType::Simd128:
+ tc_ = PackTypeCode(TypeCode::V128);
+ break;
+ default:
+ MOZ_CRASH("ValType(MIRType): unexpected type");
+ }
+ }
+
+ static ValType fromNonRefTypeCode(TypeCode tc) {
+#ifdef DEBUG
+ switch (tc) {
+ case TypeCode::I32:
+ case TypeCode::I64:
+ case TypeCode::F32:
+ case TypeCode::F64:
+ case TypeCode::V128:
+ break;
+ default:
+ MOZ_CRASH("Bad type code");
+ }
+#endif
+ return ValType(tc);
+ }
+
+ static ValType fromBitsUnsafe(uint32_t bits) {
+ return ValType(PackedTypeCodeFromBits(bits));
+ }
+
+ bool isValid() const { return IsValid(tc_); }
+
+ PackedTypeCode packed() const {
+ MOZ_ASSERT(isValid());
+ return tc_;
+ }
+
+ uint32_t bitsUnsafe() const {
+ MOZ_ASSERT(isValid());
+ return PackedTypeCodeToBits(tc_);
+ }
+
+ bool isFuncRef() const {
+ return UnpackTypeCodeType(tc_) == TypeCode::FuncRef;
+ }
+
+ bool isExternRef() const {
+ return UnpackTypeCodeType(tc_) == TypeCode::ExternRef;
+ }
+
+ bool isEqRef() const { return UnpackTypeCodeType(tc_) == TypeCode::EqRef; }
+
+ bool isNullable() const {
+ MOZ_ASSERT(isReference());
+ return refType().isNullable();
+ }
+
+ bool isTypeIndex() const {
+ MOZ_ASSERT(isValid());
+ return UnpackTypeCodeType(tc_) == AbstractReferenceTypeIndexCode;
+ }
+
+ bool isReference() const {
+ MOZ_ASSERT(isValid());
+ return IsReferenceType(tc_);
+ }
+
+ // Returns whether the type has a default value.
+ bool isDefaultable() const {
+ MOZ_ASSERT(isValid());
+ return !isReference() || isNullable();
+ }
+
+ // Returns whether the type has a representation in JS.
+ bool isExposable() const {
+ MOZ_ASSERT(isValid());
+#if defined(ENABLE_WASM_SIMD) || defined(ENABLE_WASM_GC)
+ return kind() != ValType::V128 && !isTypeIndex();
+#else
+ return true;
+#endif
+ }
+
+ Kind kind() const {
+ MOZ_ASSERT(isValid());
+ return Kind(UnpackTypeCodeTypeAbstracted(tc_));
+ }
+
+ RefType refType() const {
+ MOZ_ASSERT(isReference());
+ return RefType(tc_);
+ }
+
+ RefType::Kind refTypeKind() const {
+ MOZ_ASSERT(isReference());
+ return RefType(tc_).kind();
+ }
+
+ // Some types are encoded as JS::Value when they escape from Wasm (when passed
+ // as parameters to imports or returned from exports). For ExternRef the
+ // Value encoding is pretty much a requirement. For other types it's a choice
+ // that may (temporarily) simplify some code.
+ bool isEncodedAsJSValueOnEscape() const {
+ switch (typeCode()) {
+ case TypeCode::FuncRef:
+ case TypeCode::ExternRef:
+ case TypeCode::EqRef:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ uint32_t size() const {
+ switch (kind()) {
+ case ValType::I32:
+ return 4;
+ case ValType::I64:
+ return 8;
+ case ValType::F32:
+ return 4;
+ case ValType::F64:
+ return 8;
+ case ValType::V128:
+ return 16;
+ case ValType::Ref:
+ return sizeof(void*);
+ }
+ MOZ_ASSERT_UNREACHABLE();
+ return 0;
+ }
+
+ uint32_t alignmentInStruct() { return size(); }
+
+ void renumber(const RenumberMap& map) {
+ if (!isTypeIndex()) {
+ return;
+ }
+
+ if (RenumberMap::Ptr p = map.lookup(refType().typeIndex())) {
+ *this = RefType::fromTypeIndex(p->value(), isNullable());
+ }
+ }
+
+ void offsetTypeIndex(uint32_t offsetBy) {
+ if (!isTypeIndex()) {
+ return;
+ }
+ *this =
+ RefType::fromTypeIndex(refType().typeIndex() + offsetBy, isNullable());
+ }
+
+ bool operator==(const ValType& that) const {
+ MOZ_ASSERT(isValid() && that.isValid());
+ return tc_ == that.tc_;
+ }
+
+ bool operator!=(const ValType& that) const {
+ MOZ_ASSERT(isValid() && that.isValid());
+ return tc_ != that.tc_;
+ }
+
+ bool operator==(Kind that) const {
+ MOZ_ASSERT(isValid());
+ MOZ_ASSERT(that != Kind::Ref);
+ return Kind(typeCode()) == that;
+ }
+
+ bool operator!=(Kind that) const { return !(*this == that); }
+};
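+
+// Example (illustrative): ValType interoperates with RefType and with its
+// Kind enumerators.
+//
+//   ValType ref = ValType(RefType::func());
+//   MOZ_ASSERT(ref.isReference() && ref.isFuncRef());
+//   MOZ_ASSERT(ref.kind() == ValType::Ref);
+//   MOZ_ASSERT(ValType(ValType::I32).size() == 4);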
+
+struct V128 {
+ uint8_t bytes[16]; // Little-endian
+
+ V128() { memset(bytes, 0, sizeof(bytes)); }
+
+ template <typename T>
+ T extractLane(unsigned lane) const {
+ T result;
+ MOZ_ASSERT(lane < 16 / sizeof(T));
+ memcpy(&result, bytes + sizeof(T) * lane, sizeof(T));
+ return result;
+ }
+
+ template <typename T>
+ void insertLane(unsigned lane, T value) {
+ MOZ_ASSERT(lane < 16 / sizeof(T));
+ memcpy(bytes + sizeof(T) * lane, &value, sizeof(T));
+ }
+
+ bool operator==(const V128& rhs) const {
+ for (size_t i = 0; i < sizeof(bytes); i++) {
+ if (bytes[i] != rhs.bytes[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ bool operator!=(const V128& rhs) const { return !(*this == rhs); }
+};
+
+static_assert(sizeof(V128) == 16, "Invariant");
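+
+// Example (illustrative): lanes are little-endian slices of the 16 bytes.
+//
+//   V128 v;
+//   v.insertLane<uint32_t>(2, 0xdeadbeef);
+//   MOZ_ASSERT(v.extractLane<uint32_t>(2) == 0xdeadbeef);
+//   MOZ_ASSERT(v.extractLane<uint32_t>(0) == 0);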
+
+// The dominant use of this data type is for locals and args, and profiling
+// with ZenGarden and Tanks suggests an initial size of 16 minimises heap
+// allocation, both in terms of blocks and bytes.
+typedef Vector<ValType, 16, SystemAllocPolicy> ValTypeVector;
+
+// ValType utilities
+
+static inline unsigned SizeOf(ValType vt) {
+ switch (vt.kind()) {
+ case ValType::I32:
+ case ValType::F32:
+ return 4;
+ case ValType::I64:
+ case ValType::F64:
+ return 8;
+ case ValType::V128:
+ return 16;
+ case ValType::Ref:
+ return sizeof(intptr_t);
+ }
+ MOZ_CRASH("Invalid ValType");
+}
+
+// Note, ToMIRType is only correct within Wasm, where an AnyRef is represented
+// as a pointer. At the JS/wasm boundary, an AnyRef can be represented as a
+// JS::Value, and the type translation may have to be handled specially and on a
+// case-by-case basis.
+
+static inline jit::MIRType ToMIRType(ValType vt) {
+ switch (vt.kind()) {
+ case ValType::I32:
+ return jit::MIRType::Int32;
+ case ValType::I64:
+ return jit::MIRType::Int64;
+ case ValType::F32:
+ return jit::MIRType::Float32;
+ case ValType::F64:
+ return jit::MIRType::Double;
+ case ValType::V128:
+ return jit::MIRType::Simd128;
+ case ValType::Ref:
+ return jit::MIRType::RefOrNull;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
+}
+
+static inline bool IsNumberType(ValType vt) { return !vt.isReference(); }
+
+static inline jit::MIRType ToMIRType(const Maybe<ValType>& t) {
+ return t ? ToMIRType(ValType(t.ref())) : jit::MIRType::None;
+}
+
+extern UniqueChars ToString(ValType type);
+
+extern UniqueChars ToString(const Maybe<ValType>& type);
+
+// An AnyRef is a boxed value that can represent any wasm reference type and any
+// host type that the host system allows to flow into and out of wasm
+// transparently. It is a pointer-sized datum that has the same representation
+// as all its subtypes (funcref, externref, eqref, (ref T), et al) due to the
+// non-coercive subtyping of the wasm type system. Its current representation
+// is a plain JSObject*, and the private JSObject subtype WasmValueBox is used
+// to box non-object non-null JS values.
+//
+// The C++/wasm boundary always uses a 'void*' type to express AnyRef values, to
+// emphasize the pointer-ness of the value. The C++ code must transform the
+// void* into an AnyRef by calling AnyRef::fromCompiledCode(), and transform an
+// AnyRef into a void* by calling AnyRef::toCompiledCode(). Once in C++, we use
+// AnyRef everywhere. A JS Value is transformed into an AnyRef by calling
+// AnyRef::box(), and the AnyRef is transformed into a JS Value by calling
+// AnyRef::unbox().
+//
+// NOTE that AnyRef values may point to GC'd storage and as such need to be
+// rooted if they are kept live in boxed form across code that may cause GC!
+// Use RootedAnyRef / HandleAnyRef / MutableHandleAnyRef where necessary.
+//
+// The lowest bits of the pointer value are used for tagging, to allow for some
+// representation optimizations and to distinguish various types.
+
+// For version 0, we simply equate AnyRef and JSObject* (this means that there
+// are technically no tags at all yet). We use a simple boxing scheme that
+// wraps a JS value that is not already JSObject in a distinguishable JSObject
+// that holds the value, see WasmTypes.cpp for details. Knowledge of this
+// mapping is embedded in CodeGenerator.cpp (in WasmBoxValue and
+// WasmAnyRefFromJSObject) and in WasmStubs.cpp (in functions Box* and Unbox*).
+
+class AnyRef {
+ // mutable so that tracing may access a JSObject* from a `const Val` or
+ // `const AnyRef`.
+ mutable JSObject* value_;
+
+ explicit AnyRef() : value_((JSObject*)-1) {}
+ explicit AnyRef(JSObject* p) : value_(p) {
+ MOZ_ASSERT(((uintptr_t)p & 0x03) == 0);
+ }
+
+ public:
+ // An invalid AnyRef cannot arise naturally from wasm and so can be used as
+ // a sentinel value to indicate failure from an AnyRef-returning function.
+ static AnyRef invalid() { return AnyRef(); }
+
+ // Given a void* that comes from compiled wasm code, turn it into AnyRef.
+ static AnyRef fromCompiledCode(void* p) { return AnyRef((JSObject*)p); }
+
+ // Given a JSObject* that comes from JS, turn it into AnyRef.
+ static AnyRef fromJSObject(JSObject* p) { return AnyRef(p); }
+
+ // Generate an AnyRef null pointer.
+ static AnyRef null() { return AnyRef(nullptr); }
+
+ bool isNull() const { return value_ == nullptr; }
+
+ bool operator==(const AnyRef& rhs) const {
+ return this->value_ == rhs.value_;
+ }
+
+ bool operator!=(const AnyRef& rhs) const { return !(*this == rhs); }
+
+ void* forCompiledCode() const { return value_; }
+
+ JSObject* asJSObject() const { return value_; }
+
+ JSObject** asJSObjectAddress() const { return &value_; }
+
+ void trace(JSTracer* trc);
+
+ // Tags (to be developed further)
+ static constexpr uintptr_t AnyRefTagMask = 1;
+ static constexpr uintptr_t AnyRefObjTag = 0;
+};
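+
+// Conversion sketch (illustrative): the round trip described above, for a
+// pointer handed to C++ by compiled wasm code.
+//
+//   void* p = ...;                         // produced by jitted wasm code
+//   AnyRef ref = AnyRef::fromCompiledCode(p);
+//   if (!ref.isNull()) {
+//     JSObject* obj = ref.asJSObject();    // version-0 representation
+//     // ... use obj, rooting it if GC may occur ...
+//   }
+//   void* back = ref.forCompiledCode();    // hand the same value back to wasm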
+
+using RootedAnyRef = Rooted<AnyRef>;
+using HandleAnyRef = Handle<AnyRef>;
+using MutableHandleAnyRef = MutableHandle<AnyRef>;
+
+// TODO/AnyRef-boxing: With boxed immediates and strings, these will be defined
+// as MOZ_CRASH or similar so that we can find all locations that need to be
+// fixed.
+
+#define ASSERT_ANYREF_IS_JSOBJECT (void)(0)
+#define STATIC_ASSERT_ANYREF_IS_JSOBJECT static_assert(1, "AnyRef is JSObject")
+
+// Given any JS value, box it as an AnyRef and store it in *result. Returns
+// false on OOM.
+
+bool BoxAnyRef(JSContext* cx, HandleValue val, MutableHandleAnyRef result);
+
+// Given a JS value that requires an object box, box it as an AnyRef and return
+// it, returning nullptr on OOM.
+//
+// Currently the values requiring a box are those other than JSObject* or
+// nullptr, but in the future more values will be represented without an
+// allocation.
+JSObject* BoxBoxableValue(JSContext* cx, HandleValue val);
+
+// Given any AnyRef, unbox it as a JS Value. If it is a reference to a wasm
+// object it will be reflected as a JSObject* representing some TypedObject
+// instance.
+
+Value UnboxAnyRef(AnyRef val);
+
+class WasmValueBox : public NativeObject {
+ static const unsigned VALUE_SLOT = 0;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const JSClass class_;
+
+ static WasmValueBox* create(JSContext* cx, HandleValue val);
+ Value value() const { return getFixedSlot(VALUE_SLOT); }
+ static size_t offsetOfValue() {
+ return NativeObject::getFixedSlotOffset(VALUE_SLOT);
+ }
+};
+
+// A FuncRef is a JSFunction* and is hence also an AnyRef, and the remarks above
+// about AnyRef apply also to FuncRef. When 'funcref' is used as a value type
+// in wasm code, the value that is held is "the canonical function value", which
+// is a function for which IsWasmExportedFunction() is true, and which has the
+// correct identity wrt reference equality of functions. Notably, if a function
+// is imported then its ref.func value compares === in JS to the function that
+// was passed as an import when the instance was created.
+//
+// These rules ensure that casts from funcref to anyref are non-converting
+// (generate no code), and that no wrapping or unwrapping needs to happen when a
+// funcref or anyref flows across the JS/wasm boundary, and that functions have
+// the necessary identity when observed from JS, and in the future, from wasm.
+//
+// Functions stored in tables, whether wasm tables or internal tables, can,
+// however, be stored in a form that optimizes for, e.g., call speed.
+//
+// Reading a funcref from a funcref table, writing a funcref to a funcref table,
+// and generating the value for a ref.func instruction are therefore nontrivial
+// operations that require mapping between the canonical JSFunction and the
+// optimized table representation. Once we get an instruction to call a
+// ref.func directly it too will require such a mapping.
+
+// In many cases, a FuncRef is exactly the same as AnyRef and we can use AnyRef
+// functionality on funcref values. The FuncRef class exists mostly to add more
+// checks and to make it clear, when we need to, that we're manipulating funcref
+// values. FuncRef does not currently subclass AnyRef because there's been no
+// need to, but it probably could.
+
+class FuncRef {
+ JSFunction* value_;
+
+ explicit FuncRef() : value_((JSFunction*)-1) {}
+ explicit FuncRef(JSFunction* p) : value_(p) {
+ MOZ_ASSERT(((uintptr_t)p & 0x03) == 0);
+ }
+
+ public:
+ // Given a void* that comes from compiled wasm code, turn it into FuncRef.
+ static FuncRef fromCompiledCode(void* p) { return FuncRef((JSFunction*)p); }
+
+ // Given a JSFunction* that comes from JS, turn it into FuncRef.
+ static FuncRef fromJSFunction(JSFunction* p) { return FuncRef(p); }
+
+ // Given an AnyRef that represents a possibly-null funcref, turn it into a
+ // FuncRef.
+ static FuncRef fromAnyRefUnchecked(AnyRef p);
+
+ AnyRef asAnyRef() { return AnyRef::fromJSObject((JSObject*)value_); }
+
+ void* forCompiledCode() const { return value_; }
+
+ JSFunction* asJSFunction() { return value_; }
+
+ bool isNull() { return value_ == nullptr; }
+};
+
+using RootedFuncRef = Rooted<FuncRef>;
+using HandleFuncRef = Handle<FuncRef>;
+using MutableHandleFuncRef = MutableHandle<FuncRef>;
+
+// Given any FuncRef, unbox it as a JS Value -- always a JSFunction*.
+
+Value UnboxFuncRef(FuncRef val);
+
+// Exception tags are used to uniquely identify exceptions. They are stored
+// in a vector in Instances and used by both WebAssembly.Exception for import
+// and export, and by the representation of thrown exceptions.
+//
+// Since an exception tag is a (trivial) substructure of AtomicRefCounted, the
+// RefPtr SharedExceptionTag can have many instances/modules referencing a
+// single constant exception tag.
+
+struct ExceptionTag : AtomicRefCounted<ExceptionTag> {
+ ExceptionTag() = default;
+};
+using SharedExceptionTag = RefPtr<ExceptionTag>;
+typedef Vector<SharedExceptionTag, 0, SystemAllocPolicy>
+ SharedExceptionTagVector;
+
+// Code can be compiled either with the Baseline compiler or the Ion compiler,
+// and tier-variant data are tagged with the Tier value.
+//
+// A tier value is used to request tier-variant aspects of code, metadata, or
+// linkdata. The tiers are normally explicit (Baseline and Ion); implicit tiers
+// can be obtained through accessors on Code objects (eg, stableTier).
+
+enum class Tier {
+ Baseline,
+ Debug = Baseline,
+ Optimized,
+ Serialized = Optimized
+};
+
+// Iterator over tiers present in a tiered data structure.
+
+class Tiers {
+ Tier t_[2];
+ uint32_t n_;
+
+ public:
+ explicit Tiers() { n_ = 0; }
+ explicit Tiers(Tier t) {
+ t_[0] = t;
+ n_ = 1;
+ }
+ explicit Tiers(Tier t, Tier u) {
+ MOZ_ASSERT(t != u);
+ t_[0] = t;
+ t_[1] = u;
+ n_ = 2;
+ }
+
+ Tier* begin() { return t_; }
+ Tier* end() { return t_ + n_; }
+};
+
+// A Module can either be asm.js or wasm.
+
+enum ModuleKind { Wasm, AsmJS };
+
+enum class Shareable { False, True };
+
+// Describes the features that control wasm compilation.
+
+struct FeatureArgs {
+ FeatureArgs()
+ : sharedMemory(Shareable::False),
+ refTypes(false),
+ functionReferences(false),
+ gcTypes(false),
+ multiValue(false),
+ v128(false),
+ hugeMemory(false),
+ simdWormhole(false),
+ exceptions(false) {}
+ FeatureArgs(const FeatureArgs&) = default;
+ FeatureArgs& operator=(const FeatureArgs&) = default;
+ FeatureArgs(FeatureArgs&&) = default;
+
+ static FeatureArgs build(JSContext* cx);
+
+ FeatureArgs withRefTypes(bool refTypes) const {
+ FeatureArgs features = *this;
+ features.refTypes = refTypes;
+ return features;
+ }
+
+ Shareable sharedMemory;
+ bool refTypes;
+ bool functionReferences;
+ bool gcTypes;
+ bool multiValue;
+ bool v128;
+ bool hugeMemory;
+ bool simdWormhole;
+ bool exceptions;
+};
+
+// The LitVal class represents a single WebAssembly value of a given value
+// type, mostly for the purpose of numeric literals and initializers. A LitVal
+// does not directly map to a JS value since there is not (currently) a precise
+// representation of i64 values. A LitVal may contain non-canonical NaNs since,
+// within WebAssembly, floats are not canonicalized. Canonicalization must
+// happen at the JS boundary.
+
+class LitVal {
+ public:
+ union Cell {
+ int32_t i32_;
+ int64_t i64_;
+ float f32_;
+ double f64_;
+ wasm::V128 v128_;
+ wasm::AnyRef ref_;
+ Cell() : v128_() {}
+ ~Cell() = default;
+ };
+
+ protected:
+ ValType type_;
+ Cell cell_;
+
+ public:
+ LitVal() : type_(ValType()), cell_{} {}
+
+ explicit LitVal(ValType type) : type_(type) {
+ MOZ_ASSERT(type.isDefaultable());
+ switch (type.kind()) {
+ case ValType::Kind::I32: {
+ cell_.i32_ = 0;
+ break;
+ }
+ case ValType::Kind::I64: {
+ cell_.i64_ = 0;
+ break;
+ }
+ case ValType::Kind::F32: {
+ cell_.f32_ = 0;
+ break;
+ }
+ case ValType::Kind::F64: {
+ cell_.f64_ = 0;
+ break;
+ }
+ case ValType::Kind::V128: {
+ new (&cell_.v128_) V128();
+ break;
+ }
+ case ValType::Kind::Ref: {
+ cell_.ref_ = AnyRef::null();
+ break;
+ }
+ }
+ }
+
+ explicit LitVal(uint32_t i32) : type_(ValType::I32) { cell_.i32_ = i32; }
+ explicit LitVal(uint64_t i64) : type_(ValType::I64) { cell_.i64_ = i64; }
+
+ explicit LitVal(float f32) : type_(ValType::F32) { cell_.f32_ = f32; }
+ explicit LitVal(double f64) : type_(ValType::F64) { cell_.f64_ = f64; }
+
+ explicit LitVal(V128 v128) : type_(ValType::V128) { cell_.v128_ = v128; }
+
+ explicit LitVal(ValType type, AnyRef any) : type_(type) {
+ MOZ_ASSERT(type.isReference());
+ MOZ_ASSERT(any.isNull(),
+ "use Val for non-nullptr ref types to get tracing");
+ cell_.ref_ = any;
+ }
+
+ ValType type() const { return type_; }
+ static constexpr size_t sizeofLargestValue() { return sizeof(cell_); }
+
+ Cell& cell() { return cell_; }
+ const Cell& cell() const { return cell_; }
+
+ uint32_t i32() const {
+ MOZ_ASSERT(type_ == ValType::I32);
+ return cell_.i32_;
+ }
+ uint64_t i64() const {
+ MOZ_ASSERT(type_ == ValType::I64);
+ return cell_.i64_;
+ }
+ const float& f32() const {
+ MOZ_ASSERT(type_ == ValType::F32);
+ return cell_.f32_;
+ }
+ const double& f64() const {
+ MOZ_ASSERT(type_ == ValType::F64);
+ return cell_.f64_;
+ }
+ AnyRef ref() const {
+ MOZ_ASSERT(type_.isReference());
+ return cell_.ref_;
+ }
+ const V128& v128() const {
+ MOZ_ASSERT(type_ == ValType::V128);
+ return cell_.v128_;
+ }
+};
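+
+// Example (illustrative): literal values carry their type alongside the
+// payload.
+//
+//   LitVal zero(ValType(ValType::I32));  // defaultable type => zero value
+//   MOZ_ASSERT(zero.i32() == 0);
+//   LitVal seven(uint32_t(7));
+//   MOZ_ASSERT(seven.type() == ValType::I32 && seven.i32() == 7);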
+
+// A Val is a LitVal that can contain (non-null) pointers to GC things. All Vals
+// must be used with the rooting APIs as they may contain JS objects.
+
+class MOZ_NON_PARAM Val : public LitVal {
+ public:
+ Val() : LitVal() {}
+ explicit Val(ValType type) : LitVal(type) {}
+ explicit Val(const LitVal& val);
+ explicit Val(uint32_t i32) : LitVal(i32) {}
+ explicit Val(uint64_t i64) : LitVal(i64) {}
+ explicit Val(float f32) : LitVal(f32) {}
+ explicit Val(double f64) : LitVal(f64) {}
+ explicit Val(V128 v128) : LitVal(v128) {}
+ explicit Val(ValType type, AnyRef val) : LitVal(type, AnyRef::null()) {
+ MOZ_ASSERT(type.isReference());
+ cell_.ref_ = val;
+ }
+ explicit Val(ValType type, FuncRef val) : LitVal(type, AnyRef::null()) {
+ MOZ_ASSERT(type.isFuncRef());
+ cell_.ref_ = val.asAnyRef();
+ }
+
+ Val(const Val&) = default;
+ Val& operator=(const Val&) = default;
+
+ bool operator==(const Val& rhs) const {
+ if (type_ != rhs.type_) {
+ return false;
+ }
+ switch (type_.kind()) {
+ case ValType::I32:
+ return cell_.i32_ == rhs.cell_.i32_;
+ case ValType::I64:
+ return cell_.i64_ == rhs.cell_.i64_;
+ case ValType::F32:
+ return cell_.f32_ == rhs.cell_.f32_;
+ case ValType::F64:
+ return cell_.f64_ == rhs.cell_.f64_;
+ case ValType::V128:
+ return cell_.v128_ == rhs.cell_.v128_;
+ case ValType::Ref:
+ return cell_.ref_ == rhs.cell_.ref_;
+ }
+ MOZ_ASSERT_UNREACHABLE();
+ return false;
+ }
+ bool operator!=(const Val& rhs) const { return !(*this == rhs); }
+
+ bool isJSObject() const {
+ return type_.isValid() && type_.isReference() && !cell_.ref_.isNull();
+ }
+
+ JSObject* asJSObject() const {
+ MOZ_ASSERT(isJSObject());
+ return cell_.ref_.asJSObject();
+ }
+
+ JSObject** asJSObjectAddress() const {
+ return cell_.ref_.asJSObjectAddress();
+ }
+
+ // See the comment for `ToWebAssemblyValue` below.
+ static bool fromJSValue(JSContext* cx, ValType targetType, HandleValue val,
+ MutableHandle<Val> rval);
+ // See the comment for `ToJSValue` below.
+ bool toJSValue(JSContext* cx, MutableHandleValue rval) const;
+
+ void trace(JSTracer* trc) const;
+};
+
+using GCPtrVal = GCPtr<Val>;
+using RootedVal = Rooted<Val>;
+using HandleVal = Handle<Val>;
+using MutableHandleVal = MutableHandle<Val>;
+
+typedef GCVector<Val, 0, SystemAllocPolicy> ValVector;
+using RootedValVector = Rooted<ValVector>;
+using HandleValVector = Handle<ValVector>;
+using MutableHandleValVector = MutableHandle<ValVector>;
+
+// Check a value against the given reference type. If the targetType
+// is RefType::Extern then the test always passes, but the value may be boxed.
+// If the test passes then the value is stored either in fnval (for
+// RefType::Func) or in refval (for other types); this split is not strictly
+// necessary but is convenient for the users of this function.
+//
+// This can return false if the type check fails, or if a boxing into AnyRef
+// throws an OOM.
+[[nodiscard]] extern bool CheckRefType(JSContext* cx, RefType targetType,
+ HandleValue v,
+ MutableHandleFunction fnval,
+ MutableHandleAnyRef refval);
+
+// The same as above for when the target type is 'funcref'.
+[[nodiscard]] extern bool CheckFuncRefValue(JSContext* cx, HandleValue v,
+ MutableHandleFunction fun);
+
+// The same as above for when the target type is 'eqref'.
+[[nodiscard]] extern bool CheckEqRefValue(JSContext* cx, HandleValue v,
+ MutableHandleAnyRef vp);
+class NoDebug;
+class DebugCodegenVal;
+
+// Coercion function from a JS value to a WebAssembly value [1].
+//
+// This function may fail for any of the following reasons:
+// * The input value has an incorrect type for the targetType
+// * The targetType is not exposable
+// * An OOM occurred
+// An error will be set upon failure.
+//
+// [1] https://webassembly.github.io/spec/js-api/index.html#towebassemblyvalue
+template <typename Debug = NoDebug>
+extern bool ToWebAssemblyValue(JSContext* cx, HandleValue val, ValType type,
+ void* loc, bool mustWrite64);
+
+// Coercion function from a WebAssembly value to a JS value [1].
+//
+// This function will only fail if an OOM occurred. If the type of WebAssembly
+// value being coerced is not exposable to JS, then it will be coerced to
+// 'undefined'. Callers are responsible for guarding against this if this is
+// not desirable.
+//
+// [1] https://webassembly.github.io/spec/js-api/index.html#tojsvalue
+template <typename Debug = NoDebug>
+extern bool ToJSValue(JSContext* cx, const void* src, ValType type,
+ MutableHandleValue dst);
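+
+// Usage sketch (illustrative; `cx` is a JSContext* and `jsval` a HandleValue
+// assumed to be in scope): writing a JS number into an i32 cell and reading
+// it back.
+//
+//   int32_t cell = 0;
+//   if (!ToWebAssemblyValue(cx, jsval, ValType(ValType::I32), &cell,
+//                           /* mustWrite64= */ false)) {
+//     return false;
+//   }
+//   RootedValue roundTripped(cx);
+//   if (!ToJSValue(cx, &cell, ValType(ValType::I32), &roundTripped)) {
+//     return false;
+//   }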
+
+// The FuncType class represents a WebAssembly function signature which takes a
+// list of value types and returns an expression type. The engine uses two
+// in-memory representations of the argument Vector's memory (when elements do
+// not fit inline): normal malloc allocation (via SystemAllocPolicy) and
+// allocation in a LifoAlloc (via LifoAllocPolicy). The former FuncType objects
+// can have any lifetime since they own the memory. The latter FuncType objects
+// must not outlive the associated LifoAlloc mark/release interval (which is
+// currently the duration of module validation+compilation). Thus, long-lived
+// objects like WasmModule must use malloced allocation.
+
+class FuncType {
+ ValTypeVector args_;
+ ValTypeVector results_;
+
+ public:
+ FuncType() : args_(), results_() {}
+ FuncType(ValTypeVector&& args, ValTypeVector&& results)
+ : args_(std::move(args)), results_(std::move(results)) {}
+
+ [[nodiscard]] bool clone(const FuncType& src) {
+ MOZ_ASSERT(args_.empty());
+ MOZ_ASSERT(results_.empty());
+ return args_.appendAll(src.args_) && results_.appendAll(src.results_);
+ }
+
+ void renumber(const RenumberMap& map) {
+ for (auto& arg : args_) {
+ arg.renumber(map);
+ }
+ for (auto& result : results_) {
+ result.renumber(map);
+ }
+ }
+ void offsetTypeIndex(uint32_t offsetBy) {
+ for (auto& arg : args_) {
+ arg.offsetTypeIndex(offsetBy);
+ }
+ for (auto& result : results_) {
+ result.offsetTypeIndex(offsetBy);
+ }
+ }
+
+ ValType arg(unsigned i) const { return args_[i]; }
+ const ValTypeVector& args() const { return args_; }
+ ValType result(unsigned i) const { return results_[i]; }
+ const ValTypeVector& results() const { return results_; }
+
+ HashNumber hash() const {
+ HashNumber hn = 0;
+ for (const ValType& vt : args_) {
+ hn = mozilla::AddToHash(hn, HashNumber(vt.packed()));
+ }
+ for (const ValType& vt : results_) {
+ hn = mozilla::AddToHash(hn, HashNumber(vt.packed()));
+ }
+ return hn;
+ }
+ bool operator==(const FuncType& rhs) const {
+ return EqualContainers(args(), rhs.args()) &&
+ EqualContainers(results(), rhs.results());
+ }
+ bool operator!=(const FuncType& rhs) const { return !(*this == rhs); }
+
+ // Entry from JS to wasm via the JIT is currently unimplemented for
+ // functions that return multiple values.
+ bool temporarilyUnsupportedResultCountForJitEntry() const {
+ return results().length() > MaxResultsForJitEntry;
+ }
+  // Calls out from wasm to JS that return multiple values are currently
+  // unsupported.
+ bool temporarilyUnsupportedResultCountForJitExit() const {
+ return results().length() > MaxResultsForJitExit;
+ }
+ bool hasUnexposableArgOrRet() const {
+ for (ValType arg : args()) {
+ if (!arg.isExposable()) {
+ return true;
+ }
+ }
+ for (ValType result : results()) {
+ if (!result.isExposable()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ // For JS->wasm jit entries, temporarily disallow certain types until the
+ // stubs generator is improved.
+  // * ref params may only be nullable externrefs
+ // * ref results may not be type indices
+ // V128 types are excluded per spec but are guarded against separately.
+ bool temporarilyUnsupportedReftypeForEntry() const {
+ for (ValType arg : args()) {
+ if (arg.isReference() && (!arg.isExternRef() || !arg.isNullable())) {
+ return true;
+ }
+ }
+ for (ValType result : results()) {
+ if (result.isTypeIndex()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ // For inline JS->wasm jit entries, temporarily disallow certain types until
+ // the stubs generator is improved.
+  // * ref params may only be nullable externrefs
+ // * ref results may not be type indices
+ // V128 types are excluded per spec but are guarded against separately.
+ bool temporarilyUnsupportedReftypeForInlineEntry() const {
+ for (ValType arg : args()) {
+ if (arg.isReference() && (!arg.isExternRef() || !arg.isNullable())) {
+ return true;
+ }
+ }
+ for (ValType result : results()) {
+ if (result.isTypeIndex()) {
+ return true;
+ }
+ }
+ return false;
+ }
+ // For wasm->JS jit exits, temporarily disallow certain types until
+ // the stubs generator is improved.
+  // * ref results may only be nullable externrefs
+ // Unexposable types must be guarded against separately.
+ bool temporarilyUnsupportedReftypeForExit() const {
+ for (ValType result : results()) {
+ if (result.isReference() &&
+ (!result.isExternRef() || !result.isNullable())) {
+ return true;
+ }
+ }
+ return false;
+ }
+#ifdef WASM_PRIVATE_REFTYPES
+ bool exposesTypeIndex() const {
+ for (const ValType& arg : args()) {
+ if (arg.isTypeIndex()) {
+ return true;
+ }
+ }
+ for (const ValType& result : results()) {
+ if (result.isTypeIndex()) {
+ return true;
+ }
+ }
+ return false;
+ }
+#endif
+
+ WASM_DECLARE_SERIALIZABLE(FuncType)
+};
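+
+// Example (illustrative): building an (i32, i32) -> i32 signature with
+// malloced (SystemAllocPolicy) storage.
+//
+//   ValTypeVector args, results;
+//   if (!args.append(ValType::I32) || !args.append(ValType::I32) ||
+//       !results.append(ValType::I32)) {
+//     return false;  // OOM
+//   }
+//   FuncType sig(std::move(args), std::move(results));
+//   MOZ_ASSERT(sig.args().length() == 2 && sig.results().length() == 1);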
+
+struct FuncTypeHashPolicy {
+ using Lookup = const FuncType&;
+ static HashNumber hash(Lookup ft) { return ft.hash(); }
+ static bool match(const FuncType* lhs, Lookup rhs) { return *lhs == rhs; }
+};
+
+// ArgTypeVector type.
+//
+// Functions usually receive one ABI argument per WebAssembly argument. However
+// if a function has multiple results and some of those results go to the stack,
+// then it additionally receives a synthetic ABI argument holding a pointer to
+// the stack result area.
+//
+// Given the presence of synthetic arguments, sometimes we need a name for
+// non-synthetic arguments. We call those "natural" arguments.
+
+enum class StackResults { HasStackResults, NoStackResults };
+
+class ArgTypeVector {
+ const ValTypeVector& args_;
+ bool hasStackResults_;
+
+ // To allow ABIArgIterBase<VecT, ABIArgGeneratorT>, we define a private
+ // length() method. To prevent accidental errors, other users need to be
+ // explicit and call lengthWithStackResults() or
+ // lengthWithoutStackResults().
+ size_t length() const { return args_.length() + size_t(hasStackResults_); }
+ template <class VecT, class ABIArgGeneratorT>
+ friend class jit::ABIArgIterBase;
+
+ public:
+ ArgTypeVector(const ValTypeVector& args, StackResults stackResults)
+ : args_(args),
+ hasStackResults_(stackResults == StackResults::HasStackResults) {}
+ explicit ArgTypeVector(const FuncType& funcType);
+
+ bool hasSyntheticStackResultPointerArg() const { return hasStackResults_; }
+ StackResults stackResults() const {
+ return hasSyntheticStackResultPointerArg() ? StackResults::HasStackResults
+ : StackResults::NoStackResults;
+ }
+ size_t lengthWithoutStackResults() const { return args_.length(); }
+ bool isSyntheticStackResultPointerArg(size_t idx) const {
+ // The pointer to stack results area, if present, is a synthetic argument
+ // tacked on at the end.
+ MOZ_ASSERT(idx < lengthWithStackResults());
+ return idx == args_.length();
+ }
+ bool isNaturalArg(size_t idx) const {
+ return !isSyntheticStackResultPointerArg(idx);
+ }
+ size_t naturalIndex(size_t idx) const {
+ MOZ_ASSERT(isNaturalArg(idx));
+ // Because the synthetic argument, if present, is tacked on the end, an
+ // argument index that isn't synthetic is natural.
+ return idx;
+ }
+
+ size_t lengthWithStackResults() const { return length(); }
+ jit::MIRType operator[](size_t i) const {
+ MOZ_ASSERT(i < lengthWithStackResults());
+ if (isSyntheticStackResultPointerArg(i)) {
+ return jit::MIRType::StackResults;
+ }
+ return ToMIRType(args_[naturalIndex(i)]);
+ }
+};
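+
+// Iteration sketch (illustrative; `funcType` is assumed to be a FuncType in
+// scope): visiting every ABI argument type, including the synthetic stack
+// result pointer if present.
+//
+//   ArgTypeVector args(funcType);
+//   for (size_t i = 0; i < args.lengthWithStackResults(); i++) {
+//     jit::MIRType mirType = args[i];
+//     // ... assign an ABI location for mirType ...
+//   }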
+
+template <typename PointerType>
+class TaggedValue {
+ public:
+ enum Kind {
+ ImmediateKind1 = 0,
+ ImmediateKind2 = 1,
+ PointerKind1 = 2,
+ PointerKind2 = 3
+ };
+
+ private:
+ uintptr_t bits_;
+
+ static constexpr uintptr_t PayloadShift = 2;
+ static constexpr uintptr_t KindMask = 0x3;
+ static constexpr uintptr_t PointerKindBit = 0x2;
+
+ constexpr static bool IsPointerKind(Kind kind) {
+ return uintptr_t(kind) & PointerKindBit;
+ }
+ constexpr static bool IsImmediateKind(Kind kind) {
+ return !IsPointerKind(kind);
+ }
+
+ static_assert(IsImmediateKind(ImmediateKind1), "immediate kind 1");
+ static_assert(IsImmediateKind(ImmediateKind2), "immediate kind 2");
+ static_assert(IsPointerKind(PointerKind1), "pointer kind 1");
+ static_assert(IsPointerKind(PointerKind2), "pointer kind 2");
+
+ static uintptr_t PackImmediate(Kind kind, uint32_t imm) {
+ MOZ_ASSERT(IsImmediateKind(kind));
+ MOZ_ASSERT((uintptr_t(kind) & KindMask) == kind);
+ MOZ_ASSERT((imm & (uint32_t(KindMask) << (32 - PayloadShift))) == 0);
+ return uintptr_t(kind) | (uintptr_t(imm) << PayloadShift);
+ }
+
+ static uintptr_t PackPointer(Kind kind, PointerType* ptr) {
+ uintptr_t ptrBits = reinterpret_cast<uintptr_t>(ptr);
+ MOZ_ASSERT(IsPointerKind(kind));
+ MOZ_ASSERT((uintptr_t(kind) & KindMask) == kind);
+ MOZ_ASSERT((ptrBits & KindMask) == 0);
+ return uintptr_t(kind) | ptrBits;
+ }
+
+ public:
+ TaggedValue(Kind kind, uint32_t imm) : bits_(PackImmediate(kind, imm)) {}
+ TaggedValue(Kind kind, PointerType* ptr) : bits_(PackPointer(kind, ptr)) {}
+
+ uintptr_t bits() const { return bits_; }
+ Kind kind() const { return Kind(bits() & KindMask); }
+ uint32_t immediate() const {
+ MOZ_ASSERT(IsImmediateKind(kind()));
+ return mozilla::AssertedCast<uint32_t>(bits() >> PayloadShift);
+ }
+ PointerType* pointer() const {
+ MOZ_ASSERT(IsPointerKind(kind()));
+ return reinterpret_cast<PointerType*>(bits() & ~KindMask);
+ }
+};
+
+// ResultType represents the WebAssembly spec's `resulttype`. Semantically, a
+// result type is just a vec(valtype). For efficiency, though, the ResultType
+// value is packed into a word, with separate encodings for these 3 cases:
+// []
+// [valtype]
+// pointer to ValTypeVector
+//
+// Additionally there is an encoding indicating uninitialized ResultType
+// values.
+//
+// Generally, in the vector case, the ValTypeVector is the args() or results()
+// of a FuncType in the compilation unit, so as long as the lifetime of the
+// ResultType value is shorter than that of the OpIter, we can just borrow the
+// pointer without ownership or copying.
+class ResultType {
+ using Tagged = TaggedValue<const ValTypeVector>;
+ Tagged tagged_;
+
+ enum Kind {
+ EmptyKind = Tagged::ImmediateKind1,
+ SingleKind = Tagged::ImmediateKind2,
+#ifdef ENABLE_WASM_MULTI_VALUE
+ VectorKind = Tagged::PointerKind1,
+#endif
+ InvalidKind = Tagged::PointerKind2,
+ };
+
+ ResultType(Kind kind, uint32_t imm) : tagged_(Tagged::Kind(kind), imm) {}
+#ifdef ENABLE_WASM_MULTI_VALUE
+ explicit ResultType(const ValTypeVector* ptr)
+ : tagged_(Tagged::Kind(VectorKind), ptr) {}
+#endif
+
+ Kind kind() const { return Kind(tagged_.kind()); }
+
+ ValType singleValType() const {
+ MOZ_ASSERT(kind() == SingleKind);
+ return ValType(PackedTypeCodeFromBits(tagged_.immediate()));
+ }
+
+#ifdef ENABLE_WASM_MULTI_VALUE
+ const ValTypeVector& values() const {
+ MOZ_ASSERT(kind() == VectorKind);
+ return *tagged_.pointer();
+ }
+#endif
+
+ public:
+ ResultType() : tagged_(Tagged::Kind(InvalidKind), nullptr) {}
+
+ static ResultType Empty() { return ResultType(EmptyKind, uint32_t(0)); }
+ static ResultType Single(ValType vt) {
+ return ResultType(SingleKind, vt.bitsUnsafe());
+ }
+ static ResultType Vector(const ValTypeVector& vals) {
+ switch (vals.length()) {
+ case 0:
+ return Empty();
+ case 1:
+ return Single(vals[0]);
+ default:
+#ifdef ENABLE_WASM_MULTI_VALUE
+ return ResultType(&vals);
+#else
+ MOZ_CRASH("multi-value returns not supported");
+#endif
+ }
+ }
+
+ bool empty() const { return kind() == EmptyKind; }
+
+ size_t length() const {
+ switch (kind()) {
+ case EmptyKind:
+ return 0;
+ case SingleKind:
+ return 1;
+#ifdef ENABLE_WASM_MULTI_VALUE
+ case VectorKind:
+ return values().length();
+#endif
+ default:
+ MOZ_CRASH("bad resulttype");
+ }
+ }
+
+ ValType operator[](size_t i) const {
+ switch (kind()) {
+ case SingleKind:
+ MOZ_ASSERT(i == 0);
+ return singleValType();
+#ifdef ENABLE_WASM_MULTI_VALUE
+ case VectorKind:
+ return values()[i];
+#endif
+ default:
+ MOZ_CRASH("bad resulttype");
+ }
+ }
+
+ bool operator==(ResultType rhs) const {
+ switch (kind()) {
+ case EmptyKind:
+ case SingleKind:
+ case InvalidKind:
+ return tagged_.bits() == rhs.tagged_.bits();
+#ifdef ENABLE_WASM_MULTI_VALUE
+ case VectorKind: {
+ if (rhs.kind() != VectorKind) {
+ return false;
+ }
+ return EqualContainers(values(), rhs.values());
+ }
+#endif
+ default:
+ MOZ_CRASH("bad resulttype");
+ }
+ }
+ bool operator!=(ResultType rhs) const { return !(*this == rhs); }
+};
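+
+// Example (illustrative): the immediate encodings avoid any heap allocation.
+//
+//   MOZ_ASSERT(ResultType::Empty().empty());
+//   ResultType rt = ResultType::Single(ValType(ValType::F64));
+//   MOZ_ASSERT(rt.length() == 1 && rt[0] == ValType::F64);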
+
+// BlockType represents the WebAssembly spec's `blocktype`. Semantically, a
+// block type is just a (vec(valtype) -> vec(valtype)) with four special
+// encodings which are represented explicitly in BlockType:
+// [] -> []
+// [] -> [valtype]
+// [params] -> [results] via pointer to FuncType
+// [] -> [results] via pointer to FuncType (ignoring [params])
+
+class BlockType {
+ using Tagged = TaggedValue<const FuncType>;
+ Tagged tagged_;
+
+ enum Kind {
+ VoidToVoidKind = Tagged::ImmediateKind1,
+ VoidToSingleKind = Tagged::ImmediateKind2,
+#ifdef ENABLE_WASM_MULTI_VALUE
+ FuncKind = Tagged::PointerKind1,
+ FuncResultsKind = Tagged::PointerKind2
+#endif
+ };
+
+ BlockType(Kind kind, uint32_t imm) : tagged_(Tagged::Kind(kind), imm) {}
+#ifdef ENABLE_WASM_MULTI_VALUE
+ BlockType(Kind kind, const FuncType& type)
+ : tagged_(Tagged::Kind(kind), &type) {}
+#endif
+
+ Kind kind() const { return Kind(tagged_.kind()); }
+ ValType singleValType() const {
+ MOZ_ASSERT(kind() == VoidToSingleKind);
+ return ValType(PackedTypeCodeFromBits(tagged_.immediate()));
+ }
+
+#ifdef ENABLE_WASM_MULTI_VALUE
+ const FuncType& funcType() const { return *tagged_.pointer(); }
+#endif
+
+ public:
+ BlockType()
+ : tagged_(Tagged::Kind(VoidToVoidKind),
+ uint32_t(InvalidPackedTypeCode())) {}
+
+ static BlockType VoidToVoid() {
+ return BlockType(VoidToVoidKind, uint32_t(0));
+ }
+ static BlockType VoidToSingle(ValType vt) {
+ return BlockType(VoidToSingleKind, vt.bitsUnsafe());
+ }
+ static BlockType Func(const FuncType& type) {
+#ifdef ENABLE_WASM_MULTI_VALUE
+ if (type.args().length() == 0) {
+ return FuncResults(type);
+ }
+ return BlockType(FuncKind, type);
+#else
+ MOZ_ASSERT(type.args().length() == 0);
+ return FuncResults(type);
+#endif
+ }
+ static BlockType FuncResults(const FuncType& type) {
+ switch (type.results().length()) {
+ case 0:
+ return VoidToVoid();
+ case 1:
+ return VoidToSingle(type.results()[0]);
+ default:
+#ifdef ENABLE_WASM_MULTI_VALUE
+ return BlockType(FuncResultsKind, type);
+#else
+ MOZ_CRASH("multi-value returns not supported");
+#endif
+ }
+ }
+
+ ResultType params() const {
+ switch (kind()) {
+ case VoidToVoidKind:
+ case VoidToSingleKind:
+#ifdef ENABLE_WASM_MULTI_VALUE
+ case FuncResultsKind:
+#endif
+ return ResultType::Empty();
+#ifdef ENABLE_WASM_MULTI_VALUE
+ case FuncKind:
+ return ResultType::Vector(funcType().args());
+#endif
+ default:
+ MOZ_CRASH("unexpected kind");
+ }
+ }
+
+ ResultType results() const {
+ switch (kind()) {
+ case VoidToVoidKind:
+ return ResultType::Empty();
+ case VoidToSingleKind:
+ return ResultType::Single(singleValType());
+#ifdef ENABLE_WASM_MULTI_VALUE
+ case FuncKind:
+ case FuncResultsKind:
+ return ResultType::Vector(funcType().results());
+#endif
+ default:
+ MOZ_CRASH("unexpected kind");
+ }
+ }
+
+ bool operator==(BlockType rhs) const {
+ if (kind() != rhs.kind()) {
+ return false;
+ }
+ switch (kind()) {
+ case VoidToVoidKind:
+ case VoidToSingleKind:
+ return tagged_.bits() == rhs.tagged_.bits();
+#ifdef ENABLE_WASM_MULTI_VALUE
+ case FuncKind:
+ return funcType() == rhs.funcType();
+ case FuncResultsKind:
+ return EqualContainers(funcType().results(), rhs.funcType().results());
+#endif
+ default:
+ MOZ_CRASH("unexpected kind");
+ }
+ }
+
+ bool operator!=(BlockType rhs) const { return !(*this == rhs); }
+};
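+
+// Example (illustrative): the two immediate encodings.
+//
+//   MOZ_ASSERT(BlockType::VoidToVoid().results().empty());
+//   BlockType bt = BlockType::VoidToSingle(ValType(ValType::I32));
+//   MOZ_ASSERT(bt.params().empty() && bt.results().length() == 1);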
+
+// Structure type.
+//
+// The Module owns a dense array of StructType values that represent the
+// structure types that the module knows about. It is created from the sparse
+// array of types in the ModuleEnvironment when the Module is created.
+
+struct StructField {
+ ValType type;
+ uint32_t offset;
+ bool isMutable;
+};
+
+typedef Vector<StructField, 0, SystemAllocPolicy> StructFieldVector;
+
+class StructType {
+ public:
+ StructFieldVector fields_; // Field type, offset, and mutability
+ uint32_t size_; // The size of the type in bytes.
+ bool isInline_; // True if this is an InlineTypedObject and we
+ // interpret the offsets from the object pointer;
+ // if false this is an OutlineTypedObject and we
+ // interpret everything relative to the pointer to
+ // the attached storage.
+ public:
+ StructType() : fields_(), size_(0), isInline_(true) {}
+
+ explicit StructType(StructFieldVector&& fields)
+ : fields_(std::move(fields)), size_(0), isInline_(true) {}
+
+ StructType(StructType&&) = default;
+ StructType& operator=(StructType&&) = default;
+
+ [[nodiscard]] bool clone(const StructType& src) {
+ if (!fields_.appendAll(src.fields_)) {
+ return false;
+ }
+ size_ = src.size_;
+ isInline_ = src.isInline_;
+ return true;
+ }
+
+ void renumber(const RenumberMap& map) {
+ for (auto& field : fields_) {
+ field.type.renumber(map);
+ }
+ }
+ void offsetTypeIndex(uint32_t offsetBy) {
+ for (auto& field : fields_) {
+ field.type.offsetTypeIndex(offsetBy);
+ }
+ }
+
+ [[nodiscard]] bool computeLayout();
+
+ // Get the offset to a field from the base of the struct object. This
+ // is just the field offset for outline typed objects, but includes
+ // the header for inline typed objects.
+ uint32_t objectBaseFieldOffset(uint32_t fieldIndex) const;
+
+ bool hasPrefix(const StructType& other) const;
+
+ WASM_DECLARE_SERIALIZABLE(StructType)
+};
+
+typedef Vector<StructType, 0, SystemAllocPolicy> StructTypeVector;
+typedef Vector<const StructType*, 0, SystemAllocPolicy> StructTypePtrVector;
+
+// An InitExpr describes a deferred initializer expression, used to initialize
+// a global or a table element offset. Such expressions are created during
+// decoding and actually executed on module instantiation.
+
+class InitExpr {
+ public:
+ enum class Kind { Constant, GetGlobal, RefFunc };
+
+ private:
+ // Note: all this private data is currently (de)serialized via memcpy().
+ Kind kind_;
+ union U {
+ LitVal val_;
+ struct {
+ uint32_t index_;
+ ValType type_;
+ } global;
+ uint32_t refFuncIndex_;
+ U() : global{} {}
+ } u;
+
+ public:
+ InitExpr() = default;
+
+ static InitExpr fromConstant(LitVal val) {
+ InitExpr expr;
+ expr.kind_ = Kind::Constant;
+ expr.u.val_ = val;
+ return expr;
+ }
+
+ static InitExpr fromGetGlobal(uint32_t globalIndex, ValType type) {
+ InitExpr expr;
+ expr.kind_ = Kind::GetGlobal;
+ expr.u.global.index_ = globalIndex;
+ expr.u.global.type_ = type;
+ return expr;
+ }
+
+ static InitExpr fromRefFunc(uint32_t refFuncIndex) {
+ InitExpr expr;
+ expr.kind_ = Kind::RefFunc;
+ expr.u.refFuncIndex_ = refFuncIndex;
+ return expr;
+ }
+
+ Kind kind() const { return kind_; }
+
+ bool isVal() const { return kind() == Kind::Constant; }
+ LitVal val() const {
+ MOZ_ASSERT(isVal());
+ return u.val_;
+ }
+
+ uint32_t globalIndex() const {
+ MOZ_ASSERT(kind() == Kind::GetGlobal);
+ return u.global.index_;
+ }
+
+ uint32_t refFuncIndex() const {
+ MOZ_ASSERT(kind() == Kind::RefFunc);
+ return u.refFuncIndex_;
+ }
+
+ ValType type() const {
+ switch (kind()) {
+ case Kind::Constant:
+ return u.val_.type();
+ case Kind::GetGlobal:
+ return u.global.type_;
+ case Kind::RefFunc:
+ return ValType(RefType::func());
+ }
+ MOZ_CRASH("unexpected initExpr type");
+ }
+};
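+
+// Example (illustrative): a constant i32 initializer, as decoding would
+// produce for `(global i32 (i32.const 0))`.
+//
+//   InitExpr init = InitExpr::fromConstant(LitVal(uint32_t(0)));
+//   MOZ_ASSERT(init.isVal() && init.type() == ValType::I32);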
+
+// CacheableChars is used to cacheably store UniqueChars.
+
+struct CacheableChars : UniqueChars {
+ CacheableChars() = default;
+ explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
+ MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs)
+ : UniqueChars(std::move(rhs)) {}
+ WASM_DECLARE_SERIALIZABLE(CacheableChars)
+};
+
+typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
+
+// Import describes a single wasm import. An ImportVector describes all
+// of a single module's imports.
+//
+// ImportVector is built incrementally by ModuleGenerator and then stored
+// immutably by Module.
+
+struct Import {
+ CacheableChars module;
+ CacheableChars field;
+ DefinitionKind kind;
+
+ Import() = default;
+ Import(UniqueChars&& module, UniqueChars&& field, DefinitionKind kind)
+ : module(std::move(module)), field(std::move(field)), kind(kind) {}
+
+ WASM_DECLARE_SERIALIZABLE(Import)
+};
+
+typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
+
+// Export describes the export of a definition in a Module to a field in the
+// export object. The Export stores the index of the exported item in the
+// appropriate type-specific module data structure (function table, global
+// table, table table, and - eventually - memory table).
+//
+// Note a single definition can be exported by multiple Exports in the
+// ExportVector.
+//
+// ExportVector is built incrementally by ModuleGenerator and then stored
+// immutably by Module.
+
+class Export {
+ CacheableChars fieldName_;
+ struct CacheablePod {
+ DefinitionKind kind_;
+ uint32_t index_;
+ } pod;
+
+ public:
+ Export() = default;
+ explicit Export(UniqueChars fieldName, uint32_t index, DefinitionKind kind);
+ explicit Export(UniqueChars fieldName, DefinitionKind kind);
+
+ const char* fieldName() const { return fieldName_.get(); }
+
+ DefinitionKind kind() const { return pod.kind_; }
+ uint32_t funcIndex() const;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ uint32_t eventIndex() const;
+#endif
+ uint32_t globalIndex() const;
+ uint32_t tableIndex() const;
+
+ WASM_DECLARE_SERIALIZABLE(Export)
+};
+
+typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
+
+// A FuncDesc describes a single function.
+
+class TypeIdDesc;
+
+struct FuncDesc {
+ FuncType* type;
+ TypeIdDesc* typeId;
+ uint32_t typeIndex;
+
+ FuncDesc() = default;
+ FuncDesc(FuncType* type, TypeIdDesc* typeId, uint32_t typeIndex)
+ : type(type), typeId(typeId), typeIndex(typeIndex) {}
+};
+
+typedef Vector<FuncDesc, 0, SystemAllocPolicy> FuncDescVector;
+
+// A GlobalDesc describes a single global variable.
+//
+// wasm can import and export mutable and immutable globals.
+//
+// asm.js can import mutable and immutable globals, but a mutable global has a
+// location that is private to the module, and its initial value is copied into
+// that cell from the environment. asm.js cannot export globals.
+
+enum class GlobalKind { Import, Constant, Variable };
+
+class GlobalDesc {
+ union V {
+ struct {
+ union U {
+ InitExpr initial_;
+ struct {
+ ValType type_;
+ uint32_t index_;
+ } import;
+ U() : import{} {}
+ } val;
+ unsigned offset_;
+ bool isMutable_;
+ bool isWasm_;
+ bool isExport_;
+ } var;
+ LitVal cst_;
+ V() {}
+ } u;
+ GlobalKind kind_;
+
+ // Private, as they have unusual semantics.
+
+ bool isExport() const { return !isConstant() && u.var.isExport_; }
+ bool isWasm() const { return !isConstant() && u.var.isWasm_; }
+
+ public:
+ GlobalDesc() = default;
+
+ explicit GlobalDesc(InitExpr initial, bool isMutable,
+ ModuleKind kind = ModuleKind::Wasm)
+ : kind_((isMutable || !initial.isVal()) ? GlobalKind::Variable
+ : GlobalKind::Constant) {
+ if (isVariable()) {
+ u.var.val.initial_ = initial;
+ u.var.isMutable_ = isMutable;
+ u.var.isWasm_ = kind == Wasm;
+ u.var.isExport_ = false;
+ u.var.offset_ = UINT32_MAX;
+ } else {
+ u.cst_ = initial.val();
+ }
+ }
+
+ explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex,
+ ModuleKind kind = ModuleKind::Wasm)
+ : kind_(GlobalKind::Import) {
+ u.var.val.import.type_ = type;
+ u.var.val.import.index_ = importIndex;
+ u.var.isMutable_ = isMutable;
+ u.var.isWasm_ = kind == Wasm;
+ u.var.isExport_ = false;
+ u.var.offset_ = UINT32_MAX;
+ }
+
+ void setOffset(unsigned offset) {
+ MOZ_ASSERT(!isConstant());
+ MOZ_ASSERT(u.var.offset_ == UINT32_MAX);
+ u.var.offset_ = offset;
+ }
+ unsigned offset() const {
+ MOZ_ASSERT(!isConstant());
+ MOZ_ASSERT(u.var.offset_ != UINT32_MAX);
+ return u.var.offset_;
+ }
+
+ void setIsExport() {
+ if (!isConstant()) {
+ u.var.isExport_ = true;
+ }
+ }
+
+ GlobalKind kind() const { return kind_; }
+ bool isVariable() const { return kind_ == GlobalKind::Variable; }
+ bool isConstant() const { return kind_ == GlobalKind::Constant; }
+ bool isImport() const { return kind_ == GlobalKind::Import; }
+
+ bool isMutable() const { return !isConstant() && u.var.isMutable_; }
+ LitVal constantValue() const {
+ MOZ_ASSERT(isConstant());
+ return u.cst_;
+ }
+ const InitExpr& initExpr() const {
+ MOZ_ASSERT(isVariable());
+ return u.var.val.initial_;
+ }
+ uint32_t importIndex() const {
+ MOZ_ASSERT(isImport());
+ return u.var.val.import.index_;
+ }
+
+ // If isIndirect() is true then storage for the value is not in the
+ // instance's global area, but in a WasmGlobalObject::Cell hanging off a
+ // WasmGlobalObject; the global area contains a pointer to the Cell.
+ //
+ // We don't want to indirect unless we must, so only mutable, exposed
+ // globals are indirected - in all other cases we copy values into and out
+ // of their module.
+ //
+ // Note that isIndirect() isn't equivalent to getting a WasmGlobalObject:
+ // an immutable exported global will still get an object, but will not be
+ // indirect.
+ bool isIndirect() const {
+ return isMutable() && isWasm() && (isImport() || isExport());
+ }
+
+ ValType type() const {
+ switch (kind_) {
+ case GlobalKind::Import:
+ return u.var.val.import.type_;
+ case GlobalKind::Variable:
+ return u.var.val.initial_.type();
+ case GlobalKind::Constant:
+ return u.cst_.type();
+ }
+ MOZ_CRASH("unexpected global kind");
+ }
+};
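+
+// A small sketch of the isIndirect() rule above (illustrative only, and
+// assuming LitVal's uint32_t constructor):
+//
+//   GlobalDesc g(InitExpr::fromConstant(LitVal(uint32_t(0))),
+//                /* isMutable = */ true);
+//   g.setIsExport();   // mutable + exposed + wasm => g.isIndirect()
+//
+// An immutable exported global, or a mutable global that is never exposed,
+// stays in the instance's global area and is not indirect.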
+
+typedef Vector<GlobalDesc, 0, SystemAllocPolicy> GlobalDescVector;
+
+// An EventDesc describes a single event for non-local control flow, such as
+// for exceptions.
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+struct EventDesc {
+ EventKind kind;
+ ValTypeVector type;
+ bool isExport;
+
+ EventDesc(EventKind kind, ValTypeVector&& type, bool isExport = false)
+ : kind(kind), type(std::move(type)), isExport(isExport) {}
+
+ ResultType resultType() const { return ResultType::Vector(type); }
+};
+
+typedef Vector<EventDesc, 0, SystemAllocPolicy> EventDescVector;
+#endif
+
+// When an ElemSegment is "passive" it is shared between a wasm::Module and its
+// When an ElemSegment is "passive" it is shared between a wasm::Module and its
+// wasm::Instances. To allow each segment to be released as soon as the last
+// Instance elem.drops it and the Module is destroyed, each ElemSegment is
+// individually atomically ref-counted.
+
+struct ElemSegment : AtomicRefCounted<ElemSegment> {
+ enum class Kind {
+ Active,
+ Passive,
+ Declared,
+ };
+
+ Kind kind;
+ uint32_t tableIndex;
+ RefType elemType;
+ Maybe<InitExpr> offsetIfActive;
+ Uint32Vector elemFuncIndices; // Element may be NullFuncIndex
+
+ bool active() const { return kind == Kind::Active; }
+
+ InitExpr offset() const { return *offsetIfActive; }
+
+ size_t length() const { return elemFuncIndices.length(); }
+
+ WASM_DECLARE_SERIALIZABLE(ElemSegment)
+};
+
+// NullFuncIndex represents the case when an element segment (of type funcref)
+// contains a null element.
+constexpr uint32_t NullFuncIndex = UINT32_MAX;
+static_assert(NullFuncIndex > MaxFuncs, "Invariant");
+
+using MutableElemSegment = RefPtr<ElemSegment>;
+using SharedElemSegment = SerializableRefPtr<const ElemSegment>;
+typedef Vector<SharedElemSegment, 0, SystemAllocPolicy> ElemSegmentVector;
+
+// DataSegmentEnv holds the initial results of decoding a data segment from the
+// bytecode and is stored in the ModuleEnvironment during compilation. When
+// compilation completes, (non-Env) DataSegments are created and stored in
+// the wasm::Module which contain copies of the data segment payload. This
+// allows non-compilation uses of wasm validation to avoid expensive copies.
+//
+// When a DataSegment is "passive" it is shared between a wasm::Module and its
+// wasm::Instances. To allow each segment to be released as soon as the last
+// Instance mem.drops it and the Module is destroyed, each DataSegment is
+// individually atomically ref-counted.
+
+struct DataSegmentEnv {
+ Maybe<InitExpr> offsetIfActive;
+ uint32_t bytecodeOffset;
+ uint32_t length;
+};
+
+typedef Vector<DataSegmentEnv, 0, SystemAllocPolicy> DataSegmentEnvVector;
+
+struct DataSegment : AtomicRefCounted<DataSegment> {
+ Maybe<InitExpr> offsetIfActive;
+ Bytes bytes;
+
+ DataSegment() = default;
+ explicit DataSegment(const DataSegmentEnv& src)
+ : offsetIfActive(src.offsetIfActive) {}
+
+ bool active() const { return !!offsetIfActive; }
+
+ InitExpr offset() const { return *offsetIfActive; }
+
+ WASM_DECLARE_SERIALIZABLE(DataSegment)
+};
+
+using MutableDataSegment = RefPtr<DataSegment>;
+using SharedDataSegment = SerializableRefPtr<const DataSegment>;
+typedef Vector<SharedDataSegment, 0, SystemAllocPolicy> DataSegmentVector;
+
+// The CustomSection(Env) structs are like DataSegment(Env): CustomSectionEnv is
+// stored in the ModuleEnvironment and CustomSection holds a copy of the payload
+// and is stored in the wasm::Module.
+
+struct CustomSectionEnv {
+ uint32_t nameOffset;
+ uint32_t nameLength;
+ uint32_t payloadOffset;
+ uint32_t payloadLength;
+};
+
+typedef Vector<CustomSectionEnv, 0, SystemAllocPolicy> CustomSectionEnvVector;
+
+struct CustomSection {
+ Bytes name;
+ SharedBytes payload;
+
+ WASM_DECLARE_SERIALIZABLE(CustomSection)
+};
+
+typedef Vector<CustomSection, 0, SystemAllocPolicy> CustomSectionVector;
+
+// A Name represents a string of utf8 chars embedded within the name custom
+// section. The offset of a name is expressed relative to the beginning of the
+// name section's payload so that Names can be stored in wasm::Code, which only
+// holds the name section's bytes, not the whole bytecode.
+
+struct Name {
+ // All fields are treated as cacheable POD:
+ uint32_t offsetInNamePayload;
+ uint32_t length;
+
+ Name() : offsetInNamePayload(UINT32_MAX), length(0) {}
+};
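+
+// For illustration: given the raw bytes of the name section's payload, the
+// utf8 chars of a Name n are the n.length bytes starting at
+// payload + n.offsetInNamePayload.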
+
+typedef Vector<Name, 0, SystemAllocPolicy> NameVector;
+
+// A tagged container for the various types that can be present in a wasm
+// module's type section.
+
+class TypeDef {
+ enum { IsFuncType, IsStructType, IsNone } tag_;
+ union {
+ FuncType funcType_;
+ StructType structType_;
+ };
+
+ public:
+ TypeDef() : tag_(IsNone) {}
+
+ explicit TypeDef(FuncType&& funcType)
+ : tag_(IsFuncType), funcType_(std::move(funcType)) {}
+
+ explicit TypeDef(StructType&& structType)
+ : tag_(IsStructType), structType_(std::move(structType)) {}
+
+ TypeDef(TypeDef&& td) : tag_(td.tag_) {
+ switch (tag_) {
+ case IsFuncType:
+ new (&funcType_) FuncType(std::move(td.funcType_));
+ break;
+ case IsStructType:
+ new (&structType_) StructType(std::move(td.structType_));
+ break;
+ case IsNone:
+ break;
+ }
+ }
+
+ ~TypeDef() {
+ switch (tag_) {
+ case IsFuncType:
+ funcType_.~FuncType();
+ break;
+ case IsStructType:
+ structType_.~StructType();
+ break;
+ case IsNone:
+ break;
+ }
+ }
+
+ TypeDef& operator=(TypeDef&& that) {
+ MOZ_ASSERT(isNone());
+ switch (that.tag_) {
+ case IsFuncType:
+ new (&funcType_) FuncType(std::move(that.funcType_));
+ break;
+ case IsStructType:
+ new (&structType_) StructType(std::move(that.structType_));
+ break;
+ case IsNone:
+ break;
+ }
+ tag_ = that.tag_;
+ return *this;
+ }
+
+ [[nodiscard]] bool clone(const TypeDef& src) {
+ MOZ_ASSERT(isNone());
+ tag_ = src.tag_;
+ switch (src.tag_) {
+ case IsFuncType:
+ new (&funcType_) FuncType();
+ return funcType_.clone(src.funcType());
+ case IsStructType:
+ new (&structType_) StructType();
+ return structType_.clone(src.structType());
+ case IsNone:
+ break;
+ }
+ MOZ_ASSERT_UNREACHABLE();
+ return false;
+ }
+
+ bool isFuncType() const { return tag_ == IsFuncType; }
+
+ bool isNone() const { return tag_ == IsNone; }
+
+ bool isStructType() const { return tag_ == IsStructType; }
+
+ const FuncType& funcType() const {
+ MOZ_ASSERT(isFuncType());
+ return funcType_;
+ }
+
+ FuncType& funcType() {
+ MOZ_ASSERT(isFuncType());
+ return funcType_;
+ }
+
+ const StructType& structType() const {
+ MOZ_ASSERT(isStructType());
+ return structType_;
+ }
+
+ StructType& structType() {
+ MOZ_ASSERT(isStructType());
+ return structType_;
+ }
+
+ void renumber(const RenumberMap& map) {
+ switch (tag_) {
+ case IsFuncType:
+ funcType_.renumber(map);
+ break;
+ case IsStructType:
+ structType_.renumber(map);
+ break;
+ case IsNone:
+ break;
+ }
+ }
+ void offsetTypeIndex(uint32_t offsetBy) {
+ switch (tag_) {
+ case IsFuncType:
+ funcType_.offsetTypeIndex(offsetBy);
+ break;
+ case IsStructType:
+ structType_.offsetTypeIndex(offsetBy);
+ break;
+ case IsNone:
+ break;
+ }
+ }
+
+ WASM_DECLARE_SERIALIZABLE(TypeDef)
+};
+
+typedef Vector<TypeDef, 0, SystemAllocPolicy> TypeDefVector;
+
+// TypeIdDesc describes the runtime representation of a TypeDef suitable for
+// type equality checks. The kind of representation depends on whether the type
+// is a function or a struct. This will likely be simplified in the future once
+// mutually recursive types can be collected.
+//
+// For functions, a FuncType is allocated and stored in a process-wide hash
+// table, so that pointer equality implies structural equality. As an
+// optimization for the 99% case where the FuncType has a small number of
+// parameters, the FuncType is bit-packed into a uint32 immediate value so that
+// integer equality implies structural equality. Both cases can be handled with
+// a single comparison by always setting the LSB for the immediates
+// (the LSB is necessarily 0 for allocated FuncType pointers due to alignment).
+//
+// TODO: Write description for StructTypes once it is well formed.
+
+class TypeIdDesc {
+ public:
+ static const uintptr_t ImmediateBit = 0x1;
+
+ private:
+ TypeIdDescKind kind_;
+ size_t bits_;
+
+ TypeIdDesc(TypeIdDescKind kind, size_t bits) : kind_(kind), bits_(bits) {}
+
+ public:
+ TypeIdDescKind kind() const { return kind_; }
+ static bool isGlobal(const TypeDef& type);
+
+ TypeIdDesc() : kind_(TypeIdDescKind::None), bits_(0) {}
+ static TypeIdDesc global(const TypeDef& type, uint32_t globalDataOffset);
+ static TypeIdDesc immediate(const TypeDef& type);
+
+ bool isGlobal() const { return kind_ == TypeIdDescKind::Global; }
+
+ size_t immediate() const {
+ MOZ_ASSERT(kind_ == TypeIdDescKind::Immediate);
+ return bits_;
+ }
+ uint32_t globalDataOffset() const {
+ MOZ_ASSERT(kind_ == TypeIdDescKind::Global);
+ return bits_;
+ }
+};
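+
+// Sketch of the single-comparison property described above (not actual
+// engine code): whether `expected` is a bit-packed immediate (LSB set) or a
+// pointer into the process-wide FuncType hash table (LSB clear), a callee's
+// id matches exactly when the raw words are equal:
+//
+//   bool SigMatches(uintptr_t calleeTypeId, uintptr_t expected) {
+//     return calleeTypeId == expected;  // covers both encodings
+//   }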
+
+typedef Vector<TypeIdDesc, 0, SystemAllocPolicy> TypeIdDescVector;
+
+// TypeDefWithId pairs a FuncType with TypeIdDesc, describing either how to
+// compile code that compares this signature's id or, at instantiation, what
+// signature ids to allocate in the global hash and where to put them.
+
+struct TypeDefWithId : public TypeDef {
+ TypeIdDesc id;
+
+ TypeDefWithId() = default;
+ explicit TypeDefWithId(TypeDef&& typeDef)
+ : TypeDef(std::move(typeDef)), id() {}
+ TypeDefWithId(TypeDef&& typeDef, TypeIdDesc id)
+ : TypeDef(std::move(typeDef)), id(id) {}
+
+ WASM_DECLARE_SERIALIZABLE(TypeDefWithId)
+};
+
+typedef Vector<TypeDefWithId, 0, SystemAllocPolicy> TypeDefWithIdVector;
+typedef Vector<const TypeDefWithId*, 0, SystemAllocPolicy>
+ TypeDefWithIdPtrVector;
+
+// A type context maintains an index space for TypeDef's that can be used to
+// give ValType's meaning. It is used during compilation for modules, and
+// during runtime for all instances.
+
+class TypeContext {
+ FeatureArgs features_;
+ TypeDefVector types_;
+
+ public:
+ TypeContext(const FeatureArgs& features, TypeDefVector&& types)
+ : features_(features), types_(std::move(types)) {}
+
+ // Disallow copy, allow move initialization
+ TypeContext(const TypeContext&) = delete;
+ TypeContext& operator=(const TypeContext&) = delete;
+ TypeContext(TypeContext&&) = default;
+ TypeContext& operator=(TypeContext&&) = default;
+
+ TypeDef& type(uint32_t index) { return types_[index]; }
+ const TypeDef& type(uint32_t index) const { return types_[index]; }
+
+ TypeDef& operator[](uint32_t index) { return types_[index]; }
+ const TypeDef& operator[](uint32_t index) const { return types_[index]; }
+
+ uint32_t length() const { return types_.length(); }
+
+ template <typename U>
+ [[nodiscard]] bool append(U&& typeDef) {
+ return types_.append(std::move(typeDef));
+ }
+ [[nodiscard]] bool resize(uint32_t length) { return types_.resize(length); }
+
+ [[nodiscard]] bool transferTypes(const TypeDefWithIdVector& types,
+ uint32_t* baseIndex) {
+ *baseIndex = length();
+ if (!resize(*baseIndex + types.length())) {
+ return false;
+ }
+ for (uint32_t i = 0; i < types.length(); i++) {
+ if (!types_[*baseIndex + i].clone(types[i])) {
+ return false;
+ }
+ types_[*baseIndex + i].offsetTypeIndex(*baseIndex);
+ }
+ return true;
+ }
+
+ // FuncType accessors
+
+ bool isFuncType(uint32_t index) const { return types_[index].isFuncType(); }
+ bool isFuncType(RefType t) const {
+ return t.isTypeIndex() && isFuncType(t.typeIndex());
+ }
+
+ FuncType& funcType(uint32_t index) { return types_[index].funcType(); }
+ const FuncType& funcType(uint32_t index) const {
+ return types_[index].funcType();
+ }
+ FuncType& funcType(RefType t) { return funcType(t.typeIndex()); }
+ const FuncType& funcType(RefType t) const { return funcType(t.typeIndex()); }
+
+ // StructType accessors
+
+ bool isStructType(uint32_t index) const {
+ return types_[index].isStructType();
+ }
+ bool isStructType(RefType t) const {
+ return t.isTypeIndex() && isStructType(t.typeIndex());
+ }
+
+ StructType& structType(uint32_t index) { return types_[index].structType(); }
+ const StructType& structType(uint32_t index) const {
+ return types_[index].structType();
+ }
+ StructType& structType(RefType t) { return structType(t.typeIndex()); }
+ const StructType& structType(RefType t) const {
+ return structType(t.typeIndex());
+ }
+
+ bool isSubtypeOf(ValType one, ValType two) const {
+ // Anything's a subtype of itself.
+ if (one == two) {
+ return true;
+ }
+
+ // A reference may be a subtype of another reference
+ return one.isReference() && two.isReference() &&
+ isRefSubtypeOf(one.refType(), two.refType());
+ }
+
+ bool isRefSubtypeOf(RefType one, RefType two) const {
+ // Anything's a subtype of itself.
+ if (one == two) {
+ return true;
+ }
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ if (features_.functionReferences) {
+ // A subtype must have the same nullability as the supertype or the
+ // supertype must be nullable.
+ if (!(one.isNullable() == two.isNullable() || two.isNullable())) {
+ return false;
+ }
+
+ // Non type-index reftypes are subtypes if they are equal
+ if (!one.isTypeIndex() && !two.isTypeIndex() &&
+ one.kind() == two.kind()) {
+ return true;
+ }
+
+# ifdef ENABLE_WASM_GC
+ // gc can only be enabled if function-references is enabled
+ if (features_.gcTypes) {
+ // Structs are subtypes of EqRef.
+ if (isStructType(one) && two.isEq()) {
+ return true;
+ }
+ // Struct One is a subtype of struct Two if Two is a prefix of One.
+ if (isStructType(one) && isStructType(two)) {
+ return structType(one).hasPrefix(structType(two));
+ }
+ }
+# endif
+ return false;
+ }
+#endif
+ return false;
+ }
+
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const {
+ return types_.sizeOfExcludingThis(mallocSizeOf);
+ }
+};
+
+class TypeHandle {
+ private:
+ uint32_t index_;
+
+ public:
+ explicit TypeHandle(uint32_t index) : index_(index) {}
+
+ TypeHandle(const TypeHandle&) = default;
+ TypeHandle& operator=(const TypeHandle&) = default;
+
+ TypeDef& get(TypeContext* tycx) const { return tycx->type(index_); }
+ const TypeDef& get(const TypeContext* tycx) const {
+ return tycx->type(index_);
+ }
+
+ uint32_t index() const { return index_; }
+};
+
+// A wrapper around the bytecode offset of a wasm instruction within a whole
+// module, used for trap offsets or call offsets. These offsets should refer to
+// the first byte of the instruction that triggered the trap / did the call and
+// should ultimately derive from OpIter::bytecodeOffset.
+
+class BytecodeOffset {
+ static const uint32_t INVALID = -1;
+ uint32_t offset_;
+
+ public:
+ BytecodeOffset() : offset_(INVALID) {}
+ explicit BytecodeOffset(uint32_t offset) : offset_(offset) {}
+
+ bool isValid() const { return offset_ != INVALID; }
+ uint32_t offset() const {
+ MOZ_ASSERT(isValid());
+ return offset_;
+ }
+};
+
+// A TrapSite (in the TrapSiteVector for a given Trap code) represents a wasm
+// instruction at a given bytecode offset that can fault at the given pc offset.
+// When such a fault occurs, a signal/exception handler looks up the TrapSite to
+// confirm the fault is intended/safe and redirects pc to the trap stub.
+
+struct TrapSite {
+ uint32_t pcOffset;
+ BytecodeOffset bytecode;
+
+ TrapSite() : pcOffset(-1), bytecode() {}
+ TrapSite(uint32_t pcOffset, BytecodeOffset bytecode)
+ : pcOffset(pcOffset), bytecode(bytecode) {}
+
+ void offsetBy(uint32_t offset) { pcOffset += offset; }
+};
+
+WASM_DECLARE_POD_VECTOR(TrapSite, TrapSiteVector)
+
+struct TrapSiteVectorArray
+ : EnumeratedArray<Trap, Trap::Limit, TrapSiteVector> {
+ bool empty() const;
+ void clear();
+ void swap(TrapSiteVectorArray& rhs);
+ void shrinkStorageToFit();
+
+ WASM_DECLARE_SERIALIZABLE(TrapSiteVectorArray)
+};
+
+// On trap, the bytecode offset to be reported in callstacks is saved.
+
+struct TrapData {
+ // The resumePC indicates where, if the trap doesn't throw, the trap stub
+ // should jump to after restoring all register state.
+ void* resumePC;
+
+ // The unwoundPC is the PC after adjustment by wasm::StartUnwinding(), which
+  // basically unwinds partially-constructed wasm::Frames when pc is in the
+ // prologue/epilogue. Stack traces during a trap should use this PC since
+ // it corresponds to the JitActivation::wasmExitFP.
+ void* unwoundPC;
+
+ Trap trap;
+ uint32_t bytecodeOffset;
+};
+
+// The (,Callable,Func)Offsets classes are used to record the offsets of
+// different key points in a CodeRange during compilation.
+
+struct Offsets {
+ explicit Offsets(uint32_t begin = 0, uint32_t end = 0)
+ : begin(begin), end(end) {}
+
+ // These define a [begin, end) contiguous range of instructions compiled
+ // into a CodeRange.
+ uint32_t begin;
+ uint32_t end;
+};
+
+struct CallableOffsets : Offsets {
+ MOZ_IMPLICIT CallableOffsets(uint32_t ret = 0) : Offsets(), ret(ret) {}
+
+ // The offset of the return instruction precedes 'end' by a variable number
+ // of instructions due to out-of-line codegen.
+ uint32_t ret;
+};
+
+struct JitExitOffsets : CallableOffsets {
+ MOZ_IMPLICIT JitExitOffsets()
+ : CallableOffsets(), untrustedFPStart(0), untrustedFPEnd(0) {}
+
+ // There are a few instructions in the Jit exit where FP may be trash
+ // (because it may have been clobbered by the JS Jit), known as the
+ // untrusted FP zone.
+ uint32_t untrustedFPStart;
+ uint32_t untrustedFPEnd;
+};
+
+struct FuncOffsets : CallableOffsets {
+ MOZ_IMPLICIT FuncOffsets()
+ : CallableOffsets(), uncheckedCallEntry(0), tierEntry(0) {}
+
+ // Function CodeRanges have a checked call entry which takes an extra
+ // signature argument which is checked against the callee's signature before
+ // falling through to the normal prologue. The checked call entry is thus at
+ // the beginning of the CodeRange and the unchecked call entry is at some
+ // offset after the checked call entry.
+ uint32_t uncheckedCallEntry;
+
+ // The tierEntry is the point within a function to which the patching code
+ // within a Tier-1 function jumps. It could be the instruction following
+ // the jump in the Tier-1 function, or the point following the standard
+ // prologue within a Tier-2 function.
+ uint32_t tierEntry;
+};
+
+typedef Vector<FuncOffsets, 0, SystemAllocPolicy> FuncOffsetsVector;
+
+// A CodeRange describes a single contiguous range of code within a wasm
+// module's code segment. A CodeRange describes what the code does and, for
+// function bodies, the name and source coordinates of the function.
+
+class CodeRange {
+ public:
+ enum Kind {
+ Function, // function definition
+ InterpEntry, // calls into wasm from C++
+ JitEntry, // calls into wasm from jit code
+ ImportInterpExit, // slow-path calling from wasm into C++ interp
+ ImportJitExit, // fast-path calling from wasm into jit code
+ BuiltinThunk, // fast-path calling from wasm into a C++ native
+ TrapExit, // calls C++ to report and jumps to throw stub
+ DebugTrap, // calls C++ to handle debug event
+ FarJumpIsland, // inserted to connect otherwise out-of-range insns
+ Throw // special stack-unwinding stub jumped to by other stubs
+ };
+
+ private:
+ // All fields are treated as cacheable POD:
+ uint32_t begin_;
+ uint32_t ret_;
+ uint32_t end_;
+ union {
+ struct {
+ uint32_t funcIndex_;
+ union {
+ struct {
+ uint32_t lineOrBytecode_;
+ uint8_t beginToUncheckedCallEntry_;
+ uint8_t beginToTierEntry_;
+ } func;
+ struct {
+ uint16_t beginToUntrustedFPStart_;
+ uint16_t beginToUntrustedFPEnd_;
+ } jitExit;
+ };
+ };
+ Trap trap_;
+ } u;
+ Kind kind_ : 8;
+
+ public:
+ CodeRange() = default;
+ CodeRange(Kind kind, Offsets offsets);
+ CodeRange(Kind kind, uint32_t funcIndex, Offsets offsets);
+ CodeRange(Kind kind, CallableOffsets offsets);
+ CodeRange(Kind kind, uint32_t funcIndex, CallableOffsets);
+ CodeRange(uint32_t funcIndex, JitExitOffsets offsets);
+ CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
+
+ void offsetBy(uint32_t offset) {
+ begin_ += offset;
+ end_ += offset;
+ if (hasReturn()) {
+ ret_ += offset;
+ }
+ }
+
+ // All CodeRanges have a begin and end.
+
+ uint32_t begin() const { return begin_; }
+ uint32_t end() const { return end_; }
+
+ // Other fields are only available for certain CodeRange::Kinds.
+
+ Kind kind() const { return kind_; }
+
+ bool isFunction() const { return kind() == Function; }
+ bool isImportExit() const {
+ return kind() == ImportJitExit || kind() == ImportInterpExit ||
+ kind() == BuiltinThunk;
+ }
+ bool isImportInterpExit() const { return kind() == ImportInterpExit; }
+ bool isImportJitExit() const { return kind() == ImportJitExit; }
+ bool isTrapExit() const { return kind() == TrapExit; }
+ bool isDebugTrap() const { return kind() == DebugTrap; }
+ bool isThunk() const { return kind() == FarJumpIsland; }
+
+  // Functions, import exits and trap exits have standard callable prologues
+ // and epilogues. Asynchronous frame iteration needs to know the offset of
+ // the return instruction to calculate the frame pointer.
+
+ bool hasReturn() const {
+ return isFunction() || isImportExit() || isDebugTrap();
+ }
+ uint32_t ret() const {
+ MOZ_ASSERT(hasReturn());
+ return ret_;
+ }
+
+ // Functions, export stubs and import stubs all have an associated function
+ // index.
+
+ bool isJitEntry() const { return kind() == JitEntry; }
+ bool isInterpEntry() const { return kind() == InterpEntry; }
+ bool isEntry() const { return isInterpEntry() || isJitEntry(); }
+ bool hasFuncIndex() const {
+ return isFunction() || isImportExit() || isEntry();
+ }
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(hasFuncIndex());
+ return u.funcIndex_;
+ }
+
+ // TrapExit CodeRanges have a Trap field.
+
+ Trap trap() const {
+ MOZ_ASSERT(isTrapExit());
+ return u.trap_;
+ }
+
+ // Function CodeRanges have two entry points: one for normal calls (with a
+ // known signature) and one for table calls (which involves dynamic
+ // signature checking).
+
+ uint32_t funcCheckedCallEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_;
+ }
+ uint32_t funcUncheckedCallEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + u.func.beginToUncheckedCallEntry_;
+ }
+ uint32_t funcTierEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + u.func.beginToTierEntry_;
+ }
+ uint32_t funcLineOrBytecode() const {
+ MOZ_ASSERT(isFunction());
+ return u.func.lineOrBytecode_;
+ }
+
+  // ImportJitExit stubs have a particular range where the value of FP can't be
+ // trusted for profiling and thus must be ignored.
+
+ uint32_t jitExitUntrustedFPStart() const {
+ MOZ_ASSERT(isImportJitExit());
+ return begin_ + u.jitExit.beginToUntrustedFPStart_;
+ }
+ uint32_t jitExitUntrustedFPEnd() const {
+ MOZ_ASSERT(isImportJitExit());
+ return begin_ + u.jitExit.beginToUntrustedFPEnd_;
+ }
+
+ // A sorted array of CodeRanges can be looked up via BinarySearch and
+ // OffsetInCode.
+
+ struct OffsetInCode {
+ size_t offset;
+ explicit OffsetInCode(size_t offset) : offset(offset) {}
+ bool operator==(const CodeRange& rhs) const {
+ return offset >= rhs.begin() && offset < rhs.end();
+ }
+ bool operator<(const CodeRange& rhs) const { return offset < rhs.begin(); }
+ };
+};
+
+WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
+
+extern const CodeRange* LookupInSorted(const CodeRangeVector& codeRanges,
+ CodeRange::OffsetInCode target);
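+
+// Example use (a sketch; `codeRanges` and `pcOffset` are placeholders): since
+// OffsetInCode orders an offset against each [begin, end) range, a sorted
+// CodeRangeVector supports binary-search lookup:
+//
+//   const CodeRange* range =
+//       LookupInSorted(codeRanges, CodeRange::OffsetInCode(pcOffset));
+//   if (range && range->isFunction()) {
+//     uint32_t funcIndex = range->funcIndex();
+//   }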
+
+// While the frame-pointer chain allows the stack to be unwound without
+// metadata, Error.stack still needs to know the line/column of every call in
+// the chain. A CallSiteDesc describes a single callsite to which CallSite adds
+// the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
+// adds the function index of the callee.
+
+class CallSiteDesc {
+ static constexpr size_t LINE_OR_BYTECODE_BITS_SIZE = 29;
+ uint32_t lineOrBytecode_ : LINE_OR_BYTECODE_BITS_SIZE;
+ uint32_t kind_ : 3;
+
+ public:
+ static constexpr uint32_t MAX_LINE_OR_BYTECODE_VALUE =
+ (1 << LINE_OR_BYTECODE_BITS_SIZE) - 1;
+
+ enum Kind {
+ Func, // pc-relative call to a specific function
+ Dynamic, // dynamic callee called via register
+ Symbolic, // call to a single symbolic callee
+    EnterFrame, // call to an enter frame handler
+ LeaveFrame, // call to a leave frame handler
+ Breakpoint // call to instruction breakpoint
+ };
+ CallSiteDesc() : lineOrBytecode_(0), kind_(0) {}
+ explicit CallSiteDesc(Kind kind) : lineOrBytecode_(0), kind_(kind) {
+ MOZ_ASSERT(kind == Kind(kind_));
+ }
+ CallSiteDesc(uint32_t lineOrBytecode, Kind kind)
+ : lineOrBytecode_(lineOrBytecode), kind_(kind) {
+ MOZ_ASSERT(kind == Kind(kind_));
+ MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
+ }
+ uint32_t lineOrBytecode() const { return lineOrBytecode_; }
+ Kind kind() const { return Kind(kind_); }
+ bool mightBeCrossInstance() const { return kind() == CallSiteDesc::Dynamic; }
+};
+
+class CallSite : public CallSiteDesc {
+ uint32_t returnAddressOffset_;
+
+ public:
+ CallSite() : returnAddressOffset_(0) {}
+
+ CallSite(CallSiteDesc desc, uint32_t returnAddressOffset)
+ : CallSiteDesc(desc), returnAddressOffset_(returnAddressOffset) {}
+
+ void offsetBy(int32_t delta) { returnAddressOffset_ += delta; }
+ uint32_t returnAddressOffset() const { return returnAddressOffset_; }
+};
+
+WASM_DECLARE_POD_VECTOR(CallSite, CallSiteVector)
+
+// A CallSiteTarget describes the callee of a CallSite, either a function or a
+// trap exit. Although checked in debug builds, a CallSiteTarget doesn't
+// officially know whether it targets a function or trap, relying on the Kind of
+// the CallSite to discriminate.
+
+class CallSiteTarget {
+ uint32_t packed_;
+#ifdef DEBUG
+ enum Kind { None, FuncIndex, TrapExit } kind_;
+#endif
+
+ public:
+ explicit CallSiteTarget()
+ : packed_(UINT32_MAX)
+#ifdef DEBUG
+ ,
+ kind_(None)
+#endif
+ {
+ }
+
+ explicit CallSiteTarget(uint32_t funcIndex)
+ : packed_(funcIndex)
+#ifdef DEBUG
+ ,
+ kind_(FuncIndex)
+#endif
+ {
+ }
+
+ explicit CallSiteTarget(Trap trap)
+ : packed_(uint32_t(trap))
+#ifdef DEBUG
+ ,
+ kind_(TrapExit)
+#endif
+ {
+ }
+
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(kind_ == FuncIndex);
+ return packed_;
+ }
+
+ Trap trap() const {
+ MOZ_ASSERT(kind_ == TrapExit);
+ MOZ_ASSERT(packed_ < uint32_t(Trap::Limit));
+ return Trap(packed_);
+ }
+};
+
+typedef Vector<CallSiteTarget, 0, SystemAllocPolicy> CallSiteTargetVector;
+
+// A wasm::SymbolicAddress represents a pointer to a well-known function that is
+// embedded in wasm code. Since wasm code is serialized and later deserialized
+// into a different address space, symbolic addresses must be used for *all*
+// pointers into the address space. The MacroAssembler records a list of all
+// SymbolicAddresses and the offsets of their use in the code for later patching
+// during static linking.
+
+enum class SymbolicAddress {
+ ToInt32,
+#if defined(JS_CODEGEN_ARM)
+ aeabi_idivmod,
+ aeabi_uidivmod,
+#endif
+ ModD,
+ SinD,
+ CosD,
+ TanD,
+ ASinD,
+ ACosD,
+ ATanD,
+ CeilD,
+ CeilF,
+ FloorD,
+ FloorF,
+ TruncD,
+ TruncF,
+ NearbyIntD,
+ NearbyIntF,
+ ExpD,
+ LogD,
+ PowD,
+ ATan2D,
+ HandleDebugTrap,
+ HandleThrow,
+ HandleTrap,
+ ReportV128JSCall,
+ CallImport_General,
+ CoerceInPlace_ToInt32,
+ CoerceInPlace_ToNumber,
+ CoerceInPlace_JitEntry,
+ CoerceInPlace_ToBigInt,
+ AllocateBigInt,
+ BoxValue_Anyref,
+ DivI64,
+ UDivI64,
+ ModI64,
+ UModI64,
+ TruncateDoubleToInt64,
+ TruncateDoubleToUint64,
+ SaturatingTruncateDoubleToInt64,
+ SaturatingTruncateDoubleToUint64,
+ Uint64ToFloat32,
+ Uint64ToDouble,
+ Int64ToFloat32,
+ Int64ToDouble,
+ MemoryGrow,
+ MemorySize,
+ WaitI32,
+ WaitI64,
+ Wake,
+ MemCopy,
+ MemCopyShared,
+ DataDrop,
+ MemFill,
+ MemFillShared,
+ MemInit,
+ TableCopy,
+ ElemDrop,
+ TableFill,
+ TableGet,
+ TableGrow,
+ TableInit,
+ TableSet,
+ TableSize,
+ RefFunc,
+ PreBarrierFiltering,
+ PostBarrier,
+ PostBarrierFiltering,
+ StructNew,
+ StructNarrow,
+#if defined(JS_CODEGEN_MIPS32)
+ js_jit_gAtomic64Lock,
+#endif
+#ifdef WASM_CODEGEN_DEBUG
+ PrintI32,
+ PrintPtr,
+ PrintF32,
+ PrintF64,
+ PrintText,
+#endif
+ Limit
+};
+
+// The FailureMode indicates whether, immediately after a call to a builtin
+// returns, the return value should be checked against an error condition
+// (and if so, which one) which signals that the C++ callee has already
+// reported an error and thus wasm needs to wasmTrap(Trap::ThrowReported).
+
+enum class FailureMode : uint8_t {
+ Infallible,
+ FailOnNegI32,
+ FailOnNullPtr,
+ FailOnInvalidRef
+};
+
+// SymbolicAddressSignature carries type information for a function referred
+// to by a SymbolicAddress. In order that |argTypes| can be written out as a
+// static initialiser, it has to have fixed length. At present
+// SymbolicAddressSignature is used to describe functions with at most 6 arguments,
+// so |argTypes| has 7 entries in order to allow the last value to be
+// MIRType::None, in the hope of catching any accidental overruns of the
+// defined section of the array.
+
+static constexpr size_t SymbolicAddressSignatureMaxArgs = 6;
+
+struct SymbolicAddressSignature {
+ // The SymbolicAddress that is described.
+ const SymbolicAddress identity;
+ // The return type, or MIRType::None to denote 'void'.
+ const jit::MIRType retType;
+ // The failure mode, which is checked by masm.wasmCallBuiltinInstanceMethod.
+ const FailureMode failureMode;
+ // The number of arguments, 0 .. SymbolicAddressSignatureMaxArgs only.
+ const uint8_t numArgs;
+ // The argument types; SymbolicAddressSignatureMaxArgs + 1 guard, which
+ // should be MIRType::None.
+ const jit::MIRType argTypes[SymbolicAddressSignatureMaxArgs + 1];
+};
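+
+// A hypothetical entry, purely for illustration (the real tables are defined
+// elsewhere and may differ): a builtin taking (pointer, i32) and returning
+// i32, with MIRType::None filling the unused argTypes slots and the guard:
+//
+//   static const SymbolicAddressSignature ExampleSig = {
+//       SymbolicAddress::MemoryGrow, jit::MIRType::Int32,
+//       FailureMode::Infallible, 2,
+//       {jit::MIRType::Pointer, jit::MIRType::Int32, jit::MIRType::None,
+//        jit::MIRType::None, jit::MIRType::None, jit::MIRType::None,
+//        jit::MIRType::None}};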
+
+// The 16 in this assertion is derived as follows: SymbolicAddress is probably
+// size-4 aligned-4, but it's at the start of the struct, so there's no
+// alignment hole before it. All other components (MIRType and uint8_t) are
+// size-1 aligned-1, and there are 8 in total, so it is reasonable to assume
+// that they also don't create any alignment holes. Hence it is also
+// reasonable to assume that the actual size is 1 * 4 + 8 * 1 == 12. The
+// worst-plausible-case rounding will take that up to 16. Hence, the
+// assertion uses 16.
+
+static_assert(sizeof(SymbolicAddressSignature) <= 16,
+ "SymbolicAddressSignature unexpectedly large");
+
+bool IsRoundingFunction(SymbolicAddress callee, jit::RoundingMode* mode);
+
+// Represents the resizable limits of memories and tables.
+
+struct Limits {
+ uint64_t initial;
+ Maybe<uint64_t> maximum;
+
+ // `shared` is Shareable::False for tables but may be Shareable::True for
+ // memories.
+ Shareable shared;
+
+ Limits() = default;
+ explicit Limits(uint64_t initial, const Maybe<uint64_t>& maximum = Nothing(),
+ Shareable shared = Shareable::False)
+ : initial(initial), maximum(maximum), shared(shared) {}
+};
+
+// TableDesc describes a table as well as the offset of the table's base pointer
+// in global memory.
+//
+// A TableDesc contains the element type and whether the table is for asm.js,
+// which determines the table representation.
+// - ExternRef: a wasm anyref word (wasm::AnyRef)
+// - FuncRef: a two-word FunctionTableElem (wasm indirect call ABI)
+// - FuncRef (if `isAsmJS`): a two-word FunctionTableElem (asm.js ABI)
+// Eventually there should be a single unified AnyRef representation.
+
+struct TableDesc {
+ RefType elemType;
+ bool importedOrExported;
+ bool isAsmJS;
+ uint32_t globalDataOffset;
+ uint32_t initialLength;
+ Maybe<uint32_t> maximumLength;
+
+ TableDesc() = default;
+ TableDesc(RefType elemType, uint32_t initialLength,
+ Maybe<uint32_t> maximumLength, bool isAsmJS,
+ bool importedOrExported = false)
+ : elemType(elemType),
+ importedOrExported(importedOrExported),
+ isAsmJS(isAsmJS),
+ globalDataOffset(UINT32_MAX),
+ initialLength(initialLength),
+ maximumLength(maximumLength) {}
+};
+
+typedef Vector<TableDesc, 0, SystemAllocPolicy> TableDescVector;
+
+// TLS data for a single module instance.
+//
+// Every WebAssembly function expects to be passed a hidden TLS pointer argument
+// in WasmTlsReg. The TLS pointer argument points to a TlsData struct.
+// Compiled functions expect that the TLS pointer does not change for the
+// lifetime of the thread.
+//
+// There is a TlsData per module instance per thread, so inter-module calls need
+// to pass the TLS pointer appropriate for the callee module.
+//
+// After the TlsData struct follows the module's declared TLS variables.
+
+struct TlsData {
+ // Pointer to the base of the default memory (or null if there is none).
+ uint8_t* memoryBase;
+
+ // Bounds check limit of 32-bit memory, in bytes (or zero if there is no
+ // memory).
+ uint32_t boundsCheckLimit32;
+
+ // Pointer to the Instance that contains this TLS data.
+ Instance* instance;
+
+ // Equal to instance->realm_.
+ JS::Realm* realm;
+
+ // The containing JSContext.
+ JSContext* cx;
+
+  // The class_ of WasmValueBox; this is a per-process value.
+ const JSClass* valueBoxClass;
+
+ // Usually equal to cx->stackLimitForJitCode(JS::StackForUntrustedScript),
+ // but can be racily set to trigger immediate trap as an opportunity to
+ // CheckForInterrupt without an additional branch.
+ Atomic<uintptr_t, mozilla::Relaxed> stackLimit;
+
+ // Set to 1 when wasm should call CheckForInterrupt.
+ Atomic<uint32_t, mozilla::Relaxed> interrupt;
+
+ uint8_t* addressOfNeedsIncrementalBarrier;
+
+ // Methods to set, test and clear the above two fields. Both interrupt
+ // fields are Relaxed and so no consistency/ordering can be assumed.
+ void setInterrupt();
+ bool isInterrupted() const;
+ void resetInterrupt(JSContext* cx);
+
+ // Pointer that should be freed (due to padding before the TlsData).
+ void* allocatedBase;
+
+ // When compiling with tiering, the jumpTable has one entry for each
+ // baseline-compiled function.
+ void** jumpTable;
+
+ // The globalArea must be the last field. Globals for the module start here
+ // and are inline in this structure. 16-byte alignment is required for SIMD
+ // data.
+ MOZ_ALIGNED_DECL(16, char globalArea);
+};
+
+static const size_t TlsDataAlign = 16; // = Simd128DataSize
+static_assert(offsetof(TlsData, globalArea) % TlsDataAlign == 0, "aligned");
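+
+// Illustrative address computation (a sketch): a non-indirect global whose
+// offset is `off` lives at
+//
+//   reinterpret_cast<uint8_t*>(tls) + offsetof(TlsData, globalArea) + off
+//
+// whereas an indirect global's slot at that address holds a pointer to its
+// WasmGlobalObject Cell rather than the value itself.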
+
+struct TlsDataDeleter {
+ void operator()(TlsData* tlsData) { js_free(tlsData->allocatedBase); }
+};
+
+typedef UniquePtr<TlsData, TlsDataDeleter> UniqueTlsData;
+
+extern UniqueTlsData CreateTlsData(uint32_t globalDataLength);
+
+// ExportArg holds the unboxed operands to the wasm entry trampoline which can
+// be called through an ExportFuncPtr.
+
+struct ExportArg {
+ uint64_t lo;
+ uint64_t hi;
+};
+
+using ExportFuncPtr = int32_t (*)(ExportArg*, TlsData*);
+
+// FuncImportTls describes the region of wasm global memory allocated in the
+// instance's thread-local storage for a function import. This is accessed
+// directly from JIT code and mutated by Instance as exits become optimized and
+// deoptimized.
+
+struct FuncImportTls {
+ // The code to call at an import site: a wasm callee, a thunk into C++, or a
+ // thunk into JIT code.
+ void* code;
+
+ // The callee's TlsData pointer, which must be loaded to WasmTlsReg (along
+ // with any pinned registers) before calling 'code'.
+ TlsData* tls;
+
+ // The callee function's realm.
+ JS::Realm* realm;
+
+ // A GC pointer which keeps the callee alive and is used to recover import
+ // values for lazy table initialization.
+ GCPtrFunction fun;
+ static_assert(sizeof(GCPtrFunction) == sizeof(void*), "for JIT access");
+};
+
+// TableTls describes the region of wasm global memory allocated in the
+// instance's thread-local storage which is accessed directly from JIT code
+// to bounds-check and index the table.
+
+struct TableTls {
+ // Length of the table in number of elements (not bytes).
+ uint32_t length;
+
+ // Pointer to the array of elements (which can have various representations).
+ // For tables of anyref this is null.
+ void* functionBase;
+};
+
+// Table element for TableRepr::Func which carries both the code pointer and
+// a tls pointer (and thus anything reachable through the tls, including the
+// instance).
+
+struct FunctionTableElem {
+ // The code to call when calling this element. The table ABI is the system
+ // ABI with the additional ABI requirements that:
+ // - WasmTlsReg and any pinned registers have been loaded appropriately
+ // - if this is a heterogeneous table that requires a signature check,
+ // WasmTableCallSigReg holds the signature id.
+ void* code;
+
+ // The pointer to the callee's instance's TlsData. This must be loaded into
+ // WasmTlsReg before calling 'code'.
+ TlsData* tls;
+};
+
+// CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
+// This is hoisted into WasmTypes.h for sharing between Ion and Baseline.
+
+class CalleeDesc {
+ public:
+ enum Which {
+ // Calls a function defined in the same module by its index.
+ Func,
+
+ // Calls the import identified by the offset of its FuncImportTls in
+ // thread-local data.
+ Import,
+
+ // Calls a WebAssembly table (heterogeneous, index must be bounds
+ // checked, callee instance depends on TableDesc).
+ WasmTable,
+
+ // Calls an asm.js table (homogeneous, masked index, same-instance).
+ AsmJSTable,
+
+ // Call a C++ function identified by SymbolicAddress.
+ Builtin,
+
+ // Like Builtin, but automatically passes Instance* as first argument.
+ BuiltinInstanceMethod
+ };
+
+ private:
+ // which_ shall be initialized in the static constructors
+ MOZ_INIT_OUTSIDE_CTOR Which which_;
+ union U {
+ U() : funcIndex_(0) {}
+ uint32_t funcIndex_;
+ struct {
+ uint32_t globalDataOffset_;
+ } import;
+ struct {
+ uint32_t globalDataOffset_;
+ uint32_t minLength_;
+ TypeIdDesc funcTypeId_;
+ } table;
+ SymbolicAddress builtin_;
+ } u;
+
+ public:
+ CalleeDesc() = default;
+ static CalleeDesc function(uint32_t funcIndex) {
+ CalleeDesc c;
+ c.which_ = Func;
+ c.u.funcIndex_ = funcIndex;
+ return c;
+ }
+ static CalleeDesc import(uint32_t globalDataOffset) {
+ CalleeDesc c;
+ c.which_ = Import;
+ c.u.import.globalDataOffset_ = globalDataOffset;
+ return c;
+ }
+ static CalleeDesc wasmTable(const TableDesc& desc, TypeIdDesc funcTypeId) {
+ CalleeDesc c;
+ c.which_ = WasmTable;
+ c.u.table.globalDataOffset_ = desc.globalDataOffset;
+ c.u.table.minLength_ = desc.initialLength;
+ c.u.table.funcTypeId_ = funcTypeId;
+ return c;
+ }
+ static CalleeDesc asmJSTable(const TableDesc& desc) {
+ CalleeDesc c;
+ c.which_ = AsmJSTable;
+ c.u.table.globalDataOffset_ = desc.globalDataOffset;
+ return c;
+ }
+ static CalleeDesc builtin(SymbolicAddress callee) {
+ CalleeDesc c;
+ c.which_ = Builtin;
+ c.u.builtin_ = callee;
+ return c;
+ }
+ static CalleeDesc builtinInstanceMethod(SymbolicAddress callee) {
+ CalleeDesc c;
+ c.which_ = BuiltinInstanceMethod;
+ c.u.builtin_ = callee;
+ return c;
+ }
+ Which which() const { return which_; }
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(which_ == Func);
+ return u.funcIndex_;
+ }
+ uint32_t importGlobalDataOffset() const {
+ MOZ_ASSERT(which_ == Import);
+ return u.import.globalDataOffset_;
+ }
+ bool isTable() const { return which_ == WasmTable || which_ == AsmJSTable; }
+ uint32_t tableLengthGlobalDataOffset() const {
+ MOZ_ASSERT(isTable());
+ return u.table.globalDataOffset_ + offsetof(TableTls, length);
+ }
+ uint32_t tableFunctionBaseGlobalDataOffset() const {
+ MOZ_ASSERT(isTable());
+ return u.table.globalDataOffset_ + offsetof(TableTls, functionBase);
+ }
+ TypeIdDesc wasmTableSigId() const {
+ MOZ_ASSERT(which_ == WasmTable);
+ return u.table.funcTypeId_;
+ }
+ uint32_t wasmTableMinLength() const {
+ MOZ_ASSERT(which_ == WasmTable);
+ return u.table.minLength_;
+ }
+ SymbolicAddress builtin() const {
+ MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
+ return u.builtin_;
+ }
+};
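+
+// Example construction (a sketch; `tableDesc`, `funcTypeId` and `funcIndex`
+// stand for values gathered during compilation): an indirect call through a
+// wasm table pairs the table's metadata with the signature id the callee
+// must be checked against, while a same-module direct call only needs the
+// function index:
+//
+//   CalleeDesc viaTable = CalleeDesc::wasmTable(tableDesc, funcTypeId);
+//   MOZ_ASSERT(viaTable.which() == CalleeDesc::WasmTable);
+//
+//   CalleeDesc direct = CalleeDesc::function(funcIndex);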
+
+// Memories can be 32-bit (indices are 32 bits and the max is 4GB) or 64-bit
+// (indices are 64 bits and the max is XXX).
+
+enum class MemoryKind { Memory32, Memory64 };
+
+// Because ARM has a fixed-width instruction encoding, ARM can only express a
+// limited subset of immediates (in a single instruction).
+
+static const uint64_t HighestValidARMImmediate = 0xff000000;
+
+extern bool IsValidARMImmediate(uint32_t i);
+
+extern uint64_t RoundUpToNextValidARMImmediate(uint64_t i);
+
+// Bounds checks always compare the base of the memory access with the bounds
+// check limit. If the memory access is unaligned, this means that, even if the
+// bounds check succeeds, a few bytes of the access can extend past the end of
+// memory. To guard against this, extra space is included in the guard region to
+// catch the overflow. MaxMemoryAccessSize is a conservative approximation of
+// the maximum guard space needed to catch all unaligned overflows.
+
+static const unsigned MaxMemoryAccessSize = LitVal::sizeofLargestValue();
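+
+// Worked example of the overflow described above (made-up numbers): with a
+// bounds check limit L, a 16-byte unaligned access whose base is just below
+// L passes the check yet touches bytes up to L + 14. Keeping at least
+// MaxMemoryAccessSize of guard space past the limit guarantees such an
+// access still faults inside the guard region.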
+
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+
+// On WASM_SUPPORTS_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
+// unconditionally allocates a huge region of virtual memory of size
+// wasm::HugeMappedSize. This allows all memory resizing to work without
+// reallocation and provides enough guard space for all offsets to be folded
+// into memory accesses.
+
+static const uint64_t HugeIndexRange = uint64_t(UINT32_MAX) + 1;
+static const uint64_t HugeOffsetGuardLimit = uint64_t(INT32_MAX) + 1;
+static const uint64_t HugeUnalignedGuardPage = PageSize;
+static const uint64_t HugeMappedSize =
+ HugeIndexRange + HugeOffsetGuardLimit + HugeUnalignedGuardPage;
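+
+// Spelled out, the reservation is
+//
+//   HugeMappedSize = 4 GiB  (HugeIndexRange:       UINT32_MAX + 1)
+//                  + 2 GiB  (HugeOffsetGuardLimit: INT32_MAX + 1)
+//                  + 1 page (HugeUnalignedGuardPage)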
+
+static_assert(MaxMemoryAccessSize <= HugeUnalignedGuardPage,
+ "rounded up to static page size");
+static_assert(HugeOffsetGuardLimit < UINT32_MAX,
+ "checking for overflow against OffsetGuardLimit is enough.");
+
+#endif
+
+// On !WASM_SUPPORTS_HUGE_MEMORY platforms:
+// - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
+// original ArrayBuffer allocation which has no guard region at all.
+// - For WebAssembly memories, an additional GuardSize is mapped after the
+// accessible region of the memory to catch folded (base+offset) accesses
+// where `offset < OffsetGuardLimit` as well as the overflow from unaligned
+// accesses, as described above for MaxMemoryAccessSize.
+
+static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
+static const size_t GuardSize = PageSize;
+
+static_assert(MaxMemoryAccessSize < GuardSize,
+ "Guard page handles partial out-of-bounds");
+static_assert(OffsetGuardLimit < UINT32_MAX,
+ "checking for overflow against OffsetGuardLimit is enough.");
+
+static constexpr size_t GetMaxOffsetGuardLimit(bool hugeMemory) {
+#ifdef WASM_SUPPORTS_HUGE_MEMORY
+ return hugeMemory ? HugeOffsetGuardLimit : OffsetGuardLimit;
+#else
+ return OffsetGuardLimit;
+#endif
+}
+
+static const size_t MinOffsetGuardLimit = OffsetGuardLimit;
+
+// Return whether the given immediate satisfies the constraints of the platform
+// (viz. that, on ARM, IsValidARMImmediate).
+
+extern bool IsValidBoundsCheckImmediate(uint32_t i);
+
+// For a given WebAssembly/asm.js max size, return the number of bytes to
+// map which will necessarily be a multiple of the system page size and greater
+// than maxSize. For a returned mappedSize:
+// boundsCheckLimit = mappedSize - GuardSize
+// IsValidBoundsCheckImmediate(boundsCheckLimit)
+
+extern size_t ComputeMappedSize(uint64_t maxSize);
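+
+// For example (a sketch, assuming a 64 KiB system page and GuardSize of one
+// page): for maxSize = 3 * 64 KiB the returned mappedSize is a page-aligned
+// value of at least 4 * 64 KiB, rounded up further if needed so that
+// mappedSize - GuardSize is a valid bounds check immediate on the target.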
+
+// The following thresholds were derived from a microbenchmark. If we begin to
+// ship this optimization for more platforms, we will need to extend this list.
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
+static const uint32_t MaxInlineMemoryCopyLength = 64;
+static const uint32_t MaxInlineMemoryFillLength = 64;
+#elif defined(JS_CODEGEN_X86)
+static const uint32_t MaxInlineMemoryCopyLength = 32;
+static const uint32_t MaxInlineMemoryFillLength = 32;
+#else
+static const uint32_t MaxInlineMemoryCopyLength = 0;
+static const uint32_t MaxInlineMemoryFillLength = 0;
+#endif
+
+static_assert(MaxInlineMemoryCopyLength < MinOffsetGuardLimit, "precondition");
+static_assert(MaxInlineMemoryFillLength < MinOffsetGuardLimit, "precondition");
+
+// wasm::Frame represents the bytes pushed by the call instruction and the
+// fixed prologue generated by wasm::GenerateCallablePrologue.
+//
+// Across all architectures it is assumed that, before the call instruction, the
+// stack pointer is WasmStackAlignment-aligned. Thus after the prologue, and
+// before the function has made its stack reservation, the stack alignment is
+// sizeof(Frame) % WasmStackAlignment.
+//
+// During MacroAssembler code generation, the bytes pushed after the wasm::Frame
+// are counted by masm.framePushed. Thus, the stack alignment at any point in
+// time is (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment.
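+//
+// For example, on a 64-bit target where WasmStackAlignment is 16, the Frame
+// is two pointers (16 bytes), so immediately after the prologue the stack is
+// still 16-byte aligned; such a function keeps its alignment by reserving
+// stack in multiples of 16 (i.e. keeping masm.framePushed % 16 == 0).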
+
+class Frame {
+ // See GenerateCallableEpilogue for why this must be
+ // the first field of wasm::Frame (in a downward-growing stack).
+ // It's either the caller's Frame*, for wasm callers, or the JIT caller frame
+ // plus a tag otherwise.
+ uint8_t* callerFP_;
+
+ // The return address pushed by the call (in the case of ARM/MIPS the return
+ // address is pushed by the first instruction of the prologue).
+ void* returnAddress_;
+
+ public:
+ static constexpr uint32_t callerFPOffset() {
+ return offsetof(Frame, callerFP_);
+ }
+ static constexpr uint32_t returnAddressOffset() {
+ return offsetof(Frame, returnAddress_);
+ }
+
+ uint8_t* returnAddress() const {
+ return reinterpret_cast<uint8_t*>(returnAddress_);
+ }
+
+ void** addressOfReturnAddress() {
+ return reinterpret_cast<void**>(&returnAddress_);
+ }
+
+ uint8_t* rawCaller() const { return callerFP_; }
+
+ Frame* wasmCaller() const {
+ MOZ_ASSERT(!callerIsExitOrJitEntryFP());
+ return reinterpret_cast<Frame*>(callerFP_);
+ }
+
+ bool callerIsExitOrJitEntryFP() const {
+ return isExitOrJitEntryFP(callerFP_);
+ }
+
+ uint8_t* jitEntryCaller() const { return toJitEntryCaller(callerFP_); }
+
+ static const Frame* fromUntaggedWasmExitFP(const void* savedFP) {
+ MOZ_ASSERT(!isExitOrJitEntryFP(savedFP));
+ return reinterpret_cast<const Frame*>(savedFP);
+ }
+
+ static bool isExitOrJitEntryFP(const void* fp) {
+ return reinterpret_cast<uintptr_t>(fp) & ExitOrJitEntryFPTag;
+ }
+
+ static uint8_t* toJitEntryCaller(const void* fp) {
+ MOZ_ASSERT(isExitOrJitEntryFP(fp));
+ return reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(fp) &
+ ~ExitOrJitEntryFPTag);
+ }
+
+ static uint8_t* addExitOrJitEntryFPTag(const Frame* fp) {
+ MOZ_ASSERT(!isExitOrJitEntryFP(fp));
+ return reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(fp) |
+ ExitOrJitEntryFPTag);
+ }
+};
+
+static_assert(!std::is_polymorphic_v<Frame>, "Frame doesn't need a vtable.");
+static_assert(sizeof(Frame) == 2 * sizeof(void*),
+ "Frame is a two pointer structure");
+
+class FrameWithTls : public Frame {
+ TlsData* calleeTls_;
+ TlsData* callerTls_;
+
+ public:
+ TlsData* calleeTls() { return calleeTls_; }
+ TlsData* callerTls() { return callerTls_; }
+
+ constexpr static uint32_t sizeWithoutFrame() {
+ return sizeof(wasm::FrameWithTls) - sizeof(wasm::Frame);
+ }
+
+ constexpr static uint32_t calleeTLSOffset() {
+ return offsetof(FrameWithTls, calleeTls_) - sizeof(wasm::Frame);
+ }
+
+ constexpr static uint32_t callerTLSOffset() {
+ return offsetof(FrameWithTls, callerTls_) - sizeof(wasm::Frame);
+ }
+};
+
+static_assert(FrameWithTls::calleeTLSOffset() == 0u,
+ "Callee tls stored right above the return address.");
+static_assert(FrameWithTls::callerTLSOffset() == sizeof(void*),
+ "Caller tls stored right above the callee tls.");
+
+static_assert(FrameWithTls::sizeWithoutFrame() == 2 * sizeof(void*),
+ "There are only two additional slots");
+
+#if defined(JS_CODEGEN_ARM64)
+static_assert(sizeof(Frame) % 16 == 0, "frame is aligned");
+#endif
+
+// A DebugFrame is a Frame with additional fields that are added after the
+// normal function prologue by the baseline compiler. If a Module is compiled
+// with debugging enabled, then all its code creates DebugFrames on the stack
+// instead of just Frames. These extra fields are used by the Debugger API.
+
+class DebugFrame {
+ // The register results field. Initialized only during the baseline
+ // compiler's return sequence to allow the debugger to inspect and
+ // modify the return values of a frame being debugged.
+ union SpilledRegisterResult {
+ private:
+ int32_t i32_;
+ int64_t i64_;
+ intptr_t ref_;
+ AnyRef anyref_;
+ float f32_;
+ double f64_;
+#ifdef ENABLE_WASM_SIMD
+ V128 v128_;
+#endif
+#ifdef DEBUG
+ // Should we add a new value representation, this will remind us to update
+ // SpilledRegisterResult.
+ static inline void assertAllValueTypesHandled(ValType type) {
+ switch (type.kind()) {
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+ case ValType::V128:
+ return;
+ case ValType::Ref:
+ switch (type.refTypeKind()) {
+ case RefType::Func:
+ case RefType::Extern:
+ case RefType::Eq:
+ case RefType::TypeIndex:
+ return;
+ }
+ }
+ }
+#endif
+ };
+ SpilledRegisterResult registerResults_[MaxRegisterResults];
+
+ // The returnValue() method returns a HandleValue pointing to this field.
+ js::Value cachedReturnJSValue_;
+
+ // If the function returns multiple results, this field is initialized
+ // to a pointer to the stack results.
+ void* stackResultsPointer_;
+
+ // The function index of this frame. Technically, this could be derived
+  // given a PC into this frame (which could look up the CodeRange which has
+ // the function index), but this isn't always readily available.
+ uint32_t funcIndex_;
+
+ // Flags whose meaning are described below.
+ union Flags {
+ struct {
+ uint32_t observing : 1;
+ uint32_t isDebuggee : 1;
+ uint32_t prevUpToDate : 1;
+ uint32_t hasCachedSavedFrame : 1;
+ uint32_t hasCachedReturnJSValue : 1;
+ uint32_t hasSpilledRefRegisterResult : MaxRegisterResults;
+ };
+ uint32_t allFlags;
+ } flags_;
+
+ // Avoid -Wunused-private-field warnings.
+ protected:
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_ARM) || \
+ defined(JS_CODEGEN_X86)
+ // See alignmentStaticAsserts(). For MIPS32, ARM32 and X86 DebugFrame is only
+ // 4-byte aligned, so we add another word to get up to 8-byte
+ // alignment.
+ uint32_t padding_;
+#endif
+#if defined(ENABLE_WASM_SIMD) && defined(JS_CODEGEN_ARM64)
+ uint64_t padding_;
+#endif
+
+ private:
+ // The Frame goes at the end since the stack grows down.
+ Frame frame_;
+
+ public:
+ static DebugFrame* from(Frame* fp);
+ Frame& frame() { return frame_; }
+ uint32_t funcIndex() const { return funcIndex_; }
+ Instance* instance() const;
+ GlobalObject* global() const;
+ bool hasGlobal(const GlobalObject* global) const;
+ JSObject* environmentChain() const;
+ bool getLocal(uint32_t localIndex, MutableHandleValue vp);
+
+ // The return value must be written from the unboxed representation in the
+ // results union into cachedReturnJSValue_ by updateReturnJSValue() before
+ // returnValue() can return a Handle to it.
+
+ bool hasCachedReturnJSValue() const { return flags_.hasCachedReturnJSValue; }
+ [[nodiscard]] bool updateReturnJSValue(JSContext* cx);
+ HandleValue returnValue() const;
+ void clearReturnJSValue();
+
+ // Once the debugger observes a frame, it must be notified via
+ // onLeaveFrame() before the frame is popped. Calling observe() ensures the
+ // leave frame traps are enabled. Both methods are idempotent so the caller
+ // doesn't have to worry about calling them more than once.
+
+ void observe(JSContext* cx);
+ void leave(JSContext* cx);
+
+  // The 'isDebuggee' bit is initialized to false and set by the WebAssembly
+ // runtime right before a frame is exposed to the debugger, as required by
+ // the Debugger API. The bit is then used for Debugger-internal purposes
+ // afterwards.
+
+ bool isDebuggee() const { return flags_.isDebuggee; }
+ void setIsDebuggee() { flags_.isDebuggee = true; }
+ void unsetIsDebuggee() { flags_.isDebuggee = false; }
+
+ // These are opaque boolean flags used by the debugger to implement
+ // AbstractFramePtr. They are initialized to false and not otherwise read or
+  // written by wasm code or the runtime.
+
+ bool prevUpToDate() const { return flags_.prevUpToDate; }
+ void setPrevUpToDate() { flags_.prevUpToDate = true; }
+ void unsetPrevUpToDate() { flags_.prevUpToDate = false; }
+
+ bool hasCachedSavedFrame() const { return flags_.hasCachedSavedFrame; }
+ void setHasCachedSavedFrame() { flags_.hasCachedSavedFrame = true; }
+ void clearHasCachedSavedFrame() { flags_.hasCachedSavedFrame = false; }
+
+ bool hasSpilledRegisterRefResult(size_t n) const {
+ uint32_t mask = hasSpilledRegisterRefResultBitMask(n);
+ return (flags_.allFlags & mask) != 0;
+ }
+
+ // DebugFrame is accessed directly by JIT code.
+
+ static constexpr size_t offsetOfRegisterResults() {
+ return offsetof(DebugFrame, registerResults_);
+ }
+ static constexpr size_t offsetOfRegisterResult(size_t n) {
+ MOZ_ASSERT(n < MaxRegisterResults);
+ return offsetOfRegisterResults() + n * sizeof(SpilledRegisterResult);
+ }
+ static constexpr size_t offsetOfCachedReturnJSValue() {
+ return offsetof(DebugFrame, cachedReturnJSValue_);
+ }
+ static constexpr size_t offsetOfStackResultsPointer() {
+ return offsetof(DebugFrame, stackResultsPointer_);
+ }
+ static constexpr size_t offsetOfFlags() {
+ return offsetof(DebugFrame, flags_);
+ }
+ static constexpr uint32_t hasSpilledRegisterRefResultBitMask(size_t n) {
+ MOZ_ASSERT(n < MaxRegisterResults);
+ union Flags flags = {.allFlags = 0};
+ flags.hasSpilledRefRegisterResult = 1 << n;
+ MOZ_ASSERT(flags.allFlags != 0);
+ return flags.allFlags;
+ }
+ static constexpr size_t offsetOfFuncIndex() {
+ return offsetof(DebugFrame, funcIndex_);
+ }
+ static constexpr size_t offsetOfFrame() {
+ return offsetof(DebugFrame, frame_);
+ }
+
+  // DebugFrames are 8-byte aligned, allowing them to be placed in
+ // an AbstractFramePtr.
+
+ static const unsigned Alignment = 8;
+ static void alignmentStaticAsserts();
+};
+
+// Verbose logging support.
+
+extern void Log(JSContext* cx, const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+
+// Codegen debug support.
+
+enum class DebugChannel {
+ Function,
+ Import,
+};
+
+#ifdef WASM_CODEGEN_DEBUG
+bool IsCodegenDebugEnabled(DebugChannel channel);
+#endif
+
+void DebugCodegen(DebugChannel channel, const char* fmt, ...)
+ MOZ_FORMAT_PRINTF(2, 3);
+
+using PrintCallback = void (*)(const char*);
+
+} // namespace wasm
+
+template <>
+struct InternalBarrierMethods<wasm::Val> {
+ STATIC_ASSERT_ANYREF_IS_JSOBJECT;
+
+ static bool isMarkable(const wasm::Val& v) { return v.isJSObject(); }
+
+ static void preBarrier(const wasm::Val& v) {
+ if (v.isJSObject()) {
+ gc::PreWriteBarrier(v.asJSObject());
+ }
+ }
+
+ static MOZ_ALWAYS_INLINE void postBarrier(wasm::Val* vp,
+ const wasm::Val& prev,
+ const wasm::Val& next) {
+ MOZ_RELEASE_ASSERT(!prev.type().isValid() || prev.type() == next.type());
+ JSObject* prevObj = prev.isJSObject() ? prev.asJSObject() : nullptr;
+ JSObject* nextObj = next.isJSObject() ? next.asJSObject() : nullptr;
+ if (nextObj) {
+ JSObject::postWriteBarrier(vp->asJSObjectAddress(), prevObj, nextObj);
+ }
+ }
+
+ static void readBarrier(const wasm::Val& v) {
+ if (v.isJSObject()) {
+ gc::ReadBarrier(v.asJSObject());
+ }
+ }
+
+#ifdef DEBUG
+ static void assertThingIsNotGray(const wasm::Val& v) {
+ if (v.isJSObject()) {
+ JS::AssertObjectIsNotGray(v.asJSObject());
+ }
+ }
+#endif
+};
+
+} // namespace js
+
+#endif // wasm_types_h
diff --git a/js/src/wasm/WasmUtility.h b/js/src/wasm/WasmUtility.h
new file mode 100644
index 0000000000..58ac87e74b
--- /dev/null
+++ b/js/src/wasm/WasmUtility.h
@@ -0,0 +1,23 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef wasm_utility_h
+#define wasm_utility_h
+
+#include <algorithm>
+namespace js {
+namespace wasm {
+
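+// EqualContainers compares two containers element-wise and, unlike the
+// three-argument std::equal overload, also requires the lengths to match.
+// For example (hypothetical call site), EqualContainers(funcType.args(),
+// observedArgs) is false if either the lengths or any element differ.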
+template <class Container1, class Container2>
+static inline bool EqualContainers(const Container1& lhs,
+ const Container2& rhs) {
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+}
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_utility_h
diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
new file mode 100644
index 0000000000..f9c61833f6
--- /dev/null
+++ b/js/src/wasm/WasmValidate.cpp
@@ -0,0 +1,3382 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmValidate.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Unused.h"
+#include "mozilla/Utf8.h"
+
+#include "jit/JitOptions.h"
+#include "js/Printf.h"
+#include "js/String.h" // JS::MaxStringLength
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "wasm/TypedObject.h"
+#include "wasm/WasmOpIter.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::AsChars;
+using mozilla::CheckedInt;
+using mozilla::CheckedInt32;
+using mozilla::IsUtf8;
+using mozilla::Span;
+using mozilla::Unused;
+
+// Decoder implementation.
+
+bool Decoder::failf(const char* msg, ...) {
+ va_list ap;
+ va_start(ap, msg);
+ UniqueChars str(JS_vsmprintf(msg, ap));
+ va_end(ap);
+ if (!str) {
+ return false;
+ }
+
+ return fail(str.get());
+}
+
+void Decoder::warnf(const char* msg, ...) {
+ if (!warnings_) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, msg);
+ UniqueChars str(JS_vsmprintf(msg, ap));
+ va_end(ap);
+ if (!str) {
+ return;
+ }
+
+ Unused << warnings_->append(std::move(str));
+}
+
+bool Decoder::fail(size_t errorOffset, const char* msg) {
+ MOZ_ASSERT(error_);
+ UniqueChars strWithOffset(JS_smprintf("at offset %zu: %s", errorOffset, msg));
+ if (!strWithOffset) {
+ return false;
+ }
+
+ *error_ = std::move(strWithOffset);
+ return false;
+}
+
+bool Decoder::readSectionHeader(uint8_t* id, SectionRange* range) {
+ if (!readFixedU8(id)) {
+ return false;
+ }
+
+ uint32_t size;
+ if (!readVarU32(&size)) {
+ return false;
+ }
+
+ range->start = currentOffset();
+ range->size = size;
+ return true;
+}
+
+bool Decoder::startSection(SectionId id, ModuleEnvironment* env,
+ MaybeSectionRange* range, const char* sectionName) {
+ MOZ_ASSERT(!*range);
+
+ // Record state at beginning of section to allow rewinding to this point
+ // if, after skipping through several custom sections, we don't find the
+ // section 'id'.
+ const uint8_t* const initialCur = cur_;
+ const size_t initialCustomSectionsLength = env->customSections.length();
+
+ // Maintain a pointer to the current section that gets updated as custom
+ // sections are skipped.
+ const uint8_t* currentSectionStart = cur_;
+
+ // Only start a section with 'id', skipping any custom sections before it.
+
+ uint8_t idValue;
+ if (!readFixedU8(&idValue)) {
+ goto rewind;
+ }
+
+ while (idValue != uint8_t(id)) {
+ if (idValue != uint8_t(SectionId::Custom)) {
+ goto rewind;
+ }
+
+ // Rewind to the beginning of the current section since this is what
+ // skipCustomSection() assumes.
+ cur_ = currentSectionStart;
+ if (!skipCustomSection(env)) {
+ return false;
+ }
+
+ // Having successfully skipped a custom section, consider the next
+ // section.
+ currentSectionStart = cur_;
+ if (!readFixedU8(&idValue)) {
+ goto rewind;
+ }
+ }
+
+ // Don't check the size since the range of bytes being decoded might not
+ // contain the section body. (This is currently the case when streaming: the
+  // code section header is decoded with the module environment bytes, while
+  // the body of the code section is streamed in separately.)
+
+ uint32_t size;
+ if (!readVarU32(&size)) {
+ goto fail;
+ }
+
+ range->emplace();
+ (*range)->start = currentOffset();
+ (*range)->size = size;
+ return true;
+
+rewind:
+ cur_ = initialCur;
+ env->customSections.shrinkTo(initialCustomSectionsLength);
+ return true;
+
+fail:
+ return failf("failed to start %s section", sectionName);
+}
+
+bool Decoder::finishSection(const SectionRange& range,
+ const char* sectionName) {
+ if (resilientMode_) {
+ return true;
+ }
+ if (range.size != currentOffset() - range.start) {
+ return failf("byte size mismatch in %s section", sectionName);
+ }
+ return true;
+}
+
+bool Decoder::startCustomSection(const char* expected, size_t expectedLength,
+ ModuleEnvironment* env,
+ MaybeSectionRange* range) {
+ // Record state at beginning of section to allow rewinding to this point
+  // if, after skipping through several custom sections, we don't find the
+  // expected custom section.
+ const uint8_t* const initialCur = cur_;
+ const size_t initialCustomSectionsLength = env->customSections.length();
+
+ while (true) {
+ // Try to start a custom section. If we can't, rewind to the beginning
+ // since we may have skipped several custom sections already looking for
+ // 'expected'.
+ if (!startSection(SectionId::Custom, env, range, "custom")) {
+ return false;
+ }
+ if (!*range) {
+ goto rewind;
+ }
+
+ if (bytesRemain() < (*range)->size) {
+ goto fail;
+ }
+
+ CustomSectionEnv sec;
+ if (!readVarU32(&sec.nameLength) || sec.nameLength > bytesRemain()) {
+ goto fail;
+ }
+
+ sec.nameOffset = currentOffset();
+ sec.payloadOffset = sec.nameOffset + sec.nameLength;
+
+ uint32_t payloadEnd = (*range)->start + (*range)->size;
+ if (sec.payloadOffset > payloadEnd) {
+ goto fail;
+ }
+
+ sec.payloadLength = payloadEnd - sec.payloadOffset;
+
+ // Now that we have a valid custom section, record its offsets in the
+ // metadata which can be queried by the user via Module.customSections.
+ // Note: after an entry is appended, it may be popped if this loop or
+ // the loop in startSection needs to rewind.
+ if (!env->customSections.append(sec)) {
+ return false;
+ }
+
+ // If this is the expected custom section, we're done.
+ if (!expected || (expectedLength == sec.nameLength &&
+ !memcmp(cur_, expected, sec.nameLength))) {
+ cur_ += sec.nameLength;
+ return true;
+ }
+
+ // Otherwise, blindly skip the custom section and keep looking.
+ skipAndFinishCustomSection(**range);
+ range->reset();
+ }
+ MOZ_CRASH("unreachable");
+
+rewind:
+ cur_ = initialCur;
+ env->customSections.shrinkTo(initialCustomSectionsLength);
+ return true;
+
+fail:
+ return fail("failed to start custom section");
+}
+
+void Decoder::finishCustomSection(const char* name, const SectionRange& range) {
+ MOZ_ASSERT(cur_ >= beg_);
+ MOZ_ASSERT(cur_ <= end_);
+
+ if (error_ && *error_) {
+ warnf("in the '%s' custom section: %s", name, error_->get());
+ skipAndFinishCustomSection(range);
+ return;
+ }
+
+ uint32_t actualSize = currentOffset() - range.start;
+ if (range.size != actualSize) {
+ if (actualSize < range.size) {
+ warnf("in the '%s' custom section: %" PRIu32 " unconsumed bytes", name,
+ uint32_t(range.size - actualSize));
+ } else {
+ warnf("in the '%s' custom section: %" PRIu32
+ " bytes consumed past the end",
+ name, uint32_t(actualSize - range.size));
+ }
+ skipAndFinishCustomSection(range);
+ return;
+ }
+
+  // Nothing to do! (cf. skipAndFinishCustomSection())
+}
+
+void Decoder::skipAndFinishCustomSection(const SectionRange& range) {
+ MOZ_ASSERT(cur_ >= beg_);
+ MOZ_ASSERT(cur_ <= end_);
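+  // Note: range.start is a module-relative offset; offsetInModule_ translates
+  // it back into an offset within the buffer [beg_, end_) before the section
+  // size is added to step past the payload.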
+ cur_ = (beg_ + (range.start - offsetInModule_)) + range.size;
+ MOZ_ASSERT(cur_ <= end_);
+ clearError();
+}
+
+bool Decoder::skipCustomSection(ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!startCustomSection(nullptr, 0, env, &range)) {
+ return false;
+ }
+ if (!range) {
+ return fail("expected custom section");
+ }
+
+ skipAndFinishCustomSection(*range);
+ return true;
+}
+
+bool Decoder::startNameSubsection(NameType nameType,
+ Maybe<uint32_t>* endOffset) {
+ MOZ_ASSERT(!*endOffset);
+
+ const uint8_t* const initialPosition = cur_;
+
+ uint8_t nameTypeValue;
+ if (!readFixedU8(&nameTypeValue)) {
+ goto rewind;
+ }
+
+ if (nameTypeValue != uint8_t(nameType)) {
+ goto rewind;
+ }
+
+ uint32_t payloadLength;
+ if (!readVarU32(&payloadLength) || payloadLength > bytesRemain()) {
+ return fail("bad name subsection payload length");
+ }
+
+ *endOffset = Some(currentOffset() + payloadLength);
+ return true;
+
+rewind:
+ cur_ = initialPosition;
+ return true;
+}
+
+bool Decoder::finishNameSubsection(uint32_t expected) {
+ uint32_t actual = currentOffset();
+ if (expected != actual) {
+ return failf("bad name subsection length (expected: %" PRIu32
+ ", actual: %" PRIu32 ")",
+ expected, actual);
+ }
+
+ return true;
+}
+
+bool Decoder::skipNameSubsection() {
+ uint8_t nameTypeValue;
+ if (!readFixedU8(&nameTypeValue)) {
+ return fail("unable to read name subsection id");
+ }
+
+ switch (nameTypeValue) {
+ case uint8_t(NameType::Module):
+ case uint8_t(NameType::Function):
+ return fail("out of order name subsections");
+ default:
+ break;
+ }
+
+ uint32_t payloadLength;
+ if (!readVarU32(&payloadLength) || !readBytes(payloadLength)) {
+ return fail("bad name subsection payload length");
+ }
+
+ return true;
+}
+
+// Misc helpers.
+
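+// Local declarations are run-length encoded as a vector of (count, type)
+// entries, with consecutive locals of the same type collapsed into one entry.
+// For example, the locals [i32, i32, f64] encode as two entries, (2, i32)
+// followed by (1, f64). EncodeLocalEntries produces this form and
+// DecodeLocalEntries/DecodeValidatedLocalEntries consume it.
+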
+bool wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals) {
+ if (locals.length() > MaxLocals) {
+ return false;
+ }
+
+ uint32_t numLocalEntries = 0;
+ if (locals.length()) {
+ ValType prev = locals[0];
+ numLocalEntries++;
+ for (ValType t : locals) {
+ if (t != prev) {
+ numLocalEntries++;
+ prev = t;
+ }
+ }
+ }
+
+ if (!e.writeVarU32(numLocalEntries)) {
+ return false;
+ }
+
+ if (numLocalEntries) {
+ ValType prev = locals[0];
+ uint32_t count = 1;
+ for (uint32_t i = 1; i < locals.length(); i++, count++) {
+ if (prev != locals[i]) {
+ if (!e.writeVarU32(count)) {
+ return false;
+ }
+ if (!e.writeValType(prev)) {
+ return false;
+ }
+ prev = locals[i];
+ count = 0;
+ }
+ }
+ if (!e.writeVarU32(count)) {
+ return false;
+ }
+ if (!e.writeValType(prev)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
+ const FeatureArgs& features,
+ ValTypeVector* locals) {
+ uint32_t numLocalEntries;
+ if (!d.readVarU32(&numLocalEntries)) {
+ return d.fail("failed to read number of local entries");
+ }
+
+ for (uint32_t i = 0; i < numLocalEntries; i++) {
+ uint32_t count;
+ if (!d.readVarU32(&count)) {
+ return d.fail("failed to read local entry count");
+ }
+
+ if (MaxLocals - locals->length() < count) {
+ return d.fail("too many locals");
+ }
+
+ ValType type;
+ if (!d.readValType(types, features, &type)) {
+ return false;
+ }
+
+ if (!type.isDefaultable()) {
+ return d.fail("cannot have a non-defaultable local");
+ }
+
+ if (!locals->appendN(type, count)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool wasm::DecodeValidatedLocalEntries(Decoder& d, ValTypeVector* locals) {
+ uint32_t numLocalEntries;
+ MOZ_ALWAYS_TRUE(d.readVarU32(&numLocalEntries));
+
+ for (uint32_t i = 0; i < numLocalEntries; i++) {
+ uint32_t count = d.uncheckedReadVarU32();
+ MOZ_ASSERT(MaxLocals - locals->length() >= count);
+ if (!locals->appendN(d.uncheckedReadValType(), count)) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Function body validation.
+
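+// Validation instantiates the generic OpIter with a policy whose values are
+// all Nothing: NothingVector and the placeholders below satisfy the OpIter
+// interface while carrying no data, so only type and structure checking
+// remains.
+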
+class NothingVector {
+ Nothing unused_;
+
+ public:
+ bool resize(size_t length) { return true; }
+ Nothing& operator[](size_t) { return unused_; }
+ Nothing& back() { return unused_; }
+};
+
+struct ValidatingPolicy {
+ using Value = Nothing;
+ using ValueVector = NothingVector;
+ using ControlItem = Nothing;
+};
+
+using ValidatingOpIter = OpIter<ValidatingPolicy>;
+
+static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
+ uint32_t funcIndex,
+ const ValTypeVector& locals,
+ const uint8_t* bodyEnd, Decoder* d) {
+ ValidatingOpIter iter(env, *d);
+
+ if (!iter.readFunctionStart(funcIndex)) {
+ return false;
+ }
+
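+// CHECK(c) propagates a validation failure by returning false; on success it
+// breaks out of the enclosing switch case so decoding continues with the next
+// opcode.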
+#define CHECK(c) \
+ if (!(c)) return false; \
+ break
+
+#ifdef ENABLE_WASM_SIMD_EXPERIMENTAL
+# define CHECK_SIMD_EXPERIMENTAL() (void)(0)
+#else
+# define CHECK_SIMD_EXPERIMENTAL() return iter.unrecognizedOpcode(&op)
+#endif
+
+ while (true) {
+ OpBytes op;
+ if (!iter.readOp(&op)) {
+ return false;
+ }
+
+ Nothing nothing;
+ NothingVector nothings;
+ ResultType unusedType;
+
+ switch (op.b0) {
+ case uint16_t(Op::End): {
+ LabelKind unusedKind;
+ if (!iter.readEnd(&unusedKind, &unusedType, &nothings, &nothings)) {
+ return false;
+ }
+ iter.popEnd();
+ if (iter.controlStackEmpty()) {
+ return iter.readFunctionEnd(bodyEnd);
+ }
+ break;
+ }
+ case uint16_t(Op::Nop):
+ CHECK(iter.readNop());
+ case uint16_t(Op::Drop):
+ CHECK(iter.readDrop());
+ case uint16_t(Op::Call): {
+ uint32_t unusedIndex;
+ NothingVector unusedArgs;
+ CHECK(iter.readCall(&unusedIndex, &unusedArgs));
+ }
+ case uint16_t(Op::CallIndirect): {
+ uint32_t unusedIndex, unusedIndex2;
+ NothingVector unusedArgs;
+ CHECK(iter.readCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
+ &unusedArgs));
+ }
+ case uint16_t(Op::I32Const): {
+ int32_t unused;
+ CHECK(iter.readI32Const(&unused));
+ }
+ case uint16_t(Op::I64Const): {
+ int64_t unused;
+ CHECK(iter.readI64Const(&unused));
+ }
+ case uint16_t(Op::F32Const): {
+ float unused;
+ CHECK(iter.readF32Const(&unused));
+ }
+ case uint16_t(Op::F64Const): {
+ double unused;
+ CHECK(iter.readF64Const(&unused));
+ }
+ case uint16_t(Op::GetLocal): {
+ uint32_t unused;
+ CHECK(iter.readGetLocal(locals, &unused));
+ }
+ case uint16_t(Op::SetLocal): {
+ uint32_t unused;
+ CHECK(iter.readSetLocal(locals, &unused, &nothing));
+ }
+ case uint16_t(Op::TeeLocal): {
+ uint32_t unused;
+ CHECK(iter.readTeeLocal(locals, &unused, &nothing));
+ }
+ case uint16_t(Op::GetGlobal): {
+ uint32_t unused;
+ CHECK(iter.readGetGlobal(&unused));
+ }
+ case uint16_t(Op::SetGlobal): {
+ uint32_t unused;
+ CHECK(iter.readSetGlobal(&unused, &nothing));
+ }
+#ifdef ENABLE_WASM_REFTYPES
+ case uint16_t(Op::TableGet): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableGet(&unusedTableIndex, &nothing));
+ }
+ case uint16_t(Op::TableSet): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableSet(&unusedTableIndex, &nothing, &nothing));
+ }
+#endif
+ case uint16_t(Op::SelectNumeric): {
+ StackType unused;
+ CHECK(iter.readSelect(/*typed*/ false, &unused, &nothing, &nothing,
+ &nothing));
+ }
+ case uint16_t(Op::SelectTyped): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ StackType unused;
+ CHECK(iter.readSelect(/*typed*/ true, &unused, &nothing, &nothing,
+ &nothing));
+ }
+ case uint16_t(Op::Block):
+ CHECK(iter.readBlock(&unusedType));
+ case uint16_t(Op::Loop):
+ CHECK(iter.readLoop(&unusedType));
+ case uint16_t(Op::If):
+ CHECK(iter.readIf(&unusedType, &nothing));
+ case uint16_t(Op::Else):
+ CHECK(iter.readElse(&unusedType, &unusedType, &nothings));
+ case uint16_t(Op::I32Clz):
+ case uint16_t(Op::I32Ctz):
+ case uint16_t(Op::I32Popcnt):
+ CHECK(iter.readUnary(ValType::I32, &nothing));
+ case uint16_t(Op::I64Clz):
+ case uint16_t(Op::I64Ctz):
+ case uint16_t(Op::I64Popcnt):
+ CHECK(iter.readUnary(ValType::I64, &nothing));
+ case uint16_t(Op::F32Abs):
+ case uint16_t(Op::F32Neg):
+ case uint16_t(Op::F32Ceil):
+ case uint16_t(Op::F32Floor):
+ case uint16_t(Op::F32Sqrt):
+ case uint16_t(Op::F32Trunc):
+ case uint16_t(Op::F32Nearest):
+ CHECK(iter.readUnary(ValType::F32, &nothing));
+ case uint16_t(Op::F64Abs):
+ case uint16_t(Op::F64Neg):
+ case uint16_t(Op::F64Ceil):
+ case uint16_t(Op::F64Floor):
+ case uint16_t(Op::F64Sqrt):
+ case uint16_t(Op::F64Trunc):
+ case uint16_t(Op::F64Nearest):
+ CHECK(iter.readUnary(ValType::F64, &nothing));
+ case uint16_t(Op::I32Add):
+ case uint16_t(Op::I32Sub):
+ case uint16_t(Op::I32Mul):
+ case uint16_t(Op::I32DivS):
+ case uint16_t(Op::I32DivU):
+ case uint16_t(Op::I32RemS):
+ case uint16_t(Op::I32RemU):
+ case uint16_t(Op::I32And):
+ case uint16_t(Op::I32Or):
+ case uint16_t(Op::I32Xor):
+ case uint16_t(Op::I32Shl):
+ case uint16_t(Op::I32ShrS):
+ case uint16_t(Op::I32ShrU):
+ case uint16_t(Op::I32Rotl):
+ case uint16_t(Op::I32Rotr):
+ CHECK(iter.readBinary(ValType::I32, &nothing, &nothing));
+ case uint16_t(Op::I64Add):
+ case uint16_t(Op::I64Sub):
+ case uint16_t(Op::I64Mul):
+ case uint16_t(Op::I64DivS):
+ case uint16_t(Op::I64DivU):
+ case uint16_t(Op::I64RemS):
+ case uint16_t(Op::I64RemU):
+ case uint16_t(Op::I64And):
+ case uint16_t(Op::I64Or):
+ case uint16_t(Op::I64Xor):
+ case uint16_t(Op::I64Shl):
+ case uint16_t(Op::I64ShrS):
+ case uint16_t(Op::I64ShrU):
+ case uint16_t(Op::I64Rotl):
+ case uint16_t(Op::I64Rotr):
+ CHECK(iter.readBinary(ValType::I64, &nothing, &nothing));
+ case uint16_t(Op::F32Add):
+ case uint16_t(Op::F32Sub):
+ case uint16_t(Op::F32Mul):
+ case uint16_t(Op::F32Div):
+ case uint16_t(Op::F32Min):
+ case uint16_t(Op::F32Max):
+ case uint16_t(Op::F32CopySign):
+ CHECK(iter.readBinary(ValType::F32, &nothing, &nothing));
+ case uint16_t(Op::F64Add):
+ case uint16_t(Op::F64Sub):
+ case uint16_t(Op::F64Mul):
+ case uint16_t(Op::F64Div):
+ case uint16_t(Op::F64Min):
+ case uint16_t(Op::F64Max):
+ case uint16_t(Op::F64CopySign):
+ CHECK(iter.readBinary(ValType::F64, &nothing, &nothing));
+ case uint16_t(Op::I32Eq):
+ case uint16_t(Op::I32Ne):
+ case uint16_t(Op::I32LtS):
+ case uint16_t(Op::I32LtU):
+ case uint16_t(Op::I32LeS):
+ case uint16_t(Op::I32LeU):
+ case uint16_t(Op::I32GtS):
+ case uint16_t(Op::I32GtU):
+ case uint16_t(Op::I32GeS):
+ case uint16_t(Op::I32GeU):
+ CHECK(iter.readComparison(ValType::I32, &nothing, &nothing));
+ case uint16_t(Op::I64Eq):
+ case uint16_t(Op::I64Ne):
+ case uint16_t(Op::I64LtS):
+ case uint16_t(Op::I64LtU):
+ case uint16_t(Op::I64LeS):
+ case uint16_t(Op::I64LeU):
+ case uint16_t(Op::I64GtS):
+ case uint16_t(Op::I64GtU):
+ case uint16_t(Op::I64GeS):
+ case uint16_t(Op::I64GeU):
+ CHECK(iter.readComparison(ValType::I64, &nothing, &nothing));
+ case uint16_t(Op::F32Eq):
+ case uint16_t(Op::F32Ne):
+ case uint16_t(Op::F32Lt):
+ case uint16_t(Op::F32Le):
+ case uint16_t(Op::F32Gt):
+ case uint16_t(Op::F32Ge):
+ CHECK(iter.readComparison(ValType::F32, &nothing, &nothing));
+ case uint16_t(Op::F64Eq):
+ case uint16_t(Op::F64Ne):
+ case uint16_t(Op::F64Lt):
+ case uint16_t(Op::F64Le):
+ case uint16_t(Op::F64Gt):
+ case uint16_t(Op::F64Ge):
+ CHECK(iter.readComparison(ValType::F64, &nothing, &nothing));
+ case uint16_t(Op::I32Eqz):
+ CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
+ case uint16_t(Op::I64Eqz):
+ case uint16_t(Op::I32WrapI64):
+ CHECK(iter.readConversion(ValType::I64, ValType::I32, &nothing));
+ case uint16_t(Op::I32TruncSF32):
+ case uint16_t(Op::I32TruncUF32):
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
+ case uint16_t(Op::I32TruncSF64):
+ case uint16_t(Op::I32TruncUF64):
+ CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
+ case uint16_t(Op::I64ExtendSI32):
+ case uint16_t(Op::I64ExtendUI32):
+ CHECK(iter.readConversion(ValType::I32, ValType::I64, &nothing));
+ case uint16_t(Op::I64TruncSF32):
+ case uint16_t(Op::I64TruncUF32):
+ CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
+ case uint16_t(Op::I64TruncSF64):
+ case uint16_t(Op::I64TruncUF64):
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
+ case uint16_t(Op::F32ConvertSI32):
+ case uint16_t(Op::F32ConvertUI32):
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK(iter.readConversion(ValType::I32, ValType::F32, &nothing));
+ case uint16_t(Op::F32ConvertSI64):
+ case uint16_t(Op::F32ConvertUI64):
+ CHECK(iter.readConversion(ValType::I64, ValType::F32, &nothing));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK(iter.readConversion(ValType::F64, ValType::F32, &nothing));
+ case uint16_t(Op::F64ConvertSI32):
+ case uint16_t(Op::F64ConvertUI32):
+ CHECK(iter.readConversion(ValType::I32, ValType::F64, &nothing));
+ case uint16_t(Op::F64ConvertSI64):
+ case uint16_t(Op::F64ConvertUI64):
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK(iter.readConversion(ValType::I64, ValType::F64, &nothing));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK(iter.readConversion(ValType::F32, ValType::F64, &nothing));
+ case uint16_t(Op::I32Extend8S):
+ case uint16_t(Op::I32Extend16S):
+ CHECK(iter.readConversion(ValType::I32, ValType::I32, &nothing));
+ case uint16_t(Op::I64Extend8S):
+ case uint16_t(Op::I64Extend16S):
+ case uint16_t(Op::I64Extend32S):
+ CHECK(iter.readConversion(ValType::I64, ValType::I64, &nothing));
+ case uint16_t(Op::I32Load8S):
+ case uint16_t(Op::I32Load8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I32, 1, &addr));
+ }
+ case uint16_t(Op::I32Load16S):
+ case uint16_t(Op::I32Load16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I32, 2, &addr));
+ }
+ case uint16_t(Op::I32Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I32, 4, &addr));
+ }
+ case uint16_t(Op::I64Load8S):
+ case uint16_t(Op::I64Load8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 1, &addr));
+ }
+ case uint16_t(Op::I64Load16S):
+ case uint16_t(Op::I64Load16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 2, &addr));
+ }
+ case uint16_t(Op::I64Load32S):
+ case uint16_t(Op::I64Load32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 4, &addr));
+ }
+ case uint16_t(Op::I64Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::I64, 8, &addr));
+ }
+ case uint16_t(Op::F32Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::F32, 4, &addr));
+ }
+ case uint16_t(Op::F64Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::F64, 8, &addr));
+ }
+ case uint16_t(Op::I32Store8): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I32, 1, &addr, &nothing));
+ }
+ case uint16_t(Op::I32Store16): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I32, 2, &addr, &nothing));
+ }
+ case uint16_t(Op::I32Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I32, 4, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store8): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 1, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store16): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 2, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store32): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 4, &addr, &nothing));
+ }
+ case uint16_t(Op::I64Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::I64, 8, &addr, &nothing));
+ }
+ case uint16_t(Op::F32Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::F32, 4, &addr, &nothing));
+ }
+ case uint16_t(Op::F64Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::F64, 8, &addr, &nothing));
+ }
+ case uint16_t(Op::MemoryGrow):
+ CHECK(iter.readMemoryGrow(&nothing));
+ case uint16_t(Op::MemorySize):
+ CHECK(iter.readMemorySize());
+ case uint16_t(Op::Br): {
+ uint32_t unusedDepth;
+ CHECK(iter.readBr(&unusedDepth, &unusedType, &nothings));
+ }
+ case uint16_t(Op::BrIf): {
+ uint32_t unusedDepth;
+ CHECK(iter.readBrIf(&unusedDepth, &unusedType, &nothings, &nothing));
+ }
+ case uint16_t(Op::BrTable): {
+ Uint32Vector unusedDepths;
+ uint32_t unusedDefault;
+ CHECK(iter.readBrTable(&unusedDepths, &unusedDefault, &unusedType,
+ &nothings, &nothing));
+ }
+ case uint16_t(Op::Return):
+ CHECK(iter.readReturn(&nothings));
+ case uint16_t(Op::Unreachable):
+ CHECK(iter.readUnreachable());
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::GcPrefix): {
+ switch (op.b1) {
+ case uint32_t(GcOp::StructNew): {
+ if (!env.gcTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedUint;
+ NothingVector unusedArgs;
+ CHECK(iter.readStructNew(&unusedUint, &unusedArgs));
+ }
+ case uint32_t(GcOp::StructGet): {
+ if (!env.gcTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readStructGet(&unusedUint1, &unusedUint2, &nothing));
+ }
+ case uint32_t(GcOp::StructSet): {
+ if (!env.gcTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedUint1, unusedUint2;
+ CHECK(iter.readStructSet(&unusedUint1, &unusedUint2, &nothing,
+ &nothing));
+ }
+ case uint32_t(GcOp::StructNarrow): {
+ if (!env.gcTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ ValType unusedTy, unusedTy2;
+ CHECK(iter.readStructNarrow(&unusedTy, &unusedTy2, &nothing));
+ }
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+#endif
+
+#ifdef ENABLE_WASM_SIMD
+ case uint16_t(Op::SimdPrefix): {
+ if (!env.v128Enabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t noIndex;
+ switch (op.b1) {
+ case uint32_t(SimdOp::I8x16ExtractLaneS):
+ case uint32_t(SimdOp::I8x16ExtractLaneU):
+ CHECK(iter.readExtractLane(ValType::I32, 16, &noIndex, &nothing));
+ case uint32_t(SimdOp::I16x8ExtractLaneS):
+ case uint32_t(SimdOp::I16x8ExtractLaneU):
+ CHECK(iter.readExtractLane(ValType::I32, 8, &noIndex, &nothing));
+ case uint32_t(SimdOp::I32x4ExtractLane):
+ CHECK(iter.readExtractLane(ValType::I32, 4, &noIndex, &nothing));
+ case uint32_t(SimdOp::I64x2ExtractLane):
+ CHECK(iter.readExtractLane(ValType::I64, 2, &noIndex, &nothing));
+ case uint32_t(SimdOp::F32x4ExtractLane):
+ CHECK(iter.readExtractLane(ValType::F32, 4, &noIndex, &nothing));
+ case uint32_t(SimdOp::F64x2ExtractLane):
+ CHECK(iter.readExtractLane(ValType::F64, 2, &noIndex, &nothing));
+
+ case uint32_t(SimdOp::I8x16Splat):
+ case uint32_t(SimdOp::I16x8Splat):
+ case uint32_t(SimdOp::I32x4Splat):
+ CHECK(iter.readConversion(ValType::I32, ValType::V128, &nothing));
+ case uint32_t(SimdOp::I64x2Splat):
+ CHECK(iter.readConversion(ValType::I64, ValType::V128, &nothing));
+ case uint32_t(SimdOp::F32x4Splat):
+ CHECK(iter.readConversion(ValType::F32, ValType::V128, &nothing));
+ case uint32_t(SimdOp::F64x2Splat):
+ CHECK(iter.readConversion(ValType::F64, ValType::V128, &nothing));
+
+ case uint32_t(SimdOp::I8x16AnyTrue):
+ case uint32_t(SimdOp::I8x16AllTrue):
+ case uint32_t(SimdOp::I16x8AnyTrue):
+ case uint32_t(SimdOp::I16x8AllTrue):
+ case uint32_t(SimdOp::I32x4AnyTrue):
+ case uint32_t(SimdOp::I32x4AllTrue):
+ case uint32_t(SimdOp::I8x16Bitmask):
+ case uint32_t(SimdOp::I16x8Bitmask):
+ case uint32_t(SimdOp::I32x4Bitmask):
+ CHECK(iter.readConversion(ValType::V128, ValType::I32, &nothing));
+
+ case uint32_t(SimdOp::I8x16ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I32, 16, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::I16x8ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I32, 8, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::I32x4ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I32, 4, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::I64x2ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::I64, 2, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::F32x4ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::F32, 4, &noIndex, &nothing,
+ &nothing));
+ case uint32_t(SimdOp::F64x2ReplaceLane):
+ CHECK(iter.readReplaceLane(ValType::F64, 2, &noIndex, &nothing,
+ &nothing));
+
+ case uint32_t(SimdOp::I8x16Eq):
+ case uint32_t(SimdOp::I8x16Ne):
+ case uint32_t(SimdOp::I8x16LtS):
+ case uint32_t(SimdOp::I8x16LtU):
+ case uint32_t(SimdOp::I8x16GtS):
+ case uint32_t(SimdOp::I8x16GtU):
+ case uint32_t(SimdOp::I8x16LeS):
+ case uint32_t(SimdOp::I8x16LeU):
+ case uint32_t(SimdOp::I8x16GeS):
+ case uint32_t(SimdOp::I8x16GeU):
+ case uint32_t(SimdOp::I16x8Eq):
+ case uint32_t(SimdOp::I16x8Ne):
+ case uint32_t(SimdOp::I16x8LtS):
+ case uint32_t(SimdOp::I16x8LtU):
+ case uint32_t(SimdOp::I16x8GtS):
+ case uint32_t(SimdOp::I16x8GtU):
+ case uint32_t(SimdOp::I16x8LeS):
+ case uint32_t(SimdOp::I16x8LeU):
+ case uint32_t(SimdOp::I16x8GeS):
+ case uint32_t(SimdOp::I16x8GeU):
+ case uint32_t(SimdOp::I32x4Eq):
+ case uint32_t(SimdOp::I32x4Ne):
+ case uint32_t(SimdOp::I32x4LtS):
+ case uint32_t(SimdOp::I32x4LtU):
+ case uint32_t(SimdOp::I32x4GtS):
+ case uint32_t(SimdOp::I32x4GtU):
+ case uint32_t(SimdOp::I32x4LeS):
+ case uint32_t(SimdOp::I32x4LeU):
+ case uint32_t(SimdOp::I32x4GeS):
+ case uint32_t(SimdOp::I32x4GeU):
+ case uint32_t(SimdOp::F32x4Eq):
+ case uint32_t(SimdOp::F32x4Ne):
+ case uint32_t(SimdOp::F32x4Lt):
+ case uint32_t(SimdOp::F32x4Gt):
+ case uint32_t(SimdOp::F32x4Le):
+ case uint32_t(SimdOp::F32x4Ge):
+ case uint32_t(SimdOp::F64x2Eq):
+ case uint32_t(SimdOp::F64x2Ne):
+ case uint32_t(SimdOp::F64x2Lt):
+ case uint32_t(SimdOp::F64x2Gt):
+ case uint32_t(SimdOp::F64x2Le):
+ case uint32_t(SimdOp::F64x2Ge):
+ case uint32_t(SimdOp::V128And):
+ case uint32_t(SimdOp::V128Or):
+ case uint32_t(SimdOp::V128Xor):
+ case uint32_t(SimdOp::V128AndNot):
+ case uint32_t(SimdOp::I8x16AvgrU):
+ case uint32_t(SimdOp::I16x8AvgrU):
+ case uint32_t(SimdOp::I8x16Add):
+ case uint32_t(SimdOp::I8x16AddSaturateS):
+ case uint32_t(SimdOp::I8x16AddSaturateU):
+ case uint32_t(SimdOp::I8x16Sub):
+ case uint32_t(SimdOp::I8x16SubSaturateS):
+ case uint32_t(SimdOp::I8x16SubSaturateU):
+ case uint32_t(SimdOp::I8x16MinS):
+ case uint32_t(SimdOp::I8x16MinU):
+ case uint32_t(SimdOp::I8x16MaxS):
+ case uint32_t(SimdOp::I8x16MaxU):
+ case uint32_t(SimdOp::I16x8Add):
+ case uint32_t(SimdOp::I16x8AddSaturateS):
+ case uint32_t(SimdOp::I16x8AddSaturateU):
+ case uint32_t(SimdOp::I16x8Sub):
+ case uint32_t(SimdOp::I16x8SubSaturateS):
+ case uint32_t(SimdOp::I16x8SubSaturateU):
+ case uint32_t(SimdOp::I16x8Mul):
+ case uint32_t(SimdOp::I16x8MinS):
+ case uint32_t(SimdOp::I16x8MinU):
+ case uint32_t(SimdOp::I16x8MaxS):
+ case uint32_t(SimdOp::I16x8MaxU):
+ case uint32_t(SimdOp::I32x4Add):
+ case uint32_t(SimdOp::I32x4Sub):
+ case uint32_t(SimdOp::I32x4Mul):
+ case uint32_t(SimdOp::I32x4MinS):
+ case uint32_t(SimdOp::I32x4MinU):
+ case uint32_t(SimdOp::I32x4MaxS):
+ case uint32_t(SimdOp::I32x4MaxU):
+ case uint32_t(SimdOp::I64x2Add):
+ case uint32_t(SimdOp::I64x2Sub):
+ case uint32_t(SimdOp::I64x2Mul):
+ case uint32_t(SimdOp::F32x4Add):
+ case uint32_t(SimdOp::F32x4Sub):
+ case uint32_t(SimdOp::F32x4Mul):
+ case uint32_t(SimdOp::F32x4Div):
+ case uint32_t(SimdOp::F32x4Min):
+ case uint32_t(SimdOp::F32x4Max):
+ case uint32_t(SimdOp::F64x2Add):
+ case uint32_t(SimdOp::F64x2Sub):
+ case uint32_t(SimdOp::F64x2Mul):
+ case uint32_t(SimdOp::F64x2Div):
+ case uint32_t(SimdOp::F64x2Min):
+ case uint32_t(SimdOp::F64x2Max):
+ case uint32_t(SimdOp::I8x16NarrowSI16x8):
+ case uint32_t(SimdOp::I8x16NarrowUI16x8):
+ case uint32_t(SimdOp::I16x8NarrowSI32x4):
+ case uint32_t(SimdOp::I16x8NarrowUI32x4):
+ case uint32_t(SimdOp::V8x16Swizzle):
+ case uint32_t(SimdOp::F32x4PMax):
+ case uint32_t(SimdOp::F32x4PMin):
+ case uint32_t(SimdOp::F64x2PMax):
+ case uint32_t(SimdOp::F64x2PMin):
+ case uint32_t(SimdOp::I32x4DotSI16x8):
+ CHECK(iter.readBinary(ValType::V128, &nothing, &nothing));
+
+ case uint32_t(SimdOp::I8x16Neg):
+ case uint32_t(SimdOp::I16x8Neg):
+ case uint32_t(SimdOp::I16x8WidenLowSI8x16):
+ case uint32_t(SimdOp::I16x8WidenHighSI8x16):
+ case uint32_t(SimdOp::I16x8WidenLowUI8x16):
+ case uint32_t(SimdOp::I16x8WidenHighUI8x16):
+ case uint32_t(SimdOp::I32x4Neg):
+ case uint32_t(SimdOp::I32x4WidenLowSI16x8):
+ case uint32_t(SimdOp::I32x4WidenHighSI16x8):
+ case uint32_t(SimdOp::I32x4WidenLowUI16x8):
+ case uint32_t(SimdOp::I32x4WidenHighUI16x8):
+ case uint32_t(SimdOp::I32x4TruncSSatF32x4):
+ case uint32_t(SimdOp::I32x4TruncUSatF32x4):
+ case uint32_t(SimdOp::I64x2Neg):
+ case uint32_t(SimdOp::F32x4Abs):
+ case uint32_t(SimdOp::F32x4Neg):
+ case uint32_t(SimdOp::F32x4Sqrt):
+ case uint32_t(SimdOp::F32x4ConvertSI32x4):
+ case uint32_t(SimdOp::F32x4ConvertUI32x4):
+ case uint32_t(SimdOp::F64x2Abs):
+ case uint32_t(SimdOp::F64x2Neg):
+ case uint32_t(SimdOp::F64x2Sqrt):
+ case uint32_t(SimdOp::V128Not):
+ case uint32_t(SimdOp::I8x16Abs):
+ case uint32_t(SimdOp::I16x8Abs):
+ case uint32_t(SimdOp::I32x4Abs):
+ case uint32_t(SimdOp::F32x4Ceil):
+ case uint32_t(SimdOp::F32x4Floor):
+ case uint32_t(SimdOp::F32x4Trunc):
+ case uint32_t(SimdOp::F32x4Nearest):
+ case uint32_t(SimdOp::F64x2Ceil):
+ case uint32_t(SimdOp::F64x2Floor):
+ case uint32_t(SimdOp::F64x2Trunc):
+ case uint32_t(SimdOp::F64x2Nearest):
+ CHECK(iter.readUnary(ValType::V128, &nothing));
+
+ case uint32_t(SimdOp::I8x16Shl):
+ case uint32_t(SimdOp::I8x16ShrS):
+ case uint32_t(SimdOp::I8x16ShrU):
+ case uint32_t(SimdOp::I16x8Shl):
+ case uint32_t(SimdOp::I16x8ShrS):
+ case uint32_t(SimdOp::I16x8ShrU):
+ case uint32_t(SimdOp::I32x4Shl):
+ case uint32_t(SimdOp::I32x4ShrS):
+ case uint32_t(SimdOp::I32x4ShrU):
+ case uint32_t(SimdOp::I64x2Shl):
+ case uint32_t(SimdOp::I64x2ShrS):
+ case uint32_t(SimdOp::I64x2ShrU):
+ CHECK(iter.readVectorShift(&nothing, &nothing));
+
+ case uint32_t(SimdOp::V128Bitselect):
+ CHECK(iter.readVectorSelect(&nothing, &nothing, &nothing));
+
+ case uint32_t(SimdOp::V8x16Shuffle): {
+ V128 mask;
+ CHECK(iter.readVectorShuffle(&nothing, &nothing, &mask));
+ }
+
+ case uint32_t(SimdOp::V128Const): {
+ V128 noVector;
+ CHECK(iter.readV128Const(&noVector));
+ }
+
+ case uint32_t(SimdOp::V128Load): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoad(ValType::V128, 16, &addr));
+ }
+
+ case uint32_t(SimdOp::V8x16LoadSplat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(1, &addr));
+ }
+
+ case uint32_t(SimdOp::V16x8LoadSplat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(2, &addr));
+ }
+
+ case uint32_t(SimdOp::V32x4LoadSplat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(4, &addr));
+ }
+
+ case uint32_t(SimdOp::V64x2LoadSplat): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(8, &addr));
+ }
+
+ case uint32_t(SimdOp::I16x8LoadS8x8):
+ case uint32_t(SimdOp::I16x8LoadU8x8): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadExtend(&addr));
+ }
+
+ case uint32_t(SimdOp::I32x4LoadS16x4):
+ case uint32_t(SimdOp::I32x4LoadU16x4): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadExtend(&addr));
+ }
+
+ case uint32_t(SimdOp::I64x2LoadS32x2):
+ case uint32_t(SimdOp::I64x2LoadU32x2): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadExtend(&addr));
+ }
+
+ case uint32_t(SimdOp::V128Store): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readStore(ValType::V128, 16, &addr, &nothing));
+ }
+
+ case uint32_t(SimdOp::V128Load32Zero): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(4, &addr));
+ }
+
+ case uint32_t(SimdOp::V128Load64Zero): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readLoadSplat(8, &addr));
+ }
+
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+#endif // ENABLE_WASM_SIMD
+
+ case uint16_t(Op::MiscPrefix): {
+ switch (op.b1) {
+ case uint32_t(MiscOp::I32TruncSSatF32):
+ case uint32_t(MiscOp::I32TruncUSatF32):
+ CHECK(iter.readConversion(ValType::F32, ValType::I32, &nothing));
+ case uint32_t(MiscOp::I32TruncSSatF64):
+ case uint32_t(MiscOp::I32TruncUSatF64):
+ CHECK(iter.readConversion(ValType::F64, ValType::I32, &nothing));
+ case uint32_t(MiscOp::I64TruncSSatF32):
+ case uint32_t(MiscOp::I64TruncUSatF32):
+ CHECK(iter.readConversion(ValType::F32, ValType::I64, &nothing));
+ case uint32_t(MiscOp::I64TruncSSatF64):
+ case uint32_t(MiscOp::I64TruncUSatF64):
+ CHECK(iter.readConversion(ValType::F64, ValType::I64, &nothing));
+ case uint32_t(MiscOp::MemCopy): {
+ uint32_t unusedDestMemIndex;
+ uint32_t unusedSrcMemIndex;
+ CHECK(iter.readMemOrTableCopy(/*isMem=*/true, &unusedDestMemIndex,
+ &nothing, &unusedSrcMemIndex,
+ &nothing, &nothing));
+ }
+ case uint32_t(MiscOp::DataDrop): {
+ uint32_t unusedSegIndex;
+ CHECK(iter.readDataOrElemDrop(/*isData=*/true, &unusedSegIndex));
+ }
+ case uint32_t(MiscOp::MemFill):
+ CHECK(iter.readMemFill(&nothing, &nothing, &nothing));
+ case uint32_t(MiscOp::MemInit): {
+ uint32_t unusedSegIndex;
+ uint32_t unusedTableIndex;
+ CHECK(iter.readMemOrTableInit(/*isMem=*/true, &unusedSegIndex,
+ &unusedTableIndex, &nothing, &nothing,
+ &nothing));
+ }
+ case uint32_t(MiscOp::TableCopy): {
+ uint32_t unusedDestTableIndex;
+ uint32_t unusedSrcTableIndex;
+ CHECK(iter.readMemOrTableCopy(
+ /*isMem=*/false, &unusedDestTableIndex, &nothing,
+ &unusedSrcTableIndex, &nothing, &nothing));
+ }
+ case uint32_t(MiscOp::ElemDrop): {
+ uint32_t unusedSegIndex;
+ CHECK(iter.readDataOrElemDrop(/*isData=*/false, &unusedSegIndex));
+ }
+ case uint32_t(MiscOp::TableInit): {
+ uint32_t unusedSegIndex;
+ uint32_t unusedTableIndex;
+ CHECK(iter.readMemOrTableInit(/*isMem=*/false, &unusedSegIndex,
+ &unusedTableIndex, &nothing, &nothing,
+ &nothing));
+ }
+#ifdef ENABLE_WASM_REFTYPES
+ case uint32_t(MiscOp::TableFill): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableFill(&unusedTableIndex, &nothing, &nothing,
+ &nothing));
+ }
+ case uint32_t(MiscOp::TableGrow): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableGrow(&unusedTableIndex, &nothing, &nothing));
+ }
+ case uint32_t(MiscOp::TableSize): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedTableIndex;
+ CHECK(iter.readTableSize(&unusedTableIndex));
+ }
+#endif
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint16_t(Op::RefAsNonNull): {
+ if (!env.functionReferencesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readRefAsNonNull(&nothing));
+ }
+ case uint16_t(Op::BrOnNull): {
+ if (!env.functionReferencesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedDepth;
+ CHECK(
+ iter.readBrOnNull(&unusedDepth, &unusedType, &nothings, &nothing));
+ }
+#endif
+#ifdef ENABLE_WASM_GC
+ case uint16_t(Op::RefEq): {
+ if (!env.gcTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readComparison(RefType::eq(), &nothing, &nothing));
+ }
+#endif
+#ifdef ENABLE_WASM_REFTYPES
+ case uint16_t(Op::RefFunc): {
+ uint32_t unusedIndex;
+ CHECK(iter.readRefFunc(&unusedIndex));
+ }
+ case uint16_t(Op::RefNull): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readRefNull());
+ }
+ case uint16_t(Op::RefIsNull): {
+ if (!env.refTypesEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ Nothing nothing;
+ CHECK(iter.readRefIsNull(&nothing));
+ }
+#endif
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case uint16_t(Op::Try):
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ CHECK(iter.readTry(&unusedType));
+ case uint16_t(Op::Catch): {
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ LabelKind unusedKind;
+ uint32_t unusedIndex;
+ CHECK(iter.readCatch(&unusedKind, &unusedIndex, &unusedType,
+ &unusedType, &nothings));
+ }
+ case uint16_t(Op::Throw): {
+ if (!env.exceptionsEnabled()) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ uint32_t unusedIndex;
+ CHECK(iter.readThrow(&unusedIndex, &nothings));
+ }
+#endif
+ case uint16_t(Op::ThreadPrefix): {
+ if (env.sharedMemoryEnabled() == Shareable::False) {
+ return iter.unrecognizedOpcode(&op);
+ }
+ switch (op.b1) {
+ case uint32_t(ThreadOp::Wake): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readWake(&addr, &nothing));
+ }
+ case uint32_t(ThreadOp::I32Wait): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readWait(&addr, ValType::I32, 4, &nothing, &nothing));
+ }
+ case uint32_t(ThreadOp::I64Wait): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readWait(&addr, ValType::I64, 8, &nothing, &nothing));
+ }
+ case uint32_t(ThreadOp::Fence): {
+ CHECK(iter.readFence());
+ }
+ case uint32_t(ThreadOp::I32AtomicLoad): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I32, 4));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 8));
+ }
+ case uint32_t(ThreadOp::I32AtomicLoad8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I32, 1));
+ }
+ case uint32_t(ThreadOp::I32AtomicLoad16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I32, 2));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 1));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 2));
+ }
+ case uint32_t(ThreadOp::I64AtomicLoad32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicLoad(&addr, ValType::I64, 4));
+ }
+ case uint32_t(ThreadOp::I32AtomicStore): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I32, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 8, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicStore8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I32, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicStore16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I32, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicStore32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicStore(&addr, ValType::I64, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicAdd):
+ case uint32_t(ThreadOp::I32AtomicSub):
+ case uint32_t(ThreadOp::I32AtomicAnd):
+ case uint32_t(ThreadOp::I32AtomicOr):
+ case uint32_t(ThreadOp::I32AtomicXor):
+ case uint32_t(ThreadOp::I32AtomicXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I32, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd):
+ case uint32_t(ThreadOp::I64AtomicSub):
+ case uint32_t(ThreadOp::I64AtomicAnd):
+ case uint32_t(ThreadOp::I64AtomicOr):
+ case uint32_t(ThreadOp::I64AtomicXor):
+ case uint32_t(ThreadOp::I64AtomicXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 8, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicAdd8U):
+ case uint32_t(ThreadOp::I32AtomicSub8U):
+ case uint32_t(ThreadOp::I32AtomicAnd8U):
+ case uint32_t(ThreadOp::I32AtomicOr8U):
+ case uint32_t(ThreadOp::I32AtomicXor8U):
+ case uint32_t(ThreadOp::I32AtomicXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I32, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicAdd16U):
+ case uint32_t(ThreadOp::I32AtomicSub16U):
+ case uint32_t(ThreadOp::I32AtomicAnd16U):
+ case uint32_t(ThreadOp::I32AtomicOr16U):
+ case uint32_t(ThreadOp::I32AtomicXor16U):
+ case uint32_t(ThreadOp::I32AtomicXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I32, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd8U):
+ case uint32_t(ThreadOp::I64AtomicSub8U):
+ case uint32_t(ThreadOp::I64AtomicAnd8U):
+ case uint32_t(ThreadOp::I64AtomicOr8U):
+ case uint32_t(ThreadOp::I64AtomicXor8U):
+ case uint32_t(ThreadOp::I64AtomicXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 1, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd16U):
+ case uint32_t(ThreadOp::I64AtomicSub16U):
+ case uint32_t(ThreadOp::I64AtomicAnd16U):
+ case uint32_t(ThreadOp::I64AtomicOr16U):
+ case uint32_t(ThreadOp::I64AtomicXor16U):
+ case uint32_t(ThreadOp::I64AtomicXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 2, &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicAdd32U):
+ case uint32_t(ThreadOp::I64AtomicSub32U):
+ case uint32_t(ThreadOp::I64AtomicAnd32U):
+ case uint32_t(ThreadOp::I64AtomicOr32U):
+ case uint32_t(ThreadOp::I64AtomicXor32U):
+ case uint32_t(ThreadOp::I64AtomicXchg32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicRMW(&addr, ValType::I64, 4, &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicCmpXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 4, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 8, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicCmpXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 1, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I32AtomicCmpXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I32, 2, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg8U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 1, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg16U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 2, &nothing,
+ &nothing));
+ }
+ case uint32_t(ThreadOp::I64AtomicCmpXchg32U): {
+ LinearMemoryAddress<Nothing> addr;
+ CHECK(iter.readAtomicCmpXchg(&addr, ValType::I64, 4, &nothing,
+ &nothing));
+ }
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ break;
+ }
+ case uint16_t(Op::MozPrefix):
+ return iter.unrecognizedOpcode(&op);
+ default:
+ return iter.unrecognizedOpcode(&op);
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+
+#undef CHECK
+#undef CHECK_SIMD_EXPERIMENTAL
+}
+
+bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
+ uint32_t funcIndex, uint32_t bodySize,
+ Decoder& d) {
+ ValTypeVector locals;
+ if (!locals.appendAll(env.funcs[funcIndex].type->args())) {
+ return false;
+ }
+
+ const uint8_t* bodyBegin = d.currentPosition();
+
+ if (!DecodeLocalEntries(d, env.types, env.features, &locals)) {
+ return false;
+ }
+
+ if (!DecodeFunctionBodyExprs(env, funcIndex, locals, bodyBegin + bodySize,
+ &d)) {
+ return false;
+ }
+
+ return true;
+}
+
+// Section macros.
+
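+// The module preamble is the 4-byte magic number "\0asm" followed by the
+// 4-byte binary version; both are checked before any sections are decoded.
+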
+static bool DecodePreamble(Decoder& d) {
+ if (d.bytesRemain() > MaxModuleBytes) {
+ return d.fail("module too big");
+ }
+
+ uint32_t u32;
+ if (!d.readFixedU32(&u32) || u32 != MagicNumber) {
+ return d.fail("failed to match magic number");
+ }
+
+ if (!d.readFixedU32(&u32) || u32 != EncodingVersion) {
+ return d.failf("binary version 0x%" PRIx32
+ " does not match expected version 0x%" PRIx32,
+ u32, EncodingVersion);
+ }
+
+ return true;
+}
+
+enum class TypeState { None, Struct, ForwardStruct, Func };
+
+typedef Vector<TypeState, 0, SystemAllocPolicy> TypeStateVector;
+
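+// TypeState tracks what each type index has been resolved to so far. A ref
+// to a not-yet-decoded index is recorded as ForwardStruct and must later be
+// defined as a struct type; a forward reference to a function type is an
+// error.
+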
+static bool ValidateTypeState(Decoder& d, TypeStateVector* typeState,
+ ValType type) {
+ if (!type.isTypeIndex()) {
+ return true;
+ }
+
+ uint32_t refTypeIndex = type.refType().typeIndex();
+ switch ((*typeState)[refTypeIndex]) {
+ case TypeState::None:
+ (*typeState)[refTypeIndex] = TypeState::ForwardStruct;
+ break;
+ case TypeState::Struct:
+ case TypeState::ForwardStruct:
+ break;
+ case TypeState::Func:
+ return d.fail("ref does not reference a struct type");
+ }
+ return true;
+}
+
+#ifdef WASM_PRIVATE_REFTYPES
+static bool FuncTypeIsJSCompatible(Decoder& d, const FuncType& ft) {
+ if (ft.exposesTypeIndex()) {
+ return d.fail("cannot expose indexed reference type");
+ }
+ return true;
+}
+#endif
+
+static bool DecodeTypeVector(Decoder& d, ModuleEnvironment* env,
+ TypeStateVector* typeState, uint32_t count,
+ ValTypeVector* types) {
+ if (!types->resize(count)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < count; i++) {
+ if (!d.readValType(env->types.length(), env->features, &(*types)[i])) {
+ return false;
+ }
+ if (!ValidateTypeState(d, typeState, (*types)[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool DecodeFuncType(Decoder& d, ModuleEnvironment* env,
+ TypeStateVector* typeState, uint32_t typeIndex) {
+ uint32_t numArgs;
+ if (!d.readVarU32(&numArgs)) {
+ return d.fail("bad number of function args");
+ }
+ if (numArgs > MaxParams) {
+ return d.fail("too many arguments in signature");
+ }
+ ValTypeVector args;
+ if (!DecodeTypeVector(d, env, typeState, numArgs, &args)) {
+ return false;
+ }
+
+ uint32_t numResults;
+ if (!d.readVarU32(&numResults)) {
+ return d.fail("bad number of function returns");
+ }
+ if (numResults > env->funcMaxResults()) {
+ return d.fail("too many returns in signature");
+ }
+ ValTypeVector results;
+ if (!DecodeTypeVector(d, env, typeState, numResults, &results)) {
+ return false;
+ }
+
+ if ((*typeState)[typeIndex] != TypeState::None) {
+ return d.fail("function type entry referenced as struct");
+ }
+
+ env->types[typeIndex] =
+ TypeDef(FuncType(std::move(args), std::move(results)));
+ (*typeState)[typeIndex] = TypeState::Func;
+
+ return true;
+}
+
+static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
+ TypeStateVector* typeState, uint32_t typeIndex) {
+ if (!env->gcTypesEnabled()) {
+ return d.fail("Structure types not enabled");
+ }
+
+ if ((*typeState)[typeIndex] != TypeState::None &&
+ (*typeState)[typeIndex] != TypeState::ForwardStruct) {
+ return d.fail("struct type entry referenced as function");
+ }
+
+ uint32_t numFields;
+ if (!d.readVarU32(&numFields)) {
+ return d.fail("Bad number of fields");
+ }
+
+ if (numFields > MaxStructFields) {
+ return d.fail("too many fields in struct");
+ }
+
+ StructFieldVector fields;
+ if (!fields.resize(numFields)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numFields; i++) {
+ if (!d.readValType(env->types.length(), env->features, &fields[i].type)) {
+ return false;
+ }
+
+ uint8_t flags;
+ if (!d.readFixedU8(&flags)) {
+ return d.fail("expected flag");
+ }
+ if ((flags & ~uint8_t(FieldFlags::AllowedMask)) != 0) {
+ return d.fail("garbage flag bits");
+ }
+ fields[i].isMutable = flags & uint8_t(FieldFlags::Mutable);
+
+ if (!ValidateTypeState(d, typeState, fields[i].type)) {
+ return false;
+ }
+ }
+
+ StructType structType = StructType(std::move(fields));
+
+ if (!structType.computeLayout()) {
+ return d.fail("Struct type too large");
+ }
+
+ env->types[typeIndex] = TypeDef(std::move(structType));
+ (*typeState)[typeIndex] = TypeState::Struct;
+
+ return true;
+}
+
+static bool DecodeTypeSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Type, env, &range, "type")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numTypes;
+ if (!d.readVarU32(&numTypes)) {
+ return d.fail("expected number of types");
+ }
+
+ if (numTypes > MaxTypes) {
+ return d.fail("too many types");
+ }
+
+ if (!env->types.resize(numTypes) || !env->typeIds.resize(numTypes)) {
+ return false;
+ }
+
+ TypeStateVector typeState;
+ if (!typeState.appendN(TypeState::None, numTypes)) {
+ return false;
+ }
+
+ for (uint32_t typeIndex = 0; typeIndex < numTypes; typeIndex++) {
+ uint8_t form;
+ if (!d.readFixedU8(&form)) {
+ return d.fail("expected type form");
+ }
+
+ switch (form) {
+ case uint8_t(TypeCode::Func):
+ if (!DecodeFuncType(d, env, &typeState, typeIndex)) {
+ return false;
+ }
+ break;
+ case uint8_t(TypeCode::Struct):
+ if (!DecodeStructType(d, env, &typeState, typeIndex)) {
+ return false;
+ }
+ break;
+ default:
+ return d.fail("expected type form");
+ }
+ }
+
+ return d.finishSection(*range, "type");
+}
+
+static UniqueChars DecodeName(Decoder& d) {
+ uint32_t numBytes;
+ if (!d.readVarU32(&numBytes)) {
+ return nullptr;
+ }
+
+ if (numBytes > MaxStringBytes) {
+ return nullptr;
+ }
+
+ const uint8_t* bytes;
+ if (!d.readBytes(numBytes, &bytes)) {
+ return nullptr;
+ }
+
+ if (!IsUtf8(AsChars(Span(bytes, numBytes)))) {
+ return nullptr;
+ }
+
+ UniqueChars name(js_pod_malloc<char>(numBytes + 1));
+ if (!name) {
+ return nullptr;
+ }
+
+ memcpy(name.get(), bytes, numBytes);
+ name[numBytes] = '\0';
+
+ return name;
+}
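+
+// A minimal stand-alone sketch of the same length-prefixed name convention,
+// written against plain C++ for illustration only; the helper name is
+// hypothetical, the length is assumed to fit in a single LEB128 byte, and the
+// UTF-8 check performed by DecodeName() above is elided.
+#include <cstddef>
+#include <cstdint>
+#include <string>
+
+static bool ExampleReadShortName(const uint8_t*& p, const uint8_t* end,
+                                 std::string* out) {
+  if (p == end || (*p & 0x80)) {
+    return false;  // out of bytes, or the name is 128 bytes or longer
+  }
+  std::size_t numBytes = *p++;
+  if (static_cast<std::size_t>(end - p) < numBytes) {
+    return false;  // name extends past the end of the buffer
+  }
+  out->assign(reinterpret_cast<const char*>(p), numBytes);
+  p += numBytes;
+  return true;
+}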
+
+static bool DecodeFuncTypeIndex(Decoder& d, const TypeContext& types,
+ uint32_t* funcTypeIndex) {
+ if (!d.readVarU32(funcTypeIndex)) {
+ return d.fail("expected signature index");
+ }
+
+ if (*funcTypeIndex >= types.length()) {
+ return d.fail("signature index out of range");
+ }
+
+ const TypeDef& def = types[*funcTypeIndex];
+
+ if (!def.isFuncType()) {
+ return d.fail("signature index references non-signature");
+ }
+
+ return true;
+}
+
+static bool DecodeLimits(Decoder& d, Limits* limits,
+ Shareable allowShared = Shareable::False) {
+ uint8_t flags;
+ if (!d.readFixedU8(&flags)) {
+ return d.fail("expected flags");
+ }
+
+ uint8_t mask = allowShared == Shareable::True
+ ? uint8_t(MemoryMasks::AllowShared)
+ : uint8_t(MemoryMasks::AllowUnshared);
+
+ if (flags & ~uint8_t(mask)) {
+ return d.failf("unexpected bits set in flags: %" PRIu32,
+ uint32_t(flags & ~uint8_t(mask)));
+ }
+
+ uint32_t initial;
+ if (!d.readVarU32(&initial)) {
+ return d.fail("expected initial length");
+ }
+ limits->initial = initial;
+
+ if (flags & uint8_t(MemoryTableFlags::HasMaximum)) {
+ uint32_t maximum;
+ if (!d.readVarU32(&maximum)) {
+ return d.fail("expected maximum length");
+ }
+
+ if (limits->initial > maximum) {
+ return d.failf(
+ "memory size minimum must not be greater than maximum; "
+ "maximum length %" PRIu32 " is less than initial length %" PRIu64,
+ maximum, limits->initial);
+ }
+
+ limits->maximum.emplace(uint64_t(maximum));
+ }
+
+ limits->shared = Shareable::False;
+
+ if (allowShared == Shareable::True) {
+ if ((flags & uint8_t(MemoryTableFlags::IsShared)) &&
+ !(flags & uint8_t(MemoryTableFlags::HasMaximum))) {
+ return d.fail("maximum length required for shared memory");
+ }
+
+ limits->shared = (flags & uint8_t(MemoryTableFlags::IsShared))
+ ? Shareable::True
+ : Shareable::False;
+ }
+
+ return true;
+}
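+
+// Illustrative byte layout for the limits field decoded by DecodeLimits()
+// above, assuming the standard LEB128 integer encoding; for example, limits
+// with a minimum of 16 and a maximum of 256:
+//
+//   0x01        flags    (HasMaximum set, not shared)
+//   0x10        initial  = 16
+//   0x80 0x02   maximum  = 256  (LEB128: 0x00 | (0x02 << 7))
+static const uint8_t ExampleLimitsBytes[] = {0x01, 0x10, 0x80, 0x02};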
+
+static bool DecodeTableTypeAndLimits(Decoder& d, const FeatureArgs& features,
+ const TypeContext& types,
+ TableDescVector* tables) {
+ RefType tableElemType;
+ if (!d.readRefType(types, features.withRefTypes(true), &tableElemType)) {
+ return false;
+ }
+ if (!features.refTypes && !tableElemType.isFunc()) {
+ return d.fail("expected 'funcref' element type");
+ }
+ if (!tableElemType.isNullable()) {
+ return d.fail("non-nullable references not supported in tables");
+ }
+
+ Limits limits;
+ if (!DecodeLimits(d, &limits)) {
+ return false;
+ }
+
+ // If there's a maximum, check it is in range. The check to exclude
+ // initial > maximum is carried out by the DecodeLimits call above, so
+ // we don't repeat it here.
+ if (limits.initial > MaxTableLimitField ||
+ ((limits.maximum.isSome() &&
+ limits.maximum.value() > MaxTableLimitField))) {
+ return d.fail("too many table elements");
+ }
+
+ if (tables->length() >= MaxTables) {
+ return d.fail("too many tables");
+ }
+
+ // The rest of the runtime expects table limits to be within a 32-bit range.
+ static_assert(MaxTableLimitField <= UINT32_MAX, "invariant");
+ uint32_t initialLength = uint32_t(limits.initial);
+ Maybe<uint32_t> maximumLength;
+ if (limits.maximum) {
+ maximumLength = Some(uint32_t(*limits.maximum));
+ }
+
+ return tables->emplaceBack(tableElemType, initialLength, maximumLength,
+ /* isAsmJS */ false);
+}
+
+static bool GlobalIsJSCompatible(Decoder& d, ValType type) {
+ switch (type.kind()) {
+ case ValType::I32:
+ case ValType::F32:
+ case ValType::F64:
+ case ValType::I64:
+ case ValType::V128:
+ break;
+ case ValType::Ref:
+ switch (type.refTypeKind()) {
+ case RefType::Func:
+ case RefType::Extern:
+ case RefType::Eq:
+ break;
+ case RefType::TypeIndex:
+#ifdef WASM_PRIVATE_REFTYPES
+ return d.fail("cannot expose indexed reference type");
+#else
+ break;
+#endif
+ default:
+ return d.fail("unexpected variable type in global import/export");
+ }
+ break;
+ default:
+ return d.fail("unexpected variable type in global import/export");
+ }
+
+ return true;
+}
+
+static bool DecodeGlobalType(Decoder& d, const TypeContext& types,
+ const FeatureArgs& features, ValType* type,
+ bool* isMutable) {
+ if (!d.readValType(types, features, type)) {
+ return d.fail("expected global type");
+ }
+
+ if (type->isReference() && !type->isNullable()) {
+ return d.fail("non-nullable references not supported in globals");
+ }
+
+ uint8_t flags;
+ if (!d.readFixedU8(&flags)) {
+ return d.fail("expected global flags");
+ }
+
+ if (flags & ~uint8_t(GlobalTypeImmediate::AllowedMask)) {
+ return d.fail("unexpected bits set in global flags");
+ }
+
+ *isMutable = flags & uint8_t(GlobalTypeImmediate::IsMutable);
+ return true;
+}
+
+void wasm::ConvertMemoryPagesToBytes(Limits* memory) {
+ memory->initial *= PageSize;
+
+ if (!memory->maximum) {
+ return;
+ }
+ *memory->maximum *= PageSize;
+}
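+
+// Worked example for the conversion above, assuming the standard 64 KiB wasm
+// page size (PageSize == 65536): an initial size of 16 pages becomes
+// 16 * 65536 = 1,048,576 bytes (1 MiB), and a maximum of 256 pages becomes
+// 256 * 65536 = 16,777,216 bytes (16 MiB).
+static_assert(16 * 65536 == 1048576, "16 wasm pages are 1 MiB");
+static_assert(256 * 65536 == 16777216, "256 wasm pages are 16 MiB");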
+
+static bool DecodeMemoryLimits(Decoder& d, ModuleEnvironment* env) {
+ if (env->usesMemory()) {
+ return d.fail("already have default memory");
+ }
+
+ Limits memory;
+ if (!DecodeLimits(d, &memory, Shareable::True)) {
+ return false;
+ }
+
+ if (memory.initial > MaxMemory32LimitField) {
+ return d.fail("initial memory size too big");
+ }
+
+ if (memory.maximum && *memory.maximum > MaxMemory32LimitField) {
+ return d.fail("maximum memory size too big");
+ }
+
+ ConvertMemoryPagesToBytes(&memory);
+
+ if (memory.shared == Shareable::True &&
+ env->sharedMemoryEnabled() == Shareable::False) {
+ return d.fail("shared memory is disabled");
+ }
+
+ env->memoryUsage = memory.shared == Shareable::True ? MemoryUsage::Shared
+ : MemoryUsage::Unshared;
+ env->minMemoryLength = memory.initial;
+ env->maxMemoryLength = memory.maximum;
+ return true;
+}
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+static bool EventIsJSCompatible(Decoder& d, const ValTypeVector& type) {
+ for (uint32_t i = 0; i < type.length(); i++) {
+ if (type[i].isTypeIndex()) {
+ return d.fail("cannot expose indexed reference type");
+ }
+ }
+
+ return true;
+}
+
+static bool DecodeEvent(Decoder& d, ModuleEnvironment* env,
+ EventKind* eventKind, uint32_t* funcTypeIndex) {
+ uint32_t eventCode;
+ if (!d.readVarU32(&eventCode)) {
+ return d.fail("expected event kind");
+ }
+
+ if (EventKind(eventCode) != EventKind::Exception) {
+ return d.fail("illegal event kind");
+ }
+ *eventKind = EventKind(eventCode);
+
+ if (!d.readVarU32(funcTypeIndex)) {
+ return d.fail("expected function index in event");
+ }
+ if (*funcTypeIndex >= env->numTypes()) {
+ return d.fail("function type index in event out of bounds");
+ }
+ if (!env->types[*funcTypeIndex].isFuncType()) {
+ return d.fail("function type index must index a function type");
+ }
+ if (env->types[*funcTypeIndex].funcType().results().length() != 0) {
+ return d.fail("exception function types must not return anything");
+ }
+ return true;
+}
+#endif
+
+struct CStringPair {
+ const char* first;
+ const char* second;
+
+ CStringPair(const char* first, const char* second)
+ : first(first), second(second) {}
+
+ using Key = CStringPair;
+ using Lookup = CStringPair;
+
+ static mozilla::HashNumber hash(const Lookup& l) {
+ return mozilla::AddToHash(mozilla::HashString(l.first),
+ mozilla::HashString(l.second));
+ }
+ static bool match(const Key& k, const Lookup& l) {
+ return !strcmp(k.first, l.first) && !strcmp(k.second, l.second);
+ }
+};
+
+using CStringPairSet = HashSet<CStringPair, CStringPair, SystemAllocPolicy>;
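+
+// The same duplicate-import bookkeeping, sketched with the standard library
+// purely for illustration (the decoder below uses the HashSet declared
+// above); the helper name is hypothetical.
+#include <set>
+#include <string>
+#include <utility>
+
+static bool ExampleSeenImportBefore(
+    std::set<std::pair<std::string, std::string>>& seen,
+    const std::string& moduleName, const std::string& fieldName) {
+  // insert() returns {iterator, inserted}; a failed insert means the
+  // (module, field) pair was already present, i.e. a duplicate import.
+  return !seen.insert({moduleName, fieldName}).second;
+}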
+
+static bool DecodeImport(Decoder& d, ModuleEnvironment* env,
+ CStringPairSet* dupSet) {
+ UniqueChars moduleName = DecodeName(d);
+ if (!moduleName) {
+ return d.fail("expected valid import module name");
+ }
+
+ UniqueChars funcName = DecodeName(d);
+ if (!funcName) {
+ return d.fail("expected valid import func name");
+ }
+
+ // It is valid to store raw pointers in dupSet because moduleName and funcName
+ // become owned by env->imports on all non-error paths, outliving dupSet.
+ CStringPair pair(moduleName.get(), funcName.get());
+ CStringPairSet::AddPtr p = dupSet->lookupForAdd(pair);
+ if (p) {
+ env->usesDuplicateImports = true;
+ } else if (!dupSet->add(p, pair)) {
+ return false;
+ }
+
+ uint8_t rawImportKind;
+ if (!d.readFixedU8(&rawImportKind)) {
+ return d.fail("failed to read import kind");
+ }
+
+ DefinitionKind importKind = DefinitionKind(rawImportKind);
+
+ switch (importKind) {
+ case DefinitionKind::Function: {
+ uint32_t funcTypeIndex;
+ if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
+ return false;
+ }
+#ifdef WASM_PRIVATE_REFTYPES
+ if (!FuncTypeIsJSCompatible(d, env->types.funcType(funcTypeIndex))) {
+ return false;
+ }
+#endif
+ if (!env->funcs.append(FuncDesc(&env->types.funcType(funcTypeIndex),
+ &env->typeIds[funcTypeIndex],
+ funcTypeIndex))) {
+ return false;
+ }
+ if (env->funcs.length() > MaxFuncs) {
+ return d.fail("too many functions");
+ }
+ break;
+ }
+ case DefinitionKind::Table: {
+ if (!DecodeTableTypeAndLimits(d, env->features, env->types,
+ &env->tables)) {
+ return false;
+ }
+ env->tables.back().importedOrExported = true;
+ break;
+ }
+ case DefinitionKind::Memory: {
+ if (!DecodeMemoryLimits(d, env)) {
+ return false;
+ }
+ break;
+ }
+ case DefinitionKind::Global: {
+ ValType type;
+ bool isMutable;
+ if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
+ return false;
+ }
+ if (!GlobalIsJSCompatible(d, type)) {
+ return false;
+ }
+ if (!env->globals.append(
+ GlobalDesc(type, isMutable, env->globals.length()))) {
+ return false;
+ }
+ if (env->globals.length() > MaxGlobals) {
+ return d.fail("too many globals");
+ }
+ break;
+ }
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case DefinitionKind::Event: {
+ EventKind eventKind;
+ uint32_t funcTypeIndex;
+ if (!DecodeEvent(d, env, &eventKind, &funcTypeIndex)) {
+ return false;
+ }
+ const ValTypeVector& args = env->types[funcTypeIndex].funcType().args();
+# ifdef WASM_PRIVATE_REFTYPES
+ if (!EventIsJSCompatible(d, args)) {
+ return false;
+ }
+# endif
+ ValTypeVector eventArgs;
+ if (!eventArgs.appendAll(args)) {
+ return false;
+ }
+ if (!env->events.emplaceBack(eventKind, std::move(eventArgs))) {
+ return false;
+ }
+ if (env->events.length() > MaxEvents) {
+ return d.fail("too many events");
+ }
+ break;
+ }
+#endif
+ default:
+ return d.fail("unsupported import kind");
+ }
+
+ return env->imports.emplaceBack(std::move(moduleName), std::move(funcName),
+ importKind);
+}
+
+static bool DecodeImportSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Import, env, &range, "import")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numImports;
+ if (!d.readVarU32(&numImports)) {
+ return d.fail("failed to read number of imports");
+ }
+
+ if (numImports > MaxImports) {
+ return d.fail("too many imports");
+ }
+
+ CStringPairSet dupSet;
+ for (uint32_t i = 0; i < numImports; i++) {
+ if (!DecodeImport(d, env, &dupSet)) {
+ return false;
+ }
+ }
+
+ if (!d.finishSection(*range, "import")) {
+ return false;
+ }
+
+ // The global data offsets will be filled in by ModuleGenerator::init.
+ if (!env->funcImportGlobalDataOffsets.resize(env->funcs.length())) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool DecodeFunctionSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Function, env, &range, "function")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs)) {
+ return d.fail("expected number of function definitions");
+ }
+
+ CheckedInt<uint32_t> numFuncs = env->funcs.length();
+ numFuncs += numDefs;
+ if (!numFuncs.isValid() || numFuncs.value() > MaxFuncs) {
+ return d.fail("too many functions");
+ }
+
+ if (!env->funcs.reserve(numFuncs.value())) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ uint32_t funcTypeIndex;
+ if (!DecodeFuncTypeIndex(d, env->types, &funcTypeIndex)) {
+ return false;
+ }
+ env->funcs.infallibleAppend(FuncDesc(&env->types.funcType(funcTypeIndex),
+ &env->typeIds[funcTypeIndex],
+ funcTypeIndex));
+ }
+
+ return d.finishSection(*range, "function");
+}
+
+static bool DecodeTableSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Table, env, &range, "table")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numTables;
+ if (!d.readVarU32(&numTables)) {
+ return d.fail("failed to read number of tables");
+ }
+
+ for (uint32_t i = 0; i < numTables; ++i) {
+ if (!DecodeTableTypeAndLimits(d, env->features, env->types, &env->tables)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "table");
+}
+
+static bool DecodeMemorySection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Memory, env, &range, "memory")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numMemories;
+ if (!d.readVarU32(&numMemories)) {
+ return d.fail("failed to read number of memories");
+ }
+
+ if (numMemories > 1) {
+ return d.fail("the number of memories must be at most one");
+ }
+
+ for (uint32_t i = 0; i < numMemories; ++i) {
+ if (!DecodeMemoryLimits(d, env)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "memory");
+}
+
+static bool DecodeInitializerExpression(Decoder& d, ModuleEnvironment* env,
+ ValType expected, InitExpr* init) {
+ OpBytes op;
+ if (!d.readOp(&op)) {
+ return d.fail("failed to read initializer type");
+ }
+
+ switch (op.b0) {
+ case uint16_t(Op::I32Const): {
+ int32_t i32;
+ if (!d.readVarS32(&i32)) {
+ return d.fail("failed to read initializer i32 expression");
+ }
+ *init = InitExpr::fromConstant(LitVal(uint32_t(i32)));
+ break;
+ }
+ case uint16_t(Op::I64Const): {
+ int64_t i64;
+ if (!d.readVarS64(&i64)) {
+ return d.fail("failed to read initializer i64 expression");
+ }
+ *init = InitExpr::fromConstant(LitVal(uint64_t(i64)));
+ break;
+ }
+ case uint16_t(Op::F32Const): {
+ float f32;
+ if (!d.readFixedF32(&f32)) {
+ return d.fail("failed to read initializer f32 expression");
+ }
+ *init = InitExpr::fromConstant(LitVal(f32));
+ break;
+ }
+ case uint16_t(Op::F64Const): {
+ double f64;
+ if (!d.readFixedF64(&f64)) {
+ return d.fail("failed to read initializer f64 expression");
+ }
+ *init = InitExpr::fromConstant(LitVal(f64));
+ break;
+ }
+#ifdef ENABLE_WASM_SIMD
+ case uint16_t(Op::SimdPrefix): {
+ if (!env->v128Enabled()) {
+ return d.fail("v128 not enabled");
+ }
+ if (op.b1 != uint32_t(SimdOp::V128Const)) {
+ return d.fail("unexpected initializer expression");
+ }
+ V128 v128;
+ if (!d.readFixedV128(&v128)) {
+ return d.fail("failed to read initializer v128 expression");
+ }
+ *init = InitExpr::fromConstant(LitVal(v128));
+ break;
+ }
+#endif
+#ifdef ENABLE_WASM_REFTYPES
+ case uint16_t(Op::RefNull): {
+ MOZ_ASSERT_IF(
+ expected.isReference() && env->types.isStructType(expected.refType()),
+ env->gcTypesEnabled());
+ RefType initType;
+ if (!d.readHeapType(env->types, env->features, true, &initType)) {
+ return false;
+ }
+ if (!expected.isReference() ||
+ !env->types.isRefSubtypeOf(initType, expected.refType())) {
+ return d.fail(
+ "type mismatch: initializer type and expected type don't match");
+ }
+ *init = InitExpr::fromConstant(LitVal(expected, AnyRef::null()));
+ break;
+ }
+ case uint16_t(Op::RefFunc): {
+ if (!expected.isReference() || expected.refType() != RefType::func()) {
+ return d.fail(
+ "type mismatch: initializer type and expected type don't match");
+ }
+ uint32_t i;
+ if (!d.readVarU32(&i)) {
+ return d.fail(
+ "failed to read ref.func index in initializer expression");
+ }
+ if (i >= env->numFuncs()) {
+ return d.fail("function index out of range in initializer expression");
+ }
+ env->validForRefFunc.setBit(i);
+ *init = InitExpr::fromRefFunc(i);
+ break;
+ }
+#endif
+ case uint16_t(Op::GetGlobal): {
+ uint32_t i;
+ const GlobalDescVector& globals = env->globals;
+ if (!d.readVarU32(&i)) {
+ return d.fail(
+ "failed to read global.get index in initializer expression");
+ }
+ if (i >= globals.length()) {
+ return d.fail("global index out of range in initializer expression");
+ }
+ if (!globals[i].isImport() || globals[i].isMutable()) {
+ return d.fail(
+ "initializer expression must reference a global immutable import");
+ }
+ if (expected.isReference()) {
+ bool fail = false;
+ if (!globals[i].type().isReference()) {
+ fail = true;
+ } else if ((env->types.isStructType(expected.refType()) ||
+ env->types.isStructType(globals[i].type().refType())) &&
+ !env->gcTypesEnabled()) {
+ fail = true;
+ } else if (!env->types.isRefSubtypeOf(globals[i].type().refType(),
+ expected.refType())) {
+ fail = true;
+ }
+ if (fail) {
+ return d.fail(
+ "type mismatch: initializer type and expected type don't match");
+ }
+ *init = InitExpr::fromGetGlobal(i, expected);
+ } else {
+ *init = InitExpr::fromGetGlobal(i, globals[i].type());
+ }
+ break;
+ }
+ default: {
+ return d.fail("unexpected initializer expression");
+ }
+ }
+
+ if (expected != init->type()) {
+ return d.fail(
+ "type mismatch: initializer type and expected type don't match");
+ }
+
+ OpBytes end;
+ if (!d.readOp(&end) || end.b0 != uint16_t(Op::End)) {
+ return d.fail("failed to read end of initializer expression");
+ }
+
+ return true;
+}
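+
+// Illustrative encodings of the constant initializer expressions accepted by
+// DecodeInitializerExpression() above (standard wasm opcodes; every
+// expression is terminated by the end opcode, 0x0B):
+//
+//   0x41 0x2A 0x0B    i32.const 42, end
+//   0x23 0x00 0x0B    global.get 0, end   (global 0 must be an immutable
+//                                          import of a matching type)
+static const uint8_t ExampleI32ConstInit[] = {0x41, 0x2A, 0x0B};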
+
+static bool DecodeGlobalSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Global, env, &range, "global")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs)) {
+ return d.fail("expected number of globals");
+ }
+
+ CheckedInt<uint32_t> numGlobals = env->globals.length();
+ numGlobals += numDefs;
+ if (!numGlobals.isValid() || numGlobals.value() > MaxGlobals) {
+ return d.fail("too many globals");
+ }
+
+ if (!env->globals.reserve(numGlobals.value())) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ ValType type;
+ bool isMutable;
+ if (!DecodeGlobalType(d, env->types, env->features, &type, &isMutable)) {
+ return false;
+ }
+
+ InitExpr initializer;
+ if (!DecodeInitializerExpression(d, env, type, &initializer)) {
+ return false;
+ }
+
+ env->globals.infallibleAppend(GlobalDesc(initializer, isMutable));
+ }
+
+ return d.finishSection(*range, "global");
+}
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+static bool DecodeEventSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Event, env, &range, "event")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs)) {
+ return d.fail("expected number of events");
+ }
+
+ CheckedInt<uint32_t> numEvents = env->events.length();
+ numEvents += numDefs;
+ if (!numEvents.isValid() || numEvents.value() > MaxEvents) {
+ return d.fail("too many events");
+ }
+
+ if (!env->events.reserve(numEvents.value())) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ EventKind eventKind;
+ uint32_t funcTypeIndex;
+ if (!DecodeEvent(d, env, &eventKind, &funcTypeIndex)) {
+ return false;
+ }
+ const ValTypeVector& args = env->types[funcTypeIndex].funcType().args();
+ ValTypeVector eventArgs;
+ if (!eventArgs.appendAll(args)) {
+ return false;
+ }
+ env->events.infallibleEmplaceBack(eventKind, std::move(eventArgs));
+ }
+
+ return d.finishSection(*range, "event");
+}
+#endif
+
+using CStringSet =
+    HashSet<const char*, mozilla::CStringHasher, SystemAllocPolicy>;
+
+static UniqueChars DecodeExportName(Decoder& d, CStringSet* dupSet) {
+ UniqueChars exportName = DecodeName(d);
+ if (!exportName) {
+ d.fail("expected valid export name");
+ return nullptr;
+ }
+
+ CStringSet::AddPtr p = dupSet->lookupForAdd(exportName.get());
+ if (p) {
+ d.fail("duplicate export");
+ return nullptr;
+ }
+
+ if (!dupSet->add(p, exportName.get())) {
+ return nullptr;
+ }
+
+ return exportName;
+}
+
+static bool DecodeExport(Decoder& d, ModuleEnvironment* env,
+ CStringSet* dupSet) {
+ UniqueChars fieldName = DecodeExportName(d, dupSet);
+ if (!fieldName) {
+ return false;
+ }
+
+ uint8_t exportKind;
+ if (!d.readFixedU8(&exportKind)) {
+ return d.fail("failed to read export kind");
+ }
+
+ switch (DefinitionKind(exportKind)) {
+ case DefinitionKind::Function: {
+ uint32_t funcIndex;
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("expected function index");
+ }
+
+ if (funcIndex >= env->numFuncs()) {
+ return d.fail("exported function index out of bounds");
+ }
+#ifdef WASM_PRIVATE_REFTYPES
+ if (!FuncTypeIsJSCompatible(d, *env->funcs[funcIndex].type)) {
+ return false;
+ }
+#endif
+
+ env->validForRefFunc.setBit(funcIndex);
+ return env->exports.emplaceBack(std::move(fieldName), funcIndex,
+ DefinitionKind::Function);
+ }
+ case DefinitionKind::Table: {
+ uint32_t tableIndex;
+ if (!d.readVarU32(&tableIndex)) {
+ return d.fail("expected table index");
+ }
+
+ if (tableIndex >= env->tables.length()) {
+ return d.fail("exported table index out of bounds");
+ }
+ env->tables[tableIndex].importedOrExported = true;
+ return env->exports.emplaceBack(std::move(fieldName), tableIndex,
+ DefinitionKind::Table);
+ }
+ case DefinitionKind::Memory: {
+ uint32_t memoryIndex;
+ if (!d.readVarU32(&memoryIndex)) {
+ return d.fail("expected memory index");
+ }
+
+ if (memoryIndex > 0 || !env->usesMemory()) {
+ return d.fail("exported memory index out of bounds");
+ }
+
+ return env->exports.emplaceBack(std::move(fieldName),
+ DefinitionKind::Memory);
+ }
+ case DefinitionKind::Global: {
+ uint32_t globalIndex;
+ if (!d.readVarU32(&globalIndex)) {
+ return d.fail("expected global index");
+ }
+
+ if (globalIndex >= env->globals.length()) {
+ return d.fail("exported global index out of bounds");
+ }
+
+ GlobalDesc* global = &env->globals[globalIndex];
+ global->setIsExport();
+ if (!GlobalIsJSCompatible(d, global->type())) {
+ return false;
+ }
+
+ return env->exports.emplaceBack(std::move(fieldName), globalIndex,
+ DefinitionKind::Global);
+ }
+#ifdef ENABLE_WASM_EXCEPTIONS
+ case DefinitionKind::Event: {
+ uint32_t eventIndex;
+ if (!d.readVarU32(&eventIndex)) {
+ return d.fail("expected event index");
+ }
+ if (eventIndex >= env->events.length()) {
+ return d.fail("exported event index out of bounds");
+ }
+
+# ifdef WASM_PRIVATE_REFTYPES
+ if (!EventIsJSCompatible(d, env->events[eventIndex].type)) {
+ return false;
+ }
+# endif
+
+ env->events[eventIndex].isExport = true;
+ return env->exports.emplaceBack(std::move(fieldName), eventIndex,
+ DefinitionKind::Event);
+ }
+#endif
+ default:
+ return d.fail("unexpected export kind");
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+static bool DecodeExportSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Export, env, &range, "export")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ CStringSet dupSet;
+
+ uint32_t numExports;
+ if (!d.readVarU32(&numExports)) {
+ return d.fail("failed to read number of exports");
+ }
+
+ if (numExports > MaxExports) {
+ return d.fail("too many exports");
+ }
+
+ for (uint32_t i = 0; i < numExports; i++) {
+ if (!DecodeExport(d, env, &dupSet)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "export");
+}
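+
+// Illustrative layout of a single export entry as decoded by DecodeExport()
+// above (standard wasm encoding): a name, an export kind byte, and an index.
+//
+//   0x01 0x66   name: length 1, "f"
+//   0x00        kind: function (0x01 table, 0x02 memory, 0x03 global)
+//   0x02        index: function 2
+static const uint8_t ExampleExportEntry[] = {0x01, 0x66, 0x00, 0x02};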
+
+static bool DecodeStartSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Start, env, &range, "start")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t funcIndex;
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("failed to read start func index");
+ }
+
+ if (funcIndex >= env->numFuncs()) {
+ return d.fail("unknown start function");
+ }
+
+ const FuncType& funcType = *env->funcs[funcIndex].type;
+ if (funcType.results().length() > 0) {
+ return d.fail("start function must not return anything");
+ }
+
+ if (funcType.args().length()) {
+ return d.fail("start function must be nullary");
+ }
+
+ env->startFuncIndex = Some(funcIndex);
+
+ return d.finishSection(*range, "start");
+}
+
+static inline ElemSegment::Kind NormalizeElemSegmentKind(
+ ElemSegmentKind decodedKind) {
+ switch (decodedKind) {
+ case ElemSegmentKind::Active:
+ case ElemSegmentKind::ActiveWithTableIndex: {
+ return ElemSegment::Kind::Active;
+ }
+ case ElemSegmentKind::Passive: {
+ return ElemSegment::Kind::Passive;
+ }
+ case ElemSegmentKind::Declared: {
+ return ElemSegment::Kind::Declared;
+ }
+ }
+ MOZ_CRASH("unexpected elem segment kind");
+}
+
+static bool DecodeElemSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Elem, env, &range, "elem")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t numSegments;
+ if (!d.readVarU32(&numSegments)) {
+ return d.fail("failed to read number of elem segments");
+ }
+
+ if (numSegments > MaxElemSegments) {
+ return d.fail("too many elem segments");
+ }
+
+ if (!env->elemSegments.reserve(numSegments)) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numSegments; i++) {
+ uint32_t segmentFlags;
+ if (!d.readVarU32(&segmentFlags)) {
+ return d.fail("expected elem segment flags field");
+ }
+
+ Maybe<ElemSegmentFlags> flags = ElemSegmentFlags::construct(segmentFlags);
+ if (!flags) {
+ return d.fail("invalid elem segment flags field");
+ }
+
+ MutableElemSegment seg = js_new<ElemSegment>();
+ if (!seg) {
+ return false;
+ }
+
+ ElemSegmentKind kind = flags->kind();
+ seg->kind = NormalizeElemSegmentKind(kind);
+
+ if (kind == ElemSegmentKind::Active ||
+ kind == ElemSegmentKind::ActiveWithTableIndex) {
+ if (env->tables.length() == 0) {
+ return d.fail("active elem segment requires a table");
+ }
+
+ uint32_t tableIndex = 0;
+ if (kind == ElemSegmentKind::ActiveWithTableIndex &&
+ !d.readVarU32(&tableIndex)) {
+ return d.fail("expected table index");
+ }
+ if (tableIndex >= env->tables.length()) {
+ return d.fail("table index out of range for element segment");
+ }
+ seg->tableIndex = tableIndex;
+
+ InitExpr offset;
+ if (!DecodeInitializerExpression(d, env, ValType::I32, &offset)) {
+ return false;
+ }
+ seg->offsetIfActive.emplace(offset);
+ } else {
+ // Too many bugs result from keeping this value zero. For passive
+ // or declared segments, there really is no table index, and we should
+ // never touch the field.
+ MOZ_ASSERT(kind == ElemSegmentKind::Passive ||
+ kind == ElemSegmentKind::Declared);
+ seg->tableIndex = (uint32_t)-1;
+ }
+
+ ElemSegmentPayload payload = flags->payload();
+ RefType elemType;
+
+ // `ActiveWithTableIndex`, `Declared`, and `Passive` element segments encode
+ // the type or definition kind of the payload. `Active` element segments are
+ // restricted to MVP behavior, which assumes only function indices.
+ if (kind == ElemSegmentKind::Active) {
+ elemType = RefType::func();
+ } else {
+ switch (payload) {
+ case ElemSegmentPayload::ElemExpression: {
+ if (!d.readRefType(env->types, env->features, &elemType)) {
+ return false;
+ }
+ break;
+ }
+ case ElemSegmentPayload::ExternIndex: {
+ uint8_t form;
+ if (!d.readFixedU8(&form)) {
+ return d.fail("expected type or extern kind");
+ }
+
+ if (form != uint8_t(DefinitionKind::Function)) {
+ return d.fail(
+ "segments with extern indices can only contain function "
+ "references");
+ }
+ elemType = RefType::func();
+ }
+ }
+ }
+
+ // Check constraints on the element type.
+ switch (kind) {
+ case ElemSegmentKind::Active:
+ case ElemSegmentKind::ActiveWithTableIndex: {
+ RefType tblElemType = env->tables[seg->tableIndex].elemType;
+ if (!env->types.isRefSubtypeOf(elemType, tblElemType)) {
+ return d.fail(
+ "segment's element type must be subtype of table's element type");
+ }
+ break;
+ }
+ case ElemSegmentKind::Declared:
+ case ElemSegmentKind::Passive: {
+ // Passive segment element types are checked when used with a
+ // `table.init` instruction.
+ break;
+ }
+ }
+ seg->elemType = elemType;
+
+ uint32_t numElems;
+ if (!d.readVarU32(&numElems)) {
+ return d.fail("expected segment size");
+ }
+
+ if (numElems > MaxElemSegmentLength) {
+ return d.fail("too many table elements");
+ }
+
+ if (!seg->elemFuncIndices.reserve(numElems)) {
+ return false;
+ }
+
+#ifdef WASM_PRIVATE_REFTYPES
+ // We assume that passive or declared segments may be applied to external
+ // tables. We can do slightly better: if there are no external tables in
+ // the module then we don't need to worry about passive or declared
+ // segments either. But this is a temporary restriction.
+ bool exportedTable = kind == ElemSegmentKind::Passive ||
+ kind == ElemSegmentKind::Declared ||
+ env->tables[seg->tableIndex].importedOrExported;
+#endif
+
+ // For passive segments we should use DecodeInitializerExpression() but we
+ // don't really want to generalize that function yet, so instead read the
+ // required Ref.Func and End here.
+
+ for (uint32_t i = 0; i < numElems; i++) {
+ bool needIndex = true;
+
+ if (payload == ElemSegmentPayload::ElemExpression) {
+ OpBytes op;
+ if (!d.readOp(&op)) {
+ return d.fail("failed to read initializer operation");
+ }
+
+ RefType initType = RefType::extern_();
+ switch (op.b0) {
+ case uint16_t(Op::RefFunc):
+ initType = RefType::func();
+ break;
+ case uint16_t(Op::RefNull):
+ if (!d.readHeapType(env->types, env->features, true, &initType)) {
+ return false;
+ }
+ needIndex = false;
+ break;
+ default:
+ return d.fail("failed to read initializer operation");
+ }
+ if (!env->types.isRefSubtypeOf(initType, elemType)) {
+ return d.fail("initializer type must be subtype of element type");
+ }
+ }
+
+ uint32_t funcIndex = NullFuncIndex;
+ if (needIndex) {
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("failed to read element function index");
+ }
+ if (funcIndex >= env->numFuncs()) {
+ return d.fail("table element out of range");
+ }
+#ifdef WASM_PRIVATE_REFTYPES
+ if (exportedTable &&
+ !FuncTypeIsJSCompatible(d, *env->funcs[funcIndex].type)) {
+ return false;
+ }
+#endif
+ }
+
+ if (payload == ElemSegmentPayload::ElemExpression) {
+ OpBytes end;
+ if (!d.readOp(&end) || end.b0 != uint16_t(Op::End)) {
+ return d.fail("failed to read end of initializer expression");
+ }
+ }
+
+ seg->elemFuncIndices.infallibleAppend(funcIndex);
+ if (funcIndex != NullFuncIndex) {
+ env->validForRefFunc.setBit(funcIndex);
+ }
+ }
+
+ env->elemSegments.infallibleAppend(std::move(seg));
+ }
+
+ return d.finishSection(*range, "elem");
+}
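+
+// Illustrative layout of the simplest element segment handled above (MVP
+// encoding, flags == 0): active, table 0, an i32.const offset expression,
+// then a vector of function indices.
+//
+//   0x00              flags: active, table 0, function-index payload
+//   0x41 0x00 0x0B    offset: i32.const 0, end
+//   0x02              two elements
+//   0x00 0x01         function indices 0 and 1
+static const uint8_t ExampleElemSegment[] = {0x00, 0x41, 0x00, 0x0B,
+                                             0x02, 0x00, 0x01};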
+
+static bool DecodeDataCountSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::DataCount, env, &range, "datacount")) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ uint32_t dataCount;
+ if (!d.readVarU32(&dataCount)) {
+ return d.fail("expected data segment count");
+ }
+
+ env->dataCount.emplace(dataCount);
+
+ return d.finishSection(*range, "datacount");
+}
+
+bool wasm::StartsCodeSection(const uint8_t* begin, const uint8_t* end,
+ SectionRange* codeSection) {
+ UniqueChars unused;
+ Decoder d(begin, end, 0, &unused);
+
+ if (!DecodePreamble(d)) {
+ return false;
+ }
+
+ while (!d.done()) {
+ uint8_t id;
+ SectionRange range;
+ if (!d.readSectionHeader(&id, &range)) {
+ return false;
+ }
+
+ if (id == uint8_t(SectionId::Code)) {
+ *codeSection = range;
+ return true;
+ }
+
+ if (!d.readBytes(range.size)) {
+ return false;
+ }
+ }
+
+ return false;
+}
+
+bool wasm::DecodeModuleEnvironment(Decoder& d, ModuleEnvironment* env) {
+ if (!DecodePreamble(d)) {
+ return false;
+ }
+
+ if (!DecodeTypeSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeImportSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeFunctionSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeTableSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeMemorySection(d, env)) {
+ return false;
+ }
+
+#ifdef ENABLE_WASM_EXCEPTIONS
+ if (!DecodeEventSection(d, env)) {
+ return false;
+ }
+#endif
+
+ if (!DecodeGlobalSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeExportSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeStartSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeElemSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeDataCountSection(d, env)) {
+ return false;
+ }
+
+ if (!d.startSection(SectionId::Code, env, &env->codeSection, "code")) {
+ return false;
+ }
+
+ if (env->codeSection && env->codeSection->size > MaxCodeSectionBytes) {
+ return d.fail("code section too big");
+ }
+
+ return true;
+}
+
+static bool DecodeFunctionBody(Decoder& d, const ModuleEnvironment& env,
+ uint32_t funcIndex) {
+ uint32_t bodySize;
+ if (!d.readVarU32(&bodySize)) {
+ return d.fail("expected number of function body bytes");
+ }
+
+ if (bodySize > MaxFunctionBytes) {
+ return d.fail("function body too big");
+ }
+
+ if (d.bytesRemain() < bodySize) {
+ return d.fail("function body length too big");
+ }
+
+ if (!ValidateFunctionBody(env, funcIndex, bodySize, d)) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool DecodeCodeSection(Decoder& d, ModuleEnvironment* env) {
+ if (!env->codeSection) {
+ if (env->numFuncDefs() != 0) {
+ return d.fail("expected code section");
+ }
+ return true;
+ }
+
+ uint32_t numFuncDefs;
+ if (!d.readVarU32(&numFuncDefs)) {
+ return d.fail("expected function body count");
+ }
+
+ if (numFuncDefs != env->numFuncDefs()) {
+ return d.fail(
+ "function body count does not match function signature count");
+ }
+
+ for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
+ if (!DecodeFunctionBody(d, *env, env->numFuncImports() + funcDefIndex)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*env->codeSection, "code");
+}
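+
+// Illustrative layout of the smallest function body accepted by
+// DecodeFunctionBody() above: a body size, a vector of local-declaration
+// groups, the instructions, and the terminating end opcode.
+//
+//   0x02    body size in bytes (covers everything that follows)
+//   0x00    zero local-declaration groups
+//   0x0B    end
+static const uint8_t ExampleEmptyFuncBody[] = {0x02, 0x00, 0x0B};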
+
+static bool DecodeDataSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startSection(SectionId::Data, env, &range, "data")) {
+ return false;
+ }
+ if (!range) {
+ if (env->dataCount.isSome() && *env->dataCount > 0) {
+ return d.fail("number of data segments does not match declared count");
+ }
+ return true;
+ }
+
+ uint32_t numSegments;
+ if (!d.readVarU32(&numSegments)) {
+ return d.fail("failed to read number of data segments");
+ }
+
+ if (numSegments > MaxDataSegments) {
+ return d.fail("too many data segments");
+ }
+
+ if (env->dataCount.isSome() && numSegments != *env->dataCount) {
+ return d.fail("number of data segments does not match declared count");
+ }
+
+ for (uint32_t i = 0; i < numSegments; i++) {
+ uint32_t initializerKindVal;
+ if (!d.readVarU32(&initializerKindVal)) {
+ return d.fail("expected data initializer-kind field");
+ }
+
+ switch (initializerKindVal) {
+ case uint32_t(DataSegmentKind::Active):
+ case uint32_t(DataSegmentKind::Passive):
+ case uint32_t(DataSegmentKind::ActiveWithMemoryIndex):
+ break;
+ default:
+ return d.fail("invalid data initializer-kind field");
+ }
+
+ DataSegmentKind initializerKind = DataSegmentKind(initializerKindVal);
+
+ if (initializerKind != DataSegmentKind::Passive && !env->usesMemory()) {
+ return d.fail("active data segment requires a memory section");
+ }
+
+ uint32_t memIndex = 0;
+ if (initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
+ if (!d.readVarU32(&memIndex)) {
+ return d.fail("expected memory index");
+ }
+ if (memIndex > 0) {
+ return d.fail("memory index must be zero");
+ }
+ }
+
+ DataSegmentEnv seg;
+ if (initializerKind == DataSegmentKind::Active ||
+ initializerKind == DataSegmentKind::ActiveWithMemoryIndex) {
+ InitExpr segOffset;
+ if (!DecodeInitializerExpression(d, env, ValType::I32, &segOffset)) {
+ return false;
+ }
+ seg.offsetIfActive.emplace(segOffset);
+ }
+
+ if (!d.readVarU32(&seg.length)) {
+ return d.fail("expected segment size");
+ }
+
+ if (seg.length > MaxDataSegmentLengthPages * PageSize) {
+ return d.fail("segment size too big");
+ }
+
+ seg.bytecodeOffset = d.currentOffset();
+
+ if (!d.readBytes(seg.length)) {
+ return d.fail("data segment shorter than declared");
+ }
+
+ if (!env->dataSegments.append(seg)) {
+ return false;
+ }
+ }
+
+ return d.finishSection(*range, "data");
+}
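+
+// Illustrative layout of an active (kind 0) data segment as decoded above:
+// an i32.const offset expression followed by a length-prefixed byte string.
+//
+//   0x00              kind: active, memory 0
+//   0x41 0x08 0x0B    offset: i32.const 8, end
+//   0x03              three payload bytes
+//   0x68 0x69 0x21    "hi!"
+static const uint8_t ExampleDataSegment[] = {0x00, 0x41, 0x08, 0x0B,
+                                             0x03, 0x68, 0x69, 0x21};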
+
+static bool DecodeModuleNameSubsection(Decoder& d,
+ const CustomSectionEnv& nameSection,
+ ModuleEnvironment* env) {
+ Maybe<uint32_t> endOffset;
+ if (!d.startNameSubsection(NameType::Module, &endOffset)) {
+ return false;
+ }
+ if (!endOffset) {
+ return true;
+ }
+
+ Name moduleName;
+ if (!d.readVarU32(&moduleName.length)) {
+ return d.fail("failed to read module name length");
+ }
+
+ MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
+ moduleName.offsetInNamePayload =
+ d.currentOffset() - nameSection.payloadOffset;
+
+ const uint8_t* bytes;
+ if (!d.readBytes(moduleName.length, &bytes)) {
+ return d.fail("failed to read module name bytes");
+ }
+
+ if (!d.finishNameSubsection(*endOffset)) {
+ return false;
+ }
+
+ // Only save the module name if the whole subsection validates.
+ env->moduleName.emplace(moduleName);
+ return true;
+}
+
+static bool DecodeFunctionNameSubsection(Decoder& d,
+ const CustomSectionEnv& nameSection,
+ ModuleEnvironment* env) {
+ Maybe<uint32_t> endOffset;
+ if (!d.startNameSubsection(NameType::Function, &endOffset)) {
+ return false;
+ }
+ if (!endOffset) {
+ return true;
+ }
+
+ uint32_t nameCount = 0;
+ if (!d.readVarU32(&nameCount) || nameCount > MaxFuncs) {
+ return d.fail("bad function name count");
+ }
+
+ NameVector funcNames;
+
+ for (uint32_t i = 0; i < nameCount; ++i) {
+ uint32_t funcIndex = 0;
+ if (!d.readVarU32(&funcIndex)) {
+ return d.fail("unable to read function index");
+ }
+
+ // Names must refer to real functions and be given in ascending order.
+ if (funcIndex >= env->numFuncs() || funcIndex < funcNames.length()) {
+ return d.fail("invalid function index");
+ }
+
+ Name funcName;
+ if (!d.readVarU32(&funcName.length) ||
+ funcName.length > JS::MaxStringLength) {
+ return d.fail("unable to read function name length");
+ }
+
+ if (!funcName.length) {
+ continue;
+ }
+
+ if (!funcNames.resize(funcIndex + 1)) {
+ return false;
+ }
+
+ MOZ_ASSERT(d.currentOffset() >= nameSection.payloadOffset);
+ funcName.offsetInNamePayload =
+ d.currentOffset() - nameSection.payloadOffset;
+
+ if (!d.readBytes(funcName.length)) {
+ return d.fail("unable to read function name bytes");
+ }
+
+ funcNames[funcIndex] = funcName;
+ }
+
+ if (!d.finishNameSubsection(*endOffset)) {
+ return false;
+ }
+
+  // To encourage fully valid function name subsections, only save the names
+  // if the entire subsection decoded correctly.
+ env->funcNames = std::move(funcNames);
+ return true;
+}
+
+static bool DecodeNameSection(Decoder& d, ModuleEnvironment* env) {
+ MaybeSectionRange range;
+ if (!d.startCustomSection(NameSectionName, env, &range)) {
+ return false;
+ }
+ if (!range) {
+ return true;
+ }
+
+ env->nameCustomSectionIndex = Some(env->customSections.length() - 1);
+ const CustomSectionEnv& nameSection = env->customSections.back();
+
+ // Once started, custom sections do not report validation errors.
+
+ if (!DecodeModuleNameSubsection(d, nameSection, env)) {
+ goto finish;
+ }
+
+ if (!DecodeFunctionNameSubsection(d, nameSection, env)) {
+ goto finish;
+ }
+
+ while (d.currentOffset() < range->end()) {
+ if (!d.skipNameSubsection()) {
+ goto finish;
+ }
+ }
+
+finish:
+ d.finishCustomSection(NameSectionName, *range);
+ return true;
+}
+
+bool wasm::DecodeModuleTail(Decoder& d, ModuleEnvironment* env) {
+ if (!DecodeDataSection(d, env)) {
+ return false;
+ }
+
+ if (!DecodeNameSection(d, env)) {
+ return false;
+ }
+
+ while (!d.done()) {
+ if (!d.skipCustomSection(env)) {
+ if (d.resilientMode()) {
+ d.clearError();
+ return true;
+ }
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// Validate algorithm.
+
+bool wasm::Validate(JSContext* cx, const ShareableBytes& bytecode,
+ UniqueChars* error) {
+ Decoder d(bytecode.bytes, 0, error);
+
+ FeatureArgs features = FeatureArgs::build(cx);
+ ModuleEnvironment env(features);
+ if (!DecodeModuleEnvironment(d, &env)) {
+ return false;
+ }
+
+ if (!DecodeCodeSection(d, &env)) {
+ return false;
+ }
+
+ if (!DecodeModuleTail(d, &env)) {
+ return false;
+ }
+
+ MOZ_ASSERT(!*error, "unreported error in decoding");
+ return true;
+}
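+
+// A hedged sketch of how an in-engine caller might drive the Validate entry
+// point above; `cx` and `bytecode` are assumed to be supplied by the caller,
+// and the wrapper name is hypothetical.
+static bool ExampleValidateOrReport(JSContext* cx,
+                                    const ShareableBytes& bytecode) {
+  UniqueChars error;
+  if (wasm::Validate(cx, bytecode, &error)) {
+    return true;
+  }
+  // On failure, `error` (when set) describes the first validation problem.
+  return false;
+}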
diff --git a/js/src/wasm/WasmValidate.h b/js/src/wasm/WasmValidate.h
new file mode 100644
index 0000000000..736054126d
--- /dev/null
+++ b/js/src/wasm/WasmValidate.h
@@ -0,0 +1,960 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_validate_h
+#define wasm_validate_h
+
+#include <type_traits>
+
+#include "ds/Bitmap.h"
+
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+// This struct captures the bytecode offset of a section's payload (so not
+// including the header) and the size of the payload.
+
+struct SectionRange {
+ uint32_t start;
+ uint32_t size;
+
+ uint32_t end() const { return start + size; }
+ bool operator==(const SectionRange& rhs) const {
+ return start == rhs.start && size == rhs.size;
+ }
+};
+
+using MaybeSectionRange = Maybe<SectionRange>;
+
+// CompilerEnvironment holds any values that will be needed to compute
+// compilation parameters once the module's feature opt-in sections have been
+// parsed.
+//
+// Subsequent to construction a computeParameters() call will compute the final
+// compilation parameters, and the object can then be queried for their values.
+
+struct CompileArgs;
+class Decoder;
+
+struct CompilerEnvironment {
+ // The object starts in one of two "initial" states; computeParameters moves
+ // it into the "computed" state.
+ enum State { InitialWithArgs, InitialWithModeTierDebug, Computed };
+
+ State state_;
+ union {
+ // Value if the state_ == InitialWithArgs.
+ const CompileArgs* args_;
+
+ // Value in the other two states.
+ struct {
+ CompileMode mode_;
+ Tier tier_;
+ OptimizedBackend optimizedBackend_;
+ DebugEnabled debug_;
+ };
+ };
+
+ public:
+ // Retain a reference to the CompileArgs. A subsequent computeParameters()
+ // will compute all parameters from the CompileArgs and additional values.
+ explicit CompilerEnvironment(const CompileArgs& args);
+
+  // Save the provided values for mode, tier, optimizedBackend, and debug.
+  // A subsequent computeParameters() will compute any remaining compilation
+  // parameters.
+ CompilerEnvironment(CompileMode mode, Tier tier,
+ OptimizedBackend optimizedBackend,
+ DebugEnabled debugEnabled);
+
+ // Compute any remaining compilation parameters.
+ void computeParameters(Decoder& d);
+
+ // Compute any remaining compilation parameters. Only use this method if
+ // the CompilerEnvironment was created with values for mode, tier, and
+ // debug.
+ void computeParameters();
+
+ bool isComputed() const { return state_ == Computed; }
+ CompileMode mode() const {
+ MOZ_ASSERT(isComputed());
+ return mode_;
+ }
+ Tier tier() const {
+ MOZ_ASSERT(isComputed());
+ return tier_;
+ }
+ OptimizedBackend optimizedBackend() const {
+ MOZ_ASSERT(isComputed());
+ return optimizedBackend_;
+ }
+ DebugEnabled debug() const {
+ MOZ_ASSERT(isComputed());
+ return debug_;
+ }
+ bool debugEnabled() const { return debug() == DebugEnabled::True; }
+};
+
+// ModuleEnvironment contains all the state necessary to process or render
+// functions, and all of the state necessary to validate all aspects of the
+// functions.
+//
+// A ModuleEnvironment is created by decoding all the sections before the wasm
+// code section and is then used immutably during compilation. When compiling
+// a module using a
+// ModuleGenerator, the ModuleEnvironment holds state shared between the
+// ModuleGenerator thread and background compile threads. All the threads
+// are given a read-only view of the ModuleEnvironment, thus preventing race
+// conditions.
+
+struct ModuleEnvironment {
+ // Constant parameters for the entire compilation:
+ const ModuleKind kind;
+ const FeatureArgs features;
+
+ // Module fields decoded from the module environment (or initialized while
+ // validating an asm.js module) and immutable during compilation:
+ Maybe<uint32_t> dataCount;
+ MemoryUsage memoryUsage;
+ uint64_t minMemoryLength;
+ Maybe<uint64_t> maxMemoryLength;
+ TypeContext types;
+ TypeIdDescVector typeIds;
+ FuncDescVector funcs;
+ Uint32Vector funcImportGlobalDataOffsets;
+
+ GlobalDescVector globals;
+#ifdef ENABLE_WASM_EXCEPTIONS
+ EventDescVector events;
+#endif
+ TableDescVector tables;
+ Uint32Vector asmJSSigToTableIndex;
+ ImportVector imports;
+ ExportVector exports;
+ Maybe<uint32_t> startFuncIndex;
+ ElemSegmentVector elemSegments;
+ MaybeSectionRange codeSection;
+ SparseBitmap validForRefFunc;
+ bool usesDuplicateImports;
+
+ // Fields decoded as part of the wasm module tail:
+ DataSegmentEnvVector dataSegments;
+ CustomSectionEnvVector customSections;
+ Maybe<uint32_t> nameCustomSectionIndex;
+ Maybe<Name> moduleName;
+ NameVector funcNames;
+
+ explicit ModuleEnvironment(FeatureArgs features,
+ ModuleKind kind = ModuleKind::Wasm)
+ : kind(kind),
+ features(features),
+ memoryUsage(MemoryUsage::None),
+ minMemoryLength(0),
+ types(features, TypeDefVector()),
+ usesDuplicateImports(false) {}
+
+ size_t numTables() const { return tables.length(); }
+ size_t numTypes() const { return types.length(); }
+ size_t numFuncs() const { return funcs.length(); }
+ size_t numFuncImports() const { return funcImportGlobalDataOffsets.length(); }
+ size_t numFuncDefs() const {
+ return funcs.length() - funcImportGlobalDataOffsets.length();
+ }
+ Shareable sharedMemoryEnabled() const { return features.sharedMemory; }
+ bool refTypesEnabled() const { return features.refTypes; }
+ bool functionReferencesEnabled() const { return features.functionReferences; }
+ bool gcTypesEnabled() const { return features.gcTypes; }
+ bool multiValueEnabled() const { return features.multiValue; }
+ bool v128Enabled() const { return features.v128; }
+ bool hugeMemoryEnabled() const { return !isAsmJS() && features.hugeMemory; }
+ bool exceptionsEnabled() const { return features.exceptions; }
+ bool usesMemory() const { return memoryUsage != MemoryUsage::None; }
+ bool usesSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
+ bool isAsmJS() const { return kind == ModuleKind::AsmJS; }
+
+ uint32_t funcMaxResults() const {
+ return multiValueEnabled() ? MaxResults : 1;
+ }
+ bool funcIsImport(uint32_t funcIndex) const {
+ return funcIndex < funcImportGlobalDataOffsets.length();
+ }
+};
+
+// ElemSegmentFlags provides methods for decoding and encoding the flags field
+// of an element segment. This is needed as the flags field has a non-trivial
+// encoding that is effectively split into independent `kind` and `payload`
+// enums.
+class ElemSegmentFlags {
+ enum class Flags : uint32_t {
+ Passive = 0x1,
+ WithIndexOrDeclared = 0x2,
+ ElemExpression = 0x4,
+ // Below this line are convenient combinations of flags
+ KindMask = Passive | WithIndexOrDeclared,
+ PayloadMask = ElemExpression,
+ AllFlags = Passive | WithIndexOrDeclared | ElemExpression,
+ };
+ uint32_t encoded_;
+
+ explicit ElemSegmentFlags(uint32_t encoded) : encoded_(encoded) {}
+
+ public:
+ ElemSegmentFlags(ElemSegmentKind kind, ElemSegmentPayload payload) {
+ encoded_ = uint32_t(kind) | uint32_t(payload);
+ }
+
+ static Maybe<ElemSegmentFlags> construct(uint32_t encoded) {
+ if (encoded > uint32_t(Flags::AllFlags)) {
+ return Nothing();
+ }
+ return Some(ElemSegmentFlags(encoded));
+ }
+
+ uint32_t encoded() const { return encoded_; }
+
+ ElemSegmentKind kind() const {
+ return static_cast<ElemSegmentKind>(encoded_ & uint32_t(Flags::KindMask));
+ }
+ ElemSegmentPayload payload() const {
+ return static_cast<ElemSegmentPayload>(encoded_ &
+ uint32_t(Flags::PayloadMask));
+ }
+};
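+
+// Worked example of the split encoding above: the on-wire flags value 5
+// (binary 101) has the Passive bit and the ElemExpression bit set, so,
+// assuming the ElemSegmentKind/ElemSegmentPayload enumerators carry the same
+// numeric values as the masks (which the decoder relies on):
+//
+//   Maybe<ElemSegmentFlags> flags = ElemSegmentFlags::construct(5);
+//   // flags->kind()    == ElemSegmentKind::Passive
+//   // flags->payload() == ElemSegmentPayload::ElemExpression
+//
+// The eight legal values 0..7 correspond to the element-segment forms of the
+// binary format; anything larger is rejected by construct().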
+
+// The Encoder class appends bytes to the Bytes object it is given during
+// construction. The client is responsible for the Bytes's lifetime and must
+// keep the Bytes alive as long as the Encoder is used.
+
+class Encoder {
+ Bytes& bytes_;
+
+ template <class T>
+ [[nodiscard]] bool write(const T& v) {
+ return bytes_.append(reinterpret_cast<const uint8_t*>(&v), sizeof(T));
+ }
+
+ template <typename UInt>
+ [[nodiscard]] bool writeVarU(UInt i) {
+ do {
+ uint8_t byte = i & 0x7f;
+ i >>= 7;
+ if (i != 0) {
+ byte |= 0x80;
+ }
+ if (!bytes_.append(byte)) {
+ return false;
+ }
+ } while (i != 0);
+ return true;
+ }
+
+ template <typename SInt>
+ [[nodiscard]] bool writeVarS(SInt i) {
+ bool done;
+ do {
+ uint8_t byte = i & 0x7f;
+ i >>= 7;
+ done = ((i == 0) && !(byte & 0x40)) || ((i == -1) && (byte & 0x40));
+ if (!done) {
+ byte |= 0x80;
+ }
+ if (!bytes_.append(byte)) {
+ return false;
+ }
+ } while (!done);
+ return true;
+ }
+
+ void patchVarU32(size_t offset, uint32_t patchBits, uint32_t assertBits) {
+ do {
+ uint8_t assertByte = assertBits & 0x7f;
+ uint8_t patchByte = patchBits & 0x7f;
+ assertBits >>= 7;
+ patchBits >>= 7;
+ if (assertBits != 0) {
+ assertByte |= 0x80;
+ patchByte |= 0x80;
+ }
+ MOZ_ASSERT(assertByte == bytes_[offset]);
+ bytes_[offset] = patchByte;
+ offset++;
+ } while (assertBits != 0);
+ }
+
+ void patchFixedU7(size_t offset, uint8_t patchBits, uint8_t assertBits) {
+ MOZ_ASSERT(patchBits <= uint8_t(INT8_MAX));
+ patchFixedU8(offset, patchBits, assertBits);
+ }
+
+ void patchFixedU8(size_t offset, uint8_t patchBits, uint8_t assertBits) {
+ MOZ_ASSERT(bytes_[offset] == assertBits);
+ bytes_[offset] = patchBits;
+ }
+
+ uint32_t varU32ByteLength(size_t offset) const {
+ size_t start = offset;
+ while (bytes_[offset] & 0x80) {
+ offset++;
+ }
+ return offset - start + 1;
+ }
+
+ public:
+ explicit Encoder(Bytes& bytes) : bytes_(bytes) { MOZ_ASSERT(empty()); }
+
+ size_t currentOffset() const { return bytes_.length(); }
+ bool empty() const { return currentOffset() == 0; }
+
+ // Fixed-size encoding operations simply copy the literal bytes (without
+ // attempting to align).
+
+ [[nodiscard]] bool writeFixedU7(uint8_t i) {
+ MOZ_ASSERT(i <= uint8_t(INT8_MAX));
+ return writeFixedU8(i);
+ }
+ [[nodiscard]] bool writeFixedU8(uint8_t i) { return write<uint8_t>(i); }
+ [[nodiscard]] bool writeFixedU32(uint32_t i) { return write<uint32_t>(i); }
+ [[nodiscard]] bool writeFixedF32(float f) { return write<float>(f); }
+ [[nodiscard]] bool writeFixedF64(double d) { return write<double>(d); }
+
+ // Variable-length encodings that all use LEB128.
+
+ [[nodiscard]] bool writeVarU32(uint32_t i) { return writeVarU<uint32_t>(i); }
+ [[nodiscard]] bool writeVarS32(int32_t i) { return writeVarS<int32_t>(i); }
+ [[nodiscard]] bool writeVarU64(uint64_t i) { return writeVarU<uint64_t>(i); }
+ [[nodiscard]] bool writeVarS64(int64_t i) { return writeVarS<int64_t>(i); }
+ [[nodiscard]] bool writeValType(ValType type) {
+ static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ if (type.isTypeIndex()) {
+ return writeFixedU8(uint8_t(TypeCode::NullableRef)) &&
+ writeVarU32(type.refType().typeIndex());
+ }
+ TypeCode tc = UnpackTypeCodeType(type.packed());
+ MOZ_ASSERT(size_t(tc) < size_t(TypeCode::Limit));
+ return writeFixedU8(uint8_t(tc));
+ }
+ [[nodiscard]] bool writeOp(Opcode opcode) {
+ // The Opcode constructor has asserted that `opcode` is meaningful, so no
+ // further correctness checking is necessary here.
+ uint32_t bits = opcode.bits();
+ if (!writeFixedU8(bits & 255)) {
+ return false;
+ }
+ if (opcode.isOp()) {
+ return true;
+ }
+ return writeVarU32(bits >> 8);
+ }
+
+ // Fixed-length encodings that allow back-patching.
+
+ [[nodiscard]] bool writePatchableFixedU7(size_t* offset) {
+ *offset = bytes_.length();
+ return writeFixedU8(UINT8_MAX);
+ }
+ void patchFixedU7(size_t offset, uint8_t patchBits) {
+ return patchFixedU7(offset, patchBits, UINT8_MAX);
+ }
+
+ // Variable-length encodings that allow back-patching.
+
+ [[nodiscard]] bool writePatchableVarU32(size_t* offset) {
+ *offset = bytes_.length();
+ return writeVarU32(UINT32_MAX);
+ }
+ void patchVarU32(size_t offset, uint32_t patchBits) {
+ return patchVarU32(offset, patchBits, UINT32_MAX);
+ }
+
+ // Byte ranges start with an LEB128 length followed by an arbitrary sequence
+ // of bytes. When used for strings, bytes are to be interpreted as utf8.
+
+ [[nodiscard]] bool writeBytes(const void* bytes, uint32_t numBytes) {
+ return writeVarU32(numBytes) &&
+ bytes_.append(reinterpret_cast<const uint8_t*>(bytes), numBytes);
+ }
+
+  // A "section" is a contiguous range of bytes that stores its own size so
+  // that it may be trivially skipped without examining the payload. Sections
+  // require backpatching since the size of the section is only known at the
+  // end while the size's varU32 must be stored at the beginning. The varU32
+  // id of the section is written immediately before the backpatched length.
+
+ [[nodiscard]] bool startSection(SectionId id, size_t* offset) {
+ MOZ_ASSERT(uint32_t(id) < 128);
+ return writeVarU32(uint32_t(id)) && writePatchableVarU32(offset);
+ }
+ void finishSection(size_t offset) {
+ return patchVarU32(offset,
+ bytes_.length() - offset - varU32ByteLength(offset));
+ }
+};
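+
+// A stand-alone sketch of the unsigned LEB128 scheme implemented by
+// writeVarU() above, included for illustration only; the function name is
+// hypothetical and it is not used by the encoder. For example, 624485
+// (binary 1001 1000 0111 0110 0101) splits into the 7-bit groups
+// 1100101 / 0001110 / 0100110 (low to high) and encodes as 0xE5 0x8E 0x26.
+#include <cstdint>
+#include <vector>
+
+static inline std::vector<uint8_t> ExampleEncodeLEB128(uint32_t value) {
+  std::vector<uint8_t> out;
+  do {
+    uint8_t byte = value & 0x7f;
+    value >>= 7;
+    if (value != 0) {
+      byte |= 0x80;  // set the continuation bit; more bytes follow
+    }
+    out.push_back(byte);
+  } while (value != 0);
+  return out;  // e.g. ExampleEncodeLEB128(624485) == {0xE5, 0x8E, 0x26}
+}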
+
+// The Decoder class decodes the bytes in the range it is given during
+// construction. The client is responsible for keeping the byte range alive as
+// long as the Decoder is used.
+
+class Decoder {
+ const uint8_t* const beg_;
+ const uint8_t* const end_;
+ const uint8_t* cur_;
+ const size_t offsetInModule_;
+ UniqueChars* error_;
+ UniqueCharsVector* warnings_;
+ bool resilientMode_;
+
+ template <class T>
+ [[nodiscard]] bool read(T* out) {
+ if (bytesRemain() < sizeof(T)) {
+ return false;
+ }
+ memcpy((void*)out, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ return true;
+ }
+
+ template <class T>
+ T uncheckedRead() {
+ MOZ_ASSERT(bytesRemain() >= sizeof(T));
+ T ret;
+ memcpy(&ret, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ return ret;
+ }
+
+ template <class T>
+ void uncheckedRead(T* ret) {
+ MOZ_ASSERT(bytesRemain() >= sizeof(T));
+ memcpy(ret, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ }
+
+ template <typename UInt>
+ [[nodiscard]] bool readVarU(UInt* out) {
+ DebugOnly<const uint8_t*> before = cur_;
+ const unsigned numBits = sizeof(UInt) * CHAR_BIT;
+ const unsigned remainderBits = numBits % 7;
+ const unsigned numBitsInSevens = numBits - remainderBits;
+ UInt u = 0;
+ uint8_t byte;
+ UInt shift = 0;
+ do {
+ if (!readFixedU8(&byte)) {
+ return false;
+ }
+ if (!(byte & 0x80)) {
+ *out = u | UInt(byte) << shift;
+ return true;
+ }
+ u |= UInt(byte & 0x7F) << shift;
+ shift += 7;
+ } while (shift != numBitsInSevens);
+ if (!readFixedU8(&byte) || (byte & (unsigned(-1) << remainderBits))) {
+ return false;
+ }
+ *out = u | (UInt(byte) << numBitsInSevens);
+ MOZ_ASSERT_IF(sizeof(UInt) == 4,
+ unsigned(cur_ - before) <= MaxVarU32DecodedBytes);
+ return true;
+ }
+
+ template <typename SInt>
+ [[nodiscard]] bool readVarS(SInt* out) {
+ using UInt = std::make_unsigned_t<SInt>;
+ const unsigned numBits = sizeof(SInt) * CHAR_BIT;
+ const unsigned remainderBits = numBits % 7;
+ const unsigned numBitsInSevens = numBits - remainderBits;
+ SInt s = 0;
+ uint8_t byte;
+ unsigned shift = 0;
+ do {
+ if (!readFixedU8(&byte)) {
+ return false;
+ }
+ s |= SInt(byte & 0x7f) << shift;
+ shift += 7;
+ if (!(byte & 0x80)) {
+ if (byte & 0x40) {
+ s |= UInt(-1) << shift;
+ }
+ *out = s;
+ return true;
+ }
+ } while (shift < numBitsInSevens);
+ if (!remainderBits || !readFixedU8(&byte) || (byte & 0x80)) {
+ return false;
+ }
+ uint8_t mask = 0x7f & (uint8_t(-1) << remainderBits);
+ if ((byte & mask) != ((byte & (1 << (remainderBits - 1))) ? mask : 0)) {
+ return false;
+ }
+ *out = s | UInt(byte) << shift;
+ return true;
+ }
+
+ public:
+ Decoder(const uint8_t* begin, const uint8_t* end, size_t offsetInModule,
+ UniqueChars* error, UniqueCharsVector* warnings = nullptr,
+ bool resilientMode = false)
+ : beg_(begin),
+ end_(end),
+ cur_(begin),
+ offsetInModule_(offsetInModule),
+ error_(error),
+ warnings_(warnings),
+ resilientMode_(resilientMode) {
+ MOZ_ASSERT(begin <= end);
+ }
+ explicit Decoder(const Bytes& bytes, size_t offsetInModule = 0,
+ UniqueChars* error = nullptr,
+ UniqueCharsVector* warnings = nullptr)
+ : beg_(bytes.begin()),
+ end_(bytes.end()),
+ cur_(bytes.begin()),
+ offsetInModule_(offsetInModule),
+ error_(error),
+ warnings_(warnings),
+ resilientMode_(false) {}
+
+ // These convenience functions use currentOffset() as the errorOffset.
+ bool fail(const char* msg) { return fail(currentOffset(), msg); }
+ bool failf(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);
+ void warnf(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);
+
+ // Report an error at the given offset (relative to the whole module).
+ bool fail(size_t errorOffset, const char* msg);
+
+ UniqueChars* error() { return error_; }
+
+ void clearError() {
+ if (error_) {
+ error_->reset();
+ }
+ }
+
+ bool done() const {
+ MOZ_ASSERT(cur_ <= end_);
+ return cur_ == end_;
+ }
+ bool resilientMode() const { return resilientMode_; }
+
+ size_t bytesRemain() const {
+ MOZ_ASSERT(end_ >= cur_);
+ return size_t(end_ - cur_);
+ }
+ // pos must be a value previously returned from currentPosition.
+ void rollbackPosition(const uint8_t* pos) { cur_ = pos; }
+ const uint8_t* currentPosition() const { return cur_; }
+ size_t currentOffset() const { return offsetInModule_ + (cur_ - beg_); }
+ const uint8_t* begin() const { return beg_; }
+ const uint8_t* end() const { return end_; }
+
+ // Peek at the next byte, if it exists, without advancing the position.
+
+ bool peekByte(uint8_t* byte) {
+ if (done()) {
+ return false;
+ }
+ *byte = *cur_;
+ return true;
+ }
+
+ // Fixed-size encoding operations simply copy the literal bytes (without
+ // attempting to align).
+
+ [[nodiscard]] bool readFixedU8(uint8_t* i) { return read<uint8_t>(i); }
+ [[nodiscard]] bool readFixedU32(uint32_t* u) { return read<uint32_t>(u); }
+ [[nodiscard]] bool readFixedF32(float* f) { return read<float>(f); }
+ [[nodiscard]] bool readFixedF64(double* d) { return read<double>(d); }
+#ifdef ENABLE_WASM_SIMD
+ [[nodiscard]] bool readFixedV128(V128* d) {
+ for (unsigned i = 0; i < 16; i++) {
+ if (!read<uint8_t>(d->bytes + i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+#endif
+
+ // Variable-length encodings that all use LEB128.
+
+ [[nodiscard]] bool readVarU32(uint32_t* out) {
+ return readVarU<uint32_t>(out);
+ }
+ [[nodiscard]] bool readVarS32(int32_t* out) { return readVarS<int32_t>(out); }
+ [[nodiscard]] bool readVarU64(uint64_t* out) {
+ return readVarU<uint64_t>(out);
+ }
+ [[nodiscard]] bool readVarS64(int64_t* out) { return readVarS<int64_t>(out); }
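+
+  // For illustration, LEB128 encodes the unsigned value 624485 as the byte
+  // sequence E5 8E 26 (low seven bits first, high bit set on every byte but
+  // the last), and the signed value -123456 as C0 BB 78.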
+
+ [[nodiscard]] ValType uncheckedReadValType() {
+ uint8_t code = uncheckedReadFixedU8();
+ switch (code) {
+ case uint8_t(TypeCode::FuncRef):
+ case uint8_t(TypeCode::ExternRef):
+ return RefType::fromTypeCode(TypeCode(code), true);
+ case uint8_t(TypeCode::Ref):
+ case uint8_t(TypeCode::NullableRef): {
+ bool nullable = code == uint8_t(TypeCode::NullableRef);
+
+ uint8_t nextByte;
+ peekByte(&nextByte);
+
+ if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
+ uint8_t code = uncheckedReadFixedU8();
+ return RefType::fromTypeCode(TypeCode(code), nullable);
+ }
+
+ int32_t x = uncheckedReadVarS32();
+ return RefType::fromTypeIndex(x, nullable);
+ }
+ default:
+ return ValType::fromNonRefTypeCode(TypeCode(code));
+ }
+ }
+ [[nodiscard]] bool readValType(uint32_t numTypes, const FeatureArgs& features,
+ ValType* type) {
+ static_assert(uint8_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ uint8_t code;
+ if (!readFixedU8(&code)) {
+ return fail("expected type code");
+ }
+ switch (code) {
+ case uint8_t(TypeCode::I32):
+ case uint8_t(TypeCode::F32):
+ case uint8_t(TypeCode::F64):
+ case uint8_t(TypeCode::I64):
+ *type = ValType::fromNonRefTypeCode(TypeCode(code));
+ return true;
+#ifdef ENABLE_WASM_SIMD
+ case uint8_t(TypeCode::V128):
+ if (!features.v128) {
+ return fail("v128 not enabled");
+ }
+ *type = ValType::fromNonRefTypeCode(TypeCode(code));
+ return true;
+#endif
+#ifdef ENABLE_WASM_REFTYPES
+ case uint8_t(TypeCode::FuncRef):
+ case uint8_t(TypeCode::ExternRef):
+ if (!features.refTypes) {
+ return fail("reference types not enabled");
+ }
+ *type = RefType::fromTypeCode(TypeCode(code), true);
+ return true;
+#endif
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ case uint8_t(TypeCode::Ref):
+ case uint8_t(TypeCode::NullableRef): {
+ if (!features.functionReferences) {
+ return fail("(ref T) types not enabled");
+ }
+ bool nullable = code == uint8_t(TypeCode::NullableRef);
+ RefType refType;
+ if (!readHeapType(numTypes, features, nullable, &refType)) {
+ return false;
+ }
+ *type = refType;
+ return true;
+ }
+#endif
+#ifdef ENABLE_WASM_GC
+ case uint8_t(TypeCode::EqRef):
+ if (!features.gcTypes) {
+ return fail("gc types not enabled");
+ }
+ *type = RefType::fromTypeCode(TypeCode(code), true);
+ return true;
+#endif
+ default:
+ return fail("bad type");
+ }
+ }
+ [[nodiscard]] bool readValType(const TypeContext& types,
+ const FeatureArgs& features, ValType* type) {
+ if (!readValType(types.length(), features, type)) {
+ return false;
+ }
+ if (type->isTypeIndex() &&
+ !validateTypeIndex(types, features, type->refType())) {
+ return false;
+ }
+ return true;
+ }
+ [[nodiscard]] bool readHeapType(uint32_t numTypes,
+ const FeatureArgs& features, bool nullable,
+ RefType* type) {
+ uint8_t nextByte;
+ if (!peekByte(&nextByte)) {
+ return fail("expected heap type code");
+ }
+
+ if ((nextByte & SLEB128SignMask) == SLEB128SignBit) {
+ uint8_t code;
+ if (!readFixedU8(&code)) {
+ return false;
+ }
+
+ switch (code) {
+ case uint8_t(TypeCode::FuncRef):
+ case uint8_t(TypeCode::ExternRef):
+ *type = RefType::fromTypeCode(TypeCode(code), nullable);
+ return true;
+#ifdef ENABLE_WASM_GC
+ case uint8_t(TypeCode::EqRef):
+ if (!features.gcTypes) {
+ return fail("gc types not enabled");
+ }
+ *type = RefType::fromTypeCode(TypeCode(code), nullable);
+ return true;
+#endif
+ default:
+ return fail("invalid heap type");
+ }
+ }
+
+#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+ if (features.functionReferences) {
+ int32_t x;
+ if (!readVarS32(&x) || x < 0 || uint32_t(x) >= numTypes) {
+ return fail("invalid heap type index");
+ }
+ *type = RefType::fromTypeIndex(x, nullable);
+ return true;
+ }
+#endif
+ return fail("invalid heap type");
+ }
+ [[nodiscard]] bool readHeapType(const TypeContext& types,
+ const FeatureArgs& features, bool nullable,
+ RefType* type) {
+ if (!readHeapType(types.length(), features, nullable, type)) {
+ return false;
+ }
+
+ if (type->isTypeIndex() && !validateTypeIndex(types, features, *type)) {
+ return false;
+ }
+ return true;
+ }
+ [[nodiscard]] bool readRefType(uint32_t numTypes, const FeatureArgs& features,
+ RefType* type) {
+ ValType valType;
+ if (!readValType(numTypes, features, &valType)) {
+ return false;
+ }
+ if (!valType.isReference()) {
+ return fail("bad type");
+ }
+ *type = valType.refType();
+ return true;
+ }
+ [[nodiscard]] bool readRefType(const TypeContext& types,
+ const FeatureArgs& features, RefType* type) {
+ ValType valType;
+ if (!readValType(types, features, &valType)) {
+ return false;
+ }
+ if (!valType.isReference()) {
+ return fail("bad type");
+ }
+ *type = valType.refType();
+ return true;
+ }
+ [[nodiscard]] bool validateTypeIndex(const TypeContext& types,
+ const FeatureArgs& features,
+ RefType type) {
+ MOZ_ASSERT(type.isTypeIndex());
+
+ if (features.gcTypes && types[type.typeIndex()].isStructType()) {
+ return true;
+ }
+ return fail("type index references an invalid type");
+ }
+ [[nodiscard]] bool readOp(OpBytes* op) {
+ static_assert(size_t(Op::Limit) == 256, "fits");
+ uint8_t u8;
+ if (!readFixedU8(&u8)) {
+ return false;
+ }
+ op->b0 = u8;
+ if (MOZ_LIKELY(!IsPrefixByte(u8))) {
+ return true;
+ }
+ if (!readVarU32(&op->b1)) {
+ return false;
+ }
+ return true;
+ }
+
+ // See writeBytes comment.
+
+ [[nodiscard]] bool readBytes(uint32_t numBytes,
+ const uint8_t** bytes = nullptr) {
+ if (bytes) {
+ *bytes = cur_;
+ }
+ if (bytesRemain() < numBytes) {
+ return false;
+ }
+ cur_ += numBytes;
+ return true;
+ }
+
+ // See "section" description in Encoder.
+
+ [[nodiscard]] bool readSectionHeader(uint8_t* id, SectionRange* range);
+
+ [[nodiscard]] bool startSection(SectionId id, ModuleEnvironment* env,
+ MaybeSectionRange* range,
+ const char* sectionName);
+ [[nodiscard]] bool finishSection(const SectionRange& range,
+ const char* sectionName);
+
+ // Custom sections do not cause validation errors unless the error is in
+ // the section header itself.
+
+ [[nodiscard]] bool startCustomSection(const char* expected,
+ size_t expectedLength,
+ ModuleEnvironment* env,
+ MaybeSectionRange* range);
+
+ template <size_t NameSizeWith0>
+ [[nodiscard]] bool startCustomSection(const char (&name)[NameSizeWith0],
+ ModuleEnvironment* env,
+ MaybeSectionRange* range) {
+ MOZ_ASSERT(name[NameSizeWith0 - 1] == '\0');
+ return startCustomSection(name, NameSizeWith0 - 1, env, range);
+ }
+
+ void finishCustomSection(const char* name, const SectionRange& range);
+ void skipAndFinishCustomSection(const SectionRange& range);
+
+ [[nodiscard]] bool skipCustomSection(ModuleEnvironment* env);
+
+ // The Name section has its own optional subsections.
+
+ [[nodiscard]] bool startNameSubsection(NameType nameType,
+ Maybe<uint32_t>* endOffset);
+ [[nodiscard]] bool finishNameSubsection(uint32_t endOffset);
+ [[nodiscard]] bool skipNameSubsection();
+
+ // The infallible "unchecked" decoding functions can be used when we are
+ // sure that the bytes are well-formed (by construction or due to previous
+ // validation).
+
+ uint8_t uncheckedReadFixedU8() { return uncheckedRead<uint8_t>(); }
+ uint32_t uncheckedReadFixedU32() { return uncheckedRead<uint32_t>(); }
+ void uncheckedReadFixedF32(float* out) { uncheckedRead<float>(out); }
+ void uncheckedReadFixedF64(double* out) { uncheckedRead<double>(out); }
+ template <typename UInt>
+ UInt uncheckedReadVarU() {
+ static const unsigned numBits = sizeof(UInt) * CHAR_BIT;
+ static const unsigned remainderBits = numBits % 7;
+ static const unsigned numBitsInSevens = numBits - remainderBits;
+ UInt decoded = 0;
+ uint32_t shift = 0;
+ do {
+ uint8_t byte = *cur_++;
+ if (!(byte & 0x80)) {
+ return decoded | (UInt(byte) << shift);
+ }
+ decoded |= UInt(byte & 0x7f) << shift;
+ shift += 7;
+ } while (shift != numBitsInSevens);
+ uint8_t byte = *cur_++;
+ MOZ_ASSERT(!(byte & 0xf0));
+ return decoded | (UInt(byte) << numBitsInSevens);
+ }
+ uint32_t uncheckedReadVarU32() { return uncheckedReadVarU<uint32_t>(); }
+ int32_t uncheckedReadVarS32() {
+ int32_t i32 = 0;
+ MOZ_ALWAYS_TRUE(readVarS32(&i32));
+ return i32;
+ }
+ uint64_t uncheckedReadVarU64() { return uncheckedReadVarU<uint64_t>(); }
+ int64_t uncheckedReadVarS64() {
+ int64_t i64 = 0;
+ MOZ_ALWAYS_TRUE(readVarS64(&i64));
+ return i64;
+ }
+ Op uncheckedReadOp() {
+ static_assert(size_t(Op::Limit) == 256, "fits");
+ uint8_t u8 = uncheckedReadFixedU8();
+ return u8 != UINT8_MAX ? Op(u8) : Op(uncheckedReadFixedU8() + UINT8_MAX);
+ }
+};
+
+// The local entries are part of function bodies and thus serialized by both
+// wasm and asm.js and decoded as part of both validation and compilation.
+
+[[nodiscard]] bool EncodeLocalEntries(Encoder& d, const ValTypeVector& locals);
+
+// This performs no validation; the local entries must already have been
+// validated by an earlier pass.
+
+[[nodiscard]] bool DecodeValidatedLocalEntries(Decoder& d,
+ ValTypeVector* locals);
+
+// This validates the entries.
+
+[[nodiscard]] bool DecodeLocalEntries(Decoder& d, const TypeContext& types,
+ const FeatureArgs& features,
+ ValTypeVector* locals);
+
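+// For illustration, the locals declaration for (local i32 i32 f64) is encoded
+// as the run-length pairs 02 02 7F 01 7C: two entries, then (count 2, i32) and
+// (count 1, f64).
+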
+// Returns whether the given [begin, end) prefix of a module's bytecode starts a
+// code section and, if so, returns the SectionRange of that code section.
+// Note that, even if this function returns 'false', [begin, end) may actually
+// be a valid module in the special case when there are no function defs and the
+// code section is not present. Such modules can be valid so the caller must
+// handle this special case.
+
+[[nodiscard]] bool StartsCodeSection(const uint8_t* begin, const uint8_t* end,
+ SectionRange* range);
+
+// Calling DecodeModuleEnvironment decodes all sections up to the code section
+// and performs full validation of all those sections. The client must then
+// decode the code section itself, reusing ValidateFunctionBody if necessary,
+// and finally call DecodeModuleTail to decode all remaining sections after the
+// code section (again, performing full validation).
+
+[[nodiscard]] bool DecodeModuleEnvironment(Decoder& d, ModuleEnvironment* env);
+
+[[nodiscard]] bool ValidateFunctionBody(const ModuleEnvironment& env,
+ uint32_t funcIndex, uint32_t bodySize,
+ Decoder& d);
+
+[[nodiscard]] bool DecodeModuleTail(Decoder& d, ModuleEnvironment* env);
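+
+// A minimal driver sketch (hypothetical caller; the construction of `env`, the
+// function-definition loop bounds, and error reporting are elided):
+//
+//   Decoder d(begin, end, /* offsetInModule = */ 0, &error);
+//   if (!DecodeModuleEnvironment(d, &env)) {
+//     return false;
+//   }
+//   for (/* each function def, giving funcIndex and bodySize */) {
+//     if (!ValidateFunctionBody(env, funcIndex, bodySize, d)) {
+//       return false;
+//     }
+//   }
+//   return DecodeModuleTail(d, &env);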
+
+void ConvertMemoryPagesToBytes(Limits* memory);
+
+// Validate an entire module, returning true if the module was validated
+// successfully. If Validate returns false:
+// - if *error is null, the caller should report out-of-memory
+// - otherwise, there was a legitimate error described by *error
+
+[[nodiscard]] bool Validate(JSContext* cx, const ShareableBytes& bytecode,
+ UniqueChars* error);
+
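+// A minimal usage sketch (hypothetical caller; the reporting calls are
+// elided):
+//
+//   UniqueChars error;
+//   if (!Validate(cx, bytecode, &error)) {
+//     if (!error) {
+//       // Out of memory: report OOM to the caller.
+//     } else {
+//       // Report the validation error described by *error.
+//     }
+//     return false;
+//   }
+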
+} // namespace wasm
+} // namespace js
+
+#endif  // wasm_validate_h
diff --git a/js/src/wasm/cranelift/Cargo.toml b/js/src/wasm/cranelift/Cargo.toml
new file mode 100644
index 0000000000..663d2cd281
--- /dev/null
+++ b/js/src/wasm/cranelift/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "baldrdash"
+version = "0.1.0"
+authors = ["The Spidermonkey and Cranelift developers"]
+edition = "2018"
+
+[lib]
+crate-type = ["rlib"]
+name = "baldrdash"
+
+[dependencies]
+# The build system redirects the versions of cranelift-codegen and
+# cranelift-wasm to pinned commits. If you want to update Cranelift in Gecko,
+# you should update the following $TOP_LEVEL/Cargo.toml file: look for the
+# revision (rev) hashes of both cranelift dependencies (codegen and wasm).
+cranelift-codegen = { version = "0.68.0", default-features = false }
+cranelift-wasm = { version = "0.68.0" }
+log = { version = "0.4.6", default-features = false, features = ["release_max_level_info"] }
+env_logger = "0.8"
+smallvec = "1.0"
+
+[build-dependencies]
+bindgen = {version = "0.56", default-features = false} # disable `logging` to reduce code size
+
+[features]
+default = ['cranelift-codegen/std']
+cranelift_x86 = ['cranelift-codegen/x64']
+cranelift_arm32 = ['cranelift-codegen/arm32']
+cranelift_arm64 = ['cranelift-codegen/arm64']
+
+# The "none" support is a lie (so far): Cranelift has to include support for
+# one ISA at the moment, so request to include support for a small one: riscv.
+cranelift_none = ['cranelift-codegen/riscv']
+
+# Uncomment this to enable perf support in release mode.
+#[profile.release]
+#debug = true
diff --git a/js/src/wasm/cranelift/baldrapi.h b/js/src/wasm/cranelift/baldrapi.h
new file mode 100644
index 0000000000..58a999785b
--- /dev/null
+++ b/js/src/wasm/cranelift/baldrapi.h
@@ -0,0 +1,283 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This is an ADT-style C API to the WebAssembly per-function compilation state,
+// allowing Rust to access constant metadata and produce output.
+//
+// This file is input to Rust's bindgen, so as to create primitive APIs for the
+// Cranelift pipeline to access compilation metadata. The actual Rust API then
+// wraps these primitive APIs. See src/bindings/mod.rs.
+//
+// This file can be included in SpiderMonkey's C++ code, where all the prefixes
+// must be obeyed. The purpose of the prefixes is to avoid type confusion. See
+// js/src/wasm/WasmCraneliftCompile.cpp.
+
+#ifndef wasm_cranelift_baldrapi_h
+#define wasm_cranelift_baldrapi_h
+
+// DO NOT INCLUDE SPIDERMONKEY HEADER FILES INTO THIS FILE.
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+#include "wasm/WasmConstants.h"
+
+// wasm/*.{cpp,h}, class and struct types that are opaque to us here
+namespace js {
+namespace wasm {
+// wasm/WasmGenerator.h
+struct FuncCompileInput;
+// wasm/WasmTypes.h
+class GlobalDesc;
+class FuncType;
+class TypeIdDesc;
+struct TableDesc;
+// wasm/WasmValidate.h
+struct ModuleEnvironment;
+} // namespace wasm
+} // namespace js
+
+// This struct contains all the information that can be computed once for the
+// entire process and then should never change. It contains a mix of CPU
+// feature detection flags, and static information the C++ compiler has access
+// to, but which can't be automatically provided to Rust.
+
+struct CraneliftStaticEnvironment {
+ bool has_sse2;
+ bool has_sse3;
+ bool has_sse41;
+ bool has_sse42;
+ bool has_popcnt;
+ bool has_avx;
+ bool has_bmi1;
+ bool has_bmi2;
+ bool has_lzcnt;
+ bool platform_is_windows;
+ bool ref_types_enabled;
+ bool threads_enabled;
+ bool v128_enabled;
+ size_t static_memory_bound;
+ size_t memory_guard_size;
+ size_t memory_base_tls_offset;
+ size_t instance_tls_offset;
+ size_t interrupt_tls_offset;
+ size_t cx_tls_offset;
+ size_t realm_cx_offset;
+ size_t realm_tls_offset;
+ size_t realm_func_import_tls_offset;
+ size_t size_of_wasm_frame;
+
+ // Not bindgen'd because it's inlined.
+ inline CraneliftStaticEnvironment();
+};
+
+// This structure proxies the C++ ModuleEnvironment and the information it
+// contains.
+
+struct CraneliftModuleEnvironment {
+ // This is a pointer and not a reference to work-around a bug in bindgen.
+ const js::wasm::ModuleEnvironment* env;
+ uint32_t min_memory_length;
+
+ // Not bindgen'd because it's inlined.
+ explicit inline CraneliftModuleEnvironment(
+ const js::wasm::ModuleEnvironment& env);
+};
+
+struct BD_Stackmaps;
+
+// Data for a single wasm function to be compiled by Cranelift.
+// This information is all from the corresponding `js::wasm::FuncCompileInput`
+// struct, but formatted in a Rust-friendly way.
+
+struct CraneliftFuncCompileInput {
+ const uint8_t* bytecode;
+ size_t bytecode_size;
+ uint32_t index;
+ uint32_t offset_in_module;
+
+ // The stackmaps sink to use when compiling this function.
+ BD_Stackmaps* stackmaps;
+
+ // Not bindgen'd because it's inlined.
+ explicit inline CraneliftFuncCompileInput(const js::wasm::FuncCompileInput&);
+};
+
+// A single entry in the metadata array provided after the compilation of a
+// single wasm function. The meaning of the `extra` field depends on the
+// `which` value.
+//
+// XXX should we use a union for this instead? bindgen seems to be able to
+// handle them, with a lot of unsafe'ing.
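+//
+// As used on the Rust side (see src/bindings/mod.rs), `extra` holds the callee
+// function index for DirectCall, the Trap code for Trap, the
+// BD_SymbolicAddress for SymbolicAccess, and is unused (zero) for IndirectCall.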
+
+struct CraneliftMetadataEntry {
+ enum Which { DirectCall, IndirectCall, Trap, SymbolicAccess } which;
+ uint32_t code_offset;
+ uint32_t module_bytecode_offset;
+ size_t extra;
+};
+
+// The result of a single function compilation, containing the machine code
+// generated by Cranelift, as well as some useful metadata to generate the
+// prologue/epilogue etc.
+
+struct CraneliftCompiledFunc {
+ size_t num_metadata;
+ const CraneliftMetadataEntry* metadatas;
+
+ size_t frame_pushed;
+ bool contains_calls;
+
+ // The compiled code comprises machine code, relocatable jump tables, and
+ // copyable read-only data, concatenated without padding. The "...Size"
+ // members give the sizes of the individual sections. The code starts at
+  // offset 0; the other offsets can be derived from the sizes.
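+  // Concretely: the jump tables begin at offset code_size and the read-only
+  // data at offset code_size + jumptables_size.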
+ const uint8_t* code;
+ size_t code_size;
+ size_t jumptables_size;
+ size_t rodata_size;
+ size_t total_size;
+
+ // Relocation information for instructions that reference into the jump tables
+ // and read-only data segments. The relocation information is
+ // machine-specific.
+ size_t num_rodata_relocs;
+ const uint32_t* rodata_relocs;
+};
+
+// Possible constant values for initializing globals.
+
+struct BD_ConstantValue {
+ js::wasm::TypeCode t;
+ union {
+ int32_t i32;
+ int64_t i64;
+ float f32;
+ double f64;
+ void* r;
+ uint8_t v128[16]; // Little-endian
+ } u;
+};
+
+struct BD_ValType {
+ uint32_t packed;
+};
+
+// A subset of the wasm SymbolicAddress enum. This is converted to the wasm
+// enum using ToSymbolicAddress in WasmCraneliftCompile.
+
+enum class BD_SymbolicAddress : uint32_t {
+ MemoryGrow = 0,
+ MemorySize,
+ MemoryCopy,
+ MemoryCopyShared,
+ DataDrop,
+ MemoryFill,
+ MemoryFillShared,
+ MemoryInit,
+ TableSize,
+ TableGrow,
+ TableGet,
+ TableSet,
+ TableCopy,
+ TableFill,
+ TableInit,
+ ElemDrop,
+ RefFunc,
+ FloorF32,
+ FloorF64,
+ CeilF32,
+ CeilF64,
+ NearestF32,
+ NearestF64,
+ TruncF32,
+ TruncF64,
+ PreBarrier,
+ PostBarrier,
+ WaitI32,
+ WaitI64,
+ Wake,
+ Limit
+};
+
+extern "C" {
+js::wasm::TypeCode env_unpack(BD_ValType type);
+
+size_t env_num_tables(const CraneliftModuleEnvironment* env);
+size_t env_num_globals(const CraneliftModuleEnvironment* env);
+size_t env_num_types(const CraneliftModuleEnvironment* env);
+size_t env_num_funcs(const CraneliftModuleEnvironment* env);
+size_t env_num_elems(const CraneliftModuleEnvironment* env);
+size_t env_num_datas(const CraneliftModuleEnvironment* env);
+js::wasm::TypeCode env_elem_typecode(const CraneliftModuleEnvironment* env,
+ uint32_t index);
+bool env_is_func_valid_for_ref(const CraneliftModuleEnvironment* env,
+ uint32_t index);
+/// Returns the maximum memory size as a uint32, or UINT32_MAX if not defined.
+uint32_t env_max_memory(const CraneliftModuleEnvironment* env);
+
+bool env_uses_shared_memory(const CraneliftModuleEnvironment* env);
+bool env_has_memory(const CraneliftModuleEnvironment* env);
+const js::wasm::FuncType* env_type(const CraneliftModuleEnvironment* env,
+ size_t typeIndex);
+const js::wasm::FuncType* env_func_sig(const CraneliftModuleEnvironment* env,
+ size_t funcIndex);
+const js::wasm::TypeIdDesc* env_func_sig_id(
+ const CraneliftModuleEnvironment* env, size_t funcIndex);
+size_t env_func_sig_index(const CraneliftModuleEnvironment* env,
+ size_t funcIndex);
+size_t env_func_import_tls_offset(const CraneliftModuleEnvironment* env,
+ size_t funcIndex);
+bool env_func_is_import(const CraneliftModuleEnvironment* env,
+ size_t funcIndex);
+const js::wasm::FuncType* env_signature(const CraneliftModuleEnvironment* env,
+ size_t sigIndex);
+const js::wasm::TypeIdDesc* env_signature_id(
+ const CraneliftModuleEnvironment* env, size_t sigIndex);
+const js::wasm::TableDesc* env_table(const CraneliftModuleEnvironment* env,
+ size_t tableIndex);
+const js::wasm::GlobalDesc* env_global(const CraneliftModuleEnvironment* env,
+ size_t globalIndex);
+
+bool global_isConstant(const js::wasm::GlobalDesc*);
+bool global_isIndirect(const js::wasm::GlobalDesc*);
+BD_ConstantValue global_constantValue(const js::wasm::GlobalDesc*);
+js::wasm::TypeCode global_type(const js::wasm::GlobalDesc*);
+size_t global_tlsOffset(const js::wasm::GlobalDesc*);
+
+size_t table_tlsOffset(const js::wasm::TableDesc*);
+uint32_t table_initialLimit(const js::wasm::TableDesc*);
+// Returns the maximum limit as a uint32, or UINT32_MAX if not defined.
+uint32_t table_maximumLimit(const js::wasm::TableDesc*);
+js::wasm::TypeCode table_elementTypeCode(const js::wasm::TableDesc*);
+
+size_t funcType_numArgs(const js::wasm::FuncType*);
+const BD_ValType* funcType_args(const js::wasm::FuncType*);
+size_t funcType_numResults(const js::wasm::FuncType*);
+const BD_ValType* funcType_results(const js::wasm::FuncType*);
+
+js::wasm::TypeIdDescKind funcType_idKind(const js::wasm::TypeIdDesc*);
+size_t funcType_idImmediate(const js::wasm::TypeIdDesc*);
+size_t funcType_idTlsOffset(const js::wasm::TypeIdDesc*);
+
+void stackmaps_add(BD_Stackmaps* sink, const uint32_t* bitMap,
+ size_t mappedWords, size_t argsSize, size_t codeOffset);
+
+} // extern "C"
+
+#endif // wasm_cranelift_baldrapi_h
diff --git a/js/src/wasm/cranelift/build.rs b/js/src/wasm/cranelift/build.rs
new file mode 100644
index 0000000000..3c963b373c
--- /dev/null
+++ b/js/src/wasm/cranelift/build.rs
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Build script for the Baldr <-> Cranelift bindings.
+//!
+//! This file is executed by cargo when this crate is built. It generates the
+//! `$OUT_DIR/bindings.rs` file which is then included by `src/bindings/low_level.rs`.
+
+extern crate bindgen;
+
+use std::env;
+use std::fs::File;
+use std::io::prelude::*;
+use std::path::PathBuf;
+
+fn main() {
+ // Tell Cargo to regenerate the bindings if the header file changes.
+ println!("cargo:rerun-if-changed=baldrapi.h");
+
+ let mut generator = bindgen::builder()
+ .parse_callbacks(Box::new(bindgen::CargoCallbacks))
+ .disable_name_namespacing()
+ .size_t_is_usize(true)
+ // We whitelist the Baldr C functions and get the associated types for free.
+ .whitelist_function("env_.*")
+ .whitelist_function("global_.*")
+ .whitelist_function("table_.*")
+ .whitelist_function("funcType_.*")
+ .whitelist_function("stackmaps_.*")
+ .whitelist_type("Cranelift.*")
+ // The enum classes defined in baldrapi.h and WasmBinaryConstants are all Rust-safe.
+ .rustified_enum("BD_.*|Trap|TypeCode|TypeIdDescKind")
+ .whitelist_type("BD_.*|Trap|TypeCode|TypeIdDescKind")
+ .header("baldrapi.h")
+ .clang_args(&[
+ "-x",
+ "c++",
+ "-std=gnu++14",
+ "-fno-sized-deallocation",
+ "-fno-aligned-new",
+ "-DRUST_BINDGEN",
+ ])
+ .clang_arg("-I../..");
+
+ match env::var_os("MOZ_TOPOBJDIR") {
+ Some(path) => {
+ let path = PathBuf::from(path).join("js/src/rust/extra-bindgen-flags");
+
+ let mut extra_flags = String::new();
+ File::open(&path)
+ .expect("Failed to open extra-bindgen-flags file")
+ .read_to_string(&mut extra_flags)
+ .expect("Failed to read extra-bindgen-flags file");
+
+ let display_path = path.to_str().expect("path is utf8 encoded");
+ println!("cargo:rerun-if-changed={}", display_path);
+
+ let extra_flags: Vec<String> = extra_flags
+ .split_whitespace()
+ .map(|s| s.to_owned())
+ .collect();
+ for flag in extra_flags {
+ generator = generator.clang_arg(flag);
+ }
+ }
+ None => {
+            println!("cargo:warning=MOZ_TOPOBJDIR should be set by default; otherwise the build is not guaranteed to finish.");
+ }
+ }
+
+ let command_line_opts = generator.command_line_flags();
+
+ // In case of error, bindgen prints to stderr, and the yielded error is the empty type ().
+ let bindings = generator.generate().unwrap_or_else(|_err| {
+ panic!(
+ r#"Unable to generate baldrapi.h bindings:
+- flags: {}
+"#,
+ command_line_opts.join(" "),
+ );
+ });
+
+ // Write the bindings to the $OUT_DIR/bindings.rs file.
+ let out_path = PathBuf::from(env::var("OUT_DIR").unwrap());
+ bindings
+ .write_to_file(out_path.join("bindings.rs"))
+ .expect("Couldn't write bindings!");
+}
diff --git a/js/src/wasm/cranelift/clifapi.h b/js/src/wasm/cranelift/clifapi.h
new file mode 100644
index 0000000000..f792ecc104
--- /dev/null
+++ b/js/src/wasm/cranelift/clifapi.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Interface for calling from C++ into Cranelift.
+//
+// The functions declared here are implemented in src/lib.rs
+
+#ifndef wasm_cranelift_clifapi_h
+#define wasm_cranelift_clifapi_h
+
+#include "wasm/cranelift/baldrapi.h"
+
+// A handle to a Cranelift compiler context.
+// This type is always opaque on the C++ side.
+struct CraneliftCompiler;
+
+extern "C" {
+
+// Returns true if the platform is supported by Cranelift.
+bool cranelift_supports_platform();
+
+// A static initializer that must be called only once.
+void cranelift_initialize();
+
+// Allocate a Cranelift compiler for compiling functions in `env`.
+//
+// The compiler can be used for compiling multiple functions, but it must only
+// be used from a single thread.
+//
+// Returns NULL if a Cranelift compiler could not be created for the current CPU
+// architecture.
+//
+// The memory associated with the compiler must be freed by calling
+// `cranelift_compiler_destroy`.
+CraneliftCompiler* cranelift_compiler_create(
+ const CraneliftStaticEnvironment* staticEnv,
+ const CraneliftModuleEnvironment* env);
+
+// Destroy a Cranelift compiler object.
+//
+// This releases all resources used by the compiler.
+void cranelift_compiler_destroy(CraneliftCompiler* compiler);
+
+// Compile a single function with `compiler`.
+//
+// The function described by `data` is compiled.
+//
+// Returns true on success.
+//
+// If this function returns false, an error message is returned in `*error`.
+// This string must be freed by `cranelift_compiler_free_error()` (it is on the
+// Rust heap so must not be freed by `free()` or similar).
+bool cranelift_compile_function(CraneliftCompiler* compiler,
+ const CraneliftFuncCompileInput* data,
+ CraneliftCompiledFunc* result, char** error);
+
+// Free an error string returned by `cranelift_compile_function()`.
+void cranelift_compiler_free_error(char* error);
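+
+// A minimal usage sketch (hypothetical caller; `staticEnv`, `env`, and `inputs`
+// are assumed to exist, and error handling is abbreviated):
+//
+//   CraneliftCompiler* compiler = cranelift_compiler_create(&staticEnv, &env);
+//   for (const CraneliftFuncCompileInput& input : inputs) {
+//     CraneliftCompiledFunc func;
+//     char* error = nullptr;
+//     if (!cranelift_compile_function(compiler, &input, &func, &error)) {
+//       // Consume *error, then free it on the Rust heap:
+//       cranelift_compiler_free_error(error);
+//     }
+//   }
+//   cranelift_compiler_destroy(compiler);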
+
+} // extern "C"
+
+#endif // wasm_cranelift_clifapi_h
diff --git a/js/src/wasm/cranelift/rustfmt.toml b/js/src/wasm/cranelift/rustfmt.toml
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/js/src/wasm/cranelift/rustfmt.toml
diff --git a/js/src/wasm/cranelift/src/bindings/low_level.rs b/js/src/wasm/cranelift/src/bindings/low_level.rs
new file mode 100644
index 0000000000..b3e066e0b6
--- /dev/null
+++ b/js/src/wasm/cranelift/src/bindings/low_level.rs
@@ -0,0 +1,27 @@
+/* Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! This module exports the bindings generated by bindgen from the baldrapi.h file.
+//!
+//! The Baldr API consists of a set of C functions and some associated types.
+
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(non_snake_case)]
+// We need to allow dead code because the Rustc compiler complains about variants never being
+// constructed in TypeCode, which is true because these values come from C++.
+#![allow(dead_code)]
+
+include!(concat!(env!("OUT_DIR"), "/bindings.rs"));
diff --git a/js/src/wasm/cranelift/src/bindings/mod.rs b/js/src/wasm/cranelift/src/bindings/mod.rs
new file mode 100644
index 0000000000..88ba07539c
--- /dev/null
+++ b/js/src/wasm/cranelift/src/bindings/mod.rs
@@ -0,0 +1,528 @@
+/* Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Safe wrappers to the low-level ABI. This re-exports all types in low_level but none of the
+// functions.
+
+use std::{mem, slice};
+
+use cranelift_codegen::binemit::CodeOffset;
+use cranelift_codegen::cursor::FuncCursor;
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+use cranelift_codegen::ir::{self, InstBuilder, SourceLoc};
+use cranelift_codegen::isa;
+
+use cranelift_wasm::{
+ wasmparser, FuncIndex, GlobalIndex, SignatureIndex, TableIndex, TypeIndex, WasmResult,
+};
+
+use crate::compile;
+use crate::utils::BasicError;
+use crate::wasm2clif::REF_TYPE;
+
+use self::low_level::*;
+
+pub use self::low_level::BD_SymbolicAddress as SymbolicAddress;
+pub use self::low_level::CraneliftCompiledFunc as CompiledFunc;
+pub use self::low_level::CraneliftFuncCompileInput as FuncCompileInput;
+pub use self::low_level::CraneliftMetadataEntry as MetadataEntry;
+pub use self::low_level::CraneliftModuleEnvironment as LowLevelModuleEnvironment;
+pub use self::low_level::CraneliftStaticEnvironment as StaticEnvironment;
+pub use self::low_level::Trap;
+pub use self::low_level::TypeIdDescKind;
+
+mod low_level;
+
+/// Converts a `TypeCode` into the equivalent Cranelift type, if it's a known type, or an error
+/// otherwise.
+#[inline]
+fn typecode_to_type(type_code: TypeCode) -> WasmResult<Option<ir::Type>> {
+ match type_code {
+ TypeCode::I32 => Ok(Some(ir::types::I32)),
+ TypeCode::I64 => Ok(Some(ir::types::I64)),
+ TypeCode::F32 => Ok(Some(ir::types::F32)),
+ TypeCode::F64 => Ok(Some(ir::types::F64)),
+ TypeCode::V128 => Ok(Some(ir::types::I8X16)),
+ TypeCode::FuncRef => Ok(Some(REF_TYPE)),
+ TypeCode::ExternRef => Ok(Some(REF_TYPE)),
+ TypeCode::BlockVoid => Ok(None),
+ _ => Err(BasicError::new(format!("unknown type code: {:?}", type_code)).into()),
+ }
+}
+
+/// Convert a non-void `TypeCode` into the equivalent Cranelift type.
+#[inline]
+pub(crate) fn typecode_to_nonvoid_type(type_code: TypeCode) -> WasmResult<ir::Type> {
+ Ok(typecode_to_type(type_code)?.expect("unexpected void type"))
+}
+
+/// Convert a u32 into a `BD_SymbolicAddress`.
+impl From<u32> for SymbolicAddress {
+ fn from(x: u32) -> SymbolicAddress {
+ assert!(x < SymbolicAddress::Limit as u32);
+ unsafe { mem::transmute(x) }
+ }
+}
+
+#[derive(Clone, Copy)]
+pub struct GlobalDesc(*const low_level::GlobalDesc);
+
+impl GlobalDesc {
+ pub fn value_type(self) -> WasmResult<ir::Type> {
+ let type_code = unsafe { low_level::global_type(self.0) };
+ typecode_to_nonvoid_type(type_code)
+ }
+
+ pub fn is_constant(self) -> bool {
+ unsafe { low_level::global_isConstant(self.0) }
+ }
+
+ pub fn is_indirect(self) -> bool {
+ unsafe { low_level::global_isIndirect(self.0) }
+ }
+
+ /// Insert an instruction at `pos` that materializes the constant value.
+ pub fn emit_constant(self, pos: &mut FuncCursor) -> WasmResult<ir::Value> {
+ unsafe {
+ let v = low_level::global_constantValue(self.0);
+ match v.t {
+ TypeCode::I32 => Ok(pos.ins().iconst(ir::types::I32, i64::from(v.u.i32_))),
+ TypeCode::I64 => Ok(pos.ins().iconst(ir::types::I64, v.u.i64_)),
+ TypeCode::F32 => Ok(pos.ins().f32const(Ieee32::with_bits(v.u.i32_ as u32))),
+ TypeCode::F64 => Ok(pos.ins().f64const(Ieee64::with_bits(v.u.i64_ as u64))),
+ TypeCode::V128 => {
+ let c = pos
+ .func
+ .dfg
+ .constants
+ .insert(ir::ConstantData::from(&v.u.v128 as &[u8]));
+ Ok(pos.ins().vconst(ir::types::I8X16, c))
+ }
+ TypeCode::NullableRef | TypeCode::ExternRef | TypeCode::FuncRef => {
+ assert!(v.u.r as usize == 0);
+ Ok(pos.ins().null(REF_TYPE))
+ }
+ _ => Err(BasicError::new(format!("unexpected type: {}", v.t as u64)).into()),
+ }
+ }
+ }
+
+ /// Get the offset from the `WasmTlsReg` to the memory representing this global variable.
+ pub fn tls_offset(self) -> usize {
+ unsafe { low_level::global_tlsOffset(self.0) }
+ }
+
+ pub fn content_type(self) -> wasmparser::Type {
+ typecode_to_parser_type(unsafe { low_level::global_type(self.0) })
+ }
+}
+
+#[derive(Clone, Copy)]
+pub struct TableDesc(*const low_level::TableDesc);
+
+impl TableDesc {
+ /// Get the offset from the `WasmTlsReg` to the `wasm::TableTls` representing this table.
+ pub fn tls_offset(self) -> usize {
+ unsafe { low_level::table_tlsOffset(self.0) }
+ }
+
+ pub fn element_type(self) -> wasmparser::Type {
+ typecode_to_parser_type(unsafe { low_level::table_elementTypeCode(self.0) })
+ }
+
+ pub fn resizable_limits(self) -> wasmparser::ResizableLimits {
+ let initial = unsafe { low_level::table_initialLimit(self.0) };
+        let maximum = unsafe { low_level::table_maximumLimit(self.0) };
+ let maximum = if maximum == u32::max_value() {
+ None
+ } else {
+ Some(maximum)
+ };
+ wasmparser::ResizableLimits { initial, maximum }
+ }
+}
+
+#[derive(Clone)]
+pub struct FuncType {
+ ptr: *const low_level::FuncType,
+ args: Vec<TypeCode>,
+ results: Vec<TypeCode>,
+}
+
+impl FuncType {
+ /// Creates a new FuncType, caching all the values it requires.
+ pub(crate) fn new(ptr: *const low_level::FuncType) -> Self {
+ let num_args = unsafe { low_level::funcType_numArgs(ptr) };
+ let args = unsafe { slice::from_raw_parts(low_level::funcType_args(ptr), num_args) };
+ let args = args
+ .iter()
+ .map(|val_type| unsafe { low_level::env_unpack(*val_type) })
+ .collect();
+
+ let num_results = unsafe { low_level::funcType_numResults(ptr) };
+ let results =
+ unsafe { slice::from_raw_parts(low_level::funcType_results(ptr), num_results) };
+ let results = results
+ .iter()
+ .map(|val_type| unsafe { low_level::env_unpack(*val_type) })
+ .collect();
+
+ Self { ptr, args, results }
+ }
+
+ pub(crate) fn args(&self) -> &[TypeCode] {
+ &self.args
+ }
+ pub(crate) fn results(&self) -> &[TypeCode] {
+ &self.results
+ }
+}
+
+#[derive(Clone)]
+pub struct TypeIdDesc {
+ ptr: *const low_level::TypeIdDesc,
+}
+
+impl TypeIdDesc {
+ pub(crate) fn new(ptr: *const low_level::TypeIdDesc) -> Self {
+ Self { ptr }
+ }
+
+ pub(crate) fn id_kind(&self) -> TypeIdDescKind {
+ unsafe { low_level::funcType_idKind(self.ptr) }
+ }
+ pub(crate) fn id_immediate(&self) -> usize {
+ unsafe { low_level::funcType_idImmediate(self.ptr) }
+ }
+ pub(crate) fn id_tls_offset(&self) -> usize {
+ unsafe { low_level::funcType_idTlsOffset(self.ptr) }
+ }
+}
+
+fn typecode_to_parser_type(ty: TypeCode) -> wasmparser::Type {
+ match ty {
+ TypeCode::I32 => wasmparser::Type::I32,
+ TypeCode::I64 => wasmparser::Type::I64,
+ TypeCode::F32 => wasmparser::Type::F32,
+ TypeCode::F64 => wasmparser::Type::F64,
+ TypeCode::V128 => wasmparser::Type::V128,
+ TypeCode::FuncRef => wasmparser::Type::FuncRef,
+ TypeCode::ExternRef => wasmparser::Type::ExternRef,
+ TypeCode::BlockVoid => wasmparser::Type::EmptyBlockType,
+ _ => panic!("unknown type code: {:?}", ty),
+ }
+}
+
+impl wasmparser::WasmFuncType for FuncType {
+ fn len_inputs(&self) -> usize {
+ self.args.len()
+ }
+ fn len_outputs(&self) -> usize {
+ self.results.len()
+ }
+ fn input_at(&self, at: u32) -> Option<wasmparser::Type> {
+ self.args
+ .get(at as usize)
+ .map(|ty| typecode_to_parser_type(*ty))
+ }
+ fn output_at(&self, at: u32) -> Option<wasmparser::Type> {
+ self.results
+ .get(at as usize)
+ .map(|ty| typecode_to_parser_type(*ty))
+ }
+}
+
+/// Thin wrapper for the CraneliftModuleEnvironment structure.
+
+pub struct ModuleEnvironment<'a> {
+ env: &'a CraneliftModuleEnvironment,
+ /// The `WasmModuleResources` trait requires us to return a borrow to a `FuncType`, so we
+ /// eagerly construct these.
+ types: Vec<FuncType>,
+ /// Similar to `types`, we need to have a persistently-stored `FuncType` to return. The
+ /// types in `func_sigs` are a subset of those in `types`, but we don't want to have to
+ /// maintain an index from function to signature ID, so we store these directly.
+ func_sigs: Vec<FuncType>,
+}
+
+impl<'a> ModuleEnvironment<'a> {
+ pub(crate) fn new(env: &'a CraneliftModuleEnvironment) -> Self {
+ let num_types = unsafe { low_level::env_num_types(env) };
+ let mut types = Vec::with_capacity(num_types);
+ for i in 0..num_types {
+ let t = FuncType::new(unsafe { low_level::env_signature(env, i) });
+ types.push(t);
+ }
+ let num_func_sigs = unsafe { low_level::env_num_funcs(env) };
+ let mut func_sigs = Vec::with_capacity(num_func_sigs);
+ for i in 0..num_func_sigs {
+ let t = FuncType::new(unsafe { low_level::env_func_sig(env, i) });
+ func_sigs.push(t);
+ }
+ Self {
+ env,
+ types,
+ func_sigs,
+ }
+ }
+ pub fn has_memory(&self) -> bool {
+ unsafe { low_level::env_has_memory(self.env) }
+ }
+ pub fn uses_shared_memory(&self) -> bool {
+ unsafe { low_level::env_uses_shared_memory(self.env) }
+ }
+ pub fn num_tables(&self) -> usize {
+ unsafe { low_level::env_num_tables(self.env) }
+ }
+ pub fn num_types(&self) -> usize {
+ self.types.len()
+ }
+ pub fn type_(&self, index: usize) -> FuncType {
+ self.types[index].clone()
+ }
+ pub fn num_func_sigs(&self) -> usize {
+ self.func_sigs.len()
+ }
+ pub fn func_sig(&self, func_index: FuncIndex) -> FuncType {
+ self.func_sigs[func_index.index()].clone()
+ }
+ pub fn func_sig_index(&self, func_index: FuncIndex) -> SignatureIndex {
+ SignatureIndex::new(unsafe { low_level::env_func_sig_index(self.env, func_index.index()) })
+ }
+ pub fn func_import_tls_offset(&self, func_index: FuncIndex) -> usize {
+ unsafe { low_level::env_func_import_tls_offset(self.env, func_index.index()) }
+ }
+ pub fn func_is_import(&self, func_index: FuncIndex) -> bool {
+ unsafe { low_level::env_func_is_import(self.env, func_index.index()) }
+ }
+ pub fn signature(&self, type_index: TypeIndex) -> FuncType {
+ // This function takes `TypeIndex` rather than the `SignatureIndex` that one
+ // might expect. Why? https://github.com/bytecodealliance/wasmtime/pull/2115
+ // introduces two new types to the type section as viewed by Cranelift. This is
+ // in support of the module linking proposal. So now a type index (for
+ // Cranelift) can refer to a func, module, or instance type. When the type index
+ // refers to a func type, it can also be used to get the signature index which
+ // can be used to get the ir::Signature for that func type. For us, Cranelift is
+ // only used with function types so we can just assume type index and signature
+ // index are 1:1. If and when we come to support the module linking proposal,
+ // this will need to be revisited.
+ FuncType::new(unsafe { low_level::env_signature(self.env, type_index.index()) })
+ }
+ pub fn signature_id(&self, type_index: TypeIndex) -> TypeIdDesc {
+ TypeIdDesc::new(unsafe { low_level::env_signature_id(self.env, type_index.index()) })
+ }
+ pub fn table(&self, table_index: TableIndex) -> TableDesc {
+ TableDesc(unsafe { low_level::env_table(self.env, table_index.index()) })
+ }
+ pub fn global(&self, global_index: GlobalIndex) -> GlobalDesc {
+ GlobalDesc(unsafe { low_level::env_global(self.env, global_index.index()) })
+ }
+ pub fn min_memory_length(&self) -> u32 {
+ self.env.min_memory_length
+ }
+ pub fn max_memory_length(&self) -> Option<u32> {
+ let max = unsafe { low_level::env_max_memory(self.env) };
+ if max == u32::max_value() {
+ None
+ } else {
+ Some(max)
+ }
+ }
+}
+
+impl<'module> wasmparser::WasmModuleResources for ModuleEnvironment<'module> {
+ type FuncType = FuncType;
+ fn table_at(&self, at: u32) -> Option<wasmparser::TableType> {
+ if (at as usize) < self.num_tables() {
+ let desc = TableDesc(unsafe { low_level::env_table(self.env, at as usize) });
+ let element_type = desc.element_type();
+ let limits = desc.resizable_limits();
+ Some(wasmparser::TableType {
+ element_type,
+ limits,
+ })
+ } else {
+ None
+ }
+ }
+ fn memory_at(&self, at: u32) -> Option<wasmparser::MemoryType> {
+ if at == 0 {
+ let has_memory = self.has_memory();
+ if has_memory {
+ let shared = self.uses_shared_memory();
+ let initial = self.min_memory_length() as u32;
+ let maximum = self.max_memory_length();
+ Some(wasmparser::MemoryType::M32 {
+ limits: wasmparser::ResizableLimits { initial, maximum },
+ shared,
+ })
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+ fn global_at(&self, at: u32) -> Option<wasmparser::GlobalType> {
+ let num_globals = unsafe { low_level::env_num_globals(self.env) };
+ if (at as usize) < num_globals {
+ let desc = self.global(GlobalIndex::new(at as usize));
+ let mutable = !desc.is_constant();
+ let content_type = desc.content_type();
+ Some(wasmparser::GlobalType {
+ mutable,
+ content_type,
+ })
+ } else {
+ None
+ }
+ }
+ fn func_type_at(&self, type_idx: u32) -> Option<&Self::FuncType> {
+ if (type_idx as usize) < self.types.len() {
+ Some(&self.types[type_idx as usize])
+ } else {
+ None
+ }
+ }
+ fn type_of_function(&self, func_idx: u32) -> Option<&Self::FuncType> {
+ if (func_idx as usize) < self.func_sigs.len() {
+ Some(&self.func_sigs[func_idx as usize])
+ } else {
+ None
+ }
+ }
+ fn element_type_at(&self, at: u32) -> Option<wasmparser::Type> {
+ let num_elems = self.element_count();
+ if at < num_elems {
+ let elem_type = unsafe { low_level::env_elem_typecode(self.env, at) };
+ Some(typecode_to_parser_type(elem_type))
+ } else {
+ None
+ }
+ }
+ fn element_count(&self) -> u32 {
+ unsafe { low_level::env_num_elems(self.env) as u32 }
+ }
+ fn data_count(&self) -> u32 {
+ unsafe { low_level::env_num_datas(self.env) as u32 }
+ }
+ fn is_function_referenced(&self, idx: u32) -> bool {
+ unsafe { low_level::env_is_func_valid_for_ref(self.env, idx) }
+ }
+}
+
+/// Extra methods for some C++ wrappers.
+
+impl FuncCompileInput {
+ pub fn bytecode(&self) -> &[u8] {
+ unsafe { slice::from_raw_parts(self.bytecode, self.bytecode_size) }
+ }
+
+ pub fn stackmaps(&self) -> Stackmaps {
+ Stackmaps(self.stackmaps)
+ }
+}
+
+impl CompiledFunc {
+ pub fn reset(&mut self, compiled_func: &compile::CompiledFunc) {
+ self.num_metadata = compiled_func.metadata.len();
+ self.metadatas = compiled_func.metadata.as_ptr();
+
+ self.frame_pushed = compiled_func.frame_pushed as usize;
+ self.contains_calls = compiled_func.contains_calls;
+
+ self.code = compiled_func.code_buffer.as_ptr();
+ self.code_size = compiled_func.code_size as usize;
+ self.jumptables_size = compiled_func.jumptables_size as usize;
+ self.rodata_size = compiled_func.rodata_size as usize;
+ self.total_size = compiled_func.code_buffer.len();
+
+ self.num_rodata_relocs = compiled_func.rodata_relocs.len();
+ self.rodata_relocs = compiled_func.rodata_relocs.as_ptr();
+ }
+}
+
+impl MetadataEntry {
+ pub fn direct_call(code_offset: CodeOffset, srcloc: SourceLoc, func_index: FuncIndex) -> Self {
+ Self {
+ which: CraneliftMetadataEntry_Which_DirectCall,
+ code_offset,
+ module_bytecode_offset: srcloc.bits(),
+ extra: func_index.index(),
+ }
+ }
+ pub fn indirect_call(ret_addr: CodeOffset, srcloc: SourceLoc) -> Self {
+ Self {
+ which: CraneliftMetadataEntry_Which_IndirectCall,
+ code_offset: ret_addr,
+ module_bytecode_offset: srcloc.bits(),
+ extra: 0,
+ }
+ }
+ pub fn trap(code_offset: CodeOffset, srcloc: SourceLoc, which: Trap) -> Self {
+ Self {
+ which: CraneliftMetadataEntry_Which_Trap,
+ code_offset,
+ module_bytecode_offset: srcloc.bits(),
+ extra: which as usize,
+ }
+ }
+ pub fn symbolic_access(
+ code_offset: CodeOffset,
+ srcloc: SourceLoc,
+ sym: SymbolicAddress,
+ ) -> Self {
+ Self {
+ which: CraneliftMetadataEntry_Which_SymbolicAccess,
+ code_offset,
+ module_bytecode_offset: srcloc.bits(),
+ extra: sym as usize,
+ }
+ }
+}
+
+impl StaticEnvironment {
+ /// Returns the default calling convention on this machine.
+ pub fn call_conv(&self) -> isa::CallConv {
+ if self.platform_is_windows {
+ unimplemented!("No FastCall variant of Baldrdash2020")
+ } else {
+ isa::CallConv::Baldrdash2020
+ }
+ }
+}
+
+pub struct Stackmaps(*mut self::low_level::BD_Stackmaps);
+
+impl Stackmaps {
+ pub fn add_stackmap(
+ &mut self,
+ inbound_args_size: u32,
+ offset: CodeOffset,
+ map: &cranelift_codegen::binemit::StackMap,
+ ) {
+ unsafe {
+ let bitslice = map.as_slice();
+ low_level::stackmaps_add(
+ self.0,
+ std::mem::transmute(bitslice.as_ptr()),
+ map.mapped_words() as usize,
+ inbound_args_size as usize,
+ offset as usize,
+ );
+ }
+ }
+}
diff --git a/js/src/wasm/cranelift/src/compile.rs b/js/src/wasm/cranelift/src/compile.rs
new file mode 100644
index 0000000000..7b9b0dff9a
--- /dev/null
+++ b/js/src/wasm/cranelift/src/compile.rs
@@ -0,0 +1,538 @@
+/* Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Cranelift WebAssembly function compiler.
+//!
+//! This module defines the `compile()` function which uses Cranelift to compile a single
+//! WebAssembly function.
+
+use log::{debug, info};
+use std::fmt;
+use std::mem;
+use std::rc::Rc;
+
+use cranelift_codegen::binemit::{
+ Addend, CodeInfo, CodeOffset, NullStackMapSink, Reloc, RelocSink, TrapSink,
+};
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::{
+ self, constant::ConstantOffset, stackslot::StackSize, ExternalName, JumpTable, SourceLoc,
+ TrapCode,
+};
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::machinst::MachStackMap;
+use cranelift_codegen::CodegenResult;
+use cranelift_codegen::Context;
+use cranelift_wasm::wasmparser::{FuncValidator, WasmFeatures};
+use cranelift_wasm::{FuncIndex, FuncTranslator, WasmResult};
+
+use crate::bindings;
+use crate::isa::make_isa;
+use crate::utils::DashResult;
+use crate::wasm2clif::{init_sig, TransEnv, TRAP_THROW_REPORTED};
+
+// Namespace for user-defined functions.
+const USER_FUNCTION_NAMESPACE: u32 = 0;
+
+// Namespace for builtin functions that are translated to symbolic accesses in Spidermonkey.
+const SYMBOLIC_FUNCTION_NAMESPACE: u32 = 1;
+
+/// The result of a function's compilation: code + metadata.
+pub struct CompiledFunc {
+ pub frame_pushed: StackSize,
+ pub contains_calls: bool,
+ pub metadata: Vec<bindings::MetadataEntry>,
+ // rodata_relocs is Vec<CodeOffset>, but u32 is C++-friendlier
+ pub rodata_relocs: Vec<u32>,
+ // TODO(bbouvier) should just be a pointer into the masm buffer
+ pub code_buffer: Vec<u8>,
+ pub code_size: CodeOffset,
+ pub jumptables_size: CodeOffset,
+ pub rodata_size: CodeOffset,
+}
+
+impl CompiledFunc {
+ fn new() -> Self {
+ Self {
+ frame_pushed: 0,
+ contains_calls: false,
+ metadata: vec![],
+ rodata_relocs: vec![],
+ code_buffer: vec![],
+ code_size: 0,
+ jumptables_size: 0,
+ rodata_size: 0,
+ }
+ }
+
+ fn clear(&mut self) {
+ self.frame_pushed = 0;
+ self.contains_calls = false;
+ self.metadata.clear();
+ self.rodata_relocs.clear();
+ self.code_buffer.clear();
+ self.code_size = 0;
+ self.jumptables_size = 0;
+ self.rodata_size = 0;
+ }
+}
+
+/// A batch compiler holds on to data structures that can be recycled for multiple function
+/// compilations.
+pub struct BatchCompiler<'static_env, 'module_env> {
+    // Attributes that are constant across multiple compilations.
+ static_env: &'static_env bindings::StaticEnvironment,
+
+ module_env: Rc<bindings::ModuleEnvironment<'module_env>>,
+
+ isa: Box<dyn TargetIsa>,
+
+ // Stateless attributes.
+ func_translator: FuncTranslator,
+
+ // Mutable attributes.
+ /// Cranelift overall context.
+ context: Context,
+
+ /// Temporary storage for trap relocations before they're moved back to the CompiledFunc.
+ trap_relocs: Traps,
+
+ /// The translation from wasm to clif environment.
+ trans_env: TransEnv<'static_env, 'module_env>,
+
+ /// Results of the current compilation.
+ pub current_func: CompiledFunc,
+}
+
+impl<'static_env, 'module_env> BatchCompiler<'static_env, 'module_env> {
+ pub fn new(
+ static_env: &'static_env bindings::StaticEnvironment,
+ module_env: bindings::ModuleEnvironment<'module_env>,
+ ) -> DashResult<Self> {
+ let isa = make_isa(static_env)?;
+ let module_env = Rc::new(module_env);
+ let trans_env = TransEnv::new(&*isa, module_env.clone(), static_env);
+ Ok(BatchCompiler {
+ static_env,
+ module_env,
+ isa,
+ func_translator: FuncTranslator::new(),
+ context: Context::new(),
+ trap_relocs: Traps::new(),
+ trans_env,
+ current_func: CompiledFunc::new(),
+ })
+ }
+
+ /// Clears internal data structures.
+ pub fn clear(&mut self) {
+ self.context.clear();
+ self.trap_relocs.clear();
+ self.trans_env.clear();
+ self.current_func.clear();
+ }
+
+ pub fn compile(&mut self, stackmaps: bindings::Stackmaps) -> CodegenResult<()> {
+ debug!("=== BatchCompiler::compile: BEGIN ==============================");
+ let info = self.context.compile(&*self.isa)?;
+ let res = self.binemit(info, stackmaps);
+ debug!("=== BatchCompiler::compile: END ================================");
+ debug!("");
+ res
+ }
+
+ /// Translate the WebAssembly code to Cranelift IR.
+ pub fn translate_wasm(&mut self, func: &bindings::FuncCompileInput) -> WasmResult<()> {
+ // Set up the signature before translating the WebAssembly byte code.
+ // The translator refers to it.
+ let index = FuncIndex::new(func.index as usize);
+
+ self.context.func.signature =
+ init_sig(&*self.module_env, self.static_env.call_conv(), index)?;
+ self.context.func.name = wasm_function_name(index);
+
+ let features = WasmFeatures {
+ reference_types: self.static_env.ref_types_enabled,
+ module_linking: false,
+ simd: self.static_env.v128_enabled,
+ multi_value: true,
+ threads: self.static_env.threads_enabled,
+ tail_call: false,
+ bulk_memory: true,
+ deterministic_only: true,
+ memory64: false,
+ multi_memory: false,
+ };
+ let sig_index = self.module_env.func_sig_index(index);
+ let mut validator =
+ FuncValidator::new(sig_index.index() as u32, 0, &*self.module_env, &features)?;
+
+ self.func_translator.translate(
+ &mut validator,
+ func.bytecode(),
+ func.offset_in_module as usize,
+ &mut self.context.func,
+ &mut self.trans_env,
+ )?;
+
+ info!("Translated wasm function {}.", func.index);
+ debug!("Translated wasm function IR: {}", self);
+ Ok(())
+ }
+
+ /// Emit binary machine code to `emitter`.
+ fn binemit(&mut self, info: CodeInfo, stackmaps: bindings::Stackmaps) -> CodegenResult<()> {
+ let total_size = info.total_size as usize;
+ let frame_pushed = self.frame_pushed();
+ let contains_calls = self.contains_calls();
+
+ info!(
+ "Emitting {} bytes, frame_pushed={}.",
+ total_size, frame_pushed
+ );
+
+ self.current_func.frame_pushed = frame_pushed;
+ self.current_func.contains_calls = contains_calls;
+
+ // TODO: If we can get a pointer into `size` pre-allocated bytes of memory, we wouldn't
+ // have to allocate and copy here.
+ // TODO(bbouvier) try to get this pointer from the C++ caller, with an unlikely callback to
+ // C++ if the remaining size is smaller than needed.
+ if self.current_func.code_buffer.len() < total_size {
+ let current_size = self.current_func.code_buffer.len();
+ // There's no way to do a proper uninitialized reserve, so first reserve and then
+ // unsafely set the final size.
+ self.current_func
+ .code_buffer
+ .reserve(total_size - current_size);
+ unsafe { self.current_func.code_buffer.set_len(total_size) };
+ }
+
+ {
+ let mut relocs = Relocations::new(
+ &mut self.current_func.metadata,
+ &mut self.current_func.rodata_relocs,
+ );
+
+ let code_buffer = &mut self.current_func.code_buffer;
+ unsafe {
+ self.context.emit_to_memory(
+ &*self.isa,
+ code_buffer.as_mut_ptr(),
+ &mut relocs,
+ &mut self.trap_relocs,
+ &mut NullStackMapSink {},
+ )
+ };
+
+ self.current_func
+ .metadata
+ .append(&mut self.trap_relocs.metadata);
+ }
+
+ if self.static_env.ref_types_enabled {
+ self.emit_stackmaps(stackmaps);
+ }
+
+ self.current_func.code_size = info.code_size;
+ self.current_func.jumptables_size = info.jumptables_size;
+ self.current_func.rodata_size = info.rodata_size;
+
+ Ok(())
+ }
+
+ /// Iterate over safepoint information contained in the returned `MachBufferFinalized`.
+ fn emit_stackmaps(&self, mut stackmaps: bindings::Stackmaps) {
+ let mach_buf = &self.context.mach_compile_result.as_ref().unwrap().buffer;
+ let mach_stackmaps = mach_buf.stack_maps();
+
+ for &MachStackMap {
+ offset_end,
+ ref stack_map,
+ ..
+ } in mach_stackmaps
+ {
+ debug!(
+ "Stack map at end-of-insn offset {}: {:?}",
+ offset_end, stack_map
+ );
+ stackmaps.add_stackmap(/* inbound_args_size = */ 0, offset_end, stack_map);
+ }
+ }
+
+ /// Compute the `framePushed` argument to pass to `GenerateFunctionPrologue`. This is the
+ /// number of frame bytes used by Cranelift, not counting the values pushed by the standard
+ /// prologue generated by `GenerateFunctionPrologue`.
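+ ///
+ /// Worked example (illustrative numbers only, not taken from a real build): if Cranelift
+ /// reports a total frame size of 64 bytes and `baldrdash_prologue_words` is 2 on a 64-bit
+ /// target (i.e. 16 bytes of SpiderMonkey prologue pushes), this returns 64 - 16 = 48.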
+ fn frame_pushed(&self) -> StackSize {
+ // Cranelift computes the total stack frame size including the pushed return address,
+ // standard SM prologue pushes, and its own stack slots.
+ let total = self
+ .context
+ .mach_compile_result
+ .as_ref()
+ .expect("always use Mach backend")
+ .frame_size;
+
+ let sm_pushed = StackSize::from(self.isa.flags().baldrdash_prologue_words())
+ * mem::size_of::<usize>() as StackSize;
+
+ total
+ .checked_sub(sm_pushed)
+ .expect("SpiderMonkey prologue pushes not counted")
+ }
+
+ /// Determine whether the current function may contain calls.
+ fn contains_calls(&self) -> bool {
+ // Conservatively, just check to see if it contains any function
+ // signatures which could be called.
+ !self.context.func.dfg.signatures.is_empty()
+ }
+}
+
+impl<'static_env, 'module_env> fmt::Display for BatchCompiler<'static_env, 'module_env> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "{}", self.context.func.display(self.isa.as_ref()))
+ }
+}
+
+/// Create a Cranelift function name representing a WebAssembly function with `index`.
+pub fn wasm_function_name(func: FuncIndex) -> ExternalName {
+ ExternalName::User {
+ namespace: USER_FUNCTION_NAMESPACE,
+ index: func.index() as u32,
+ }
+}
+
+/// Create a Cranelift function name representing a builtin function.
+pub fn symbolic_function_name(sym: bindings::SymbolicAddress) -> ExternalName {
+ ExternalName::User {
+ namespace: SYMBOLIC_FUNCTION_NAMESPACE,
+ index: sym as u32,
+ }
+}
+
+struct Relocations<'a> {
+ metadata: &'a mut Vec<bindings::MetadataEntry>,
+ rodata_relocs: &'a mut Vec<CodeOffset>,
+}
+
+impl<'a> Relocations<'a> {
+ fn new(
+ metadata: &'a mut Vec<bindings::MetadataEntry>,
+ rodata_relocs: &'a mut Vec<CodeOffset>,
+ ) -> Self {
+ Self {
+ metadata,
+ rodata_relocs,
+ }
+ }
+}
+
+impl<'a> RelocSink for Relocations<'a> {
+ /// Add a relocation referencing an external symbol at the current offset.
+ fn reloc_external(
+ &mut self,
+ at: CodeOffset,
+ srcloc: SourceLoc,
+ reloc: Reloc,
+ name: &ExternalName,
+ _addend: Addend,
+ ) {
+ debug_assert!(!srcloc.is_default());
+
+ match *name {
+ ExternalName::User {
+ namespace: USER_FUNCTION_NAMESPACE,
+ index,
+ } => {
+ // A simple function call to another wasm function.
+ let func_index = FuncIndex::new(index as usize);
+
+ // On x86, the Spidermonkey relocation must point to the next instruction.
+ // Cranelift gives us the exact offset to the immediate, so fix it up by the
+ // relocation's size.
+ #[cfg(feature = "cranelift_x86")]
+ let offset = at
+ + match reloc {
+ Reloc::X86CallPCRel4 => 4,
+ _ => unreachable!(),
+ };
+
+ // Spidermonkey Aarch64 requires the relocation to point just after the start of
+ // the actual relocation, for historical reasons.
+ #[cfg(feature = "cranelift_arm64")]
+ let offset = match reloc {
+ Reloc::Arm64Call => at + 4,
+ _ => unreachable!(),
+ };
+
+ #[cfg(not(any(feature = "cranelift_x86", feature = "cranelift_arm64")))]
+ let offset = {
+ // Avoid warning about unused relocation.
+ let _reloc = reloc;
+ at
+ };
+
+ self.metadata.push(bindings::MetadataEntry::direct_call(
+ offset, srcloc, func_index,
+ ));
+ }
+
+ ExternalName::User {
+ namespace: SYMBOLIC_FUNCTION_NAMESPACE,
+ index,
+ } => {
+ // This is a symbolic function reference encoded by `symbolic_function_name()`.
+ let sym = index.into();
+
+ // See comments about offsets in the User match arm above.
+
+ #[cfg(feature = "cranelift_x86")]
+ let offset = at
+ + match reloc {
+ Reloc::Abs8 => 8,
+ _ => unreachable!(),
+ };
+
+ #[cfg(feature = "cranelift_arm64")]
+ let offset = match reloc {
+ Reloc::Abs8 => at + 4,
+ _ => unreachable!(),
+ };
+
+ #[cfg(not(any(feature = "cranelift_x86", feature = "cranelift_arm64")))]
+ let offset = at;
+
+ self.metadata.push(bindings::MetadataEntry::symbolic_access(
+ offset, srcloc, sym,
+ ));
+ }
+
+ ExternalName::LibCall(call) => {
+ let sym = match call {
+ ir::LibCall::CeilF32 => bindings::SymbolicAddress::CeilF32,
+ ir::LibCall::CeilF64 => bindings::SymbolicAddress::CeilF64,
+ ir::LibCall::FloorF32 => bindings::SymbolicAddress::FloorF32,
+ ir::LibCall::FloorF64 => bindings::SymbolicAddress::FloorF64,
+ ir::LibCall::NearestF32 => bindings::SymbolicAddress::NearestF32,
+ ir::LibCall::NearestF64 => bindings::SymbolicAddress::NearestF64,
+ ir::LibCall::TruncF32 => bindings::SymbolicAddress::TruncF32,
+ ir::LibCall::TruncF64 => bindings::SymbolicAddress::TruncF64,
+ _ => {
+ panic!("Don't understand external {}", name);
+ }
+ };
+
+ // The Spidermonkey relocation must point to the next instruction, on x86.
+ #[cfg(feature = "cranelift_x86")]
+ let offset = at
+ + match reloc {
+ Reloc::Abs8 => 8,
+ _ => unreachable!(),
+ };
+
+ // Spidermonkey AArch64 doesn't expect a relocation offset, in this case.
+ #[cfg(feature = "cranelift_arm64")]
+ let offset = match reloc {
+ Reloc::Abs8 => at,
+ _ => unreachable!(),
+ };
+
+ #[cfg(not(any(feature = "cranelift_x86", feature = "cranelift_arm64")))]
+ let offset = at;
+
+ self.metadata.push(bindings::MetadataEntry::symbolic_access(
+ offset, srcloc, sym,
+ ));
+ }
+
+ _ => {
+ panic!("Don't understand external {}", name);
+ }
+ }
+ }
+
+ /// Add a relocation referencing a constant.
+ fn reloc_constant(&mut self, _at: CodeOffset, _reloc: Reloc, _const_offset: ConstantOffset) {
+ unimplemented!("constant pool relocations NYI");
+ }
+
+ /// Add a relocation referencing a jump table.
+ fn reloc_jt(&mut self, at: CodeOffset, reloc: Reloc, _jt: JumpTable) {
+ match reloc {
+ Reloc::X86PCRelRodata4 => {
+ self.rodata_relocs.push(at);
+ }
+ _ => {
+ panic!("Unhandled/unexpected reloc type");
+ }
+ }
+ }
+
+ /// Track call site information, giving us the return address offset.
+ fn add_call_site(&mut self, opcode: ir::Opcode, ret_addr: CodeOffset, srcloc: SourceLoc) {
+ // Direct calls need a plain relocation, so we don't need to handle them again.
+ if opcode == ir::Opcode::CallIndirect {
+ self.metadata
+ .push(bindings::MetadataEntry::indirect_call(ret_addr, srcloc));
+ }
+ }
+}
+
+struct Traps {
+ metadata: Vec<bindings::MetadataEntry>,
+}
+
+impl Traps {
+ fn new() -> Self {
+ Self {
+ metadata: Vec::new(),
+ }
+ }
+ fn clear(&mut self) {
+ self.metadata.clear();
+ }
+}
+
+impl TrapSink for Traps {
+ /// Add trap information for a specific offset.
+ fn trap(&mut self, trap_offset: CodeOffset, loc: SourceLoc, trap: TrapCode) {
+ // Translate the trap code into one of BaldrMonkey's trap codes.
+ use ir::TrapCode::*;
+ let bd_trap = match trap {
+ StackOverflow => {
+ // Cranelift will give us trap information for every spill/push/call. But
+ // Spidermonkey takes care of tracking stack overflows itself in the function
+ // entries, so we don't have to.
+ return;
+ }
+ HeapOutOfBounds | TableOutOfBounds => bindings::Trap::OutOfBounds,
+ HeapMisaligned => bindings::Trap::UnalignedAccess,
+ IndirectCallToNull => bindings::Trap::IndirectCallToNull,
+ BadSignature => bindings::Trap::IndirectCallBadSig,
+ IntegerOverflow => bindings::Trap::IntegerOverflow,
+ IntegerDivisionByZero => bindings::Trap::IntegerDivideByZero,
+ BadConversionToInteger => bindings::Trap::InvalidConversionToInteger,
+ Interrupt => bindings::Trap::CheckInterrupt,
+ UnreachableCodeReached => bindings::Trap::Unreachable,
+ User(x) if x == TRAP_THROW_REPORTED => bindings::Trap::ThrowReported,
+ User(_) => panic!("Uncovered trap code {}", trap),
+ };
+
+ debug_assert!(!loc.is_default());
+ self.metadata
+ .push(bindings::MetadataEntry::trap(trap_offset, loc, bd_trap));
+ }
+}
diff --git a/js/src/wasm/cranelift/src/isa.rs b/js/src/wasm/cranelift/src/isa.rs
new file mode 100644
index 0000000000..efaf0e8837
--- /dev/null
+++ b/js/src/wasm/cranelift/src/isa.rs
@@ -0,0 +1,253 @@
+/* Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! CPU detection and configuration of Cranelift's `TargetISA`.
+//!
+//! This module deals with the configuration of Cranelift to generate code for the current CPU that
+//! is compatible with the SpiderMonkey JIT.
+//!
+//! The main entry point is the `make_isa()` function, which allocates a configured `TargetIsa`
+//! object.
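+//!
+//! A minimal sketch of the intended use (illustrative only; `static_env` is provided by the C++
+//! embedder through the generated bindings):
+//!
+//! ```ignore
+//! let isa = make_isa(&static_env)?;
+//! // The resulting flags reflect the SpiderMonkey-specific settings configured below, e.g. the
+//! // number of words pushed by the C++ prologue.
+//! let words = isa.flags().baldrdash_prologue_words();
+//! ```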
+
+use log::warn;
+use std::env;
+
+use cranelift_codegen::isa;
+use cranelift_codegen::settings::{self, Configurable};
+
+use crate::bindings::StaticEnvironment;
+use crate::utils::{BasicError, DashResult};
+
+#[cfg(target_pointer_width = "64")]
+pub const POINTER_SIZE: usize = 8;
+#[cfg(target_pointer_width = "32")]
+pub const POINTER_SIZE: usize = 4;
+
+#[cfg(feature = "cranelift_x86")]
+pub mod platform {
+ use super::*;
+
+ pub const IS_SUPPORTED: bool = true;
+ pub const USES_HEAP_REG: bool = true;
+
+ pub(crate) fn make_isa_builder(env: &StaticEnvironment) -> DashResult<isa::Builder> {
+ let mut ib = isa::lookup_by_name("x86_64-unknown-unknown").map_err(BasicError::from)?;
+
+ if !env.has_sse2 {
+ return Err("SSE2 is mandatory for Baldrdash!".into());
+ }
+
+ if env.has_sse3 {
+ ib.enable("has_sse3").map_err(BasicError::from)?;
+ }
+ if env.has_sse41 {
+ ib.enable("has_sse41").map_err(BasicError::from)?;
+ }
+ if env.has_sse42 {
+ ib.enable("has_sse42").map_err(BasicError::from)?;
+ }
+ if env.has_popcnt {
+ ib.enable("has_popcnt").map_err(BasicError::from)?;
+ }
+ if env.has_avx {
+ ib.enable("has_avx").map_err(BasicError::from)?;
+ }
+ if env.has_bmi1 {
+ ib.enable("has_bmi1").map_err(BasicError::from)?;
+ }
+ if env.has_bmi2 {
+ ib.enable("has_bmi2").map_err(BasicError::from)?;
+ }
+ if env.has_lzcnt {
+ ib.enable("has_lzcnt").map_err(BasicError::from)?;
+ }
+
+ Ok(ib)
+ }
+}
+
+#[cfg(feature = "cranelift_arm64")]
+pub mod platform {
+ use super::*;
+
+ pub const IS_SUPPORTED: bool = true;
+ pub const USES_HEAP_REG: bool = true;
+
+ pub(crate) fn make_isa_builder(env: &StaticEnvironment) -> DashResult<isa::Builder> {
+ let mut ib = isa::lookup_by_name("aarch64-unknown-unknown").map_err(BasicError::from)?;
+ if env.v128_enabled {
+ ib.enable("enable_simd").map_err(BasicError::from)?;
+ }
+
+ Ok(ib)
+ }
+}
+
+#[cfg(not(any(feature = "cranelift_x86", feature = "cranelift_arm64")))]
+pub mod platform {
+ use super::*;
+
+ pub const IS_SUPPORTED: bool = false;
+ pub const USES_HEAP_REG: bool = false;
+
+ pub(crate) fn make_isa_builder(_env: &StaticEnvironment) -> DashResult<isa::Builder> {
+ Err("Platform not supported yet!".into())
+ }
+}
+
+impl From<isa::LookupError> for BasicError {
+ fn from(err: isa::LookupError) -> BasicError {
+ BasicError::new(err.to_string())
+ }
+}
+
+impl From<settings::SetError> for BasicError {
+ fn from(err: settings::SetError) -> BasicError {
+ BasicError::new(err.to_string())
+ }
+}
+
+struct EnvVariableFlags<'env> {
+ opt_level: Option<&'env str>,
+ jump_tables: Option<bool>,
+}
+
+#[inline]
+fn str_to_bool(value: &str) -> bool {
+ value == "true" || value == "on" || value == "yes" || value == "1"
+}
+
+impl<'env> EnvVariableFlags<'env> {
+ fn parse(input: &'env Result<String, env::VarError>) -> Option<Self> {
+ let input = match input {
+ Ok(input) => input.as_str(),
+ Err(_) => return None,
+ };
+
+ let mut flags = EnvVariableFlags {
+ opt_level: None,
+ jump_tables: None,
+ };
+
+ for entry in input.split(',') {
+ if let Some(equals_index) = entry.find('=') {
+ let (key, value) = entry.split_at(equals_index);
+
+ // `value` starts with the '=', so remove it.
+ let value = &value[1..];
+
+ match key {
+ "opt_level" => {
+ // Invalid values will be reported by Cranelift.
+ flags.opt_level = Some(value);
+ }
+ "jump_tables" => {
+ flags.jump_tables = Some(str_to_bool(value));
+ }
+ _ => {
+ warn!("Unknown setting with key {}", key);
+ }
+ }
+ } else {
+ warn!("Missing = in pair: {}", entry);
+ }
+ }
+
+ Some(flags)
+ }
+}
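+
+// A minimal illustrative sketch of the CRANELIFT_FLAGS syntax accepted by the parser above: a
+// comma-separated list of `key=value` pairs. The values used here are placeholders chosen for
+// the example.
+#[cfg(test)]
+mod env_flags_tests {
+ use super::*;
+
+ #[test]
+ fn parses_comma_separated_pairs() {
+ let input: Result<String, std::env::VarError> =
+ Ok("opt_level=speed_and_size,jump_tables=off".to_string());
+ let flags = EnvVariableFlags::parse(&input).expect("should parse");
+ assert_eq!(flags.opt_level, Some("speed_and_size"));
+ assert_eq!(flags.jump_tables, Some(false));
+ }
+}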
+
+/// Create a `Flags` object for the shared settings.
+///
+/// This only fails if one of Cranelift's settings has been removed or renamed.
+fn make_shared_flags(
+ env: &StaticEnvironment,
+ env_flags: &Option<EnvVariableFlags>,
+) -> settings::SetResult<settings::Flags> {
+ let mut sb = settings::builder();
+
+ // We don't install SIGFPE handlers, but depend on explicit traps around divisions.
+ sb.enable("avoid_div_traps")?;
+
+ // Cranelift needs to know how many words are pushed by `GenerateFunctionPrologue` so it can
+ // compute frame pointer offsets accurately. C++'s "sizeof" gives us the number of bytes, which
+ // we translate to the number of words, as expected by Cranelift.
+ debug_assert_eq!(env.size_of_wasm_frame % POINTER_SIZE, 0);
+ let num_words = env.size_of_wasm_frame / POINTER_SIZE;
+ sb.set("baldrdash_prologue_words", &num_words.to_string())?;
+
+ // Make sure that libcalls use the supplementary VMContext argument.
+ let libcall_call_conv = if env.platform_is_windows {
+ "baldrdash_windows"
+ } else {
+ "baldrdash_system_v"
+ };
+ sb.set("libcall_call_conv", libcall_call_conv)?;
+
+ // Assembler::PatchDataWithValueCheck expects -1 stored where a function address should be
+ // patched in.
+ sb.enable("emit_all_ones_funcaddrs")?;
+
+ // Enable the verifier if assertions are enabled. Otherwise leave it disabled,
+ // as it's quite slow.
+ if !cfg!(debug_assertions) {
+ sb.set("enable_verifier", "false")?;
+ }
+
+ // Baldrdash does its own stack overflow checks, so we don't need Cranelift doing any for us.
+ sb.set("enable_probestack", "false")?;
+
+ // Let's optimize for speed by default.
+ let opt_level = match env_flags {
+ Some(env_flags) => env_flags.opt_level,
+ None => None,
+ }
+ .unwrap_or("speed");
+ sb.set("opt_level", opt_level)?;
+
+ // Enable jump tables by default.
+ let enable_jump_tables = match env_flags {
+ Some(env_flags) => env_flags.jump_tables,
+ None => None,
+ }
+ .unwrap_or(true);
+ sb.set(
+ "enable_jump_tables",
+ if enable_jump_tables { "true" } else { "false" },
+ )?;
+
+ if platform::USES_HEAP_REG {
+ sb.enable("enable_pinned_reg")?;
+ sb.enable("use_pinned_reg_as_heap_base")?;
+ }
+
+ if env.ref_types_enabled {
+ sb.enable("enable_safepoints")?;
+ }
+
+ Ok(settings::Flags::new(sb))
+}
+
+/// Allocate a `TargetIsa` object that can be used to generate code for the CPU we're running on.
+pub fn make_isa(env: &StaticEnvironment) -> DashResult<Box<dyn isa::TargetIsa>> {
+ // Parse flags defined by the environment variable.
+ let env_flags_str = std::env::var("CRANELIFT_FLAGS");
+ let env_flags = EnvVariableFlags::parse(&env_flags_str);
+
+ let shared_flags = make_shared_flags(env, &env_flags).map_err(BasicError::from)?;
+
+ let ib = platform::make_isa_builder(env)?;
+ Ok(ib.finish(shared_flags))
+}
diff --git a/js/src/wasm/cranelift/src/lib.rs b/js/src/wasm/cranelift/src/lib.rs
new file mode 100644
index 0000000000..3aae394372
--- /dev/null
+++ b/js/src/wasm/cranelift/src/lib.rs
@@ -0,0 +1,272 @@
+/* Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! This code bridges Spidermonkey to Cranelift.
+//!
+//! This documentation explains the role of each high-level function, each notable submodule, and
+//! the Spidermonkey idiosyncrasies that are visible here and leak into Cranelift. This is not a
+//! technical presentation of how Cranelift works or what it intends to achieve, a task much more
+//! suited to the Wasmtime documentation itself:
+//!
+//! https://github.com/bytecodealliance/wasmtime/blob/master/cranelift/docs/index.md
+//!
+//! At the time of writing (April 14th, 2020), this code is only used for WebAssembly (wasm)
+//! compilation, so this documentation focuses on the wasm integration. As a matter of fact, this
+//! glue crate between Baldrmonkey and Cranelift is called Baldrdash, thanks to the usual punsters.
+//!
+//! ## Relationships to other files
+//!
+//! * WasmCraneliftCompile.cpp contains all the C++ code that calls into this crate.
+//! * clifapi.h describes the C-style bindings to this crate's public functions, used by the C++
+//! code to call into Rust. They're maintained by hand, and thus manual review must ensure the
+//! signatures match those of the functions exposed in this lib.rs file.
+//! * baldrapi.h describes the C-style functions exposed through `bindgen` so they can be called
+//! from Rust. Bindings are automatically generated, such that they're safe to use in general.
+//! WasmConstants.h is also exposed through this file, which makes sharing some code easier.
+//!
+//! ## High-level functions
+//!
+//! * `cranelift_initialize` performs per-process initialization.
+//! * `cranelift_compiler_create` will return a `BatchCompiler`, the high-level data structure
+//! controlling the compilation of a group (batch) of wasm functions. The created compiler should
+//! later be deallocated with `cranelift_compiler_destroy`, once it's not needed anymore.
+//! * `cranelift_compile_function` takes care of translating a single wasm function into Cranelift
+//! IR, and compiles it down to machine code. Input data is passed through a const pointer to a
+//! `FuncCompilerInput` data structure (defined in bindings), and the return values are stored in
+//! an in-out parameter named `CompiledFunc` (also defined in bindings).
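+//!
+//! Put together, the expected call sequence is roughly the following. This is an illustrative
+//! sketch written from the Rust side; the real driver is the C++ code in
+//! WasmCraneliftCompile.cpp, the pointer arguments are placeholders, and every entry point except
+//! `cranelift_initialize` is an `unsafe` FFI function:
+//!
+//! ```ignore
+//! cranelift_initialize();
+//! let compiler = cranelift_compiler_create(static_env, module_env);
+//! assert!(!compiler.is_null());
+//!
+//! let mut error: *mut c_char = ptr::null_mut();
+//! if !cranelift_compile_function(compiler, func_input, compiled_func, &mut error) {
+//!     // `error` holds a C string describing a validation or codegen failure; it must be
+//!     // released with cranelift_compiler_free_error().
+//! }
+//!
+//! cranelift_compiler_destroy(compiler);
+//! ```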
+//!
+//! ## Submodules
+//!
+//! The list of submodules here is deliberately put in a specific order, to make it easier to
+//! discover and read.
+//!
+//! * The `isa` module configures Cranelift, applying some target-independent settings, as well as
+//! target-specific settings. These settings are used both during translation of wasm to Cranelift
+//! IR and compilation to machine code.
+//! * The `wasm2clif` module contains the code doing the translation of the wasm code section to
+//! Cranelift IR, implementing all the Spidermonkey specific behaviors.
+//! * The `compile` module takes care of optimizing the Cranelift IR and compiles it down to
+//! machine code, noting down relocations in the process.
+//!
+//! A few other helper modules are also defined:
+//!
+//! * The `bindings` module contains C++ bindings automatically generated by `bindgen` in the Cargo
+//! build script (`build.rs`), as well as thin wrappers over these data structures to make these
+//! more ergonomic to use in Rust.
+//! * No code base would be feature complete without a bunch of random helpers and functions that
+//! don't really belong anywhere else: the `utils` module contains error handling helpers, to unify
+//! all the Cranelift Error types into one that can be passed around in Baldrdash.
+//!
+//! ## Spidermonkey idiosyncrasies
+//!
+//! Most of the Spidermonkey-specific behavior is reflected during conversion of the wasm code to
+//! Cranelift IR (in the `wasm2clif` module), but there are some other aspects worth mentioning
+//! here.
+//!
+//! ### Code generation, prologues/epilogues, ABI
+//!
+//! Cranelift may call into and be called from other functions using the Spidermonkey wasm ABI:
+//! that is, code generated by the wasm baseline compiler during tiering, any other wasm stub, even
+//! Ion (through the JIT entries and exits).
+//!
+//! As a matter of fact, it must push the same C++ `wasm::Frame` on the stack before a call, and
+//! unwind it properly on exit. To keep this detail orthogonal to Cranelift, the function's
+//! prologue and epilogue are **not** generated by Cranelift itself; the C++ code generates them
+//! for us. Here, Cranelift only generates the code section and appropriate relocations.
+//! The C++ code writes the prologue, copies the machine code section, writes the epilogue, and
+//! translates the Cranelift relocations into Spidermonkey relocations.
+//!
+//! * To not generate the prologue and epilogue, Cranelift uses a special calling convention called
+//! Baldrdash in its code. This is set upon creation of the `TargetISA`.
+//! * Cranelift must know the offset to the stack argument's base, that is, the size of the
+//! wasm::Frame. The `baldrdash_prologue_words` setting is used to propagate this information to
+//! Cranelift.
+//! * Since Cranelift generated functions interact with Ion-ABI functions (Ionmonkey, other wasm
+//! functions), and native (host) functions, it has to respect both calling conventions. Especially
+//! when it comes to function calls it must preserve callee-saved and caller-saved registers in a
+//! way compatible with both ABIs. In practice, it means Cranelift must consider Ion's callee-saved
+//! as its callee-saved, and native's caller-saved as its caller-saved (since it deals with both
+//! ABIs, it has to union the sets).
+//!
+//! ### Maintaining HeapReg
+//!
+//! On some targets, Spidermonkey pins one register to keep the heap base accessible at all times,
+//! making memory accesses cheaper. This register is excluded from Ion's register allocation, and
+//! is manually maintained by Spidermonkey before and after calls.
+//!
+//! Cranelift has two settings to mimic the same behavior:
+//! - `enable_pinned_reg` makes it possible to pin a register and gives access to two Cranelift
+//! instructions for reading it and writing to it.
+//! - `use_pinned_reg_as_heap_base` makes the code generator use the pinned register as the heap
+//! base for all Cranelift IR memory accesses.
+//!
+//! Using both settings allows us to reproduce Spidermonkey's behavior. One caveat is that the pinned
+//! register used in Cranelift must match the HeapReg register in Spidermonkey, for this to work
+//! properly.
+//!
+//! Not using the pinned register as the heap base, when there's a heap register on the platform,
+//! means that we have to explicitly maintain it in the prologue and epilogue (because of tiering),
+//! which would be another source of slowness.
+//!
+//! ### Non-streaming validation
+//!
+//! Ionmonkey is able to iterate over the wasm code section's body, validating and emitting
+//! Ionmonkey's internal IR at the same time.
+//!
+//! Cranelift uses `wasmparser` to parse the wasm binary section, which doesn't provide per-opcode
+//! hooks. Instead, Cranelift validates each function's body (off the main thread) before
+//! compiling it, function by function.
+
+mod bindings;
+mod compile;
+mod isa;
+mod utils;
+mod wasm2clif;
+
+use log::{self, error};
+use std::ffi::CString;
+use std::fmt::Display;
+use std::os::raw::c_char;
+use std::ptr;
+
+use crate::bindings::{CompiledFunc, FuncCompileInput, ModuleEnvironment, StaticEnvironment};
+use crate::compile::BatchCompiler;
+use cranelift_codegen::CodegenError;
+
+/// Initializes all the process-wide Cranelift state. It must be called at least once, before any
+/// other use of this crate. Calling it more than once is harmless; subsequent calls simply have
+/// no effect.
+#[no_mangle]
+pub extern "C" fn cranelift_initialize() {
+ // Gecko might set a logger before we do, which is fine; try to initialize ours, and if that
+ // fails, restore the filter level that env_logger::try_init might have changed.
+ let filter = log::max_level();
+ match env_logger::try_init() {
+ Ok(_) => {}
+ Err(_) => {
+ log::set_max_level(filter);
+ }
+ }
+}
+
+/// Allocate a compiler for a module environment and return an opaque handle.
+///
+/// It is the caller's responsibility to deallocate the returned BatchCompiler later, passing back
+/// the opaque handle to a call to `cranelift_compiler_destroy`.
+///
+/// This is declared in `clifapi.h`.
+#[no_mangle]
+pub unsafe extern "C" fn cranelift_compiler_create<'a, 'b>(
+ static_env: *const StaticEnvironment,
+ env: *const bindings::LowLevelModuleEnvironment,
+) -> *mut BatchCompiler<'a, 'b> {
+ let env = env.as_ref().unwrap();
+ let static_env = static_env.as_ref().unwrap();
+ match BatchCompiler::new(static_env, ModuleEnvironment::new(env)) {
+ Ok(compiler) => Box::into_raw(Box::new(compiler)),
+ Err(err) => {
+ error!("When constructing the batch compiler: {}", err);
+ ptr::null_mut()
+ }
+ }
+}
+
+/// Deallocate a BatchCompiler created by `cranelift_compiler_create`.
+///
+/// Passing any other kind of pointer to this function is technically undefined behavior, thus
+/// making the function unsafe to use.
+///
+/// This is declared in `clifapi.h`.
+#[no_mangle]
+pub unsafe extern "C" fn cranelift_compiler_destroy(compiler: *mut BatchCompiler) {
+ assert!(
+ !compiler.is_null(),
+ "NULL pointer passed to cranelift_compiler_destroy"
+ );
+ // Convert the pointer back into the box it came from. Then drop it.
+ let _box = Box::from_raw(compiler);
+}
+
+fn error_to_cstring<D: Display>(err: D) -> *mut c_char {
+ use std::fmt::Write;
+ let mut s = String::new();
+ let _ = write!(&mut s, "{}", err);
+ let cstr = CString::new(s).unwrap();
+ cstr.into_raw()
+}
+
+/// Compile a single function.
+///
+/// This is declared in `clifapi.h`.
+///
+/// If a Wasm validation error is returned in *error, then it *must* be later
+/// freed by `cranelift_compiler_free_error()`.
+#[no_mangle]
+pub unsafe extern "C" fn cranelift_compile_function(
+ compiler: *mut BatchCompiler,
+ data: *const FuncCompileInput,
+ result: *mut CompiledFunc,
+ error: *mut *mut c_char,
+) -> bool {
+ let compiler = compiler.as_mut().unwrap();
+ let data = data.as_ref().unwrap();
+
+ // Reset the compiler to a clean state.
+ compiler.clear();
+
+ if let Err(e) = compiler.translate_wasm(data) {
+ let cstr = error_to_cstring(e);
+ *error = cstr;
+ return false;
+ };
+
+ if let Err(e) = compiler.compile(data.stackmaps()) {
+ // Make sure to panic on verifier errors, so that fuzzers see those. Other errors are about
+ // unsupported features or implementation limits, so just report them as a user-facing
+ // error.
+ match e {
+ CodegenError::Verifier(verifier_error) => {
+ panic!("Cranelift verifier error: {}", verifier_error);
+ }
+ CodegenError::ImplLimitExceeded
+ | CodegenError::CodeTooLarge
+ | CodegenError::Unsupported(_) => {
+ let cstr = error_to_cstring(e);
+ *error = cstr;
+ return false;
+ }
+ }
+ };
+
+ // TODO(bbouvier) if destroy is called while one of these objects is alive, you're going to
+ // have a bad time. Would be nice to be able to enforce lifetimes across languages, somehow.
+ let result = result.as_mut().unwrap();
+ result.reset(&compiler.current_func);
+
+ true
+}
+
+#[no_mangle]
+pub unsafe extern "C" fn cranelift_compiler_free_error(s: *mut c_char) {
+ // Convert back into a `CString` and then let it drop.
+ let _cstr = CString::from_raw(s);
+}
+
+/// Returns whether the current platform (target ISA) is supported.
+#[no_mangle]
+pub unsafe extern "C" fn cranelift_supports_platform() -> bool {
+ isa::platform::IS_SUPPORTED
+}
diff --git a/js/src/wasm/cranelift/src/utils.rs b/js/src/wasm/cranelift/src/utils.rs
new file mode 100644
index 0000000000..9bba288ff6
--- /dev/null
+++ b/js/src/wasm/cranelift/src/utils.rs
@@ -0,0 +1,55 @@
+/* Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! Helpers common to the other source files in this crate.
+use std::error;
+use std::fmt;
+
+use cranelift_wasm::WasmError;
+
+type DashError = Box<dyn error::Error>;
+pub type DashResult<T> = Result<T, DashError>;
+
+/// A simple error type that contains a string message, used to wrap raw Cranelift error types
+/// which don't implement std::error::Error.
+
+#[derive(Debug)]
+pub struct BasicError {
+ msg: String,
+}
+
+impl BasicError {
+ pub fn new(msg: String) -> Self {
+ Self { msg }
+ }
+}
+
+impl fmt::Display for BasicError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ write!(f, "BaldrDash error: {}", self.msg)
+ }
+}
+
+impl error::Error for BasicError {
+ fn description(&self) -> &str {
+ &self.msg
+ }
+}
+
+impl Into<WasmError> for BasicError {
+ fn into(self) -> WasmError {
+ WasmError::User(self.msg)
+ }
+}
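+
+// A minimal illustrative sketch of how `BasicError` flows through the plumbing above: the message
+// survives both the `Display` impl and the conversion into `WasmError`.
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn basic_error_message_is_preserved() {
+ let err = BasicError::new("unsupported setting".to_string());
+ assert_eq!(err.to_string(), "BaldrDash error: unsupported setting");
+
+ let wasm_err: WasmError = err.into();
+ match wasm_err {
+ WasmError::User(msg) => assert_eq!(msg, "unsupported setting"),
+ _ => panic!("expected WasmError::User"),
+ }
+ }
+}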
diff --git a/js/src/wasm/cranelift/src/wasm2clif.rs b/js/src/wasm/cranelift/src/wasm2clif.rs
new file mode 100644
index 0000000000..86e37bf2f0
--- /dev/null
+++ b/js/src/wasm/cranelift/src/wasm2clif.rs
@@ -0,0 +1,1433 @@
+/* Copyright 2018 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//! This module deals with the translation of WebAssembly binary functions to Cranelift IR.
+//!
+//! The code here deals with adapting the `cranelift_wasm` module to the specifics of BaldrMonkey's
+//! internal data structures.
+
+use std::collections::HashMap;
+use std::rc::Rc;
+
+use cranelift_codegen::cursor::{Cursor, FuncCursor};
+use cranelift_codegen::entity::{EntityRef, PrimaryMap, SecondaryMap};
+use cranelift_codegen::ir;
+use cranelift_codegen::ir::condcodes::IntCC;
+use cranelift_codegen::ir::immediates::Offset32;
+use cranelift_codegen::ir::InstBuilder;
+use cranelift_codegen::isa::{CallConv, TargetFrontendConfig, TargetIsa};
+use cranelift_codegen::packed_option::PackedOption;
+use cranelift_wasm::{
+ FuncEnvironment, FuncIndex, FunctionBuilder, GlobalIndex, GlobalVariable, MemoryIndex,
+ ReturnMode, TableIndex, TargetEnvironment, TypeIndex, WasmError, WasmResult,
+};
+
+use crate::bindings::{self, GlobalDesc, SymbolicAddress};
+use crate::compile::{symbolic_function_name, wasm_function_name};
+use crate::isa::{platform::USES_HEAP_REG, POINTER_SIZE};
+use bindings::typecode_to_nonvoid_type;
+
+#[cfg(target_pointer_width = "64")]
+pub const POINTER_TYPE: ir::Type = ir::types::I64;
+#[cfg(target_pointer_width = "32")]
+pub const POINTER_TYPE: ir::Type = ir::types::I32;
+
+#[cfg(target_pointer_width = "64")]
+pub const REF_TYPE: ir::Type = ir::types::R64;
+#[cfg(target_pointer_width = "32")]
+pub const REF_TYPE: ir::Type = ir::types::R32;
+
+/// Convert a TlsData offset into an `Offset32` for a global decl.
+fn offset32(offset: usize) -> ir::immediates::Offset32 {
+ assert!(offset <= i32::max_value() as usize);
+ (offset as i32).into()
+}
+
+/// Convert a usize offset into an `Imm64` for an iadd_imm.
+fn imm64(offset: usize) -> ir::immediates::Imm64 {
+ (offset as i64).into()
+}
+
+/// Initialize a `Signature` from a wasm signature.
+///
+/// These signatures are used by Cranelift both to perform calls (e.g., to other
+/// Wasm functions, or back to JS or native code) and to generate code that
+/// accesses its own args and sets its return value(s) properly.
+///
+/// Note that the extension modes are in principle applicable to *both* sides of
+/// the call. They must be respected when setting up args for a callee, and when
+/// setting up a return value to a caller; they may be used/relied upon when
+/// using an arg that came from a caller, or using a return value that came from
+/// a callee.
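+///
+/// As an illustration (not normative), a wasm signature `(i32, f64) -> i32` on a 64-bit target
+/// would produce roughly:
+///
+/// ```text
+/// params:  [ i32 (uext), f64, i64 vmctx, i64 callee_tls, i64 caller_tls ]
+/// returns: [ i32 (uext) ]
+/// ```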
+fn init_sig_from_wsig(call_conv: CallConv, wsig: &bindings::FuncType) -> WasmResult<ir::Signature> {
+ let mut sig = ir::Signature::new(call_conv);
+
+ for arg_type in wsig.args() {
+ let ty = typecode_to_nonvoid_type(*arg_type)?;
+ let arg = match ty {
+ // SpiderMonkey requires i32 arguments to callees (e.g., from Wasm
+ // back into JS or native code) to have their high 32 bits zero so
+ // that it can directly box them.
+ ir::types::I32 => ir::AbiParam::new(ty).uext(),
+ _ => ir::AbiParam::new(ty),
+ };
+ sig.params.push(arg);
+ }
+
+ for ret_type in wsig.results() {
+ let ty = typecode_to_nonvoid_type(*ret_type)?;
+ let ret = match ty {
+ // SpiderMonkey requires i32 returns to have their high 32 bits
+ // zero so that it can directly box them.
+ ir::types::I32 => ir::AbiParam::new(ty).uext(),
+ _ => ir::AbiParam::new(ty),
+ };
+ sig.returns.push(ret);
+ }
+
+ // Add a VM context pointer argument.
+ // This corresponds to SpiderMonkey's `WasmTlsReg` hidden argument.
+ sig.params.push(ir::AbiParam::special(
+ POINTER_TYPE,
+ ir::ArgumentPurpose::VMContext,
+ ));
+
+ // Add a callee-TLS and caller-TLS argument.
+ sig.params.push(ir::AbiParam::special(
+ POINTER_TYPE,
+ ir::ArgumentPurpose::CalleeTLS,
+ ));
+ sig.params.push(ir::AbiParam::special(
+ POINTER_TYPE,
+ ir::ArgumentPurpose::CallerTLS,
+ ));
+
+ Ok(sig)
+}
+
+/// Initialize the signature `sig` to match the function with `index` in `env`.
+pub fn init_sig(
+ env: &bindings::ModuleEnvironment,
+ call_conv: CallConv,
+ func_index: FuncIndex,
+) -> WasmResult<ir::Signature> {
+ let wsig = env.func_sig(func_index);
+ init_sig_from_wsig(call_conv, &wsig)
+}
+
+/// An instance call may return a special value to indicate that the operation
+/// failed and we need to trap. This indicates what kind of value to check for,
+/// if any.
+enum FailureMode {
+ Infallible,
+ /// The value returned by the function must be checked. Setting `internal_ret` to true indicates
+ /// that the returned value is only used internally and should not be passed back to wasm.
+ NotZero {
+ internal_ret: bool,
+ },
+ /// The value returned by the function must be checked. An error is deemed to have
+ /// happened if the value, when viewed as a signed 32-bit int, is negative.
+ IsNegativeI32,
+ InvalidRef,
+}
+
+/// A description of a builtin call to the `wasm::Instance`.
+struct InstanceCall {
+ address: SymbolicAddress,
+ arguments: &'static [ir::Type],
+ ret: Option<ir::Type>,
+ failure_mode: FailureMode,
+}
+
+// The following is the list of instance calls used to implement operations.
+
+const FN_MEMORY_GROW: InstanceCall = InstanceCall {
+ address: SymbolicAddress::MemoryGrow,
+ arguments: &[ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::Infallible,
+};
+const FN_MEMORY_SIZE: InstanceCall = InstanceCall {
+ address: SymbolicAddress::MemorySize,
+ arguments: &[],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::Infallible,
+};
+const FN_MEMORY_COPY: InstanceCall = InstanceCall {
+ address: SymbolicAddress::MemoryCopy,
+ arguments: &[ir::types::I32, ir::types::I32, ir::types::I32, POINTER_TYPE],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_MEMORY_COPY_SHARED: InstanceCall = InstanceCall {
+ address: SymbolicAddress::MemoryCopyShared,
+ arguments: &[ir::types::I32, ir::types::I32, ir::types::I32, POINTER_TYPE],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_MEMORY_FILL: InstanceCall = InstanceCall {
+ address: SymbolicAddress::MemoryFill,
+ arguments: &[ir::types::I32, ir::types::I32, ir::types::I32, POINTER_TYPE],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_MEMORY_FILL_SHARED: InstanceCall = InstanceCall {
+ address: SymbolicAddress::MemoryFillShared,
+ arguments: &[ir::types::I32, ir::types::I32, ir::types::I32, POINTER_TYPE],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_MEMORY_INIT: InstanceCall = InstanceCall {
+ address: SymbolicAddress::MemoryInit,
+ arguments: &[
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_DATA_DROP: InstanceCall = InstanceCall {
+ address: SymbolicAddress::DataDrop,
+ arguments: &[ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_TABLE_SIZE: InstanceCall = InstanceCall {
+ address: SymbolicAddress::TableSize,
+ arguments: &[ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::Infallible,
+};
+const FN_TABLE_GROW: InstanceCall = InstanceCall {
+ address: SymbolicAddress::TableGrow,
+ arguments: &[REF_TYPE, ir::types::I32, ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::Infallible,
+};
+const FN_TABLE_GET: InstanceCall = InstanceCall {
+ address: SymbolicAddress::TableGet,
+ arguments: &[ir::types::I32, ir::types::I32],
+ ret: Some(REF_TYPE),
+ failure_mode: FailureMode::InvalidRef,
+};
+const FN_TABLE_SET: InstanceCall = InstanceCall {
+ address: SymbolicAddress::TableSet,
+ arguments: &[ir::types::I32, REF_TYPE, ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_TABLE_COPY: InstanceCall = InstanceCall {
+ address: SymbolicAddress::TableCopy,
+ arguments: &[
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_TABLE_FILL: InstanceCall = InstanceCall {
+ address: SymbolicAddress::TableFill,
+ arguments: &[ir::types::I32, REF_TYPE, ir::types::I32, ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_TABLE_INIT: InstanceCall = InstanceCall {
+ address: SymbolicAddress::TableInit,
+ arguments: &[
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ir::types::I32,
+ ],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_ELEM_DROP: InstanceCall = InstanceCall {
+ address: SymbolicAddress::ElemDrop,
+ arguments: &[ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::NotZero { internal_ret: true },
+};
+const FN_REF_FUNC: InstanceCall = InstanceCall {
+ address: SymbolicAddress::RefFunc,
+ arguments: &[ir::types::I32],
+ ret: Some(REF_TYPE),
+ failure_mode: FailureMode::InvalidRef,
+};
+const FN_PRE_BARRIER: InstanceCall = InstanceCall {
+ address: SymbolicAddress::PreBarrier,
+ arguments: &[POINTER_TYPE],
+ ret: None,
+ failure_mode: FailureMode::Infallible,
+};
+const FN_POST_BARRIER: InstanceCall = InstanceCall {
+ address: SymbolicAddress::PostBarrier,
+ arguments: &[POINTER_TYPE],
+ ret: None,
+ failure_mode: FailureMode::Infallible,
+};
+const FN_WAIT_I32: InstanceCall = InstanceCall {
+ address: SymbolicAddress::WaitI32,
+ arguments: &[ir::types::I32, ir::types::I32, ir::types::I64],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::IsNegativeI32,
+};
+const FN_WAIT_I64: InstanceCall = InstanceCall {
+ address: SymbolicAddress::WaitI64,
+ arguments: &[ir::types::I32, ir::types::I64, ir::types::I64],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::IsNegativeI32,
+};
+const FN_WAKE: InstanceCall = InstanceCall {
+ address: SymbolicAddress::Wake,
+ arguments: &[ir::types::I32, ir::types::I32],
+ ret: Some(ir::types::I32),
+ failure_mode: FailureMode::IsNegativeI32,
+};
+
+// Custom trap codes specific to this embedding
+
+pub const TRAP_THROW_REPORTED: u16 = 1;
+
+/// A translation context that implements `FuncEnvironment` for the specific Spidermonkey
+/// translation bits.
+pub struct TransEnv<'static_env, 'module_env> {
+ static_env: &'static_env bindings::StaticEnvironment,
+ module_env: Rc<bindings::ModuleEnvironment<'module_env>>,
+
+ target_frontend_config: TargetFrontendConfig,
+
+ /// Information about the function pointer tables `self.module_env` knows about. Indexed by
+ /// table index.
+ tables: PrimaryMap<TableIndex, TableInfo>,
+
+ /// For those signatures whose ID is stored in a global, keep track of the globals we have
+ /// created so far.
+ ///
+ /// Note that most signatures are of the immediate form, and we don't keep any records for
+ /// those.
+ ///
+ /// The key to this table is the TLS offset returned by `sig_idTlsOffset()`.
+ signatures: HashMap<i32, ir::GlobalValue>,
+
+ /// Global variables containing `FuncImportTls` information about imported functions.
+ /// This vector is indexed by a `FuncIndex`, taking advantage of the fact that WebAssembly
+ /// imported functions are numbered starting from 0.
+ ///
+ /// Any `None` entries in this table are simply global variables that have not yet been created.
+ func_gvs: SecondaryMap<FuncIndex, PackedOption<ir::GlobalValue>>,
+
+ /// The `vmctx` global value.
+ vmctx_gv: PackedOption<ir::GlobalValue>,
+
+ /// Global variable representing the `TlsData::instance` field which points to the current
+ /// instance.
+ instance_gv: PackedOption<ir::GlobalValue>,
+
+ /// Global variable representing the `TlsData::interrupt` field which points to the current
+ /// interrupt flag.
+ interrupt_gv: PackedOption<ir::GlobalValue>,
+
+ /// Allocated `FuncRef` for symbolic addresses.
+ /// See the `SymbolicAddress` enum in `baldrapi.h`.
+ symbolic: [PackedOption<ir::FuncRef>; bindings::SymbolicAddress::Limit as usize],
+
+ /// The address of the `cx` field in the `wasm::TlsData` struct.
+ cx_addr: PackedOption<ir::GlobalValue>,
+
+ /// The address of the `realm` field in the `wasm::TlsData` struct.
+ realm_addr: PackedOption<ir::GlobalValue>,
+}
+
+impl<'static_env, 'module_env> TransEnv<'static_env, 'module_env> {
+ pub fn new(
+ isa: &dyn TargetIsa,
+ module_env: Rc<bindings::ModuleEnvironment<'module_env>>,
+ static_env: &'static_env bindings::StaticEnvironment,
+ ) -> Self {
+ TransEnv {
+ static_env,
+ module_env,
+ target_frontend_config: isa.frontend_config(),
+ tables: PrimaryMap::new(),
+ signatures: HashMap::new(),
+ func_gvs: SecondaryMap::new(),
+ vmctx_gv: None.into(),
+ instance_gv: None.into(),
+ interrupt_gv: None.into(),
+ symbolic: [None.into(); bindings::SymbolicAddress::Limit as usize],
+ cx_addr: None.into(),
+ realm_addr: None.into(),
+ }
+ }
+
+ pub fn clear(&mut self) {
+ self.tables.clear();
+ self.signatures.clear();
+ self.func_gvs.clear();
+ self.vmctx_gv = None.into();
+ self.instance_gv = None.into();
+ self.interrupt_gv = None.into();
+ for entry in self.symbolic.iter_mut() {
+ *entry = None.into();
+ }
+ self.cx_addr = None.into();
+ self.realm_addr = None.into();
+ }
+
+ /// Get the `vmctx` global value.
+ fn get_vmctx_gv(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
+ match self.vmctx_gv.expand() {
+ Some(gv) => gv,
+ None => {
+ // We need to allocate the global variable.
+ let gv = func.create_global_value(ir::GlobalValueData::VMContext);
+ self.vmctx_gv = Some(gv).into();
+ gv
+ }
+ }
+ }
+
+ /// Get information about `table`.
+ /// Create it if necessary.
+ fn get_table(&mut self, func: &mut ir::Function, table: TableIndex) -> TableInfo {
+ // Allocate all tables up to the requested index.
+ let vmctx = self.get_vmctx_gv(func);
+ while self.tables.len() <= table.index() {
+ let wtab = self.module_env.table(TableIndex::new(self.tables.len()));
+ self.tables.push(TableInfo::new(wtab, func, vmctx));
+ }
+ self.tables[table].clone()
+ }
+
+ /// Get the global variable storing the ID of the given signature.
+ fn sig_global(&mut self, func: &mut ir::Function, offset: usize) -> ir::GlobalValue {
+ let vmctx = self.get_vmctx_gv(func);
+ *self.signatures.entry(offset as i32).or_insert_with(|| {
+ func.create_global_value(ir::GlobalValueData::IAddImm {
+ base: vmctx,
+ offset: imm64(offset),
+ global_type: POINTER_TYPE,
+ })
+ })
+ }
+
+ /// Get the global variable storing the `FuncImportTls` struct for an imported function.
+ fn func_import_global(&mut self, func: &mut ir::Function, index: FuncIndex) -> ir::GlobalValue {
+ // See if we already allocated a global for this import.
+ if let Some(gv) = self.func_gvs.get(index).and_then(|gv| gv.expand()) {
+ return gv;
+ }
+ // We need to create a global variable for `import_index`.
+ let vmctx = self.get_vmctx_gv(func);
+ let gv = func.create_global_value(ir::GlobalValueData::IAddImm {
+ base: vmctx,
+ offset: imm64(self.module_env.func_import_tls_offset(index)),
+ global_type: POINTER_TYPE,
+ });
+ // Save it for next time.
+ self.func_gvs[index] = gv.into();
+ gv
+ }
+
+ /// Generate code that loads the current instance pointer.
+ fn load_instance(&mut self, pos: &mut FuncCursor) -> ir::Value {
+ let gv = match self.instance_gv.expand() {
+ Some(gv) => gv,
+ None => {
+ // We need to allocate the global variable.
+ let vmctx = self.get_vmctx_gv(pos.func);
+ let gv = pos.func.create_global_value(ir::GlobalValueData::IAddImm {
+ base: vmctx,
+ offset: imm64(self.static_env.instance_tls_offset),
+ global_type: POINTER_TYPE,
+ });
+ self.instance_gv = gv.into();
+ gv
+ }
+ };
+ let ga = pos.ins().global_value(POINTER_TYPE, gv);
+ pos.ins().load(POINTER_TYPE, ir::MemFlags::trusted(), ga, 0)
+ }
+
+ /// Generate code that loads the current interrupt flag.
+ fn load_interrupt_flag(&mut self, pos: &mut FuncCursor) -> ir::Value {
+ let gv = match self.interrupt_gv.expand() {
+ Some(gv) => gv,
+ None => {
+ // We need to allocate the global variable.
+ let vmctx = self.get_vmctx_gv(pos.func);
+ let gv = pos.func.create_global_value(ir::GlobalValueData::IAddImm {
+ base: vmctx,
+ offset: imm64(self.static_env.interrupt_tls_offset),
+ global_type: POINTER_TYPE,
+ });
+ self.interrupt_gv = gv.into();
+ gv
+ }
+ };
+ let ga = pos.ins().global_value(POINTER_TYPE, gv);
+ pos.ins()
+ .load(ir::types::I32, ir::MemFlags::trusted(), ga, 0)
+ }
+
+ /// Get a `FuncRef` for the given symbolic address.
+ /// Uses the closure to create the signature if necessary.
+ fn symbolic_funcref<MKSIG: FnOnce() -> ir::Signature>(
+ &mut self,
+ func: &mut ir::Function,
+ sym: bindings::SymbolicAddress,
+ make_sig: MKSIG,
+ ) -> (ir::FuncRef, ir::SigRef) {
+ let symidx = sym as usize;
+ if let Some(fnref) = self.symbolic[symidx].expand() {
+ return (fnref, func.dfg.ext_funcs[fnref].signature);
+ }
+
+ // We need to allocate a signature and func-ref.
+ let signature = func.import_signature(make_sig());
+ let fnref = func.import_function(ir::ExtFuncData {
+ signature,
+ name: symbolic_function_name(sym),
+ colocated: false,
+ });
+
+ self.symbolic[symidx] = fnref.into();
+ (fnref, signature)
+ }
+
+ /// Update the JSContext's realm value. This is called after a call to restore the
+ /// realm value, in case the call has used a different realm.
+ fn switch_to_wasm_tls_realm(&mut self, pos: &mut FuncCursor) {
+ if self.cx_addr.is_none() {
+ let vmctx = self.get_vmctx_gv(&mut pos.func);
+ self.cx_addr = pos
+ .func
+ .create_global_value(ir::GlobalValueData::IAddImm {
+ base: vmctx,
+ offset: imm64(self.static_env.cx_tls_offset),
+ global_type: POINTER_TYPE,
+ })
+ .into();
+ }
+
+ if self.realm_addr.is_none() {
+ let vmctx = self.get_vmctx_gv(&mut pos.func);
+ self.realm_addr = pos
+ .func
+ .create_global_value(ir::GlobalValueData::IAddImm {
+ base: vmctx,
+ offset: imm64(self.static_env.realm_tls_offset),
+ global_type: POINTER_TYPE,
+ })
+ .into();
+ }
+
+ let ptr = POINTER_TYPE;
+ let flags = ir::MemFlags::trusted();
+ let cx_addr_val = pos.ins().global_value(ptr, self.cx_addr.unwrap());
+ let cx = pos.ins().load(ptr, flags, cx_addr_val, 0);
+ let realm_addr_val = pos.ins().global_value(ptr, self.realm_addr.unwrap());
+ let realm = pos.ins().load(ptr, flags, realm_addr_val, 0);
+ pos.ins()
+ .store(flags, realm, cx, offset32(self.static_env.realm_cx_offset));
+ }
+
+ /// Update the JSContext's realm value in preparation for making an indirect call through
+ /// an external table.
+ fn switch_to_indirect_callee_realm(&mut self, pos: &mut FuncCursor, vmctx: ir::Value) {
+ let ptr = POINTER_TYPE;
+ let flags = ir::MemFlags::trusted();
+ let cx = pos
+ .ins()
+ .load(ptr, flags, vmctx, offset32(self.static_env.cx_tls_offset));
+ let realm = pos.ins().load(
+ ptr,
+ flags,
+ vmctx,
+ offset32(self.static_env.realm_tls_offset),
+ );
+ pos.ins()
+ .store(flags, realm, cx, offset32(self.static_env.realm_cx_offset));
+ }
+
+ /// Update the JSContext's realm value in preparation for making a call to an imported
+ /// function.
+ fn switch_to_import_realm(
+ &mut self,
+ pos: &mut FuncCursor,
+ vmctx: ir::Value,
+ gv_addr: ir::Value,
+ ) {
+ let ptr = POINTER_TYPE;
+ let flags = ir::MemFlags::trusted();
+ let cx = pos
+ .ins()
+ .load(ptr, flags, vmctx, offset32(self.static_env.cx_tls_offset));
+ let realm = pos.ins().load(
+ ptr,
+ flags,
+ gv_addr,
+ offset32(self.static_env.realm_func_import_tls_offset),
+ );
+ pos.ins()
+ .store(flags, realm, cx, offset32(self.static_env.realm_cx_offset));
+ }
+
+ fn load_pinned_reg(&self, pos: &mut FuncCursor, vmctx: ir::Value) {
+ if USES_HEAP_REG {
+ let heap_base = pos.ins().load(
+ POINTER_TYPE,
+ ir::MemFlags::trusted(),
+ vmctx,
+ self.static_env.memory_base_tls_offset as i32,
+ );
+ pos.ins().set_pinned_reg(heap_base);
+ }
+ }
+
+ fn reload_tls_and_pinned_regs(&mut self, pos: &mut FuncCursor) {
+ let vmctx_gv = self.get_vmctx_gv(&mut pos.func);
+ let vmctx = pos.ins().global_value(POINTER_TYPE, vmctx_gv);
+ self.load_pinned_reg(pos, vmctx);
+ }
+
+ fn instance_call(
+ &mut self,
+ pos: &mut FuncCursor,
+ call: &InstanceCall,
+ arguments: &[ir::Value],
+ ) -> Option<ir::Value> {
+ debug_assert!(call.arguments.len() == arguments.len());
+
+ let call_conv = self.static_env.call_conv();
+ let (fnref, sigref) = self.symbolic_funcref(pos.func, call.address, || {
+ let mut sig = ir::Signature::new(call_conv);
+ sig.params.push(ir::AbiParam::new(POINTER_TYPE));
+ for argument in call.arguments {
+ sig.params.push(ir::AbiParam::new(*argument));
+ }
+ sig.params.push(ir::AbiParam::special(
+ POINTER_TYPE,
+ ir::ArgumentPurpose::VMContext,
+ ));
+ // Add a callee-TLS and caller-TLS argument.
+ sig.params.push(ir::AbiParam::special(
+ POINTER_TYPE,
+ ir::ArgumentPurpose::CalleeTLS,
+ ));
+ sig.params.push(ir::AbiParam::special(
+ POINTER_TYPE,
+ ir::ArgumentPurpose::CallerTLS,
+ ));
+ if let Some(ret) = &call.ret {
+ sig.returns.push(ir::AbiParam::new(*ret));
+ }
+ sig
+ });
+
+ let instance = self.load_instance(pos);
+ let vmctx = pos
+ .func
+ .special_param(ir::ArgumentPurpose::VMContext)
+ .expect("Missing vmctx arg");
+
+ // We must use `func_addr` for symbolic references since the stubs can be far away, and the
+ // C++ `SymbolicAccess` linker expects it.
+
+ let func_addr = pos.ins().func_addr(POINTER_TYPE, fnref);
+ let call_ins = pos.ins().call_indirect(sigref, func_addr, &[]);
+ let mut built_arguments = pos.func.dfg[call_ins].take_value_list().unwrap();
+ built_arguments.push(instance, &mut pos.func.dfg.value_lists);
+ built_arguments.extend(arguments.iter().cloned(), &mut pos.func.dfg.value_lists);
+ built_arguments.push(vmctx, &mut pos.func.dfg.value_lists);
+ built_arguments.push(vmctx, &mut pos.func.dfg.value_lists); // callee_tls
+ built_arguments.push(vmctx, &mut pos.func.dfg.value_lists); // caller_tls
+ pos.func.dfg[call_ins].put_value_list(built_arguments);
+
+ self.switch_to_wasm_tls_realm(pos);
+ self.reload_tls_and_pinned_regs(pos);
+
+ if call.ret.is_none() {
+ return None;
+ }
+
+ let ret = pos.func.dfg.first_result(call_ins);
+ match call.failure_mode {
+ FailureMode::Infallible => Some(ret),
+ FailureMode::NotZero { internal_ret } => {
+ pos.ins()
+ .trapnz(ret, ir::TrapCode::User(TRAP_THROW_REPORTED));
+ if internal_ret {
+ None
+ } else {
+ Some(ret)
+ }
+ }
+ FailureMode::IsNegativeI32 => {
+ let ty = pos.func.dfg.value_type(ret);
+ assert!(ty == ir::types::I32);
+ let f = pos.ins().ifcmp_imm(ret, i64::from(0));
+ pos.ins().trapif(
+ IntCC::SignedLessThan,
+ f,
+ ir::TrapCode::User(TRAP_THROW_REPORTED),
+ );
+ Some(ret)
+ }
+ FailureMode::InvalidRef => {
+ let invalid = pos.ins().is_invalid(ret);
+ pos.ins()
+ .trapnz(invalid, ir::TrapCode::User(TRAP_THROW_REPORTED));
+ Some(ret)
+ }
+ }
+ }
+
+ fn global_address(
+ &mut self,
+ func: &mut ir::Function,
+ global: &GlobalDesc,
+ ) -> (ir::GlobalValue, Offset32) {
+ assert!(!global.is_constant());
+
+ // This is a global variable. Here we don't care if it is mutable or not.
+ let vmctx_gv = self.get_vmctx_gv(func);
+ let offset = global.tls_offset();
+
+ // Some globals are represented as a pointer to the actual data, in which case we
+ // must do an extra dereference to get to them. Also, in that case, the pointer
+ // itself is immutable, so we mark it `readonly` here to assist Cranelift in commoning
+ // up what would otherwise be multiple adjacent reads of the value.
+ if global.is_indirect() {
+ let gv = func.create_global_value(ir::GlobalValueData::Load {
+ base: vmctx_gv,
+ offset: offset32(offset),
+ global_type: POINTER_TYPE,
+ readonly: true,
+ });
+ (gv, 0.into())
+ } else {
+ (vmctx_gv, offset32(offset))
+ }
+ }
+}
+
+impl<'static_env, 'module_env> TargetEnvironment for TransEnv<'static_env, 'module_env> {
+ fn target_config(&self) -> TargetFrontendConfig {
+ self.target_frontend_config
+ }
+ fn pointer_type(&self) -> ir::Type {
+ POINTER_TYPE
+ }
+}
+
+impl<'static_env, 'module_env> FuncEnvironment for TransEnv<'static_env, 'module_env> {
+ fn make_global(
+ &mut self,
+ func: &mut ir::Function,
+ index: GlobalIndex,
+ ) -> WasmResult<GlobalVariable> {
+ let global = self.module_env.global(index);
+ if global.is_constant() {
+ // Constant globals have a known value at compile time. We insert an instruction to
+ // materialize the constant at the front of the entry block.
+ let mut pos = FuncCursor::new(func);
+ pos.next_block().expect("empty function");
+ pos.next_inst();
+ return Ok(GlobalVariable::Const(global.emit_constant(&mut pos)?));
+ }
+
+ match global.value_type()? {
+ ir::types::R32 | ir::types::R64 => Ok(GlobalVariable::Custom),
+ _ => {
+ let (base_gv, offset) = self.global_address(func, &global);
+ let mem_ty = global.value_type()?;
+
+ Ok(GlobalVariable::Memory {
+ gv: base_gv,
+ ty: mem_ty,
+ offset,
+ })
+ }
+ }
+ }
+
+ fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<ir::Heap> {
+ // Currently, Baldrdash doesn't support multiple memories.
+ if index.index() != 0 {
+ return Err(WasmError::Unsupported(
+ "only one wasm memory supported".to_string(),
+ ));
+ }
+
+ let vmctx = self.get_vmctx_gv(func);
+
+ let bound = self.static_env.static_memory_bound as u64;
+ let is_static = bound > 0;
+
+ // Get the `TlsData::memoryBase` field.
+ let base = func.create_global_value(ir::GlobalValueData::Load {
+ base: vmctx,
+ offset: offset32(0),
+ global_type: POINTER_TYPE,
+ readonly: is_static,
+ });
+
+ let style = if is_static {
+ // We have a static heap.
+ let bound = bound.into();
+ ir::HeapStyle::Static { bound }
+ } else {
+ // Get the `TlsData::boundsCheckLimit` field.
+ let bound_gv = func.create_global_value(ir::GlobalValueData::Load {
+ base: vmctx,
+ offset: (POINTER_SIZE as i32).into(),
+ global_type: ir::types::I32,
+ readonly: false,
+ });
+ ir::HeapStyle::Dynamic { bound_gv }
+ };
+
+ let min_size = (self.module_env.min_memory_length() as u64).into();
+ let offset_guard_size = (self.static_env.memory_guard_size as u64).into();
+
+ Ok(func.create_heap(ir::HeapData {
+ base,
+ min_size,
+ offset_guard_size,
+ style,
+ index_type: ir::types::I32,
+ }))
+ }
+
+ fn make_indirect_sig(
+ &mut self,
+ func: &mut ir::Function,
+ index: TypeIndex,
+ ) -> WasmResult<ir::SigRef> {
+ let wsig = self.module_env.signature(index);
+ let wsig_id = self.module_env.signature_id(index);
+ let mut sigdata = init_sig_from_wsig(self.static_env.call_conv(), &wsig)?;
+
+ if wsig_id.id_kind() != bindings::TypeIdDescKind::None {
+ // A signature to be used for an indirect call also takes a signature id.
+ sigdata.params.push(ir::AbiParam::special(
+ POINTER_TYPE,
+ ir::ArgumentPurpose::SignatureId,
+ ));
+ }
+
+ Ok(func.import_signature(sigdata))
+ }
+
+ fn make_table(&mut self, func: &mut ir::Function, index: TableIndex) -> WasmResult<ir::Table> {
+ let table_desc = self.get_table(func, index);
+
+ // TODO we'd need a better way to synchronize the shape of GlobalDataDesc and these
+ // offsets.
+ let bound_gv = func.create_global_value(ir::GlobalValueData::Load {
+ base: table_desc.global,
+ offset: 0.into(),
+ global_type: ir::types::I32,
+ readonly: false,
+ });
+
+ let base_gv = func.create_global_value(ir::GlobalValueData::Load {
+ base: table_desc.global,
+ offset: offset32(POINTER_SIZE as usize),
+ global_type: POINTER_TYPE,
+ readonly: false,
+ });
+
+ Ok(func.create_table(ir::TableData {
+ base_gv,
+ min_size: 0.into(),
+ bound_gv,
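+            // Two pointer-sized fields per entry: code pointer and callee TLS pointer.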
+ element_size: (u64::from(self.pointer_bytes()) * 2).into(),
+ index_type: ir::types::I32,
+ }))
+ }
+
+ fn make_direct_func(
+ &mut self,
+ func: &mut ir::Function,
+ index: FuncIndex,
+ ) -> WasmResult<ir::FuncRef> {
+ // Create a signature.
+ let sigdata = init_sig(&*self.module_env, self.static_env.call_conv(), index)?;
+ let signature = func.import_signature(sigdata);
+
+ Ok(func.import_function(ir::ExtFuncData {
+ name: wasm_function_name(index),
+ signature,
+ colocated: true,
+ }))
+ }
+
+ fn translate_call_indirect(
+ &mut self,
+ mut pos: FuncCursor,
+ table_index: TableIndex,
+ table: ir::Table,
+ sig_index: TypeIndex,
+ sig_ref: ir::SigRef,
+ callee: ir::Value,
+ call_args: &[ir::Value],
+ ) -> WasmResult<ir::Inst> {
+ let wsig_id = self.module_env.signature_id(sig_index);
+
+ let wtable = self.get_table(pos.func, table_index);
+
+ // Follows `MacroAssembler::wasmCallIndirect`:
+
+ // 1. Materialize the signature ID.
+ let sigid_value = match wsig_id.id_kind() {
+ bindings::TypeIdDescKind::None => None,
+ bindings::TypeIdDescKind::Immediate => {
+ // The signature is represented as an immediate pointer-sized value.
+ let imm = wsig_id.id_immediate() as i64;
+ Some(pos.ins().iconst(POINTER_TYPE, imm))
+ }
+ bindings::TypeIdDescKind::Global => {
+ let gv = self.sig_global(pos.func, wsig_id.id_tls_offset());
+ let addr = pos.ins().global_value(POINTER_TYPE, gv);
+ Some(
+ pos.ins()
+ .load(POINTER_TYPE, ir::MemFlags::trusted(), addr, 0),
+ )
+ }
+ };
+
+ // 2. Bounds check the callee against the table length.
+ let (bound_gv, base_gv) = {
+ let table_data = &pos.func.tables[table];
+ (table_data.bound_gv, table_data.base_gv)
+ };
+
+ let tlength = pos.ins().global_value(ir::types::I32, bound_gv);
+
+ let oob = pos
+ .ins()
+ .icmp(IntCC::UnsignedGreaterThanOrEqual, callee, tlength);
+ pos.ins().trapnz(oob, ir::TrapCode::TableOutOfBounds);
+
+ // 3. Load the wtable base pointer from a global.
+ let tbase = pos.ins().global_value(POINTER_TYPE, base_gv);
+
+ // 4. Load callee pointer from wtable.
+ let callee_x = if POINTER_TYPE != ir::types::I32 {
+ pos.ins().uextend(POINTER_TYPE, callee)
+ } else {
+ callee
+ };
+ let callee_scaled = pos.ins().imul_imm(callee_x, wtable.entry_size());
+
+ let entry = pos.ins().iadd(tbase, callee_scaled);
+ let callee_func = pos
+ .ins()
+ .load(POINTER_TYPE, ir::MemFlags::trusted(), entry, 0);
+
+ // Check for a null callee.
+ pos.ins()
+ .trapz(callee_func, ir::TrapCode::IndirectCallToNull);
+
+ // Get the caller TLS value.
+ let vmctx_gv = self.get_vmctx_gv(&mut pos.func);
+ let caller_vmctx = pos.ins().global_value(POINTER_TYPE, vmctx_gv);
+
+ // Handle external tables, set up environment.
+ // A function table call could redirect execution to another module with a different realm,
+ // so switch to this realm just in case.
+ let callee_vmctx = pos.ins().load(
+ POINTER_TYPE,
+ ir::MemFlags::trusted(),
+ entry,
+ POINTER_SIZE as i32,
+ );
+ self.switch_to_indirect_callee_realm(&mut pos, callee_vmctx);
+ self.load_pinned_reg(&mut pos, callee_vmctx);
+
+        // Build the argument list: the callee code pointer first, then the wasm
+        // args, the callee/caller TLS slots, and finally the optional signature id.
+ let mut args = ir::ValueList::default();
+ args.push(callee_func, &mut pos.func.dfg.value_lists);
+ args.extend(call_args.iter().cloned(), &mut pos.func.dfg.value_lists);
+ args.push(callee_vmctx, &mut pos.func.dfg.value_lists);
+ args.push(callee_vmctx, &mut pos.func.dfg.value_lists);
+ args.push(caller_vmctx, &mut pos.func.dfg.value_lists);
+ if let Some(sigid) = sigid_value {
+ args.push(sigid, &mut pos.func.dfg.value_lists);
+ }
+
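+        // Emit the call through the raw instruction builder so the extra TLS and
+        // signature-id operands collected in `args` can be passed along.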
+ let call = pos
+ .ins()
+ .CallIndirect(ir::Opcode::CallIndirect, ir::types::INVALID, sig_ref, args)
+ .0;
+
+ self.switch_to_wasm_tls_realm(&mut pos);
+ self.reload_tls_and_pinned_regs(&mut pos);
+
+ Ok(call)
+ }
+
+ fn translate_call(
+ &mut self,
+ mut pos: FuncCursor,
+ callee_index: FuncIndex,
+ callee: ir::FuncRef,
+ call_args: &[ir::Value],
+ ) -> WasmResult<ir::Inst> {
+ // First the wasm args.
+ let mut args = ir::ValueList::default();
+ args.extend(call_args.iter().cloned(), &mut pos.func.dfg.value_lists);
+
+ // Is this an imported function in a different instance, or a local function?
+ if self.module_env.func_is_import(callee_index) {
+ // This is a call to an imported function. We need to load the callee address and vmctx
+ // from the associated `FuncImportTls` struct in a global.
+ let gv = self.func_import_global(pos.func, callee_index);
+ let gv_addr = pos.ins().global_value(POINTER_TYPE, gv);
+
+ // We need the first two pointer-sized fields from the `FuncImportTls` struct: `code`
+ // and `tls`.
+ let fit_code = pos
+ .ins()
+ .load(POINTER_TYPE, ir::MemFlags::trusted(), gv_addr, 0);
+ let fit_tls = pos.ins().load(
+ POINTER_TYPE,
+ ir::MemFlags::trusted(),
+ gv_addr,
+ POINTER_SIZE as i32,
+ );
+
+ // Save the caller TLS value.
+ let vmctx_gv = self.get_vmctx_gv(&mut pos.func);
+ let caller_vmctx = pos.ins().global_value(POINTER_TYPE, vmctx_gv);
+
+ // Switch to the callee's realm.
+ self.switch_to_import_realm(&mut pos, fit_tls, gv_addr);
+ self.load_pinned_reg(&mut pos, fit_tls);
+
+ // The `tls` field is the VM context pointer for the callee.
+ args.push(fit_tls, &mut pos.func.dfg.value_lists);
+
+ // callee-TLS slot (ABI-2020).
+ args.push(fit_tls, &mut pos.func.dfg.value_lists);
+ // caller-TLS slot (ABI-2020).
+ args.push(caller_vmctx, &mut pos.func.dfg.value_lists);
+
+ // Now make an indirect call to `fit_code`.
+ // TODO: We don't need the `FuncRef` that was allocated for this callee since we're
+ // using an indirect call. We would need to change the `FuncTranslator` interface to
+ // deal.
+ args.insert(0, fit_code, &mut pos.func.dfg.value_lists);
+ let sig = pos.func.dfg.ext_funcs[callee].signature;
+ let call = pos
+ .ins()
+ .CallIndirect(ir::Opcode::CallIndirect, ir::types::INVALID, sig, args)
+ .0;
+ self.switch_to_wasm_tls_realm(&mut pos);
+ self.reload_tls_and_pinned_regs(&mut pos);
+ Ok(call)
+ } else {
+ // This is a call to a local function.
+
+            // Pass on our own VM context pointer after the wasm args.
+ let vmctx = pos
+ .func
+ .special_param(ir::ArgumentPurpose::VMContext)
+ .expect("Missing vmctx arg");
+ args.push(vmctx, &mut pos.func.dfg.value_lists);
+
+ // callee-TLS slot (ABI-2020).
+ args.push(vmctx, &mut pos.func.dfg.value_lists);
+ // caller-TLS slot (ABI-2020).
+ args.push(vmctx, &mut pos.func.dfg.value_lists);
+
+ Ok(pos
+ .ins()
+ .Call(ir::Opcode::Call, ir::types::INVALID, callee, args)
+ .0)
+ }
+ }
+
+ fn translate_memory_grow(
+ &mut self,
+ mut pos: FuncCursor,
+ _index: MemoryIndex,
+ _heap: ir::Heap,
+ val: ir::Value,
+ ) -> WasmResult<ir::Value> {
+ Ok(self
+ .instance_call(&mut pos, &FN_MEMORY_GROW, &[val])
+ .unwrap())
+ }
+
+ fn translate_memory_size(
+ &mut self,
+ mut pos: FuncCursor,
+ _index: MemoryIndex,
+ _heap: ir::Heap,
+ ) -> WasmResult<ir::Value> {
+ Ok(self.instance_call(&mut pos, &FN_MEMORY_SIZE, &[]).unwrap())
+ }
+
+ fn translate_memory_copy(
+ &mut self,
+ mut pos: FuncCursor,
+ _src_index: MemoryIndex,
+ src_heap: ir::Heap,
+ _dst_index: MemoryIndex,
+ dst_heap: ir::Heap,
+ dst: ir::Value,
+ src: ir::Value,
+ len: ir::Value,
+ ) -> WasmResult<()> {
+ if src_heap != dst_heap {
+ return Err(WasmError::Unsupported(
+ "memory_copy between different heaps is not supported".to_string(),
+ ));
+ }
+ let heap = src_heap;
+ let heap_gv = pos.func.heaps[heap].base;
+ let mem_base = pos.ins().global_value(POINTER_TYPE, heap_gv);
+
+        // There are specialized instance calls for `memory.copy` depending on
+        // whether the memory is shared or not.
+ let ret = if self.module_env.uses_shared_memory() {
+ self.instance_call(&mut pos, &FN_MEMORY_COPY_SHARED, &[dst, src, len, mem_base])
+ } else {
+ self.instance_call(&mut pos, &FN_MEMORY_COPY, &[dst, src, len, mem_base])
+ };
+ debug_assert!(ret.is_none());
+ Ok(())
+ }
+
+ fn translate_memory_fill(
+ &mut self,
+ mut pos: FuncCursor,
+ _index: MemoryIndex,
+ heap: ir::Heap,
+ dst: ir::Value,
+ val: ir::Value,
+ len: ir::Value,
+ ) -> WasmResult<()> {
+ let mem_base_gv = pos.func.heaps[heap].base;
+ let mem_base = pos.ins().global_value(POINTER_TYPE, mem_base_gv);
+
+        // There are specialized instance calls for `memory.fill` depending on
+        // whether the memory is shared or not.
+ let ret = if self.module_env.uses_shared_memory() {
+ self.instance_call(&mut pos, &FN_MEMORY_FILL_SHARED, &[dst, val, len, mem_base])
+ } else {
+ self.instance_call(&mut pos, &FN_MEMORY_FILL, &[dst, val, len, mem_base])
+ };
+ debug_assert!(ret.is_none());
+ Ok(())
+ }
+
+ fn translate_memory_init(
+ &mut self,
+ mut pos: FuncCursor,
+ _index: MemoryIndex,
+ _heap: ir::Heap,
+ seg_index: u32,
+ dst: ir::Value,
+ src: ir::Value,
+ len: ir::Value,
+ ) -> WasmResult<()> {
+ let seg_index = pos.ins().iconst(ir::types::I32, seg_index as i64);
+ let ret = self.instance_call(&mut pos, &FN_MEMORY_INIT, &[dst, src, len, seg_index]);
+ debug_assert!(ret.is_none());
+ Ok(())
+ }
+
+ fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> {
+ let seg_index = pos.ins().iconst(ir::types::I32, seg_index as i64);
+ let ret = self.instance_call(&mut pos, &FN_DATA_DROP, &[seg_index]);
+ debug_assert!(ret.is_none());
+ Ok(())
+ }
+
+ fn translate_table_size(
+ &mut self,
+ mut pos: FuncCursor,
+ table_index: TableIndex,
+ _table: ir::Table,
+ ) -> WasmResult<ir::Value> {
+ let table_index = pos.ins().iconst(ir::types::I32, table_index.index() as i64);
+ Ok(self
+ .instance_call(&mut pos, &FN_TABLE_SIZE, &[table_index])
+ .unwrap())
+ }
+
+ fn translate_table_grow(
+ &mut self,
+ mut pos: FuncCursor,
+ table_index: TableIndex,
+ _table: ir::Table,
+ delta: ir::Value,
+ init_value: ir::Value,
+ ) -> WasmResult<ir::Value> {
+ let table_index = pos.ins().iconst(ir::types::I32, table_index.index() as i64);
+ Ok(self
+ .instance_call(&mut pos, &FN_TABLE_GROW, &[init_value, delta, table_index])
+ .unwrap())
+ }
+
+ fn translate_table_get(
+ &mut self,
+ builder: &mut FunctionBuilder,
+ table_index: TableIndex,
+ _table: ir::Table,
+ index: ir::Value,
+ ) -> WasmResult<ir::Value> {
+ // TODO(bug 1650038): make use of the `FunctionBuilder` here and its
+ // ability to edit the CFG in order to add a fast-path.
+ let mut pos = builder.cursor();
+ let table_index = pos.ins().iconst(ir::types::I32, table_index.index() as i64);
+ Ok(self
+ .instance_call(&mut pos, &FN_TABLE_GET, &[index, table_index])
+ .unwrap())
+ }
+
+ fn translate_table_set(
+ &mut self,
+ builder: &mut FunctionBuilder,
+ table_index: TableIndex,
+ _table: ir::Table,
+ value: ir::Value,
+ index: ir::Value,
+ ) -> WasmResult<()> {
+ // TODO(bug 1650038): make use of the `FunctionBuilder` here and its
+ // ability to edit the CFG in order to add a fast-path.
+ let mut pos = builder.cursor();
+ let table_index = pos.ins().iconst(ir::types::I32, table_index.index() as i64);
+ self.instance_call(&mut pos, &FN_TABLE_SET, &[index, value, table_index]);
+ Ok(())
+ }
+
+ fn translate_table_copy(
+ &mut self,
+ mut pos: FuncCursor,
+ dst_table_index: TableIndex,
+ _dst_table: ir::Table,
+ src_table_index: TableIndex,
+ _src_table: ir::Table,
+ dst: ir::Value,
+ src: ir::Value,
+ len: ir::Value,
+ ) -> WasmResult<()> {
+ let dst_index = pos
+ .ins()
+ .iconst(ir::types::I32, dst_table_index.index() as i64);
+ let src_index = pos
+ .ins()
+ .iconst(ir::types::I32, src_table_index.index() as i64);
+ self.instance_call(
+ &mut pos,
+ &FN_TABLE_COPY,
+ &[dst, src, len, dst_index, src_index],
+ );
+ Ok(())
+ }
+
+ fn translate_table_fill(
+ &mut self,
+ mut pos: FuncCursor,
+ table_index: TableIndex,
+ dst: ir::Value,
+ val: ir::Value,
+ len: ir::Value,
+ ) -> WasmResult<()> {
+ let table_index = pos.ins().iconst(ir::types::I32, table_index.index() as i64);
+ self.instance_call(&mut pos, &FN_TABLE_FILL, &[dst, val, len, table_index]);
+ Ok(())
+ }
+
+ fn translate_table_init(
+ &mut self,
+ mut pos: FuncCursor,
+ seg_index: u32,
+ table_index: TableIndex,
+ _table: ir::Table,
+ dst: ir::Value,
+ src: ir::Value,
+ len: ir::Value,
+ ) -> WasmResult<()> {
+ let seg_index = pos.ins().iconst(ir::types::I32, seg_index as i64);
+ let table_index = pos.ins().iconst(ir::types::I32, table_index.index() as i64);
+ let ret = self.instance_call(
+ &mut pos,
+ &FN_TABLE_INIT,
+ &[dst, src, len, seg_index, table_index],
+ );
+ debug_assert!(ret.is_none());
+ Ok(())
+ }
+
+ fn translate_elem_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> {
+ let seg_index = pos.ins().iconst(ir::types::I32, seg_index as i64);
+ let ret = self.instance_call(&mut pos, &FN_ELEM_DROP, &[seg_index]);
+ debug_assert!(ret.is_none());
+ Ok(())
+ }
+
+ fn translate_ref_func(
+ &mut self,
+ mut pos: FuncCursor,
+ func_index: FuncIndex,
+ ) -> WasmResult<ir::Value> {
+ let func_index = pos.ins().iconst(ir::types::I32, func_index.index() as i64);
+ Ok(self
+ .instance_call(&mut pos, &FN_REF_FUNC, &[func_index])
+ .unwrap())
+ }
+
+ fn translate_custom_global_get(
+ &mut self,
+ mut pos: FuncCursor,
+ global_index: GlobalIndex,
+ ) -> WasmResult<ir::Value> {
+ let global = self.module_env.global(global_index);
+ let ty = global.value_type()?;
+ debug_assert!(ty == ir::types::R32 || ty == ir::types::R64);
+
+ let (base_gv, offset) = self.global_address(pos.func, &global);
+ let addr = pos.ins().global_value(POINTER_TYPE, base_gv);
+ let flags = ir::MemFlags::trusted();
+ Ok(pos.ins().load(ty, flags, addr, offset))
+ }
+
+ fn translate_custom_global_set(
+ &mut self,
+ mut pos: FuncCursor,
+ global_index: GlobalIndex,
+ val: ir::Value,
+ ) -> WasmResult<()> {
+ let global = self.module_env.global(global_index);
+ let ty = global.value_type()?;
+ debug_assert!(ty == ir::types::R32 || ty == ir::types::R64);
+
+ let (global_addr_gv, global_addr_offset) = self.global_address(pos.func, &global);
+ let global_addr = pos.ins().global_value(POINTER_TYPE, global_addr_gv);
+ let abs_global_addr = pos.ins().iadd_imm(
+ global_addr,
+ ir::immediates::Imm64::new(global_addr_offset.into()),
+ );
+
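+        // Reference-typed globals need GC pre/post write barriers around the store.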
+ let res = self.instance_call(&mut pos, &FN_PRE_BARRIER, &[abs_global_addr]);
+ debug_assert!(res.is_none());
+
+ let flags = ir::MemFlags::trusted();
+ pos.ins().store(flags, val, abs_global_addr, offset32(0));
+
+ let res = self.instance_call(&mut pos, &FN_POST_BARRIER, &[abs_global_addr]);
+ debug_assert!(res.is_none());
+
+ Ok(())
+ }
+
+ fn translate_atomic_wait(
+ &mut self,
+ mut pos: FuncCursor,
+ _index: MemoryIndex,
+ _heap: ir::Heap,
+ addr: ir::Value,
+ expected: ir::Value,
+ timeout: ir::Value,
+ ) -> WasmResult<ir::Value> {
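+        // Pick the i32 or i64 wait builtin based on the type of `expected`.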
+ let callee = match pos.func.dfg.value_type(expected) {
+ ir::types::I64 => &FN_WAIT_I64,
+ ir::types::I32 => &FN_WAIT_I32,
+ _ => {
+ return Err(WasmError::Unsupported(
+ "atomic_wait is only supported for I32 and I64".to_string(),
+ ))
+ }
+ };
+ let ret = self.instance_call(&mut pos, callee, &[addr, expected, timeout]);
+ Ok(ret.unwrap())
+ }
+
+ fn translate_atomic_notify(
+ &mut self,
+ mut pos: FuncCursor,
+ _index: MemoryIndex,
+ _heap: ir::Heap,
+ addr: ir::Value,
+ count: ir::Value,
+ ) -> WasmResult<ir::Value> {
+ let ret = self.instance_call(&mut pos, &FN_WAKE, &[addr, count]);
+ Ok(ret.unwrap())
+ }
+
+ fn translate_loop_header(&mut self, mut pos: FuncCursor) -> WasmResult<()> {
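+        // Check the interrupt flag at each loop header so long-running loops can
+        // be stopped with a resumable trap.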
+ let interrupt = self.load_interrupt_flag(&mut pos);
+ pos.ins()
+ .resumable_trapnz(interrupt, ir::TrapCode::Interrupt);
+ Ok(())
+ }
+
+ fn return_mode(&self) -> ReturnMode {
+ // Since we're using SM's epilogue insertion code, we can only handle a single return
+ // instruction at the end of the function.
+ ReturnMode::FallthroughReturn
+ }
+}
+
+/// Information about a function table.
+#[derive(Clone)]
+struct TableInfo {
+ /// Global variable containing a `wasm::TableTls` struct with two fields:
+ ///
+ /// 0: Unsigned 32-bit table length.
+ /// n: Pointer to table (n = sizeof(void*))
+ pub global: ir::GlobalValue,
+}
+
+impl TableInfo {
+ /// Create a TableInfo and its global variable in `func`.
+ pub fn new(
+ wtab: bindings::TableDesc,
+ func: &mut ir::Function,
+ vmctx: ir::GlobalValue,
+ ) -> TableInfo {
+ // Create the global variable.
+ let offset = wtab.tls_offset();
+ assert!(offset < i32::max_value() as usize);
+ let offset = imm64(offset);
+ let global = func.create_global_value(ir::GlobalValueData::IAddImm {
+ base: vmctx,
+ offset,
+ global_type: POINTER_TYPE,
+ });
+
+ TableInfo { global }
+ }
+
+ /// Get the size in bytes of each table entry.
+ pub fn entry_size(&self) -> i64 {
+        // Each entry is a `wasm::FunctionTableElem`, which consists of a code
+        // pointer and the callee's VM context pointer.
+ (POINTER_SIZE * 2) as i64
+ }
+}
diff --git a/js/src/wasm/moz.build b/js/src/wasm/moz.build
new file mode 100644
index 0000000000..eb372a6938
--- /dev/null
+++ b/js/src/wasm/moz.build
@@ -0,0 +1,49 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+FINAL_LIBRARY = "js"
+
+# Includes should be relative to parent path
+LOCAL_INCLUDES += ["!..", ".."]
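+# ("!.." resolves in the object directory, ".." in the source directory.)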
+
+include("../js-config.mozbuild")
+include("../js-cxxflags.mozbuild")
+
+if CONFIG["ENABLE_WASM_CRANELIFT"]:
+ UNIFIED_SOURCES += [
+ "WasmCraneliftCompile.cpp",
+ ]
+
+UNIFIED_SOURCES += [
+ "AsmJS.cpp",
+ "TypedObject.cpp",
+ "WasmBaselineCompile.cpp",
+ "WasmBuiltins.cpp",
+ "WasmCode.cpp",
+ "WasmCompile.cpp",
+ "WasmContext.cpp",
+ "WasmDebug.cpp",
+ "WasmFrameIter.cpp",
+ "WasmGC.cpp",
+ "WasmGenerator.cpp",
+ "WasmInstance.cpp",
+ "WasmIonCompile.cpp",
+ "WasmJS.cpp",
+ "WasmModule.cpp",
+ "WasmOpIter.cpp",
+ "WasmProcess.cpp",
+ "WasmRealm.cpp",
+ "WasmSignalHandlers.cpp",
+ "WasmStubs.cpp",
+ "WasmTable.cpp",
+ "WasmTypes.cpp",
+ "WasmValidate.cpp",
+]
+
+# Make sure all WebAssembly code is built with libfuzzer
+# coverage instrumentation in FUZZING mode.
+if CONFIG["FUZZING_INTERFACES"] and CONFIG["LIBFUZZER"]:
+ include("/tools/fuzzing/libfuzzer-config.mozbuild")