Diffstat (limited to 'js/src/jit/shared')
-rw-r--r--  js/src/jit/shared/Architecture-shared.h | 18
-rw-r--r--  js/src/jit/shared/Assembler-shared.h | 632
-rw-r--r--  js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h | 453
-rw-r--r--  js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h | 373
-rw-r--r--  js/src/jit/shared/AtomicOperations-feeling-lucky.h | 18
-rw-r--r--  js/src/jit/shared/AtomicOperations-shared-jit.cpp | 1037
-rw-r--r--  js/src/jit/shared/AtomicOperations-shared-jit.h | 622
-rw-r--r--  js/src/jit/shared/CodeGenerator-shared-inl.h | 329
-rw-r--r--  js/src/jit/shared/CodeGenerator-shared.cpp | 1204
-rw-r--r--  js/src/jit/shared/CodeGenerator-shared.h | 579
-rw-r--r--  js/src/jit/shared/Disassembler-shared.cpp | 248
-rw-r--r--  js/src/jit/shared/Disassembler-shared.h | 182
-rw-r--r--  js/src/jit/shared/IonAssemblerBuffer.h | 437
-rw-r--r--  js/src/jit/shared/IonAssemblerBufferWithConstantPools.h | 1215
-rw-r--r--  js/src/jit/shared/LIR-shared.h | 8878
-rw-r--r--  js/src/jit/shared/Lowering-shared-inl.h | 805
-rw-r--r--  js/src/jit/shared/Lowering-shared.cpp | 1034
-rw-r--r--  js/src/jit/shared/Lowering-shared.h | 384
18 files changed, 18448 insertions, 0 deletions
diff --git a/js/src/jit/shared/Architecture-shared.h b/js/src/jit/shared/Architecture-shared.h
new file mode 100644
index 0000000000..33085a6bdb
--- /dev/null
+++ b/js/src/jit/shared/Architecture-shared.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Architecture_shared_h
+#define jit_shared_Architecture_shared_h
+
+namespace js {
+namespace jit {
+
+enum class RegTypeName { GPR, Float32, Float64, Vector128, Any };
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Architecture_shared_h */
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
new file mode 100644
index 0000000000..3fad7ec8f8
--- /dev/null
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -0,0 +1,632 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Assembler_shared_h
+#define jit_shared_Assembler_shared_h
+
+#include "mozilla/CheckedInt.h"
+
+#include <limits.h>
+
+#include "gc/Barrier.h"
+#include "jit/AtomicOp.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/JitCode.h"
+#include "jit/JitContext.h"
+#include "jit/Label.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "vm/HelperThreads.h"
+#include "vm/NativeObject.h"
+#include "wasm/WasmTypes.h"
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+// Push return addresses callee-side.
+# define JS_USE_LINK_REGISTER
+#endif
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_ARM64)
+// JS_CODELABEL_LINKMODE gives labels additional metadata
+// describing how Bind() should patch them.
+# define JS_CODELABEL_LINKMODE
+#endif
+
+namespace js {
+namespace jit {
+
+enum class FrameType;
+
+namespace Disassembler {
+class HeapAccess;
+} // namespace Disassembler
+
+static constexpr uint32_t Simd128DataSize = 4 * sizeof(int32_t);
+static_assert(Simd128DataSize == 4 * sizeof(int32_t),
+ "SIMD data should be able to contain int32x4");
+static_assert(Simd128DataSize == 4 * sizeof(float),
+ "SIMD data should be able to contain float32x4");
+static_assert(Simd128DataSize == 2 * sizeof(double),
+ "SIMD data should be able to contain float64x2");
+
+enum Scale { TimesOne = 0, TimesTwo = 1, TimesFour = 2, TimesEight = 3 };
+
+static_assert(sizeof(JS::Value) == 8,
+ "required for TimesEight and 3 below to be correct");
+static const Scale ValueScale = TimesEight;
+static const size_t ValueShift = 3;
+
+static inline unsigned ScaleToShift(Scale scale) { return unsigned(scale); }
+
+static inline bool IsShiftInScaleRange(int i) {
+ return i >= TimesOne && i <= TimesEight;
+}
+
+static inline Scale ShiftToScale(int i) {
+ MOZ_ASSERT(IsShiftInScaleRange(i));
+ return Scale(i);
+}
+
+static inline Scale ScaleFromElemWidth(int shift) {
+ switch (shift) {
+ case 1:
+ return TimesOne;
+ case 2:
+ return TimesTwo;
+ case 4:
+ return TimesFour;
+ case 8:
+ return TimesEight;
+ }
+
+ MOZ_CRASH("Invalid scale");
+}
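
A quick worked example of how these helpers fit together (illustrative sketch
only, not from the patch; the function name is invented): the byte width of an
element maps to a Scale, and the Scale is just the shift applied to the index
when forming an address.

  // Illustrative sketch: address of the i-th element of an int32_t array.
  static inline uintptr_t ExampleElemAddr(uintptr_t base, uintptr_t index) {
    Scale scale = ScaleFromElemWidth(sizeof(int32_t));  // TimesFour
    return base + (index << ScaleToShift(scale));       // plus any byte offset
  }
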
+
+// Used for 32-bit immediates which do not require relocation.
+struct Imm32 {
+ int32_t value;
+
+ explicit Imm32(int32_t value) : value(value) {}
+ explicit Imm32(FrameType type) : Imm32(int32_t(type)) {}
+
+ static inline Imm32 ShiftOf(enum Scale s) {
+ switch (s) {
+ case TimesOne:
+ return Imm32(0);
+ case TimesTwo:
+ return Imm32(1);
+ case TimesFour:
+ return Imm32(2);
+ case TimesEight:
+ return Imm32(3);
+ };
+ MOZ_CRASH("Invalid scale");
+ }
+
+ static inline Imm32 FactorOf(enum Scale s) {
+ return Imm32(1 << ShiftOf(s).value);
+ }
+};
+
+// Pointer-sized integer to be embedded as an immediate in an instruction.
+struct ImmWord {
+ uintptr_t value;
+
+ explicit ImmWord(uintptr_t value) : value(value) {}
+};
+
+// Used for 64-bit immediates which do not require relocation.
+struct Imm64 {
+ uint64_t value;
+
+ explicit Imm64(int64_t value) : value(value) {}
+
+ Imm32 low() const { return Imm32(int32_t(value)); }
+
+ Imm32 hi() const { return Imm32(int32_t(value >> 32)); }
+
+ inline Imm32 firstHalf() const;
+ inline Imm32 secondHalf() const;
+};
+
+#ifdef DEBUG
+static inline bool IsCompilingWasm() {
+ return GetJitContext()->isCompilingWasm();
+}
+#endif
+
+// Pointer to be embedded as an immediate in an instruction.
+struct ImmPtr {
+ void* value;
+
+ struct NoCheckToken {};
+
+ explicit ImmPtr(void* value, NoCheckToken) : value(value) {
+ // A special unchecked variant for contexts where we know it is safe to
+ // use an immptr. This is assuming the caller knows what they're doing.
+ }
+
+ explicit ImmPtr(const void* value) : value(const_cast<void*>(value)) {
+ // To make code serialization-safe, wasm compilation should only
+ // compile pointer immediates using a SymbolicAddress.
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R>
+ explicit ImmPtr(R (*pf)()) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1>
+ explicit ImmPtr(R (*pf)(A1)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2>
+ explicit ImmPtr(R (*pf)(A1, A2)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3>
+ explicit ImmPtr(R (*pf)(A1, A2, A3)) : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ template <class R, class A1, class A2, class A3, class A4>
+ explicit ImmPtr(R (*pf)(A1, A2, A3, A4))
+ : value(JS_FUNC_TO_DATA_PTR(void*, pf)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+};
+
+// The same as ImmPtr except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedImmPtr {
+ void* value;
+
+ explicit PatchedImmPtr() : value(nullptr) {}
+ explicit PatchedImmPtr(const void* value) : value(const_cast<void*>(value)) {}
+};
+
+class AssemblerShared;
+class ImmGCPtr;
+
+// Used for immediates which require relocation.
+class ImmGCPtr {
+ public:
+ const gc::Cell* value;
+
+ explicit ImmGCPtr(const gc::Cell* ptr) : value(ptr) {
+ // Nursery pointers can't be used if the main thread might be currently
+ // performing a minor GC.
+ MOZ_ASSERT_IF(ptr && !ptr->isTenured(), !CurrentThreadIsIonCompiling());
+
+ // wasm shouldn't be creating GC things
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ private:
+ ImmGCPtr() : value(0) {}
+};
+
+// Pointer to trampoline code. Trampoline code is kept alive until the runtime
+// is destroyed, so does not need to be traced.
+struct TrampolinePtr {
+ uint8_t* value;
+
+ TrampolinePtr() : value(nullptr) {}
+ explicit TrampolinePtr(uint8_t* value) : value(value) { MOZ_ASSERT(value); }
+};
+
+// Pointer to be embedded as an immediate that is loaded/stored from by an
+// instruction.
+struct AbsoluteAddress {
+ void* addr;
+
+ explicit AbsoluteAddress(const void* addr) : addr(const_cast<void*>(addr)) {
+ MOZ_ASSERT(!IsCompilingWasm());
+ }
+
+ AbsoluteAddress offset(ptrdiff_t delta) {
+ return AbsoluteAddress(((uint8_t*)addr) + delta);
+ }
+};
+
+// The same as AbsoluteAddress except that the intention is to patch this
+// instruction. The initial value of the immediate is 'addr' and this value is
+// either clobbered or used in the patching process.
+struct PatchedAbsoluteAddress {
+ void* addr;
+
+ explicit PatchedAbsoluteAddress() : addr(nullptr) {}
+ explicit PatchedAbsoluteAddress(const void* addr)
+ : addr(const_cast<void*>(addr)) {}
+ explicit PatchedAbsoluteAddress(uintptr_t addr)
+ : addr(reinterpret_cast<void*>(addr)) {}
+};
+
+// Specifies an address computed in the form of a register base and a constant,
+// 32-bit offset.
+struct Address {
+ RegisterOrSP base;
+ int32_t offset;
+
+ Address(Register base, int32_t offset)
+ : base(RegisterOrSP(base)), offset(offset) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ Address(RegisterOrSP base, int32_t offset) : base(base), offset(offset) {}
+#endif
+
+ Address() = delete;
+};
+
+#if JS_BITS_PER_WORD == 32
+
+static inline Address LowWord(const Address& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return Address(address.base, offset.value());
+}
+
+static inline Address HighWord(const Address& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64HIGH_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return Address(address.base, offset.value());
+}
+
+#endif
+
+// Specifies an address computed in the form of a register base, a register
+// index with a scale, and a constant, 32-bit offset.
+struct BaseIndex {
+ RegisterOrSP base;
+ Register index;
+ Scale scale;
+ int32_t offset;
+
+ BaseIndex(Register base, Register index, Scale scale, int32_t offset = 0)
+ : base(RegisterOrSP(base)), index(index), scale(scale), offset(offset) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseIndex(RegisterOrSP base, Register index, Scale scale, int32_t offset = 0)
+ : base(base), index(index), scale(scale), offset(offset) {}
+#endif
+
+ BaseIndex() = delete;
+};
+
+#if JS_BITS_PER_WORD == 32
+
+static inline BaseIndex LowWord(const BaseIndex& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64LOW_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return BaseIndex(address.base, address.index, address.scale, offset.value());
+}
+
+static inline BaseIndex HighWord(const BaseIndex& address) {
+ using mozilla::CheckedInt;
+
+ CheckedInt<int32_t> offset =
+ CheckedInt<int32_t>(address.offset) + INT64HIGH_OFFSET;
+ MOZ_ALWAYS_TRUE(offset.isValid());
+ return BaseIndex(address.base, address.index, address.scale, offset.value());
+}
+
+#endif
+
+// A BaseIndex used to access Values. Note that |offset| is *not* scaled by
+// sizeof(Value). Use this *only* if you're indexing into a series of Values
+// that aren't object elements or object slots (for example, values on the
+// stack, values in an arguments object, &c.). If you're indexing into an
+// object's elements or slots, don't use this directly! Use
+// BaseObject{Element,Slot}Index instead.
+struct BaseValueIndex : BaseIndex {
+ BaseValueIndex(Register base, Register index, int32_t offset = 0)
+ : BaseIndex(RegisterOrSP(base), index, ValueScale, offset) {}
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseValueIndex(RegisterOrSP base, Register index, int32_t offset = 0)
+ : BaseIndex(base, index, ValueScale, offset) {}
+#endif
+};
+
+// Specifies the address of an indexed Value within object elements from a
+// base. The index must not already be scaled by sizeof(Value)!
+struct BaseObjectElementIndex : BaseValueIndex {
+ BaseObjectElementIndex(Register base, Register index, int32_t offset = 0)
+ : BaseValueIndex(base, index, offset) {
+ NativeObject::elementsSizeMustNotOverflow();
+ }
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseObjectElementIndex(RegisterOrSP base, Register index, int32_t offset = 0)
+ : BaseValueIndex(base, index, offset) {
+ NativeObject::elementsSizeMustNotOverflow();
+ }
+#endif
+};
+
+// Like BaseObjectElementIndex, except for object slots.
+struct BaseObjectSlotIndex : BaseValueIndex {
+ BaseObjectSlotIndex(Register base, Register index)
+ : BaseValueIndex(base, index) {
+ NativeObject::slotsSizeMustNotOverflow();
+ }
+
+#ifdef JS_HAS_HIDDEN_SP
+ BaseObjectSlotIndex(RegisterOrSP base, Register index)
+ : BaseValueIndex(base, index) {
+ NativeObject::slotsSizeMustNotOverflow();
+ }
+#endif
+};
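
To make the distinction drawn in the BaseValueIndex comment above concrete,
here is an illustrative sketch (not from the patch; the function and register
parameter names are invented). In all three cases the index register counts
Values and is scaled by sizeof(Value); only the constant offset is in bytes.

  // Illustrative sketch: the same Value-counting index used three ways.
  static inline void ExampleValueAddressing(Register frame, Register elements,
                                            Register slots, Register index) {
    BaseValueIndex stackValue(frame, index, /* byte offset */ 8);
    BaseObjectElementIndex element(elements, index);  // asserts elements fit
    BaseObjectSlotIndex slot(slots, index);           // asserts slots fit
    (void)stackValue;
    (void)element;
    (void)slot;
  }
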
+
+enum class RelocationKind {
+ // The target is immovable, so patching is only needed if the source
+ // buffer is relocated and the reference is relative.
+ HARDCODED,
+
+ // The target is the start of a JitCode buffer, which must be traced
+ // during garbage collection. Relocations and patching may be needed.
+ JITCODE
+};
+
+class CodeOffset {
+ size_t offset_;
+
+ static const size_t NOT_BOUND = size_t(-1);
+
+ public:
+ explicit CodeOffset(size_t offset) : offset_(offset) {}
+ CodeOffset() : offset_(NOT_BOUND) {}
+
+ size_t offset() const {
+ MOZ_ASSERT(bound());
+ return offset_;
+ }
+
+ void bind(size_t offset) {
+ MOZ_ASSERT(!bound());
+ offset_ = offset;
+ MOZ_ASSERT(bound());
+ }
+ bool bound() const { return offset_ != NOT_BOUND; }
+
+ void offsetBy(size_t delta) {
+ MOZ_ASSERT(bound());
+ MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
+ offset_ += delta;
+ }
+};
+
+// A code label contains an absolute reference to a point in the code. Thus, it
+// cannot be patched until after linking.
+// When the source label is resolved into a memory address, this address is
+// patched into the destination address.
+// Some platforms need to distinguish between multiple ways of patching that
+// address. See JS_CODELABEL_LINKMODE.
+class CodeLabel {
+ // The destination position, where the absolute reference should get
+ // patched into.
+ CodeOffset patchAt_;
+
+ // The source label (relative) in the code to where the destination should
+ // get patched to.
+ CodeOffset target_;
+
+#ifdef JS_CODELABEL_LINKMODE
+ public:
+ enum LinkMode { Uninitialized = 0, RawPointer, MoveImmediate, JumpImmediate };
+
+ private:
+ LinkMode linkMode_ = Uninitialized;
+#endif
+
+ public:
+ CodeLabel() = default;
+ explicit CodeLabel(const CodeOffset& patchAt) : patchAt_(patchAt) {}
+ CodeLabel(const CodeOffset& patchAt, const CodeOffset& target)
+ : patchAt_(patchAt), target_(target) {}
+ CodeOffset* patchAt() { return &patchAt_; }
+ CodeOffset* target() { return &target_; }
+ CodeOffset patchAt() const { return patchAt_; }
+ CodeOffset target() const { return target_; }
+#ifdef JS_CODELABEL_LINKMODE
+ LinkMode linkMode() const { return linkMode_; }
+ void setLinkMode(LinkMode value) { linkMode_ = value; }
+#endif
+};
+
+typedef Vector<CodeLabel, 0, SystemAllocPolicy> CodeLabelVector;
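
The intended life cycle, as an illustrative sketch (not from the patch; the
offsets and function name are invented): both offsets are bound while
assembling, the label is handed to the assembler, and the absolute address is
only computed and written at link time.

  // Illustrative sketch of tying a patch site to its target.
  static inline CodeLabel ExampleCodeLabel() {
    CodeLabel cl;
    cl.patchAt()->bind(0x40);  // instruction that embeds an absolute pointer
    cl.target()->bind(0x80);   // code position the pointer should refer to
    // The assembler records the label (see AssemblerShared::addCodeLabel
    // below); at link time, codeBase + 0x80 is written into the code at
    // codeBase + 0x40, honoring linkMode() where JS_CODELABEL_LINKMODE is set.
    return cl;
  }
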
+
+class CodeLocationLabel {
+ uint8_t* raw_ = nullptr;
+
+ public:
+ CodeLocationLabel(JitCode* code, CodeOffset base) {
+ MOZ_ASSERT(base.offset() < code->instructionsSize());
+ raw_ = code->raw() + base.offset();
+ }
+ explicit CodeLocationLabel(JitCode* code) { raw_ = code->raw(); }
+ explicit CodeLocationLabel(uint8_t* raw) {
+ MOZ_ASSERT(raw);
+ raw_ = raw;
+ }
+
+ ptrdiff_t operator-(const CodeLocationLabel& other) {
+ return raw_ - other.raw_;
+ }
+
+ uint8_t* raw() const { return raw_; }
+};
+
+} // namespace jit
+
+namespace wasm {
+
+// Represents an instruction to be patched and the intended pointee. These
+// links are accumulated in the MacroAssembler, but patching is done outside
+// the MacroAssembler (in Module::staticallyLink).
+
+struct SymbolicAccess {
+ SymbolicAccess(jit::CodeOffset patchAt, SymbolicAddress target)
+ : patchAt(patchAt), target(target) {}
+
+ jit::CodeOffset patchAt;
+ SymbolicAddress target;
+};
+
+typedef Vector<SymbolicAccess, 0, SystemAllocPolicy> SymbolicAccessVector;
+
+// Describes a single wasm or asm.js memory access for the purpose of generating
+// code and metadata.
+
+class MemoryAccessDesc {
+ uint32_t offset_;
+ uint32_t align_;
+ Scalar::Type type_;
+ jit::Synchronization sync_;
+ wasm::BytecodeOffset trapOffset_;
+ wasm::SimdOp widenOp_;
+ enum { Plain, ZeroExtend, Splat, Widen } loadOp_;
+
+ public:
+ explicit MemoryAccessDesc(
+ Scalar::Type type, uint32_t align, uint32_t offset,
+ BytecodeOffset trapOffset,
+ const jit::Synchronization& sync = jit::Synchronization::None())
+ : offset_(offset),
+ align_(align),
+ type_(type),
+ sync_(sync),
+ trapOffset_(trapOffset),
+ widenOp_(wasm::SimdOp::Limit),
+ loadOp_(Plain) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
+ }
+
+ uint32_t offset() const { return offset_; }
+ uint32_t align() const { return align_; }
+ Scalar::Type type() const { return type_; }
+ unsigned byteSize() const { return Scalar::byteSize(type()); }
+ const jit::Synchronization& sync() const { return sync_; }
+ BytecodeOffset trapOffset() const { return trapOffset_; }
+ wasm::SimdOp widenSimdOp() const {
+ MOZ_ASSERT(isWidenSimd128Load());
+ return widenOp_;
+ }
+ bool isAtomic() const { return !sync_.isNone(); }
+ bool isZeroExtendSimd128Load() const { return loadOp_ == ZeroExtend; }
+ bool isSplatSimd128Load() const { return loadOp_ == Splat; }
+ bool isWidenSimd128Load() const { return loadOp_ == Widen; }
+
+ void setZeroExtendSimd128Load() {
+ MOZ_ASSERT(type() == Scalar::Float32 || type() == Scalar::Float64);
+ MOZ_ASSERT(!isAtomic());
+ MOZ_ASSERT(loadOp_ == Plain);
+ loadOp_ = ZeroExtend;
+ }
+
+ void setSplatSimd128Load() {
+ MOZ_ASSERT(type() == Scalar::Float64);
+ MOZ_ASSERT(!isAtomic());
+ MOZ_ASSERT(loadOp_ == Plain);
+ loadOp_ = Splat;
+ }
+
+ void setWidenSimd128Load(wasm::SimdOp op) {
+ MOZ_ASSERT(type() == Scalar::Float64);
+ MOZ_ASSERT(!isAtomic());
+ MOZ_ASSERT(loadOp_ == Plain);
+ widenOp_ = op;
+ loadOp_ = Widen;
+ }
+
+ void clearOffset() { offset_ = 0; }
+ void setOffset(uint32_t offset) { offset_ = offset; }
+};
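
A small constructor example may help (illustrative sketch only, not from the
patch; the function name is invented): a plain, naturally aligned int32 access
defaults to Synchronization::None() and is therefore not atomic.

  // Illustrative sketch: describe a non-atomic, 4-byte-aligned int32 access.
  static inline MemoryAccessDesc ExampleInt32Access(uint32_t byteOffset,
                                                    BytecodeOffset trapAt) {
    MemoryAccessDesc access(Scalar::Int32, /* align */ 4, byteOffset, trapAt);
    MOZ_ASSERT(!access.isAtomic());      // default sync is None
    MOZ_ASSERT(access.byteSize() == 4);
    return access;
  }
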
+
+} // namespace wasm
+
+namespace jit {
+
+// The base class of all Assemblers for all archs.
+class AssemblerShared {
+ wasm::CallSiteVector callSites_;
+ wasm::CallSiteTargetVector callSiteTargets_;
+ wasm::TrapSiteVectorArray trapSites_;
+ wasm::SymbolicAccessVector symbolicAccesses_;
+
+ protected:
+ CodeLabelVector codeLabels_;
+
+ bool enoughMemory_;
+ bool embedsNurseryPointers_;
+
+ public:
+ AssemblerShared() : enoughMemory_(true), embedsNurseryPointers_(false) {}
+
+ void propagateOOM(bool success) { enoughMemory_ &= success; }
+
+ void setOOM() { enoughMemory_ = false; }
+
+ bool oom() const { return !enoughMemory_; }
+
+ bool embedsNurseryPointers() const { return embedsNurseryPointers_; }
+
+ void addCodeLabel(CodeLabel label) {
+ propagateOOM(codeLabels_.append(label));
+ }
+ size_t numCodeLabels() const { return codeLabels_.length(); }
+ CodeLabel codeLabel(size_t i) { return codeLabels_[i]; }
+ CodeLabelVector& codeLabels() { return codeLabels_; }
+
+ // WebAssembly metadata emitted by masm operations accumulated on the
+ // MacroAssembler, and swapped into a wasm::CompiledCode after finish().
+
+ template <typename... Args>
+ void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr,
+ Args&&... args) {
+ enoughMemory_ &= callSites_.emplaceBack(desc, retAddr.offset());
+ enoughMemory_ &= callSiteTargets_.emplaceBack(std::forward<Args>(args)...);
+ }
+ void append(wasm::Trap trap, wasm::TrapSite site) {
+ enoughMemory_ &= trapSites_[trap].append(site);
+ }
+ void append(const wasm::MemoryAccessDesc& access, uint32_t pcOffset) {
+ appendOutOfBoundsTrap(access.trapOffset(), pcOffset);
+ }
+ void appendOutOfBoundsTrap(wasm::BytecodeOffset trapOffset,
+ uint32_t pcOffset) {
+ append(wasm::Trap::OutOfBounds, wasm::TrapSite(pcOffset, trapOffset));
+ }
+ void append(wasm::SymbolicAccess access) {
+ enoughMemory_ &= symbolicAccesses_.append(access);
+ }
+
+ wasm::CallSiteVector& callSites() { return callSites_; }
+ wasm::CallSiteTargetVector& callSiteTargets() { return callSiteTargets_; }
+ wasm::TrapSiteVectorArray& trapSites() { return trapSites_; }
+ wasm::SymbolicAccessVector& symbolicAccesses() { return symbolicAccesses_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Assembler_shared_h */
diff --git a/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h b/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
new file mode 100644
index 0000000000..1ce40efacc
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky-gcc.h
@@ -0,0 +1,453 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h, both the comment block at the
+ * beginning and the #ifdef nest near the end.
+ *
+ * This is a common file for tier-3 platforms (including simulators for our
+ * tier-1 platforms) that are not providing hardware-specific implementations of
+ * the atomic operations. Please keep it reasonably platform-independent by
+ * adding #ifdefs at the beginning as much as possible, not throughout the file.
+ *
+ *
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ * !!!! NOTE !!!!
+ * !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ *
+ * The implementations in this file are NOT SAFE and cannot be safe even in
+ * principle because they rely on C++ undefined behavior. However, they are
+ * frequently good enough for tier-3 platforms.
+ */
+
+#ifndef jit_shared_AtomicOperations_feeling_lucky_gcc_h
+#define jit_shared_AtomicOperations_feeling_lucky_gcc_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+// Explicitly exclude tier-1 platforms.
+
+#if ((defined(__x86_64__) || defined(_M_X64)) && defined(JS_CODEGEN_X64)) || \
+ ((defined(__i386__) || defined(_M_IX86)) && defined(JS_CODEGEN_X86)) || \
+ (defined(__arm__) && defined(JS_CODEGEN_ARM)) || \
+ ((defined(__aarch64__) || defined(_M_ARM64)) && defined(JS_CODEGEN_ARM64))
+# error "Do not use this code on a tier-1 platform when a JIT is available"
+#endif
+
+#if !(defined(__clang__) || defined(__GNUC__))
+# error "This file only for gcc/Clang"
+#endif
+
+// 64-bit atomics are not required by the JS spec, and you can compile
+// SpiderMonkey without them. 64-bit atomics are required for BigInt
+// support.
+//
+// 64-bit lock-free atomics are required for WebAssembly, but gating in the
+// WebAssembly subsystem ensures that no WebAssembly-supporting platforms need
+// code in this file.
+
+#if defined(JS_SIMULATOR_ARM64) || defined(JS_SIMULATOR_ARM)
+// On some x86 (32-bit) systems this will not work because the compiler does not
+// open-code 64-bit atomics. If so, try linking with -latomic. If that doesn't
+// work, you're mostly on your own.
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+#endif
+
+#if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+ defined(__PPC64LE__)
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+#endif
+
+#if defined(__riscv) && __riscv_xlen == 64
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+#endif
+
+#ifdef __sparc__
+# ifdef __LP64__
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+# endif
+#endif
+
+#ifdef JS_CODEGEN_NONE
+# ifdef JS_64BIT
+# define HAS_64BIT_ATOMICS
+# define HAS_64BIT_LOCKFREE
+# endif
+#endif
+
+// The default implementation tactic for gcc/clang is to use the newer __atomic
+// intrinsics added for use in C++11 <atomic>. Where that isn't available, we
+// use GCC's older __sync functions instead.
+//
+// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward compatible
+// option for older compilers: enable this to use GCC's old __sync functions
+// instead of the newer __atomic functions. This will be required for GCC 4.6.x
+// and earlier, and probably for Clang 3.1, should we need to use those
+// versions. Firefox no longer supports compilers that old.
+
+//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+
+// Sanity check.
+
+#if defined(HAS_64BIT_LOCKFREE) && !defined(HAS_64BIT_ATOMICS)
+# error "This combination of features is senseless, please fix"
+#endif
+
+// Try to avoid platform #ifdefs below this point.
+
+inline bool js::jit::AtomicOperations::Initialize() {
+ // Nothing
+ return true;
+}
+
+inline void js::jit::AtomicOperations::ShutDown() {
+ // Nothing
+}
+
+// When compiling with Clang on 32-bit linux it will be necessary to link with
+// -latomic to get the proper 64-bit intrinsics.
+
+inline bool js::jit::AtomicOperations::hasAtomic8() {
+#if defined(HAS_64BIT_ATOMICS)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline bool js::jit::AtomicOperations::isLockfree8() {
+#if defined(HAS_64BIT_LOCKFREE)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline void js::jit::AtomicOperations::fenceSeqCst() {
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+#else
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#endif
+}
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ T v = *addr;
+ __sync_synchronize();
+#else
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+#endif
+ return v;
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::loadSeqCst(int64_t* addr) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::loadSeqCst(uint64_t* addr) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ __sync_synchronize();
+ *addr = val;
+ __sync_synchronize();
+#else
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline void AtomicOperations::storeSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline void AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ T v;
+ __sync_synchronize();
+ do {
+ v = *addr;
+ } while (__sync_val_compare_and_swap(addr, v, val) != v);
+ return v;
+#else
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
+ T newval) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_val_compare_and_swap(addr, oldval, newval);
+#else
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+ return oldval;
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::compareExchangeSeqCst(int64_t* addr,
+ int64_t oldval,
+ int64_t newval) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::compareExchangeSeqCst(uint64_t* addr,
+ uint64_t oldval,
+ uint64_t newval) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_add(addr, val);
+#else
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchAddSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchAddSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_sub(addr, val);
+#else
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchSubSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchSubSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_and(addr, val);
+#else
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchAndSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchAndSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_or(addr, val);
+#else
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchOrSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchOrSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+#ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+ return __sync_fetch_and_xor(addr, val);
+#else
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+#endif
+}
+
+#ifndef HAS_64BIT_ATOMICS
+namespace js {
+namespace jit {
+
+template <>
+inline int64_t AtomicOperations::fetchXorSeqCst(int64_t* addr, int64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+template <>
+inline uint64_t AtomicOperations::fetchXorSeqCst(uint64_t* addr, uint64_t val) {
+ MOZ_CRASH("No 64-bit atomics");
+}
+
+} // namespace jit
+} // namespace js
+#endif
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+ // This is actually roughly right even on 32-bit platforms since in that
+ // case, double, int64, and uint64 loads need not be access-atomic.
+ //
+ // We could use __atomic_load, but it would be needlessly expensive on
+ // 32-bit platforms that could support it and just plain wrong on others.
+ return *addr;
+}
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
+ static_assert(sizeof(T) <= 8, "atomics supported up to 8 bytes only");
+ // This is actually roughly right even on 32-bit platforms since in that
+ // case, double, int64, and uint64 loads need not be access-atomic.
+ //
+ // We could use __atomic_store, but it would be needlessly expensive on
+ // 32-bit platforms that could support it and just plain wrong on others.
+ *addr = val;
+}
+
+inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+ MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+ ::memcpy(dest, src, nbytes);
+}
+
+inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ ::memmove(dest, src, nbytes);
+}
+
+#undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
+#undef HAS_64BIT_ATOMICS
+#undef HAS_64BIT_LOCKFREE
+
+#endif // jit_shared_AtomicOperations_feeling_lucky_gcc_h
diff --git a/js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h b/js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
new file mode 100644
index 0000000000..c1c90474a2
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky-msvc.h
@@ -0,0 +1,373 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_AtomicOperations_feeling_lucky_msvc_h
+#define jit_shared_AtomicOperations_feeling_lucky_msvc_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+// Explicitly exclude tier-1 platforms.
+
+#if ((defined(__x86_64__) || defined(_M_X64)) && defined(JS_CODEGEN_X64)) || \
+ ((defined(__i386__) || defined(_M_IX86)) && defined(JS_CODEGEN_X86)) || \
+ (defined(__arm__) && defined(JS_CODEGEN_ARM)) || \
+ ((defined(__aarch64__) || defined(_M_ARM64)) && defined(JS_CODEGEN_ARM64))
+# error "Do not use this code on a tier-1 platform when a JIT is available"
+#endif
+
+#if !defined(_MSC_VER)
+# error "This file only for Microsoft Visual C++"
+#endif
+
+// For overall documentation, see jit/AtomicOperations.h.
+
+// Below, _ReadWriteBarrier is a compiler directive, preventing reordering of
+// instructions and reuse of memory values across it in the compiler, but having
+// no impact on what the CPU does.
+
+// Note, here we use MSVC intrinsics directly. But MSVC supports a slightly
+// higher level of function which uses the intrinsic when possible (8, 16, and
+// 32-bit operations, and 64-bit operations on 64-bit systems) and otherwise
+// falls back on CMPXCHG8B for 64-bit operations on 32-bit systems. We could be
+// using those functions in many cases here (though not all). I have not done
+// so because I don't yet know how far back those functions are supported.
+
+// Note, _InterlockedCompareExchange takes the *new* value as the second
+// argument and the *comparand* (expected old value) as the third argument.
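
As a small illustration of that argument order (illustrative sketch only, not
from the patch; the function name and values are invented):

  // Illustrative sketch: new value second, expected old value third; the
  // intrinsic returns the previous contents of *cell.
  static inline bool ExampleCas(long volatile* cell) {
    long prev = _InterlockedCompareExchange(cell, /* Exchange */ 1,
                                            /* Comparand */ 0);
    return prev == 0;  // true iff the store took effect
  }
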
+
+inline bool js::jit::AtomicOperations::Initialize() {
+ // Nothing
+ return true;
+}
+
+inline void js::jit::AtomicOperations::ShutDown() {
+ // Nothing
+}
+
+inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
+
+inline bool js::jit::AtomicOperations::isLockfree8() {
+  // The MSDN docs suggest very strongly that if code is compiled for Pentium
+  // or better, the 64-bit primitives will be lock-free; see, e.g., the
+  // "Remarks" section of the page for _InterlockedCompareExchange64,
+  // currently here:
+ // https://msdn.microsoft.com/en-us/library/ttk2z1ws%28v=vs.85%29.aspx
+ //
+  // But I've found no way to assert that at compile time or run time; there
+ // appears to be no WinAPI is_lock_free() test.
+
+ return true;
+}
+
+inline void js::jit::AtomicOperations::fenceSeqCst() {
+ _ReadWriteBarrier();
+#if defined(_M_IX86) || defined(_M_X64)
+ _mm_mfence();
+#elif defined(_M_ARM64)
+ // MemoryBarrier is defined in winnt.h, which we don't want to include here.
+ // This expression is the expansion of MemoryBarrier.
+ __dmb(_ARM64_BARRIER_SY);
+#else
+# error "Unknown hardware for MSVC"
+#endif
+}
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
+ _ReadWriteBarrier();
+ T v = *addr;
+ _ReadWriteBarrier();
+ return v;
+}
+
+#ifdef _M_IX86
+namespace js {
+namespace jit {
+
+# define MSC_LOADOP(T) \
+ template <> \
+ inline T AtomicOperations::loadSeqCst(T* addr) { \
+ _ReadWriteBarrier(); \
+ return (T)_InterlockedCompareExchange64((__int64 volatile*)addr, 0, 0); \
+ }
+
+MSC_LOADOP(int64_t)
+MSC_LOADOP(uint64_t)
+
+# undef MSC_LOADOP
+
+} // namespace jit
+} // namespace js
+#endif // _M_IX86
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
+ _ReadWriteBarrier();
+ *addr = val;
+ fenceSeqCst();
+}
+
+#ifdef _M_IX86
+namespace js {
+namespace jit {
+
+# define MSC_STOREOP(T) \
+ template <> \
+ inline void AtomicOperations::storeSeqCst(T* addr, T val) { \
+ _ReadWriteBarrier(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)_InterlockedCompareExchange64( \
+ (__int64 volatile*)addr, (__int64)val, (__int64)oldval); \
+ if (nextval == oldval) break; \
+ oldval = nextval; \
+ } \
+ _ReadWriteBarrier(); \
+ }
+
+MSC_STOREOP(int64_t)
+MSC_STOREOP(uint64_t)
+
+# undef MSC_STOREOP
+
+} // namespace jit
+} // namespace js
+#endif // _M_IX86
+
+#define MSC_EXCHANGEOP(T, U, xchgop) \
+ template <> \
+ inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+ return (T)xchgop((U volatile*)addr, (U)val); \
+ }
+
+#ifdef _M_IX86
+# define MSC_EXCHANGEOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+ _ReadWriteBarrier(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)_InterlockedCompareExchange64( \
+ (__int64 volatile*)addr, (__int64)val, (__int64)oldval); \
+ if (nextval == oldval) break; \
+ oldval = nextval; \
+ } \
+ _ReadWriteBarrier(); \
+ return oldval; \
+ }
+#endif // _M_IX86
+
+namespace js {
+namespace jit {
+
+MSC_EXCHANGEOP(int8_t, char, _InterlockedExchange8)
+MSC_EXCHANGEOP(uint8_t, char, _InterlockedExchange8)
+MSC_EXCHANGEOP(int16_t, short, _InterlockedExchange16)
+MSC_EXCHANGEOP(uint16_t, short, _InterlockedExchange16)
+MSC_EXCHANGEOP(int32_t, long, _InterlockedExchange)
+MSC_EXCHANGEOP(uint32_t, long, _InterlockedExchange)
+
+#ifdef _M_IX86
+MSC_EXCHANGEOP_CAS(int64_t)
+MSC_EXCHANGEOP_CAS(uint64_t)
+#else
+MSC_EXCHANGEOP(int64_t, __int64, _InterlockedExchange64)
+MSC_EXCHANGEOP(uint64_t, __int64, _InterlockedExchange64)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef MSC_EXCHANGEOP
+#undef MSC_EXCHANGEOP_CAS
+
+#define MSC_CAS(T, U, cmpxchg) \
+ template <> \
+ inline T AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, \
+ T newval) { \
+ return (T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval); \
+ }
+
+namespace js {
+namespace jit {
+
+MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
+MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
+MSC_CAS(int16_t, short, _InterlockedCompareExchange16)
+MSC_CAS(uint16_t, short, _InterlockedCompareExchange16)
+MSC_CAS(int32_t, long, _InterlockedCompareExchange)
+MSC_CAS(uint32_t, long, _InterlockedCompareExchange)
+MSC_CAS(int64_t, __int64, _InterlockedCompareExchange64)
+MSC_CAS(uint64_t, __int64, _InterlockedCompareExchange64)
+
+} // namespace jit
+} // namespace js
+
+#undef MSC_CAS
+
+#define MSC_FETCHADDOP(T, U, xadd) \
+ template <> \
+ inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+ return (T)xadd((U volatile*)addr, (U)val); \
+ }
+
+#define MSC_FETCHSUBOP(T) \
+ template <> \
+ inline T AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
+ return fetchAddSeqCst(addr, (T)(0 - val)); \
+ }
+
+#ifdef _M_IX86
+# define MSC_FETCHADDOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+ _ReadWriteBarrier(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr, \
+ (__int64)(oldval + val), \
+ (__int64)oldval); \
+ if (nextval == oldval) break; \
+ oldval = nextval; \
+ } \
+ _ReadWriteBarrier(); \
+ return oldval; \
+ }
+#endif // _M_IX86
+
+namespace js {
+namespace jit {
+
+MSC_FETCHADDOP(int8_t, char, _InterlockedExchangeAdd8)
+MSC_FETCHADDOP(uint8_t, char, _InterlockedExchangeAdd8)
+MSC_FETCHADDOP(int16_t, short, _InterlockedExchangeAdd16)
+MSC_FETCHADDOP(uint16_t, short, _InterlockedExchangeAdd16)
+MSC_FETCHADDOP(int32_t, long, _InterlockedExchangeAdd)
+MSC_FETCHADDOP(uint32_t, long, _InterlockedExchangeAdd)
+
+#ifdef _M_IX86
+MSC_FETCHADDOP_CAS(int64_t)
+MSC_FETCHADDOP_CAS(uint64_t)
+#else
+MSC_FETCHADDOP(int64_t, __int64, _InterlockedExchangeAdd64)
+MSC_FETCHADDOP(uint64_t, __int64, _InterlockedExchangeAdd64)
+#endif
+
+MSC_FETCHSUBOP(int8_t)
+MSC_FETCHSUBOP(uint8_t)
+MSC_FETCHSUBOP(int16_t)
+MSC_FETCHSUBOP(uint16_t)
+MSC_FETCHSUBOP(int32_t)
+MSC_FETCHSUBOP(uint32_t)
+MSC_FETCHSUBOP(int64_t)
+MSC_FETCHSUBOP(uint64_t)
+
+} // namespace jit
+} // namespace js
+
+#undef MSC_FETCHADDOP
+#undef MSC_FETCHADDOP_CAS
+#undef MSC_FETCHSUBOP
+
+#define MSC_FETCHBITOPX(T, U, name, op) \
+ template <> \
+ inline T AtomicOperations::name(T* addr, T val) { \
+ return (T)op((U volatile*)addr, (U)val); \
+ }
+
+#define MSC_FETCHBITOP(T, U, andop, orop, xorop) \
+ MSC_FETCHBITOPX(T, U, fetchAndSeqCst, andop) \
+ MSC_FETCHBITOPX(T, U, fetchOrSeqCst, orop) \
+ MSC_FETCHBITOPX(T, U, fetchXorSeqCst, xorop)
+
+#ifdef _M_IX86
+# define AND_OP &
+# define OR_OP |
+# define XOR_OP ^
+# define MSC_FETCHBITOPX_CAS(T, name, OP) \
+ template <> \
+ inline T AtomicOperations::name(T* addr, T val) { \
+ _ReadWriteBarrier(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)_InterlockedCompareExchange64((__int64 volatile*)addr, \
+ (__int64)(oldval OP val), \
+ (__int64)oldval); \
+ if (nextval == oldval) break; \
+ oldval = nextval; \
+ } \
+ _ReadWriteBarrier(); \
+ return oldval; \
+ }
+
+# define MSC_FETCHBITOP_CAS(T) \
+ MSC_FETCHBITOPX_CAS(T, fetchAndSeqCst, AND_OP) \
+ MSC_FETCHBITOPX_CAS(T, fetchOrSeqCst, OR_OP) \
+ MSC_FETCHBITOPX_CAS(T, fetchXorSeqCst, XOR_OP)
+
+#endif
+
+namespace js {
+namespace jit {
+
+MSC_FETCHBITOP(int8_t, char, _InterlockedAnd8, _InterlockedOr8,
+ _InterlockedXor8)
+MSC_FETCHBITOP(uint8_t, char, _InterlockedAnd8, _InterlockedOr8,
+ _InterlockedXor8)
+MSC_FETCHBITOP(int16_t, short, _InterlockedAnd16, _InterlockedOr16,
+ _InterlockedXor16)
+MSC_FETCHBITOP(uint16_t, short, _InterlockedAnd16, _InterlockedOr16,
+ _InterlockedXor16)
+MSC_FETCHBITOP(int32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
+MSC_FETCHBITOP(uint32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
+
+#ifdef _M_IX86
+MSC_FETCHBITOP_CAS(int64_t)
+MSC_FETCHBITOP_CAS(uint64_t)
+#else
+MSC_FETCHBITOP(int64_t, __int64, _InterlockedAnd64, _InterlockedOr64,
+ _InterlockedXor64)
+MSC_FETCHBITOP(uint64_t, __int64, _InterlockedAnd64, _InterlockedOr64,
+ _InterlockedXor64)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef MSC_FETCHBITOPX_CAS
+#undef MSC_FETCHBITOPX
+#undef MSC_FETCHBITOP_CAS
+#undef MSC_FETCHBITOP
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
+ // This is also appropriate for double, int64, and uint64 on 32-bit
+ // platforms since there are no guarantees of access-atomicity.
+ return *addr;
+}
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
+ // This is also appropriate for double, int64, and uint64 on 32-bit
+ // platforms since there are no guarantees of access-atomicity.
+ *addr = val;
+}
+
+inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+ MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+ ::memcpy(dest, src, nbytes);
+}
+
+inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ ::memmove(dest, src, nbytes);
+}
+
+#endif // jit_shared_AtomicOperations_feeling_lucky_msvc_h
diff --git a/js/src/jit/shared/AtomicOperations-feeling-lucky.h b/js/src/jit/shared/AtomicOperations-feeling-lucky.h
new file mode 100644
index 0000000000..fd510ee49a
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-feeling-lucky.h
@@ -0,0 +1,18 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_AtomicOperations_feeling_lucky_h
+#define jit_shared_AtomicOperations_feeling_lucky_h
+
+#if defined(__clang__) || defined(__GNUC__)
+# include "jit/shared/AtomicOperations-feeling-lucky-gcc.h"
+#elif defined(_MSC_VER)
+# include "jit/shared/AtomicOperations-feeling-lucky-msvc.h"
+#else
+# error "No AtomicOperations support for this platform+compiler combination"
+#endif
+
+#endif // jit_shared_AtomicOperations_feeling_lucky_h
diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.cpp b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
new file mode 100644
index 0000000000..79463f118b
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
@@ -0,0 +1,1037 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Atomics.h"
+
+#ifdef JS_CODEGEN_ARM
+# include "jit/arm/Architecture-arm.h"
+#endif
+#include "jit/AtomicOperations.h"
+#include "jit/IonTypes.h"
+#include "jit/MacroAssembler.h"
+#include "jit/RegisterSets.h"
+#include "js/ScalarType.h" // js::Scalar::Type
+#include "util/Poison.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Assigned registers must follow these rules:
+//
+// - if they overlap the argument registers (for arguments we use) then they
+//
+// M M U U SSSS TTTTT
+// ====\ MM MM U U S T /====
+// =====> M M M U U SSS T <=====
+// ====/ M M U U S T \====
+// M M UUU SSSS T
+//
+// require no register movement, even for 64-bit registers. (If this becomes
+// too complex to handle then we need to create an abstraction that uses the
+// MoveResolver, see comments on bug 1394420.)
+//
+// - they should be volatile when possible so that we don't have to save and
+// restore them.
+//
+// Note that the functions we're generating have a very limited number of
+// signatures, and the register assignments need only work for these signatures.
+// The signatures are these:
+//
+// ()
+// (ptr)
+// (ptr, val/val64)
+// (ptr, ptr)
+// (ptr, val/val64, val/val64)
+//
+// It would be nice to avoid saving and restoring all the nonvolatile registers
+// for all the operations, and instead save and restore only the registers used
+// by each specific operation, but the amount of protocol needed to accomplish
+// that probably does not pay for itself.
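
Written out as C++ function types, the signature shapes listed above look
roughly like this (illustrative sketch only; the type names are invented and
are not the generated stubs' real signatures):

  using FenceFn = void (*)();                                  // ()
  using Load32Fn = uint32_t (*)(const uint32_t*);              // (ptr)
  using Store64Fn = void (*)(uint64_t*, uint64_t);             // (ptr, val64)
  using CopyFn = void (*)(uint8_t*, const uint8_t*);           // (ptr, ptr)
  using Cmpxchg32Fn = uint32_t (*)(uint32_t*, uint32_t,
                                   uint32_t);                  // (ptr, val, val)
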
+
+#if defined(JS_CODEGEN_X64)
+
+// Selected registers match the argument registers exactly, and none of them
+// overlap the result register.
+
+static const LiveRegisterSet AtomicNonVolatileRegs;
+
+static constexpr Register AtomicPtrReg = IntArgReg0;
+static constexpr Register AtomicPtr2Reg = IntArgReg1;
+static constexpr Register AtomicValReg = IntArgReg1;
+static constexpr Register64 AtomicValReg64(IntArgReg1);
+static constexpr Register AtomicVal2Reg = IntArgReg2;
+static constexpr Register64 AtomicVal2Reg64(IntArgReg2);
+static constexpr Register AtomicTemp = IntArgReg3;
+static constexpr Register64 AtomicTemp64(IntArgReg3);
+
+static constexpr Register64 AtomicReturnReg64 = ReturnReg64;
+
+#elif defined(JS_CODEGEN_ARM64)
+
+// Selected registers match the argument registers, except that the Ptr is not
+// in IntArgReg0 so as not to conflict with the result register.
+
+static const LiveRegisterSet AtomicNonVolatileRegs;
+
+static constexpr Register AtomicPtrReg = IntArgReg4;
+static constexpr Register AtomicPtr2Reg = IntArgReg1;
+static constexpr Register AtomicValReg = IntArgReg1;
+static constexpr Register64 AtomicValReg64(IntArgReg1);
+static constexpr Register AtomicVal2Reg = IntArgReg2;
+static constexpr Register64 AtomicVal2Reg64(IntArgReg2);
+static constexpr Register AtomicTemp = IntArgReg3;
+static constexpr Register64 AtomicTemp64(IntArgReg3);
+
+static constexpr Register64 AtomicReturnReg64 = ReturnReg64;
+
+#elif defined(JS_CODEGEN_ARM)
+
+// Assigned registers except temp are disjoint from the argument registers,
+// since accounting for both 32-bit and 64-bit arguments and constraints on the
+// result register is much too messy. The temp is in an argument register since
+// it won't be used until we've moved all arguments to other registers.
+//
+// Save LR because it's the second scratch register. The first scratch register
+// is r12 (IP). The atomics implementation in the MacroAssembler uses both.
+
+static const LiveRegisterSet AtomicNonVolatileRegs = LiveRegisterSet(
+ GeneralRegisterSet(
+ (uint32_t(1) << Registers::r4) | (uint32_t(1) << Registers::r5) |
+ (uint32_t(1) << Registers::r6) | (uint32_t(1) << Registers::r7) |
+ (uint32_t(1) << Registers::r8) | (uint32_t(1) << Registers::lr)),
+ FloatRegisterSet(0));
+
+static constexpr Register AtomicPtrReg = r8;
+static constexpr Register AtomicPtr2Reg = r6;
+static constexpr Register AtomicTemp = r3;
+static constexpr Register AtomicValReg = r6;
+static constexpr Register64 AtomicValReg64(r7, r6);
+static constexpr Register AtomicVal2Reg = r4;
+static constexpr Register64 AtomicVal2Reg64(r5, r4);
+
+static constexpr Register64 AtomicReturnReg64 = ReturnReg64;
+
+#elif defined(JS_CODEGEN_X86)
+
+// There are no argument registers.
+
+static const LiveRegisterSet AtomicNonVolatileRegs = LiveRegisterSet(
+ GeneralRegisterSet((1 << X86Encoding::rbx) | (1 << X86Encoding::rsi)),
+ FloatRegisterSet(0));
+
+static constexpr Register AtomicPtrReg = esi;
+static constexpr Register AtomicPtr2Reg = ebx;
+static constexpr Register AtomicValReg = ebx;
+static constexpr Register AtomicVal2Reg = ecx;
+static constexpr Register AtomicTemp = edx;
+
+// 64-bit registers for cmpxchg8b. ValReg/Val2Reg/Temp are not used in this
+// case.
+
+static constexpr Register64 AtomicValReg64(edx, eax);
+static constexpr Register64 AtomicVal2Reg64(ecx, ebx);
+
+// AtomicReturnReg64 is unused on x86.
+
+#else
+# error "Unsupported platform"
+#endif
+
+// These are useful shorthands and hide the meaningless uint/int distinction.
+
+static constexpr Scalar::Type SIZE8 = Scalar::Uint8;
+static constexpr Scalar::Type SIZE16 = Scalar::Uint16;
+static constexpr Scalar::Type SIZE32 = Scalar::Uint32;
+static constexpr Scalar::Type SIZE64 = Scalar::Int64;
+#ifdef JS_64BIT
+static constexpr Scalar::Type SIZEWORD = SIZE64;
+#else
+static constexpr Scalar::Type SIZEWORD = SIZE32;
+#endif
+
+// A "block" is a sequence of bytes that is a reasonable quantum to copy to
+// amortize call overhead when implementing memcpy and memmove. A block will
+// not fit in registers on all platforms and copying it without using
+// intermediate memory will therefore be sensitive to overlap.
+//
+// A "word" is an item that we can copy using only register intermediate storage
+// on all platforms; words can be individually copied without worrying about
+// overlap.
+//
+// Blocks and words can be aligned or unaligned; specific (generated) copying
+// functions handle this in platform-specific ways.
+
+static constexpr size_t WORDSIZE =
+ sizeof(uintptr_t); // Also see SIZEWORD above
+static constexpr size_t BLOCKSIZE = 8 * WORDSIZE; // Must be a power of 2
+
+static_assert(BLOCKSIZE % WORDSIZE == 0,
+ "A block is an integral number of words");
+
+static constexpr size_t WORDMASK = WORDSIZE - 1;
+static constexpr size_t BLOCKMASK = BLOCKSIZE - 1;
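
For reference, the block/word/byte decomposition described above looks roughly
like the following plain C++ (illustrative sketch only, not the generated
code; it assumes memcpy is reachable through the existing includes, and the
real copies emitted by GenCopy below also dispatch on alignment at runtime):

  // Illustrative sketch of a forward ("DOWN") copy.
  static void CopyDownSketch(uint8_t* dest, const uint8_t* src, size_t nbytes) {
    while (nbytes >= BLOCKSIZE) {  // block: BLOCKSIZE/WORDSIZE word moves
      for (size_t i = 0; i < BLOCKSIZE; i += WORDSIZE) {
        uintptr_t w;
        memcpy(&w, src + i, WORDSIZE);  // a word fits in a register
        memcpy(dest + i, &w, WORDSIZE);
      }
      dest += BLOCKSIZE;
      src += BLOCKSIZE;
      nbytes -= BLOCKSIZE;
    }
    while (nbytes >= WORDSIZE) {  // word-sized tail
      uintptr_t w;
      memcpy(&w, src, WORDSIZE);
      memcpy(dest, &w, WORDSIZE);
      dest += WORDSIZE;
      src += WORDSIZE;
      nbytes -= WORDSIZE;
    }
    while (nbytes--) {  // byte tail
      *dest++ = *src++;
    }
  }
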
+
+struct ArgIterator {
+ ABIArgGenerator abi;
+ unsigned argBase = 0;
+};
+
+static void GenGprArg(MacroAssembler& masm, MIRType t, ArgIterator* iter,
+ Register reg) {
+ MOZ_ASSERT(t == MIRType::Pointer || t == MIRType::Int32);
+ ABIArg arg = iter->abi.next(t);
+ switch (arg.kind()) {
+ case ABIArg::GPR: {
+ if (arg.gpr() != reg) {
+ masm.movePtr(arg.gpr(), reg);
+ }
+ break;
+ }
+ case ABIArg::Stack: {
+ Address src(masm.getStackPointer(),
+ iter->argBase + arg.offsetFromArgBase());
+ masm.loadPtr(src, reg);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Not possible");
+ }
+ }
+}
+
+static void GenGpr64Arg(MacroAssembler& masm, ArgIterator* iter,
+ Register64 reg) {
+ ABIArg arg = iter->abi.next(MIRType::Int64);
+ switch (arg.kind()) {
+ case ABIArg::GPR: {
+ if (arg.gpr64() != reg) {
+ masm.move64(arg.gpr64(), reg);
+ }
+ break;
+ }
+ case ABIArg::Stack: {
+ Address src(masm.getStackPointer(),
+ iter->argBase + arg.offsetFromArgBase());
+#ifdef JS_64BIT
+ masm.load64(src, reg);
+#else
+ masm.load32(LowWord(src), reg.low);
+ masm.load32(HighWord(src), reg.high);
+#endif
+ break;
+ }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+ if (arg.gpr64() != reg) {
+ masm.move32(arg.oddGpr(), reg.high);
+ masm.move32(arg.evenGpr(), reg.low);
+ }
+ break;
+ }
+#endif
+ default: {
+ MOZ_CRASH("Not possible");
+ }
+ }
+}
+
+static uint32_t GenPrologue(MacroAssembler& masm, ArgIterator* iter) {
+ masm.assumeUnreachable("Shouldn't get here");
+ masm.flushBuffer();
+ masm.haltingAlign(CodeAlignment);
+ masm.setFramePushed(0);
+ uint32_t start = masm.currentOffset();
+ masm.PushRegsInMask(AtomicNonVolatileRegs);
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+ // The return address is among the nonvolatile registers, if pushed at all.
+ iter->argBase = masm.framePushed();
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // The return address is pushed separately.
+ iter->argBase = sizeof(void*) + masm.framePushed();
+#else
+# error "Unsupported platform"
+#endif
+ return start;
+}
+
+static void GenEpilogue(MacroAssembler& masm) {
+ masm.PopRegsInMask(AtomicNonVolatileRegs);
+ MOZ_ASSERT(masm.framePushed() == 0);
+#if defined(JS_CODEGEN_ARM64)
+ masm.Ret();
+#elif defined(JS_CODEGEN_ARM)
+ masm.mov(lr, pc);
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ masm.ret();
+#endif
+}
+
+#ifndef JS_64BIT
+static uint32_t GenNop(MacroAssembler& masm) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+ GenEpilogue(masm);
+ return start;
+}
+#endif
+
+static uint32_t GenFenceSeqCst(MacroAssembler& masm) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+ masm.memoryBarrier(MembarFull);
+ GenEpilogue(masm);
+ return start;
+}
+
+static uint32_t GenLoad(MacroAssembler& masm, Scalar::Type size,
+ Synchronization sync) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+ GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);
+
+ masm.memoryBarrier(sync.barrierBefore);
+ Address addr(AtomicPtrReg, 0);
+ switch (size) {
+ case SIZE8:
+ masm.load8ZeroExtend(addr, ReturnReg);
+ break;
+ case SIZE16:
+ masm.load16ZeroExtend(addr, ReturnReg);
+ break;
+ case SIZE32:
+ masm.load32(addr, ReturnReg);
+ break;
+ case SIZE64:
+#if defined(JS_64BIT)
+ masm.load64(addr, AtomicReturnReg64);
+ break;
+#else
+ MOZ_CRASH("64-bit atomic load not available on this platform");
+#endif
+ default:
+ MOZ_CRASH("Unknown size");
+ }
+ masm.memoryBarrier(sync.barrierAfter);
+
+ GenEpilogue(masm);
+ return start;
+}
+
+static uint32_t GenStore(MacroAssembler& masm, Scalar::Type size,
+ Synchronization sync) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+ GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);
+
+ masm.memoryBarrier(sync.barrierBefore);
+ Address addr(AtomicPtrReg, 0);
+ switch (size) {
+ case SIZE8:
+ GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+ masm.store8(AtomicValReg, addr);
+ break;
+ case SIZE16:
+ GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+ masm.store16(AtomicValReg, addr);
+ break;
+ case SIZE32:
+ GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+ masm.store32(AtomicValReg, addr);
+ break;
+ case SIZE64:
+#if defined(JS_64BIT)
+ GenGpr64Arg(masm, &iter, AtomicValReg64);
+ masm.store64(AtomicValReg64, addr);
+ break;
+#else
+ MOZ_CRASH("64-bit atomic store not available on this platform");
+#endif
+ default:
+ MOZ_CRASH("Unknown size");
+ }
+ masm.memoryBarrier(sync.barrierAfter);
+
+ GenEpilogue(masm);
+ return start;
+}
+
+enum class CopyDir {
+ DOWN, // Move data down, i.e., iterate toward higher addresses
+ UP // Move data up, i.e., iterate toward lower addresses
+};
+
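+// For example, with unroll == 4, CopyDir::DOWN visits element offsets
+// 0, 1, 2, 3 (ascending addresses) while CopyDir::UP visits 3, 2, 1, 0
+// (descending addresses); this matches how GenCopy below initializes and
+// steps `offset`.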
+static uint32_t GenCopy(MacroAssembler& masm, Scalar::Type size,
+ uint32_t unroll, CopyDir direction) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+
+ Register dest = AtomicPtrReg;
+ Register src = AtomicPtr2Reg;
+
+ GenGprArg(masm, MIRType::Pointer, &iter, dest);
+ GenGprArg(masm, MIRType::Pointer, &iter, src);
+
+ uint32_t offset = direction == CopyDir::DOWN ? 0 : unroll - 1;
+ for (uint32_t i = 0; i < unroll; i++) {
+ switch (size) {
+ case SIZE8:
+ masm.load8ZeroExtend(Address(src, offset), AtomicTemp);
+ masm.store8(AtomicTemp, Address(dest, offset));
+ break;
+ case SIZE16:
+ masm.load16ZeroExtend(Address(src, offset * 2), AtomicTemp);
+ masm.store16(AtomicTemp, Address(dest, offset * 2));
+ break;
+ case SIZE32:
+ masm.load32(Address(src, offset * 4), AtomicTemp);
+ masm.store32(AtomicTemp, Address(dest, offset * 4));
+ break;
+ case SIZE64:
+#if defined(JS_64BIT)
+ masm.load64(Address(src, offset * 8), AtomicTemp64);
+ masm.store64(AtomicTemp64, Address(dest, offset * 8));
+ break;
+#else
+ MOZ_CRASH("64-bit atomic load/store not available on this platform");
+#endif
+ default:
+ MOZ_CRASH("Unknown size");
+ }
+ offset += direction == CopyDir::DOWN ? 1 : -1;
+ }
+
+ GenEpilogue(masm);
+ return start;
+}
+
+static uint32_t GenCmpxchg(MacroAssembler& masm, Scalar::Type size,
+ Synchronization sync) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+ GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);
+
+ Address addr(AtomicPtrReg, 0);
+ switch (size) {
+ case SIZE8:
+ case SIZE16:
+ case SIZE32:
+ GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+ GenGprArg(masm, MIRType::Int32, &iter, AtomicVal2Reg);
+ masm.compareExchange(size, sync, addr, AtomicValReg, AtomicVal2Reg,
+ ReturnReg);
+ break;
+ case SIZE64:
+ GenGpr64Arg(masm, &iter, AtomicValReg64);
+ GenGpr64Arg(masm, &iter, AtomicVal2Reg64);
+#if defined(JS_CODEGEN_X86)
+ static_assert(AtomicValReg64 == Register64(edx, eax));
+ static_assert(AtomicVal2Reg64 == Register64(ecx, ebx));
+
+ // The return register edx:eax is a compiler/ABI assumption that is *not*
+ // the same as ReturnReg64, so it's correct not to use that here.
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(addr));
+#else
+ masm.compareExchange64(sync, addr, AtomicValReg64, AtomicVal2Reg64,
+ AtomicReturnReg64);
+#endif
+ break;
+ default:
+ MOZ_CRASH("Unknown size");
+ }
+
+ GenEpilogue(masm);
+ return start;
+}
+
+static uint32_t GenExchange(MacroAssembler& masm, Scalar::Type size,
+ Synchronization sync) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+ GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);
+
+ Address addr(AtomicPtrReg, 0);
+ switch (size) {
+ case SIZE8:
+ case SIZE16:
+ case SIZE32:
+ GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+ masm.atomicExchange(size, sync, addr, AtomicValReg, ReturnReg);
+ break;
+ case SIZE64:
+#if defined(JS_64BIT)
+ GenGpr64Arg(masm, &iter, AtomicValReg64);
+ masm.atomicExchange64(sync, addr, AtomicValReg64, AtomicReturnReg64);
+ break;
+#else
+ MOZ_CRASH("64-bit atomic exchange not available on this platform");
+#endif
+ default:
+ MOZ_CRASH("Unknown size");
+ }
+
+ GenEpilogue(masm);
+ return start;
+}
+
+static uint32_t GenFetchOp(MacroAssembler& masm, Scalar::Type size, AtomicOp op,
+ Synchronization sync) {
+ ArgIterator iter;
+ uint32_t start = GenPrologue(masm, &iter);
+ GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);
+
+ Address addr(AtomicPtrReg, 0);
+ switch (size) {
+ case SIZE8:
+ case SIZE16:
+ case SIZE32: {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ Register tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp
+ ? Register::Invalid()
+ : AtomicTemp;
+#else
+ Register tmp = AtomicTemp;
+#endif
+ GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+ masm.atomicFetchOp(size, sync, op, AtomicValReg, addr, tmp, ReturnReg);
+ break;
+ }
+ case SIZE64: {
+#if defined(JS_64BIT)
+# if defined(JS_CODEGEN_X64)
+ Register64 tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp
+ ? Register64::Invalid()
+ : AtomicTemp64;
+# else
+ Register64 tmp = AtomicTemp64;
+# endif
+ GenGpr64Arg(masm, &iter, AtomicValReg64);
+ masm.atomicFetchOp64(sync, op, AtomicValReg64, addr, tmp,
+ AtomicReturnReg64);
+ break;
+#else
+ MOZ_CRASH("64-bit atomic fetchOp not available on this platform");
+#endif
+ }
+ default:
+ MOZ_CRASH("Unknown size");
+ }
+
+ GenEpilogue(masm);
+ return start;
+}
+
+namespace js {
+namespace jit {
+
+void (*AtomicFenceSeqCst)();
+
+#ifndef JS_64BIT
+void (*AtomicCompilerFence)();
+#endif
+
+uint8_t (*AtomicLoad8SeqCst)(const uint8_t* addr);
+uint16_t (*AtomicLoad16SeqCst)(const uint16_t* addr);
+uint32_t (*AtomicLoad32SeqCst)(const uint32_t* addr);
+#ifdef JS_64BIT
+uint64_t (*AtomicLoad64SeqCst)(const uint64_t* addr);
+#endif
+
+uint8_t (*AtomicLoad8Unsynchronized)(const uint8_t* addr);
+uint16_t (*AtomicLoad16Unsynchronized)(const uint16_t* addr);
+uint32_t (*AtomicLoad32Unsynchronized)(const uint32_t* addr);
+#ifdef JS_64BIT
+uint64_t (*AtomicLoad64Unsynchronized)(const uint64_t* addr);
+#endif
+
+uint8_t (*AtomicStore8SeqCst)(uint8_t* addr, uint8_t val);
+uint16_t (*AtomicStore16SeqCst)(uint16_t* addr, uint16_t val);
+uint32_t (*AtomicStore32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+uint64_t (*AtomicStore64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+uint8_t (*AtomicStore8Unsynchronized)(uint8_t* addr, uint8_t val);
+uint16_t (*AtomicStore16Unsynchronized)(uint16_t* addr, uint16_t val);
+uint32_t (*AtomicStore32Unsynchronized)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+uint64_t (*AtomicStore64Unsynchronized)(uint64_t* addr, uint64_t val);
+#endif
+
+// See the definitions of BLOCKSIZE and WORDSIZE earlier. The "unaligned"
+// functions perform individual byte copies (and must always be "down" or "up").
+// The others ignore alignment issues, and thus either depend on unaligned
+// accesses being OK or not being invoked on unaligned addresses.
+//
+// src and dest point to the lower addresses of the respective data areas
+// irrespective of "up" or "down".
+
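+// Illustrative sketch (not part of the compiled code): since the "unaligned"
+// variants are generated by GenCopy with SIZE8 elements, each is morally
+// equivalent to a byte-by-byte loop over WORDSIZE (or BLOCKSIZE) bytes:
+//
+//   void UnalignedWordCopyDown(uint8_t* dest, const uint8_t* src) {
+//     for (size_t i = 0; i < WORDSIZE; i++) {
+//       dest[i] = src[i]; // individual byte accesses, no alignment required
+//     }
+//   }
+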
+static void (*AtomicCopyUnalignedBlockDownUnsynchronized)(uint8_t* dest,
+ const uint8_t* src);
+static void (*AtomicCopyUnalignedBlockUpUnsynchronized)(uint8_t* dest,
+ const uint8_t* src);
+static void (*AtomicCopyUnalignedWordDownUnsynchronized)(uint8_t* dest,
+ const uint8_t* src);
+static void (*AtomicCopyUnalignedWordUpUnsynchronized)(uint8_t* dest,
+ const uint8_t* src);
+
+static void (*AtomicCopyBlockDownUnsynchronized)(uint8_t* dest,
+ const uint8_t* src);
+static void (*AtomicCopyBlockUpUnsynchronized)(uint8_t* dest,
+ const uint8_t* src);
+static void (*AtomicCopyWordUnsynchronized)(uint8_t* dest, const uint8_t* src);
+static void (*AtomicCopyByteUnsynchronized)(uint8_t* dest, const uint8_t* src);
+
+uint8_t (*AtomicCmpXchg8SeqCst)(uint8_t* addr, uint8_t oldval, uint8_t newval);
+uint16_t (*AtomicCmpXchg16SeqCst)(uint16_t* addr, uint16_t oldval,
+ uint16_t newval);
+uint32_t (*AtomicCmpXchg32SeqCst)(uint32_t* addr, uint32_t oldval,
+ uint32_t newval);
+uint64_t (*AtomicCmpXchg64SeqCst)(uint64_t* addr, uint64_t oldval,
+ uint64_t newval);
+
+uint8_t (*AtomicExchange8SeqCst)(uint8_t* addr, uint8_t val);
+uint16_t (*AtomicExchange16SeqCst)(uint16_t* addr, uint16_t val);
+uint32_t (*AtomicExchange32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+uint64_t (*AtomicExchange64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+uint8_t (*AtomicAdd8SeqCst)(uint8_t* addr, uint8_t val);
+uint16_t (*AtomicAdd16SeqCst)(uint16_t* addr, uint16_t val);
+uint32_t (*AtomicAdd32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+uint64_t (*AtomicAdd64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+uint8_t (*AtomicAnd8SeqCst)(uint8_t* addr, uint8_t val);
+uint16_t (*AtomicAnd16SeqCst)(uint16_t* addr, uint16_t val);
+uint32_t (*AtomicAnd32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+uint64_t (*AtomicAnd64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+uint8_t (*AtomicOr8SeqCst)(uint8_t* addr, uint8_t val);
+uint16_t (*AtomicOr16SeqCst)(uint16_t* addr, uint16_t val);
+uint32_t (*AtomicOr32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+uint64_t (*AtomicOr64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+uint8_t (*AtomicXor8SeqCst)(uint8_t* addr, uint8_t val);
+uint16_t (*AtomicXor16SeqCst)(uint16_t* addr, uint16_t val);
+uint32_t (*AtomicXor32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+uint64_t (*AtomicXor64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+static bool UnalignedAccessesAreOK() {
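+ // In debug builds, setting JS_NO_UNALIGNED_MEMCPY=1 in the environment
+ // forces the alignment-safe byte-wise copy paths, for example when testing
+ // them on hardware that would otherwise take the fast path.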
+#ifdef DEBUG
+ const char* flag = getenv("JS_NO_UNALIGNED_MEMCPY");
+ if (flag && *flag == '1') return false;
+#endif
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ return true;
+#elif defined(JS_CODEGEN_ARM)
+ return !HasAlignmentFault();
+#elif defined(JS_CODEGEN_ARM64)
+ // This is not necessarily true but it's the best guess right now.
+ return true;
+#else
+# error "Unsupported platform"
+#endif
+}
+
+void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes) {
+ const uint8_t* lim = src + nbytes;
+
+ // Set up bulk copying. The cases are ordered the way they are on the
+ // assumption that if we can achieve aligned copies even with a little
+ // preprocessing then that is better than unaligned copying on a platform
+ // that supports it.
+
+ if (nbytes >= WORDSIZE) {
+ void (*copyBlock)(uint8_t * dest, const uint8_t* src);
+ void (*copyWord)(uint8_t * dest, const uint8_t* src);
+
+ if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
+ const uint8_t* cutoff = (const uint8_t*)RoundUp(uintptr_t(src), WORDSIZE);
+ MOZ_ASSERT(cutoff <= lim); // because nbytes >= WORDSIZE
+ while (src < cutoff) {
+ AtomicCopyByteUnsynchronized(dest++, src++);
+ }
+ copyBlock = AtomicCopyBlockDownUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else if (UnalignedAccessesAreOK()) {
+ copyBlock = AtomicCopyBlockDownUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else {
+ copyBlock = AtomicCopyUnalignedBlockDownUnsynchronized;
+ copyWord = AtomicCopyUnalignedWordDownUnsynchronized;
+ }
+
+ // Bulk copy, first larger blocks and then individual words.
+
+ const uint8_t* blocklim = src + ((lim - src) & ~BLOCKMASK);
+ while (src < blocklim) {
+ copyBlock(dest, src);
+ dest += BLOCKSIZE;
+ src += BLOCKSIZE;
+ }
+
+ const uint8_t* wordlim = src + ((lim - src) & ~WORDMASK);
+ while (src < wordlim) {
+ copyWord(dest, src);
+ dest += WORDSIZE;
+ src += WORDSIZE;
+ }
+ }
+
+ // Byte copy any remaining tail.
+
+ while (src < lim) {
+ AtomicCopyByteUnsynchronized(dest++, src++);
+ }
+}
+
+void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes) {
+ const uint8_t* lim = src;
+
+ src += nbytes;
+ dest += nbytes;
+
+ if (nbytes >= WORDSIZE) {
+ void (*copyBlock)(uint8_t * dest, const uint8_t* src);
+ void (*copyWord)(uint8_t * dest, const uint8_t* src);
+
+ if (((uintptr_t(dest) ^ uintptr_t(src)) & WORDMASK) == 0) {
+ const uint8_t* cutoff = (const uint8_t*)(uintptr_t(src) & ~WORDMASK);
+ MOZ_ASSERT(cutoff >= lim); // Because nbytes >= WORDSIZE
+ while (src > cutoff) {
+ AtomicCopyByteUnsynchronized(--dest, --src);
+ }
+ copyBlock = AtomicCopyBlockUpUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else if (UnalignedAccessesAreOK()) {
+ copyBlock = AtomicCopyBlockUpUnsynchronized;
+ copyWord = AtomicCopyWordUnsynchronized;
+ } else {
+ copyBlock = AtomicCopyUnalignedBlockUpUnsynchronized;
+ copyWord = AtomicCopyUnalignedWordUpUnsynchronized;
+ }
+
+ const uint8_t* blocklim = src - ((src - lim) & ~BLOCKMASK);
+ while (src > blocklim) {
+ dest -= BLOCKSIZE;
+ src -= BLOCKSIZE;
+ copyBlock(dest, src);
+ }
+
+ const uint8_t* wordlim = src - ((src - lim) & ~WORDMASK);
+ while (src > wordlim) {
+ dest -= WORDSIZE;
+ src -= WORDSIZE;
+ copyWord(dest, src);
+ }
+ }
+
+ while (src > lim) {
+ AtomicCopyByteUnsynchronized(--dest, --src);
+ }
+}
+
+// These will be read and written only by the main thread during startup and
+// shutdown.
+
+static uint8_t* codeSegment;
+static uint32_t codeSegmentSize;
+
+bool InitializeJittedAtomics() {
+ // We should only initialize once.
+ MOZ_ASSERT(!codeSegment);
+
+ LifoAlloc lifo(4096);
+ TempAllocator alloc(&lifo);
+ JitContext jcx(&alloc);
+ StackMacroAssembler masm;
+
+ uint32_t fenceSeqCst = GenFenceSeqCst(masm);
+
+#ifndef JS_64BIT
+ uint32_t nop = GenNop(masm);
+#endif
+
+ Synchronization Full = Synchronization::Full();
+ Synchronization None = Synchronization::None();
+
+ uint32_t load8SeqCst = GenLoad(masm, SIZE8, Full);
+ uint32_t load16SeqCst = GenLoad(masm, SIZE16, Full);
+ uint32_t load32SeqCst = GenLoad(masm, SIZE32, Full);
+#ifdef JS_64BIT
+ uint32_t load64SeqCst = GenLoad(masm, SIZE64, Full);
+#endif
+
+ uint32_t load8Unsynchronized = GenLoad(masm, SIZE8, None);
+ uint32_t load16Unsynchronized = GenLoad(masm, SIZE16, None);
+ uint32_t load32Unsynchronized = GenLoad(masm, SIZE32, None);
+#ifdef JS_64BIT
+ uint32_t load64Unsynchronized = GenLoad(masm, SIZE64, None);
+#endif
+
+ uint32_t store8SeqCst = GenStore(masm, SIZE8, Full);
+ uint32_t store16SeqCst = GenStore(masm, SIZE16, Full);
+ uint32_t store32SeqCst = GenStore(masm, SIZE32, Full);
+#ifdef JS_64BIT
+ uint32_t store64SeqCst = GenStore(masm, SIZE64, Full);
+#endif
+
+ uint32_t store8Unsynchronized = GenStore(masm, SIZE8, None);
+ uint32_t store16Unsynchronized = GenStore(masm, SIZE16, None);
+ uint32_t store32Unsynchronized = GenStore(masm, SIZE32, None);
+#ifdef JS_64BIT
+ uint32_t store64Unsynchronized = GenStore(masm, SIZE64, None);
+#endif
+
+ uint32_t copyUnalignedBlockDownUnsynchronized =
+ GenCopy(masm, SIZE8, BLOCKSIZE, CopyDir::DOWN);
+ uint32_t copyUnalignedBlockUpUnsynchronized =
+ GenCopy(masm, SIZE8, BLOCKSIZE, CopyDir::UP);
+ uint32_t copyUnalignedWordDownUnsynchronized =
+ GenCopy(masm, SIZE8, WORDSIZE, CopyDir::DOWN);
+ uint32_t copyUnalignedWordUpUnsynchronized =
+ GenCopy(masm, SIZE8, WORDSIZE, CopyDir::UP);
+
+ uint32_t copyBlockDownUnsynchronized =
+ GenCopy(masm, SIZEWORD, BLOCKSIZE / WORDSIZE, CopyDir::DOWN);
+ uint32_t copyBlockUpUnsynchronized =
+ GenCopy(masm, SIZEWORD, BLOCKSIZE / WORDSIZE, CopyDir::UP);
+ uint32_t copyWordUnsynchronized = GenCopy(masm, SIZEWORD, 1, CopyDir::DOWN);
+ uint32_t copyByteUnsynchronized = GenCopy(masm, SIZE8, 1, CopyDir::DOWN);
+
+ uint32_t cmpxchg8SeqCst = GenCmpxchg(masm, SIZE8, Full);
+ uint32_t cmpxchg16SeqCst = GenCmpxchg(masm, SIZE16, Full);
+ uint32_t cmpxchg32SeqCst = GenCmpxchg(masm, SIZE32, Full);
+ uint32_t cmpxchg64SeqCst = GenCmpxchg(masm, SIZE64, Full);
+
+ uint32_t exchange8SeqCst = GenExchange(masm, SIZE8, Full);
+ uint32_t exchange16SeqCst = GenExchange(masm, SIZE16, Full);
+ uint32_t exchange32SeqCst = GenExchange(masm, SIZE32, Full);
+#ifdef JS_64BIT
+ uint32_t exchange64SeqCst = GenExchange(masm, SIZE64, Full);
+#endif
+
+ uint32_t add8SeqCst = GenFetchOp(masm, SIZE8, AtomicFetchAddOp, Full);
+ uint32_t add16SeqCst = GenFetchOp(masm, SIZE16, AtomicFetchAddOp, Full);
+ uint32_t add32SeqCst = GenFetchOp(masm, SIZE32, AtomicFetchAddOp, Full);
+#ifdef JS_64BIT
+ uint32_t add64SeqCst = GenFetchOp(masm, SIZE64, AtomicFetchAddOp, Full);
+#endif
+
+ uint32_t and8SeqCst = GenFetchOp(masm, SIZE8, AtomicFetchAndOp, Full);
+ uint32_t and16SeqCst = GenFetchOp(masm, SIZE16, AtomicFetchAndOp, Full);
+ uint32_t and32SeqCst = GenFetchOp(masm, SIZE32, AtomicFetchAndOp, Full);
+#ifdef JS_64BIT
+ uint32_t and64SeqCst = GenFetchOp(masm, SIZE64, AtomicFetchAndOp, Full);
+#endif
+
+ uint32_t or8SeqCst = GenFetchOp(masm, SIZE8, AtomicFetchOrOp, Full);
+ uint32_t or16SeqCst = GenFetchOp(masm, SIZE16, AtomicFetchOrOp, Full);
+ uint32_t or32SeqCst = GenFetchOp(masm, SIZE32, AtomicFetchOrOp, Full);
+#ifdef JS_64BIT
+ uint32_t or64SeqCst = GenFetchOp(masm, SIZE64, AtomicFetchOrOp, Full);
+#endif
+
+ uint32_t xor8SeqCst = GenFetchOp(masm, SIZE8, AtomicFetchXorOp, Full);
+ uint32_t xor16SeqCst = GenFetchOp(masm, SIZE16, AtomicFetchXorOp, Full);
+ uint32_t xor32SeqCst = GenFetchOp(masm, SIZE32, AtomicFetchXorOp, Full);
+#ifdef JS_64BIT
+ uint32_t xor64SeqCst = GenFetchOp(masm, SIZE64, AtomicFetchXorOp, Full);
+#endif
+
+ masm.finish();
+ if (masm.oom()) {
+ return false;
+ }
+
+ // Allocate executable memory.
+ uint32_t codeLength = masm.bytesNeeded();
+ size_t roundedCodeLength = RoundUp(codeLength, ExecutableCodePageSize);
+ uint8_t* code = (uint8_t*)AllocateExecutableMemory(
+ roundedCodeLength, ProtectionSetting::Writable,
+ MemCheckKind::MakeUndefined);
+ if (!code) {
+ return false;
+ }
+
+ // Zero the padding.
+ memset(code + codeLength, 0, roundedCodeLength - codeLength);
+
+ // Copy the code into place.
+ masm.executableCopy(code);
+
+ // Reprotect the whole region to avoid having separate RW and RX mappings.
+ if (!ExecutableAllocator::makeExecutableAndFlushICache(
+ FlushICacheSpec::LocalThreadOnly, code, roundedCodeLength)) {
+ DeallocateExecutableMemory(code, roundedCodeLength);
+ return false;
+ }
+
+ // Create the function pointers.
+
+ AtomicFenceSeqCst = (void (*)())(code + fenceSeqCst);
+
+#ifndef JS_64BIT
+ AtomicCompilerFence = (void (*)())(code + nop);
+#endif
+
+ AtomicLoad8SeqCst = (uint8_t(*)(const uint8_t* addr))(code + load8SeqCst);
+ AtomicLoad16SeqCst = (uint16_t(*)(const uint16_t* addr))(code + load16SeqCst);
+ AtomicLoad32SeqCst = (uint32_t(*)(const uint32_t* addr))(code + load32SeqCst);
+#ifdef JS_64BIT
+ AtomicLoad64SeqCst = (uint64_t(*)(const uint64_t* addr))(code + load64SeqCst);
+#endif
+
+ AtomicLoad8Unsynchronized =
+ (uint8_t(*)(const uint8_t* addr))(code + load8Unsynchronized);
+ AtomicLoad16Unsynchronized =
+ (uint16_t(*)(const uint16_t* addr))(code + load16Unsynchronized);
+ AtomicLoad32Unsynchronized =
+ (uint32_t(*)(const uint32_t* addr))(code + load32Unsynchronized);
+#ifdef JS_64BIT
+ AtomicLoad64Unsynchronized =
+ (uint64_t(*)(const uint64_t* addr))(code + load64Unsynchronized);
+#endif
+
+ AtomicStore8SeqCst =
+ (uint8_t(*)(uint8_t * addr, uint8_t val))(code + store8SeqCst);
+ AtomicStore16SeqCst =
+ (uint16_t(*)(uint16_t * addr, uint16_t val))(code + store16SeqCst);
+ AtomicStore32SeqCst =
+ (uint32_t(*)(uint32_t * addr, uint32_t val))(code + store32SeqCst);
+#ifdef JS_64BIT
+ AtomicStore64SeqCst =
+ (uint64_t(*)(uint64_t * addr, uint64_t val))(code + store64SeqCst);
+#endif
+
+ AtomicStore8Unsynchronized =
+ (uint8_t(*)(uint8_t * addr, uint8_t val))(code + store8Unsynchronized);
+ AtomicStore16Unsynchronized = (uint16_t(*)(uint16_t * addr, uint16_t val))(
+ code + store16Unsynchronized);
+ AtomicStore32Unsynchronized = (uint32_t(*)(uint32_t * addr, uint32_t val))(
+ code + store32Unsynchronized);
+#ifdef JS_64BIT
+ AtomicStore64Unsynchronized = (uint64_t(*)(uint64_t * addr, uint64_t val))(
+ code + store64Unsynchronized);
+#endif
+
+ AtomicCopyUnalignedBlockDownUnsynchronized =
+ (void (*)(uint8_t * dest, const uint8_t* src))(
+ code + copyUnalignedBlockDownUnsynchronized);
+ AtomicCopyUnalignedBlockUpUnsynchronized =
+ (void (*)(uint8_t * dest, const uint8_t* src))(
+ code + copyUnalignedBlockUpUnsynchronized);
+ AtomicCopyUnalignedWordDownUnsynchronized =
+ (void (*)(uint8_t * dest, const uint8_t* src))(
+ code + copyUnalignedWordDownUnsynchronized);
+ AtomicCopyUnalignedWordUpUnsynchronized =
+ (void (*)(uint8_t * dest, const uint8_t* src))(
+ code + copyUnalignedWordUpUnsynchronized);
+
+ AtomicCopyBlockDownUnsynchronized = (void (*)(
+ uint8_t * dest, const uint8_t* src))(code + copyBlockDownUnsynchronized);
+ AtomicCopyBlockUpUnsynchronized = (void (*)(
+ uint8_t * dest, const uint8_t* src))(code + copyBlockUpUnsynchronized);
+ AtomicCopyWordUnsynchronized = (void (*)(uint8_t * dest, const uint8_t* src))(
+ code + copyWordUnsynchronized);
+ AtomicCopyByteUnsynchronized = (void (*)(uint8_t * dest, const uint8_t* src))(
+ code + copyByteUnsynchronized);
+
+ AtomicCmpXchg8SeqCst = (uint8_t(*)(uint8_t * addr, uint8_t oldval,
+ uint8_t newval))(code + cmpxchg8SeqCst);
+ AtomicCmpXchg16SeqCst =
+ (uint16_t(*)(uint16_t * addr, uint16_t oldval, uint16_t newval))(
+ code + cmpxchg16SeqCst);
+ AtomicCmpXchg32SeqCst =
+ (uint32_t(*)(uint32_t * addr, uint32_t oldval, uint32_t newval))(
+ code + cmpxchg32SeqCst);
+ AtomicCmpXchg64SeqCst =
+ (uint64_t(*)(uint64_t * addr, uint64_t oldval, uint64_t newval))(
+ code + cmpxchg64SeqCst);
+
+ AtomicExchange8SeqCst =
+ (uint8_t(*)(uint8_t * addr, uint8_t val))(code + exchange8SeqCst);
+ AtomicExchange16SeqCst =
+ (uint16_t(*)(uint16_t * addr, uint16_t val))(code + exchange16SeqCst);
+ AtomicExchange32SeqCst =
+ (uint32_t(*)(uint32_t * addr, uint32_t val))(code + exchange32SeqCst);
+#ifdef JS_64BIT
+ AtomicExchange64SeqCst =
+ (uint64_t(*)(uint64_t * addr, uint64_t val))(code + exchange64SeqCst);
+#endif
+
+ AtomicAdd8SeqCst =
+ (uint8_t(*)(uint8_t * addr, uint8_t val))(code + add8SeqCst);
+ AtomicAdd16SeqCst =
+ (uint16_t(*)(uint16_t * addr, uint16_t val))(code + add16SeqCst);
+ AtomicAdd32SeqCst =
+ (uint32_t(*)(uint32_t * addr, uint32_t val))(code + add32SeqCst);
+#ifdef JS_64BIT
+ AtomicAdd64SeqCst =
+ (uint64_t(*)(uint64_t * addr, uint64_t val))(code + add64SeqCst);
+#endif
+
+ AtomicAnd8SeqCst =
+ (uint8_t(*)(uint8_t * addr, uint8_t val))(code + and8SeqCst);
+ AtomicAnd16SeqCst =
+ (uint16_t(*)(uint16_t * addr, uint16_t val))(code + and16SeqCst);
+ AtomicAnd32SeqCst =
+ (uint32_t(*)(uint32_t * addr, uint32_t val))(code + and32SeqCst);
+#ifdef JS_64BIT
+ AtomicAnd64SeqCst =
+ (uint64_t(*)(uint64_t * addr, uint64_t val))(code + and64SeqCst);
+#endif
+
+ AtomicOr8SeqCst = (uint8_t(*)(uint8_t * addr, uint8_t val))(code + or8SeqCst);
+ AtomicOr16SeqCst =
+ (uint16_t(*)(uint16_t * addr, uint16_t val))(code + or16SeqCst);
+ AtomicOr32SeqCst =
+ (uint32_t(*)(uint32_t * addr, uint32_t val))(code + or32SeqCst);
+#ifdef JS_64BIT
+ AtomicOr64SeqCst =
+ (uint64_t(*)(uint64_t * addr, uint64_t val))(code + or64SeqCst);
+#endif
+
+ AtomicXor8SeqCst =
+ (uint8_t(*)(uint8_t * addr, uint8_t val))(code + xor8SeqCst);
+ AtomicXor16SeqCst =
+ (uint16_t(*)(uint16_t * addr, uint16_t val))(code + xor16SeqCst);
+ AtomicXor32SeqCst =
+ (uint32_t(*)(uint32_t * addr, uint32_t val))(code + xor32SeqCst);
+#ifdef JS_64BIT
+ AtomicXor64SeqCst =
+ (uint64_t(*)(uint64_t * addr, uint64_t val))(code + xor64SeqCst);
+#endif
+
+ codeSegment = code;
+ codeSegmentSize = roundedCodeLength;
+
+ return true;
+}
+
+void ShutDownJittedAtomics() {
+ // Must have been initialized.
+ MOZ_ASSERT(codeSegment);
+
+ DeallocateExecutableMemory(codeSegment, codeSegmentSize);
+ codeSegment = nullptr;
+ codeSegmentSize = 0;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.h b/js/src/jit/shared/AtomicOperations-shared-jit.h
new file mode 100644
index 0000000000..39b8f23035
--- /dev/null
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.h
@@ -0,0 +1,622 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For overall documentation, see jit/AtomicOperations.h.
+ *
+ * NOTE CAREFULLY: This file is only applicable when we have configured a JIT
+ * and the JIT is for the same architecture that we're compiling the shell for.
+ * Simulators must use a different mechanism.
+ *
+ * See comments before the include nest near the end of jit/AtomicOperations.h
+ * if you didn't understand that.
+ */
+
+#ifndef jit_shared_AtomicOperations_shared_jit_h
+#define jit_shared_AtomicOperations_shared_jit_h
+
+#include "mozilla/Assertions.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "js/GCAPI.h"
+#include "vm/Uint8Clamped.h"
+
+namespace js {
+namespace jit {
+
+// The function pointers in this section all point to jitted code.
+//
+// On 32-bit systems we assume for simplicity's sake that we don't have any
+// 64-bit atomic operations except cmpxchg (this is a concession to x86 but it's
+// not a hardship). On 32-bit systems we therefore implement other 64-bit
+// atomic operations in terms of cmpxchg along with some C++ code and a local
+// reordering fence to prevent other loads and stores from being intermingled
+// with operations in the implementation of the atomic.
+
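+// Illustrative sketch only (the real 32-bit paths are the JIT_*_CAS macro
+// expansions further down): a 64-bit seq-cst store, for example, can be
+// synthesized from AtomicCmpXchg64SeqCst plus compiler fences like this:
+//
+//   void Store64SeqCst(uint64_t* addr, uint64_t val) {
+//     AtomicCompilerFence();
+//     uint64_t oldval = *addr; // initial guess; may be stale
+//     for (;;) {
+//       uint64_t seen = AtomicCmpXchg64SeqCst(addr, oldval, val);
+//       if (seen == oldval) {
+//         break; // the store took effect
+//       }
+//       oldval = seen; // retry with the freshly observed value
+//     }
+//     AtomicCompilerFence();
+//   }
+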
+// `fence` performs a full memory barrier.
+extern void (*AtomicFenceSeqCst)();
+
+#ifndef JS_64BIT
+// `compiler_fence` erects a reordering boundary for operations on the current
+// thread. We use it to prevent the compiler from reordering loads and stores
+// inside larger primitives that are synthesized from cmpxchg.
+extern void (*AtomicCompilerFence)();
+#endif
+
+extern uint8_t (*AtomicLoad8SeqCst)(const uint8_t* addr);
+extern uint16_t (*AtomicLoad16SeqCst)(const uint16_t* addr);
+extern uint32_t (*AtomicLoad32SeqCst)(const uint32_t* addr);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicLoad64SeqCst)(const uint64_t* addr);
+#endif
+
+// These are access-atomic up to sizeof(uintptr_t).
+extern uint8_t (*AtomicLoad8Unsynchronized)(const uint8_t* addr);
+extern uint16_t (*AtomicLoad16Unsynchronized)(const uint16_t* addr);
+extern uint32_t (*AtomicLoad32Unsynchronized)(const uint32_t* addr);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicLoad64Unsynchronized)(const uint64_t* addr);
+#endif
+
+extern uint8_t (*AtomicStore8SeqCst)(uint8_t* addr, uint8_t val);
+extern uint16_t (*AtomicStore16SeqCst)(uint16_t* addr, uint16_t val);
+extern uint32_t (*AtomicStore32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicStore64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+// These are access-atomic up to sizeof(uintptr_t).
+extern uint8_t (*AtomicStore8Unsynchronized)(uint8_t* addr, uint8_t val);
+extern uint16_t (*AtomicStore16Unsynchronized)(uint16_t* addr, uint16_t val);
+extern uint32_t (*AtomicStore32Unsynchronized)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicStore64Unsynchronized)(uint64_t* addr, uint64_t val);
+#endif
+
+// `exchange` takes a cell address and a value. It stores the value in the
+// cell and returns the value previously in the cell.
+extern uint8_t (*AtomicExchange8SeqCst)(uint8_t* addr, uint8_t val);
+extern uint16_t (*AtomicExchange16SeqCst)(uint16_t* addr, uint16_t val);
+extern uint32_t (*AtomicExchange32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicExchange64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+// `add` adds a value atomically to the cell and returns the old value in the
+// cell. (There is no `sub`; just add the negated value.)
+extern uint8_t (*AtomicAdd8SeqCst)(uint8_t* addr, uint8_t val);
+extern uint16_t (*AtomicAdd16SeqCst)(uint16_t* addr, uint16_t val);
+extern uint32_t (*AtomicAdd32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicAdd64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
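+// Illustrative sketch: with no dedicated `sub`, a fetch-and-sub is expressed
+// as an add of the negation, which is what the JIT_FETCHSUBOP macro below
+// expands to:
+//
+//   uint32_t FetchSub32SeqCst(uint32_t* addr, uint32_t val) {
+//     return AtomicAdd32SeqCst(addr, uint32_t(0) - val);
+//   }
+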
+// `and` bitwise-ands a value atomically into the cell and returns the old value
+// in the cell.
+extern uint8_t (*AtomicAnd8SeqCst)(uint8_t* addr, uint8_t val);
+extern uint16_t (*AtomicAnd16SeqCst)(uint16_t* addr, uint16_t val);
+extern uint32_t (*AtomicAnd32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicAnd64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+// `or` bitwise-ors a value atomically into the cell and returns the old value
+// in the cell.
+extern uint8_t (*AtomicOr8SeqCst)(uint8_t* addr, uint8_t val);
+extern uint16_t (*AtomicOr16SeqCst)(uint16_t* addr, uint16_t val);
+extern uint32_t (*AtomicOr32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicOr64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+// `xor` bitwise-xors a value atomically into the cell and returns the old value
+// in the cell.
+extern uint8_t (*AtomicXor8SeqCst)(uint8_t* addr, uint8_t val);
+extern uint16_t (*AtomicXor16SeqCst)(uint16_t* addr, uint16_t val);
+extern uint32_t (*AtomicXor32SeqCst)(uint32_t* addr, uint32_t val);
+#ifdef JS_64BIT
+extern uint64_t (*AtomicXor64SeqCst)(uint64_t* addr, uint64_t val);
+#endif
+
+// `cmpxchg` takes a cell address, an expected value and a replacement value.
+// If the value in the cell equals the expected value then the replacement value
+// is stored in the cell. It always returns the value previously in the cell.
+extern uint8_t (*AtomicCmpXchg8SeqCst)(uint8_t* addr, uint8_t oldval,
+ uint8_t newval);
+extern uint16_t (*AtomicCmpXchg16SeqCst)(uint16_t* addr, uint16_t oldval,
+ uint16_t newval);
+extern uint32_t (*AtomicCmpXchg32SeqCst)(uint32_t* addr, uint32_t oldval,
+ uint32_t newval);
+extern uint64_t (*AtomicCmpXchg64SeqCst)(uint64_t* addr, uint64_t oldval,
+ uint64_t newval);
+
+// `...MemcpyDown` moves bytes toward lower addresses in memory: dest <= src.
+// `...MemcpyUp` moves bytes toward higher addresses in memory: dest >= src.
+extern void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes);
+extern void AtomicMemcpyUpUnsynchronized(uint8_t* dest, const uint8_t* src,
+ size_t nbytes);
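+
+// Illustrative usage (this is what memmoveSafeWhenRacy below does): choose
+// the copy direction from the pointer order so that overlapping regions are
+// copied correctly:
+//
+//   if ((char*)dest <= (char*)src) {
+//     AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, n);
+//   } else {
+//     AtomicMemcpyUpUnsynchronized((uint8_t*)dest, (const uint8_t*)src, n);
+//   }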
+
+} // namespace jit
+} // namespace js
+
+inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
+
+inline bool js::jit::AtomicOperations::isLockfree8() { return true; }
+
+inline void js::jit::AtomicOperations::fenceSeqCst() { AtomicFenceSeqCst(); }
+
+#define JIT_LOADOP(T, U, loadop) \
+ template <> \
+ inline T AtomicOperations::loadSeqCst(T* addr) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ return (T)loadop((U*)addr); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_LOADOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::loadSeqCst(T* addr) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ AtomicCompilerFence(); \
+ return (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, 0, 0); \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_LOADOP(int8_t, uint8_t, AtomicLoad8SeqCst)
+JIT_LOADOP(uint8_t, uint8_t, AtomicLoad8SeqCst)
+JIT_LOADOP(int16_t, uint16_t, AtomicLoad16SeqCst)
+JIT_LOADOP(uint16_t, uint16_t, AtomicLoad16SeqCst)
+JIT_LOADOP(int32_t, uint32_t, AtomicLoad32SeqCst)
+JIT_LOADOP(uint32_t, uint32_t, AtomicLoad32SeqCst)
+
+#ifdef JIT_LOADOP_CAS
+JIT_LOADOP_CAS(int64_t)
+JIT_LOADOP_CAS(uint64_t)
+#else
+JIT_LOADOP(int64_t, uint64_t, AtomicLoad64SeqCst)
+JIT_LOADOP(uint64_t, uint64_t, AtomicLoad64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_LOADOP
+#undef JIT_LOADOP_CAS
+
+#define JIT_STOREOP(T, U, storeop) \
+ template <> \
+ inline void AtomicOperations::storeSeqCst(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ storeop((U*)addr, val); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_STOREOP_CAS(T) \
+ template <> \
+ inline void AtomicOperations::storeSeqCst(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ AtomicCompilerFence(); \
+ T oldval = *addr; /* good initial approximation */ \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, \
+ (uint64_t)oldval, (uint64_t)val); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_STOREOP(int8_t, uint8_t, AtomicStore8SeqCst)
+JIT_STOREOP(uint8_t, uint8_t, AtomicStore8SeqCst)
+JIT_STOREOP(int16_t, uint16_t, AtomicStore16SeqCst)
+JIT_STOREOP(uint16_t, uint16_t, AtomicStore16SeqCst)
+JIT_STOREOP(int32_t, uint32_t, AtomicStore32SeqCst)
+JIT_STOREOP(uint32_t, uint32_t, AtomicStore32SeqCst)
+
+#ifdef JIT_STOREOP_CAS
+JIT_STOREOP_CAS(int64_t)
+JIT_STOREOP_CAS(uint64_t)
+#else
+JIT_STOREOP(int64_t, uint64_t, AtomicStore64SeqCst)
+JIT_STOREOP(uint64_t, uint64_t, AtomicStore64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_STOREOP
+#undef JIT_STOREOP_CAS
+
+#define JIT_EXCHANGEOP(T, U, xchgop) \
+ template <> \
+ inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ return (T)xchgop((U*)addr, (U)val); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_EXCHANGEOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::exchangeSeqCst(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ AtomicCompilerFence(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst((uint64_t*)addr, \
+ (uint64_t)oldval, (uint64_t)val); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ return oldval; \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_EXCHANGEOP(int8_t, uint8_t, AtomicExchange8SeqCst)
+JIT_EXCHANGEOP(uint8_t, uint8_t, AtomicExchange8SeqCst)
+JIT_EXCHANGEOP(int16_t, uint16_t, AtomicExchange16SeqCst)
+JIT_EXCHANGEOP(uint16_t, uint16_t, AtomicExchange16SeqCst)
+JIT_EXCHANGEOP(int32_t, uint32_t, AtomicExchange32SeqCst)
+JIT_EXCHANGEOP(uint32_t, uint32_t, AtomicExchange32SeqCst)
+
+#ifdef JIT_EXCHANGEOP_CAS
+JIT_EXCHANGEOP_CAS(int64_t)
+JIT_EXCHANGEOP_CAS(uint64_t)
+#else
+JIT_EXCHANGEOP(int64_t, uint64_t, AtomicExchange64SeqCst)
+JIT_EXCHANGEOP(uint64_t, uint64_t, AtomicExchange64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_EXCHANGEOP
+#undef JIT_EXCHANGEOP_CAS
+
+#define JIT_CAS(T, U, cmpxchg) \
+ template <> \
+ inline T AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, \
+ T newval) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ return (T)cmpxchg((U*)addr, (U)oldval, (U)newval); \
+ }
+
+namespace js {
+namespace jit {
+
+JIT_CAS(int8_t, uint8_t, AtomicCmpXchg8SeqCst)
+JIT_CAS(uint8_t, uint8_t, AtomicCmpXchg8SeqCst)
+JIT_CAS(int16_t, uint16_t, AtomicCmpXchg16SeqCst)
+JIT_CAS(uint16_t, uint16_t, AtomicCmpXchg16SeqCst)
+JIT_CAS(int32_t, uint32_t, AtomicCmpXchg32SeqCst)
+JIT_CAS(uint32_t, uint32_t, AtomicCmpXchg32SeqCst)
+JIT_CAS(int64_t, uint64_t, AtomicCmpXchg64SeqCst)
+JIT_CAS(uint64_t, uint64_t, AtomicCmpXchg64SeqCst)
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_CAS
+
+#define JIT_FETCHADDOP(T, U, xadd) \
+ template <> \
+ inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ return (T)xadd((U*)addr, (U)val); \
+ }
+
+#define JIT_FETCHSUBOP(T) \
+ template <> \
+ inline T AtomicOperations::fetchSubSeqCst(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ return fetchAddSeqCst(addr, (T)(0 - val)); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_FETCHADDOP_CAS(T) \
+ template <> \
+ inline T AtomicOperations::fetchAddSeqCst(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ AtomicCompilerFence(); \
+ T oldval = *addr; /* Good initial approximation */ \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst( \
+ (uint64_t*)addr, (uint64_t)oldval, (uint64_t)(oldval + val)); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ return oldval; \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_FETCHADDOP(int8_t, uint8_t, AtomicAdd8SeqCst)
+JIT_FETCHADDOP(uint8_t, uint8_t, AtomicAdd8SeqCst)
+JIT_FETCHADDOP(int16_t, uint16_t, AtomicAdd16SeqCst)
+JIT_FETCHADDOP(uint16_t, uint16_t, AtomicAdd16SeqCst)
+JIT_FETCHADDOP(int32_t, uint32_t, AtomicAdd32SeqCst)
+JIT_FETCHADDOP(uint32_t, uint32_t, AtomicAdd32SeqCst)
+
+#ifdef JIT_FETCHADDOP_CAS
+JIT_FETCHADDOP_CAS(int64_t)
+JIT_FETCHADDOP_CAS(uint64_t)
+#else
+JIT_FETCHADDOP(int64_t, uint64_t, AtomicAdd64SeqCst)
+JIT_FETCHADDOP(uint64_t, uint64_t, AtomicAdd64SeqCst)
+#endif
+
+JIT_FETCHSUBOP(int8_t)
+JIT_FETCHSUBOP(uint8_t)
+JIT_FETCHSUBOP(int16_t)
+JIT_FETCHSUBOP(uint16_t)
+JIT_FETCHSUBOP(int32_t)
+JIT_FETCHSUBOP(uint32_t)
+JIT_FETCHSUBOP(int64_t)
+JIT_FETCHSUBOP(uint64_t)
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_FETCHADDOP
+#undef JIT_FETCHADDOP_CAS
+#undef JIT_FETCHSUBOP
+
+#define JIT_FETCHBITOPX(T, U, name, op) \
+ template <> \
+ inline T AtomicOperations::name(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ return (T)op((U*)addr, (U)val); \
+ }
+
+#define JIT_FETCHBITOP(T, U, andop, orop, xorop) \
+ JIT_FETCHBITOPX(T, U, fetchAndSeqCst, andop) \
+ JIT_FETCHBITOPX(T, U, fetchOrSeqCst, orop) \
+ JIT_FETCHBITOPX(T, U, fetchXorSeqCst, xorop)
+
+#ifndef JS_64BIT
+
+# define AND_OP &
+# define OR_OP |
+# define XOR_OP ^
+
+# define JIT_FETCHBITOPX_CAS(T, name, OP) \
+ template <> \
+ inline T AtomicOperations::name(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ AtomicCompilerFence(); \
+ T oldval = *addr; \
+ for (;;) { \
+ T nextval = (T)AtomicCmpXchg64SeqCst( \
+ (uint64_t*)addr, (uint64_t)oldval, (uint64_t)(oldval OP val)); \
+ if (nextval == oldval) { \
+ break; \
+ } \
+ oldval = nextval; \
+ } \
+ AtomicCompilerFence(); \
+ return oldval; \
+ }
+
+# define JIT_FETCHBITOP_CAS(T) \
+ JIT_FETCHBITOPX_CAS(T, fetchAndSeqCst, AND_OP) \
+ JIT_FETCHBITOPX_CAS(T, fetchOrSeqCst, OR_OP) \
+ JIT_FETCHBITOPX_CAS(T, fetchXorSeqCst, XOR_OP)
+
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_FETCHBITOP(int8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst,
+ AtomicXor8SeqCst)
+JIT_FETCHBITOP(uint8_t, uint8_t, AtomicAnd8SeqCst, AtomicOr8SeqCst,
+ AtomicXor8SeqCst)
+JIT_FETCHBITOP(int16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst,
+ AtomicXor16SeqCst)
+JIT_FETCHBITOP(uint16_t, uint16_t, AtomicAnd16SeqCst, AtomicOr16SeqCst,
+ AtomicXor16SeqCst)
+JIT_FETCHBITOP(int32_t, uint32_t, AtomicAnd32SeqCst, AtomicOr32SeqCst,
+ AtomicXor32SeqCst)
+JIT_FETCHBITOP(uint32_t, uint32_t, AtomicAnd32SeqCst, AtomicOr32SeqCst,
+ AtomicXor32SeqCst)
+
+#ifdef JIT_FETCHBITOP_CAS
+JIT_FETCHBITOP_CAS(int64_t)
+JIT_FETCHBITOP_CAS(uint64_t)
+#else
+JIT_FETCHBITOP(int64_t, uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst,
+ AtomicXor64SeqCst)
+JIT_FETCHBITOP(uint64_t, uint64_t, AtomicAnd64SeqCst, AtomicOr64SeqCst,
+ AtomicXor64SeqCst)
+#endif
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_FETCHBITOPX_CAS
+#undef JIT_FETCHBITOPX
+#undef JIT_FETCHBITOP_CAS
+#undef JIT_FETCHBITOP
+
+#define JIT_LOADSAFE(T, U, loadop) \
+ template <> \
+ inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ union { \
+ U u; \
+ T t; \
+ }; \
+ u = loadop((U*)addr); \
+ return t; \
+ }
+
+#ifndef JS_64BIT
+# define JIT_LOADSAFE_TEARING(T) \
+ template <> \
+ inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ MOZ_ASSERT(sizeof(T) == 8); \
+ union { \
+ uint32_t u[2]; \
+ T t; \
+ }; \
+ uint32_t* ptr = (uint32_t*)addr; \
+ u[0] = AtomicLoad32Unsynchronized(ptr); \
+ u[1] = AtomicLoad32Unsynchronized(ptr + 1); \
+ return t; \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_LOADSAFE(int8_t, uint8_t, AtomicLoad8Unsynchronized)
+JIT_LOADSAFE(uint8_t, uint8_t, AtomicLoad8Unsynchronized)
+JIT_LOADSAFE(int16_t, uint16_t, AtomicLoad16Unsynchronized)
+JIT_LOADSAFE(uint16_t, uint16_t, AtomicLoad16Unsynchronized)
+JIT_LOADSAFE(int32_t, uint32_t, AtomicLoad32Unsynchronized)
+JIT_LOADSAFE(uint32_t, uint32_t, AtomicLoad32Unsynchronized)
+#ifdef JIT_LOADSAFE_TEARING
+JIT_LOADSAFE_TEARING(int64_t)
+JIT_LOADSAFE_TEARING(uint64_t)
+JIT_LOADSAFE_TEARING(double)
+#else
+JIT_LOADSAFE(int64_t, uint64_t, AtomicLoad64Unsynchronized)
+JIT_LOADSAFE(uint64_t, uint64_t, AtomicLoad64Unsynchronized)
+JIT_LOADSAFE(double, uint64_t, AtomicLoad64Unsynchronized)
+#endif
+JIT_LOADSAFE(float, uint32_t, AtomicLoad32Unsynchronized)
+
+// Clang requires a specialization for uint8_clamped.
+template <>
+inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
+ uint8_clamped* addr) {
+ return uint8_clamped(loadSafeWhenRacy((uint8_t*)addr));
+}
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_LOADSAFE
+#undef JIT_LOADSAFE_TEARING
+
+#define JIT_STORESAFE(T, U, storeop) \
+ template <> \
+ inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ union { \
+ U u; \
+ T t; \
+ }; \
+ t = val; \
+ storeop((U*)addr, u); \
+ }
+
+#ifndef JS_64BIT
+# define JIT_STORESAFE_TEARING(T) \
+ template <> \
+ inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) { \
+ JS::AutoSuppressGCAnalysis nogc; \
+ union { \
+ uint32_t u[2]; \
+ T t; \
+ }; \
+ t = val; \
+ uint32_t* ptr = (uint32_t*)addr; \
+ AtomicStore32Unsynchronized(ptr, u[0]); \
+ AtomicStore32Unsynchronized(ptr + 1, u[1]); \
+ }
+#endif // !JS_64BIT
+
+namespace js {
+namespace jit {
+
+JIT_STORESAFE(int8_t, uint8_t, AtomicStore8Unsynchronized)
+JIT_STORESAFE(uint8_t, uint8_t, AtomicStore8Unsynchronized)
+JIT_STORESAFE(int16_t, uint16_t, AtomicStore16Unsynchronized)
+JIT_STORESAFE(uint16_t, uint16_t, AtomicStore16Unsynchronized)
+JIT_STORESAFE(int32_t, uint32_t, AtomicStore32Unsynchronized)
+JIT_STORESAFE(uint32_t, uint32_t, AtomicStore32Unsynchronized)
+#ifdef JIT_STORESAFE_TEARING
+JIT_STORESAFE_TEARING(int64_t)
+JIT_STORESAFE_TEARING(uint64_t)
+JIT_STORESAFE_TEARING(double)
+#else
+JIT_STORESAFE(int64_t, uint64_t, AtomicStore64Unsynchronized)
+JIT_STORESAFE(uint64_t, uint64_t, AtomicStore64Unsynchronized)
+JIT_STORESAFE(double, uint64_t, AtomicStore64Unsynchronized)
+#endif
+JIT_STORESAFE(float, uint32_t, AtomicStore32Unsynchronized)
+
+// Clang requires a specialization for uint8_clamped.
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
+ uint8_clamped val) {
+ storeSafeWhenRacy((uint8_t*)addr, (uint8_t)val);
+}
+
+} // namespace jit
+} // namespace js
+
+#undef JIT_STORESAFE
+#undef JIT_STORESAFE_TEARING
+
+inline void js::jit::AtomicOperations::memcpySafeWhenRacy(
+ void* dest, const void* src, size_t nbytes) {
+ JS::AutoSuppressGCAnalysis nogc;
+ MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+ MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+ AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+}
+
+inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ JS::AutoSuppressGCAnalysis nogc;
+ if ((char*)dest <= (char*)src) {
+ AtomicMemcpyDownUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+ } else {
+ AtomicMemcpyUpUnsynchronized((uint8_t*)dest, (const uint8_t*)src, nbytes);
+ }
+}
+
+namespace js {
+namespace jit {
+
+extern bool InitializeJittedAtomics();
+extern void ShutDownJittedAtomics();
+
+} // namespace jit
+} // namespace js
+
+inline bool js::jit::AtomicOperations::Initialize() {
+ return InitializeJittedAtomics();
+}
+
+inline void js::jit::AtomicOperations::ShutDown() { ShutDownJittedAtomics(); }
+
+#endif // jit_shared_AtomicOperations_shared_jit_h
diff --git a/js/src/jit/shared/CodeGenerator-shared-inl.h b/js/src/jit/shared/CodeGenerator-shared-inl.h
new file mode 100644
index 0000000000..984c7d048d
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -0,0 +1,329 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_inl_h
+#define jit_shared_CodeGenerator_shared_inl_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+#include "jit/JitFrames.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+static inline bool IsConstant(const LInt64Allocation& a) {
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue()) {
+ return true;
+ }
+ if (a.high().isConstantIndex()) {
+ return true;
+ }
+#else
+ if (a.value().isConstantValue()) {
+ return true;
+ }
+ if (a.value().isConstantIndex()) {
+ return true;
+ }
+#endif
+ return false;
+}
+
+static inline int32_t ToInt32(const LAllocation* a) {
+ if (a->isConstantValue()) {
+ return a->toConstant()->toInt32();
+ }
+ if (a->isConstantIndex()) {
+ return a->toConstantIndex()->index();
+ }
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t ToInt64(const LAllocation* a) {
+ if (a->isConstantValue()) {
+ return a->toConstant()->toInt64();
+ }
+ if (a->isConstantIndex()) {
+ return a->toConstantIndex()->index();
+ }
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline int64_t ToInt64(const LInt64Allocation& a) {
+#if JS_BITS_PER_WORD == 32
+ if (a.high().isConstantValue()) {
+ return a.high().toConstant()->toInt64();
+ }
+ if (a.high().isConstantIndex()) {
+ return a.high().toConstantIndex()->index();
+ }
+#else
+ if (a.value().isConstantValue()) {
+ return a.value().toConstant()->toInt64();
+ }
+ if (a.value().isConstantIndex()) {
+ return a.value().toConstantIndex()->index();
+ }
+#endif
+ MOZ_CRASH("this is not a constant!");
+}
+
+static inline double ToDouble(const LAllocation* a) {
+ return a->toConstant()->numberToDouble();
+}
+
+static inline bool ToBoolean(const LAllocation* a) {
+ return a->toConstant()->toBoolean();
+}
+
+static inline Register ToRegister(const LAllocation& a) {
+ MOZ_ASSERT(a.isGeneralReg());
+ return a.toGeneralReg()->reg();
+}
+
+static inline Register ToRegister(const LAllocation* a) {
+ return ToRegister(*a);
+}
+
+static inline Register ToRegister(const LDefinition* def) {
+ return ToRegister(*def->output());
+}
+
+static inline Register64 ToOutRegister64(LInstruction* ins) {
+#if JS_BITS_PER_WORD == 32
+ Register loReg = ToRegister(ins->getDef(INT64LOW_INDEX));
+ Register hiReg = ToRegister(ins->getDef(INT64HIGH_INDEX));
+ return Register64(hiReg, loReg);
+#else
+ return Register64(ToRegister(ins->getDef(0)));
+#endif
+}
+
+static inline Register64 ToRegister64(const LInt64Allocation& a) {
+#if JS_BITS_PER_WORD == 32
+ return Register64(ToRegister(a.high()), ToRegister(a.low()));
+#else
+ return Register64(ToRegister(a.value()));
+#endif
+}
+
+static inline Register64 ToRegister64(const LInt64Definition& a) {
+#if JS_BITS_PER_WORD == 32
+ return Register64(ToRegister(a.pointerHigh()), ToRegister(a.pointerLow()));
+#else
+ return Register64(ToRegister(a.pointer()));
+#endif
+}
+
+static inline Register ToTempRegisterOrInvalid(const LDefinition* def) {
+ if (def->isBogusTemp()) {
+ return InvalidReg;
+ }
+ return ToRegister(def);
+}
+
+static inline Register64 ToTempRegister64OrInvalid(
+ const LInt64Definition& def) {
+ if (def.isBogusTemp()) {
+ return Register64::Invalid();
+ }
+ return ToRegister64(def);
+}
+
+static inline Register ToTempUnboxRegister(const LDefinition* def) {
+ return ToTempRegisterOrInvalid(def);
+}
+
+static inline Register ToRegisterOrInvalid(const LDefinition* a) {
+ return a ? ToRegister(a) : InvalidReg;
+}
+
+static inline FloatRegister ToFloatRegister(const LAllocation& a) {
+ MOZ_ASSERT(a.isFloatReg());
+ return a.toFloatReg()->reg();
+}
+
+static inline FloatRegister ToFloatRegister(const LAllocation* a) {
+ return ToFloatRegister(*a);
+}
+
+static inline FloatRegister ToFloatRegister(const LDefinition* def) {
+ return ToFloatRegister(*def->output());
+}
+
+static inline FloatRegister ToTempFloatRegisterOrInvalid(
+ const LDefinition* def) {
+ if (def->isBogusTemp()) {
+ return InvalidFloatReg;
+ }
+ return ToFloatRegister(def);
+}
+
+static inline AnyRegister ToAnyRegister(const LAllocation& a) {
+ MOZ_ASSERT(a.isGeneralReg() || a.isFloatReg());
+ if (a.isGeneralReg()) {
+ return AnyRegister(ToRegister(a));
+ }
+ return AnyRegister(ToFloatRegister(a));
+}
+
+static inline AnyRegister ToAnyRegister(const LAllocation* a) {
+ return ToAnyRegister(*a);
+}
+
+static inline AnyRegister ToAnyRegister(const LDefinition* def) {
+ return ToAnyRegister(def->output());
+}
+
+static inline ValueOperand ToOutValue(LInstruction* ins) {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(ToRegister(ins->getDef(TYPE_INDEX)),
+ ToRegister(ins->getDef(PAYLOAD_INDEX)));
+#elif defined(JS_PUNBOX64)
+ return ValueOperand(ToRegister(ins->getDef(0)));
+#else
+# error "Unknown"
+#endif
+}
+
+static inline ValueOperand GetTempValue(Register type, Register payload) {
+#if defined(JS_NUNBOX32)
+ return ValueOperand(type, payload);
+#elif defined(JS_PUNBOX64)
+ (void)type;
+ return ValueOperand(payload);
+#else
+# error "Unknown"
+#endif
+}
+
+int32_t CodeGeneratorShared::ArgToStackOffset(int32_t slot) const {
+ return masm.framePushed() +
+ (gen->compilingWasm() ? sizeof(wasm::Frame) : sizeof(JitFrameLayout)) +
+ slot;
+}
+
+int32_t CodeGeneratorShared::SlotToStackOffset(int32_t slot) const {
+ MOZ_ASSERT(slot > 0 && slot <= int32_t(graph.localSlotCount()));
+ int32_t offset = masm.framePushed() - slot;
+ MOZ_ASSERT(offset >= 0);
+ return offset;
+}
+
+int32_t CodeGeneratorShared::StackOffsetToSlot(int32_t offset) const {
+ // See: SlotToStackOffset. This is used to convert pushed arguments
+ // to a slot index that safepoints can use.
+ //
+ // offset = framePushed - frameInitialAdjustment - slot
+ // offset + slot = framePushed - frameInitialAdjustment
+ // slot = framePushed - frameInitialAdjustment - offset
+ return masm.framePushed() - offset;
+}
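+
+// Worked example: with framePushed() == 64, SlotToStackOffset(40) == 24 and
+// StackOffsetToSlot(24) == 40, i.e. the two conversions are inverses.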
+
+// For argument construction for calls. Argslots are Value-sized.
+int32_t CodeGeneratorShared::StackOffsetOfPassedArg(int32_t slot) const {
+ // A slot of 0 is permitted only to calculate %esp offset for calls.
+ MOZ_ASSERT(slot >= 0 && slot <= int32_t(graph.argumentSlotCount()));
+ int32_t offset = masm.framePushed() - graph.paddedLocalSlotsSize() -
+ (slot * sizeof(Value));
+
+ // Passed arguments go below a function's local stack storage.
+ // When arguments are being pushed, there is nothing important on the stack.
+ // Therefore, it is safe to push the arguments down arbitrarily. Pushing
+ // by sizeof(Value) is desirable since everything on the stack is a Value.
+ // Note that paddedLocalSlotCount() aligns to at least a Value boundary
+ // specifically to support this.
+ MOZ_ASSERT(offset >= 0);
+ MOZ_ASSERT(offset % sizeof(Value) == 0);
+ return offset;
+}
+
+int32_t CodeGeneratorShared::ToStackOffset(LAllocation a) const {
+ if (a.isArgument()) {
+ return ArgToStackOffset(a.toArgument()->index());
+ }
+ return SlotToStackOffset(a.isStackSlot() ? a.toStackSlot()->slot()
+ : a.toStackArea()->base());
+}
+
+int32_t CodeGeneratorShared::ToStackOffset(const LAllocation* a) const {
+ return ToStackOffset(*a);
+}
+
+Address CodeGeneratorShared::ToAddress(const LAllocation& a) const {
+ MOZ_ASSERT(a.isMemory() || a.isStackArea());
+ if (useWasmStackArgumentAbi() && a.isArgument()) {
+ return Address(FramePointer, ToFramePointerOffset(a));
+ }
+ return Address(masm.getStackPointer(), ToStackOffset(&a));
+}
+
+Address CodeGeneratorShared::ToAddress(const LAllocation* a) const {
+ return ToAddress(*a);
+}
+
+int32_t CodeGeneratorShared::ToFramePointerOffset(LAllocation a) const {
+ MOZ_ASSERT(useWasmStackArgumentAbi());
+ MOZ_ASSERT(a.isArgument());
+ return a.toArgument()->index() + sizeof(wasm::Frame);
+}
+
+int32_t CodeGeneratorShared::ToFramePointerOffset(const LAllocation* a) const {
+ return ToFramePointerOffset(*a);
+}
+
+void CodeGeneratorShared::saveLive(LInstruction* ins) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PushRegsInMask(safepoint->liveRegs());
+}
+
+void CodeGeneratorShared::restoreLive(LInstruction* ins) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMask(safepoint->liveRegs());
+}
+
+void CodeGeneratorShared::restoreLiveIgnore(LInstruction* ins,
+ LiveRegisterSet ignore) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ masm.PopRegsInMaskIgnore(safepoint->liveRegs(), ignore);
+}
+
+LiveRegisterSet CodeGeneratorShared::liveVolatileRegs(LInstruction* ins) {
+ MOZ_ASSERT(!ins->isCall());
+ LSafepoint* safepoint = ins->safepoint();
+ LiveRegisterSet regs;
+ regs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(),
+ RegisterSet::Volatile());
+ return regs;
+}
+
+void CodeGeneratorShared::saveLiveVolatile(LInstruction* ins) {
+ LiveRegisterSet regs = liveVolatileRegs(ins);
+ masm.PushRegsInMask(regs);
+}
+
+void CodeGeneratorShared::restoreLiveVolatile(LInstruction* ins) {
+ LiveRegisterSet regs = liveVolatileRegs(ins);
+ masm.PopRegsInMask(regs);
+}
+
+inline bool CodeGeneratorShared::isGlobalObject(JSObject* object) {
+ // Calling object->is<GlobalObject>() is racy because it relies on checking
+ // the object's group, which can change while we are compiling off the main
+ // thread. Note that we only check for the script realm's global here.
+ return object == gen->realm->maybeGlobal();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_inl_h */
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
new file mode 100644
index 0000000000..12e48dfd56
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -0,0 +1,1204 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+#include "mozilla/DebugOnly.h"
+
+#include <utility>
+
+#include "jit/CodeGenerator.h"
+#include "jit/CompactBuffer.h"
+#include "jit/CompileInfo.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitcodeMap.h"
+#include "jit/JitFrames.h"
+#include "jit/JitSpewer.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+#include "jit/SafepointIndex.h"
+#include "js/Conversions.h"
+#include "util/Memory.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+
+namespace js {
+namespace jit {
+
+MacroAssembler& CodeGeneratorShared::ensureMasm(MacroAssembler* masmArg) {
+ if (masmArg) {
+ return *masmArg;
+ }
+ maybeMasm_.emplace();
+ return *maybeMasm_;
+}
+
+CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masmArg)
+ : maybeMasm_(),
+ useWasmStackArgumentAbi_(false),
+ masm(ensureMasm(masmArg)),
+ gen(gen),
+ graph(*graph),
+ current(nullptr),
+ snapshots_(),
+ recovers_(),
+ deoptTable_(),
+#ifdef DEBUG
+ pushedArgs_(0),
+#endif
+ lastOsiPointOffset_(0),
+ safepoints_(graph->totalSlotCount(),
+ (gen->outerInfo().nargs() + 1) * sizeof(Value)),
+ returnLabel_(),
+ nativeToBytecodeMap_(nullptr),
+ nativeToBytecodeMapSize_(0),
+ nativeToBytecodeTableOffset_(0),
+ nativeToBytecodeNumRegions_(0),
+ nativeToBytecodeScriptList_(nullptr),
+ nativeToBytecodeScriptListLength_(0),
+#ifdef CHECK_OSIPOINT_REGISTERS
+ checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
+#endif
+ frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
+ frameClass_(FrameSizeClass::None()) {
+ if (gen->isProfilerInstrumentationEnabled()) {
+ masm.enableProfilingInstrumentation();
+ }
+
+ if (gen->compilingWasm()) {
+ // Since wasm uses the system ABI which does not necessarily use a
+ // regular array where all slots are sizeof(Value), it maintains the max
+ // argument stack depth separately.
+ MOZ_ASSERT(graph->argumentSlotCount() == 0);
+ frameDepth_ += gen->wasmMaxStackArgBytes();
+
+#ifdef ENABLE_WASM_SIMD
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
+ defined(JS_CODEGEN_ARM64)
+ // On X64/x86 and ARM64, we don't need alignment for Wasm SIMD at this time.
+# else
+# error \
+ "we may need padding so that local slots are SIMD-aligned and the stack must be kept SIMD-aligned too."
+# endif
+#endif
+
+ if (gen->needsStaticStackAlignment()) {
+ // An MWasmCall does not align the stack pointer at call sites but
+ // instead relies on the a priori stack adjustment. This must be the
+ // last adjustment of frameDepth_.
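+ // For example, assuming ComputeByteAlignment returns the padding needed to
+ // reach the requested alignment: with (hypothetically) sizeof(wasm::Frame)
+ // == 16, frameDepth_ == 40 and WasmStackAlignment == 16, the padding is
+ // ComputeByteAlignment(56, 16) == 8, so frameDepth_ becomes 48 and
+ // sizeof(wasm::Frame) + frameDepth_ is a multiple of WasmStackAlignment.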
+ frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
+ WasmStackAlignment);
+ }
+
+ // FrameSizeClass is only used for bailing, which cannot happen in
+ // wasm code.
+ MOZ_ASSERT(frameClass_ == FrameSizeClass::None());
+ } else {
+ frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
+ }
+}
+
+bool CodeGeneratorShared::generatePrologue() {
+ MOZ_ASSERT(masm.framePushed() == 0);
+ MOZ_ASSERT(!gen->compilingWasm());
+
+#ifdef JS_USE_LINK_REGISTER
+ masm.pushReturnAddress();
+#endif
+
+ // If profiling, save the current frame pointer to a per-thread global field.
+ if (isProfilerInstrumentationEnabled()) {
+ masm.profilerEnterFrame(masm.getStackPointer(), CallTempReg0);
+ }
+
+ // Ensure that the Ion frame is properly aligned.
+ masm.assertStackAlignment(JitStackAlignment, 0);
+
+ // Note that this automatically sets MacroAssembler::framePushed().
+ masm.reserveStack(frameSize());
+ masm.checkStackAlignment();
+
+ if (JS::TraceLoggerSupported()) {
+ emitTracelogIonStart();
+ }
+
+ return true;
+}
+
+bool CodeGeneratorShared::generateEpilogue() {
+ MOZ_ASSERT(!gen->compilingWasm());
+ masm.bind(&returnLabel_);
+
+ if (JS::TraceLoggerSupported()) {
+ emitTracelogIonStop();
+ }
+
+ masm.freeStack(frameSize());
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ // If profiling, reset the per-thread global lastJitFrame to point to
+ // the previous frame.
+ if (isProfilerInstrumentationEnabled()) {
+ masm.profilerExitFrame();
+ }
+
+ masm.ret();
+
+  // On systems that use a constant pool, this is a good time to emit it.
+ masm.flushBuffer();
+ return true;
+}
+
+bool CodeGeneratorShared::generateOutOfLineCode() {
+ // OOL paths should not attempt to use |current| as it's the last block
+ // instead of the block corresponding to the OOL path.
+ current = nullptr;
+
+ for (size_t i = 0; i < outOfLineCode_.length(); i++) {
+ // Add native => bytecode mapping entries for OOL sites.
+ // Not enabled on wasm yet since it doesn't contain bytecode mappings.
+ if (!gen->compilingWasm()) {
+ if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite())) {
+ return false;
+ }
+ }
+
+ if (!gen->alloc().ensureBallast()) {
+ return false;
+ }
+
+ JitSpew(JitSpew_Codegen, "# Emitting out of line code");
+
+ masm.setFramePushed(outOfLineCode_[i]->framePushed());
+ lastPC_ = outOfLineCode_[i]->pc();
+ outOfLineCode_[i]->bind(&masm);
+
+ outOfLineCode_[i]->generate(this);
+ }
+
+ return !masm.oom();
+}
+
+void CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code,
+ const MInstruction* mir) {
+ MOZ_ASSERT(mir);
+ addOutOfLineCode(code, mir->trackedSite());
+}
+
+void CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code,
+ const BytecodeSite* site) {
+ code->setFramePushed(masm.framePushed());
+ code->setBytecodeSite(site);
+ MOZ_ASSERT_IF(!gen->compilingWasm(), code->script()->containsPC(code->pc()));
+ masm.propagateOOM(outOfLineCode_.append(code));
+}
+
+bool CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site) {
+ // Skip the table entirely if profiling is not enabled.
+ if (!isProfilerInstrumentationEnabled()) {
+ return true;
+ }
+
+  // Fail early if the last added instruction caused the macro assembler to
+  // run out of memory, as the continuity assumptions below do not hold.
+ if (masm.oom()) {
+ return false;
+ }
+
+ MOZ_ASSERT(site);
+ MOZ_ASSERT(site->tree());
+ MOZ_ASSERT(site->pc());
+
+ InlineScriptTree* tree = site->tree();
+ jsbytecode* pc = site->pc();
+ uint32_t nativeOffset = masm.currentOffset();
+
+ MOZ_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);
+
+ if (!nativeToBytecodeList_.empty()) {
+ size_t lastIdx = nativeToBytecodeList_.length() - 1;
+ NativeToBytecode& lastEntry = nativeToBytecodeList_[lastIdx];
+
+ MOZ_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());
+
+ // If the new entry is for the same inlineScriptTree and same
+ // bytecodeOffset, but the nativeOffset has changed, do nothing.
+ // The same site just generated some more code.
+ if (lastEntry.tree == tree && lastEntry.pc == pc) {
+ JitSpew(JitSpew_Profiling, " => In-place update [%zu-%" PRIu32 "]",
+ lastEntry.nativeOffset.offset(), nativeOffset);
+ return true;
+ }
+
+ // If the new entry is for the same native offset, then update the
+ // previous entry with the new bytecode site, since the previous
+ // bytecode site did not generate any native code.
+ if (lastEntry.nativeOffset.offset() == nativeOffset) {
+ lastEntry.tree = tree;
+ lastEntry.pc = pc;
+ JitSpew(JitSpew_Profiling, " => Overwriting zero-length native region.");
+
+ // This overwrite might have made the entry merge-able with a
+ // previous one. If so, merge it.
+ if (lastIdx > 0) {
+ NativeToBytecode& nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
+ if (nextToLastEntry.tree == lastEntry.tree &&
+ nextToLastEntry.pc == lastEntry.pc) {
+ JitSpew(JitSpew_Profiling, " => Merging with previous region");
+ nativeToBytecodeList_.erase(&lastEntry);
+ }
+ }
+
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+ }
+ }
+
+ // Otherwise, some native code was generated for the previous bytecode site.
+ // Add a new entry for code that is about to be generated.
+ NativeToBytecode entry;
+ entry.nativeOffset = CodeOffset(nativeOffset);
+ entry.tree = tree;
+ entry.pc = pc;
+ if (!nativeToBytecodeList_.append(entry)) {
+ return false;
+ }
+
+ JitSpew(JitSpew_Profiling, " => Push new entry.");
+ dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
+ return true;
+}
+
+void CodeGeneratorShared::dumpNativeToBytecodeEntries() {
+#ifdef JS_JITSPEW
+ InlineScriptTree* topTree = gen->outerInfo().inlineScriptTree();
+ JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%u:%u\n",
+ topTree->script()->filename(), topTree->script()->lineno(),
+ topTree->script()->column());
+ for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++) {
+ dumpNativeToBytecodeEntry(i);
+ }
+#endif
+}
+
+void CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx) {
+#ifdef JS_JITSPEW
+ NativeToBytecode& ref = nativeToBytecodeList_[idx];
+ InlineScriptTree* tree = ref.tree;
+ JSScript* script = tree->script();
+ uint32_t nativeOffset = ref.nativeOffset.offset();
+ unsigned nativeDelta = 0;
+ unsigned pcDelta = 0;
+ if (idx + 1 < nativeToBytecodeList_.length()) {
+ NativeToBytecode* nextRef = &ref + 1;
+ nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
+ if (nextRef->tree == ref.tree) {
+ pcDelta = nextRef->pc - ref.pc;
+ }
+ }
+ JitSpewStart(
+ JitSpew_Profiling, " %08zx [+%-6u] => %-6ld [%-4u] {%-10s} (%s:%u:%u",
+ ref.nativeOffset.offset(), nativeDelta, (long)(ref.pc - script->code()),
+ pcDelta, CodeName(JSOp(*ref.pc)), script->filename(), script->lineno(),
+ script->column());
+
+ for (tree = tree->caller(); tree; tree = tree->caller()) {
+ JitSpewCont(JitSpew_Profiling, " <= %s:%u:%u", tree->script()->filename(),
+ tree->script()->lineno(), tree->script()->column());
+ }
+ JitSpewCont(JitSpew_Profiling, ")");
+ JitSpewFin(JitSpew_Profiling);
+#endif
+}
+
+// see OffsetOfFrameSlot
+static inline int32_t ToStackIndex(LAllocation* a) {
+ if (a->isStackSlot()) {
+ MOZ_ASSERT(a->toStackSlot()->slot() >= 1);
+ return a->toStackSlot()->slot();
+ }
+ return -int32_t(sizeof(JitFrameLayout) + a->toArgument()->index());
+}
+
+void CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot,
+ MDefinition* mir,
+ uint32_t* allocIndex) {
+ if (mir->isBox()) {
+ mir = mir->toBox()->getOperand(0);
+ }
+
+ MIRType type = mir->isRecoveredOnBailout() ? MIRType::None
+ : mir->isUnused() ? MIRType::MagicOptimizedOut
+ : mir->type();
+
+ RValueAllocation alloc;
+
+ switch (type) {
+ case MIRType::None: {
+ MOZ_ASSERT(mir->isRecoveredOnBailout());
+ uint32_t index = 0;
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ MNode** it = recoverInfo->begin();
+ MNode** end = recoverInfo->end();
+ while (it != end && mir != *it) {
+ ++it;
+ ++index;
+ }
+
+ // This MDefinition is recovered, thus it should be listed in the
+ // LRecoverInfo.
+ MOZ_ASSERT(it != end && mir == *it);
+
+      // Lambdas should have a default function value that is readable when
+      // iterating over the inner frames.
+ MConstant* functionOperand = nullptr;
+ if (mir->isLambda()) {
+ functionOperand = mir->toLambda()->functionOperand();
+ } else if (mir->isLambdaArrow()) {
+ functionOperand = mir->toLambdaArrow()->functionOperand();
+ } else if (mir->isFunctionWithProto()) {
+ functionOperand = mir->toFunctionWithProto()->functionOperand();
+ }
+ if (functionOperand) {
+ uint32_t cstIndex;
+ masm.propagateOOM(
+ graph.addConstantToPool(functionOperand->toJSValue(), &cstIndex));
+ alloc = RValueAllocation::RecoverInstruction(index, cstIndex);
+ break;
+ }
+
+ alloc = RValueAllocation::RecoverInstruction(index);
+ break;
+ }
+ case MIRType::Undefined:
+ alloc = RValueAllocation::Undefined();
+ break;
+ case MIRType::Null:
+ alloc = RValueAllocation::Null();
+ break;
+ case MIRType::Int32:
+ case MIRType::String:
+ case MIRType::Symbol:
+ case MIRType::BigInt:
+ case MIRType::Object:
+ case MIRType::Boolean:
+ case MIRType::Double: {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(
+ graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ JSValueType valueType = ValueTypeFromMIRType(type);
+
+ MOZ_DIAGNOSTIC_ASSERT(payload->isMemory() || payload->isRegister());
+ if (payload->isMemory()) {
+ alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
+ } else if (payload->isGeneralReg()) {
+ alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
+ } else if (payload->isFloatReg()) {
+ alloc = RValueAllocation::Double(ToFloatRegister(payload));
+ } else {
+ MOZ_CRASH("Unexpected payload type.");
+ }
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Simd128: {
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+ if (payload->isConstant()) {
+ MConstant* constant = mir->toConstant();
+ uint32_t index;
+ masm.propagateOOM(
+ graph.addConstantToPool(constant->toJSValue(), &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+
+ MOZ_ASSERT(payload->isMemory() || payload->isFloatReg());
+ if (payload->isFloatReg()) {
+ alloc = RValueAllocation::AnyFloat(ToFloatRegister(payload));
+ } else {
+ alloc = RValueAllocation::AnyFloat(ToStackIndex(payload));
+ }
+ break;
+ }
+ case MIRType::MagicOptimizedArguments:
+ case MIRType::MagicOptimizedOut:
+ case MIRType::MagicUninitializedLexical:
+ case MIRType::MagicIsConstructing: {
+ uint32_t index;
+ JSWhyMagic why = JS_GENERIC_MAGIC;
+ switch (type) {
+ case MIRType::MagicOptimizedArguments:
+ why = JS_OPTIMIZED_ARGUMENTS;
+ break;
+ case MIRType::MagicOptimizedOut:
+ why = JS_OPTIMIZED_OUT;
+ break;
+ case MIRType::MagicUninitializedLexical:
+ why = JS_UNINITIALIZED_LEXICAL;
+ break;
+ case MIRType::MagicIsConstructing:
+ why = JS_IS_CONSTRUCTING;
+ break;
+ default:
+ MOZ_CRASH("Invalid Magic MIRType");
+ }
+
+ Value v = MagicValue(why);
+ masm.propagateOOM(graph.addConstantToPool(v, &index));
+ alloc = RValueAllocation::ConstantPool(index);
+ break;
+ }
+ default: {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
+#ifdef JS_NUNBOX32
+ LAllocation* type = snapshot->typeOfSlot(*allocIndex);
+ if (type->isRegister()) {
+ if (payload->isRegister()) {
+ alloc =
+ RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
+ } else {
+ alloc = RValueAllocation::Untyped(ToRegister(type),
+ ToStackIndex(payload));
+ }
+ } else {
+ if (payload->isRegister()) {
+ alloc = RValueAllocation::Untyped(ToStackIndex(type),
+ ToRegister(payload));
+ } else {
+ alloc = RValueAllocation::Untyped(ToStackIndex(type),
+ ToStackIndex(payload));
+ }
+ }
+#elif JS_PUNBOX64
+ if (payload->isRegister()) {
+ alloc = RValueAllocation::Untyped(ToRegister(payload));
+ } else {
+ alloc = RValueAllocation::Untyped(ToStackIndex(payload));
+ }
+#endif
+ break;
+ }
+ }
+ MOZ_DIAGNOSTIC_ASSERT(alloc.valid());
+
+  // This sets an extra bit as part of the RValueAllocation, such that we know
+  // that the recover instruction has to be executed without wrapping the
+  // instruction in a no-op recover instruction.
+ if (mir->isIncompleteObject()) {
+ alloc.setNeedSideEffect();
+ }
+
+ masm.propagateOOM(snapshots_.add(alloc));
+
+ *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
+}
+
+void CodeGeneratorShared::encode(LRecoverInfo* recover) {
+ if (recover->recoverOffset() != INVALID_RECOVER_OFFSET) {
+ return;
+ }
+
+ uint32_t numInstructions = recover->numInstructions();
+ JitSpew(JitSpew_IonSnapshots,
+ "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
+ (void*)recover, recover->mir()->frameCount(), numInstructions);
+
+ MResumePoint::Mode mode = recover->mir()->mode();
+ MOZ_ASSERT(mode != MResumePoint::Outer);
+ bool resumeAfter = (mode == MResumePoint::ResumeAfter);
+
+ RecoverOffset offset = recovers_.startRecover(numInstructions, resumeAfter);
+
+ for (MNode* insn : *recover) {
+ recovers_.writeInstruction(insn);
+ }
+
+ recovers_.endRecover();
+ recover->setRecoverOffset(offset);
+ masm.propagateOOM(!recovers_.oom());
+}
+
+void CodeGeneratorShared::encode(LSnapshot* snapshot) {
+ if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET) {
+ return;
+ }
+
+ LRecoverInfo* recoverInfo = snapshot->recoverInfo();
+ encode(recoverInfo);
+
+ RecoverOffset recoverOffset = recoverInfo->recoverOffset();
+ MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);
+
+ JitSpew(JitSpew_IonSnapshots, "Encoding LSnapshot %p (LRecover %p)",
+ (void*)snapshot, (void*)recoverInfo);
+
+ SnapshotOffset offset =
+ snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());
+
+#ifdef TRACK_SNAPSHOTS
+ uint32_t pcOpcode = 0;
+ uint32_t lirOpcode = 0;
+ uint32_t lirId = 0;
+ uint32_t mirOpcode = 0;
+ uint32_t mirId = 0;
+
+ if (LNode* ins = instruction()) {
+ lirOpcode = uint32_t(ins->op());
+ lirId = ins->id();
+ if (ins->mirRaw()) {
+ mirOpcode = uint32_t(ins->mirRaw()->op());
+ mirId = ins->mirRaw()->id();
+ if (ins->mirRaw()->trackedPc()) {
+ pcOpcode = *ins->mirRaw()->trackedPc();
+ }
+ }
+ }
+ snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
+#endif
+
+ uint32_t allocIndex = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
+ encodeAllocation(snapshot, *it, &allocIndex);
+ MOZ_ASSERT_IF(!snapshots_.oom(),
+ allocWritten + 1 == snapshots_.allocWritten());
+ }
+
+ MOZ_ASSERT(allocIndex == snapshot->numSlots());
+ snapshots_.endSnapshot();
+ snapshot->setSnapshotOffset(offset);
+ masm.propagateOOM(!snapshots_.oom());
+}
+
+bool CodeGeneratorShared::assignBailoutId(LSnapshot* snapshot) {
+ MOZ_ASSERT(snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET);
+
+  // Bailout tables cannot be used at all without a deopt table.
+ if (!deoptTable_) {
+ return false;
+ }
+
+ MOZ_ASSERT(frameClass_ != FrameSizeClass::None());
+
+ if (snapshot->bailoutId() != INVALID_BAILOUT_ID) {
+ return true;
+ }
+
+ // Is the bailout table full?
+ if (bailouts_.length() >= BAILOUT_TABLE_SIZE) {
+ return false;
+ }
+
+ unsigned bailoutId = bailouts_.length();
+ snapshot->setBailoutId(bailoutId);
+ JitSpew(JitSpew_IonSnapshots, "Assigned snapshot bailout id %u", bailoutId);
+ masm.propagateOOM(bailouts_.append(snapshot->snapshotOffset()));
+ return true;
+}
+
+bool CodeGeneratorShared::encodeSafepoints() {
+ for (CodegenSafepointIndex& index : safepointIndices_) {
+ LSafepoint* safepoint = index.safepoint();
+
+ if (!safepoint->encoded()) {
+ safepoints_.encode(safepoint);
+ }
+ }
+
+ return !safepoints_.oom();
+}
+
+bool CodeGeneratorShared::createNativeToBytecodeScriptList(JSContext* cx) {
+ js::Vector<JSScript*, 0, SystemAllocPolicy> scriptList;
+ InlineScriptTree* tree = gen->outerInfo().inlineScriptTree();
+ for (;;) {
+ // Add script from current tree.
+ bool found = false;
+ for (uint32_t i = 0; i < scriptList.length(); i++) {
+ if (scriptList[i] == tree->script()) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ if (!scriptList.append(tree->script())) {
+ return false;
+ }
+ }
+
+    // Process the rest of the tree.
+
+ // If children exist, emit children.
+ if (tree->hasChildren()) {
+ tree = tree->firstChild();
+ continue;
+ }
+
+ // Otherwise, find the first tree up the chain (including this one)
+ // that contains a next sibling.
+ while (!tree->hasNextCallee() && tree->hasCaller()) {
+ tree = tree->caller();
+ }
+
+ // If we found a sibling, use it.
+ if (tree->hasNextCallee()) {
+ tree = tree->nextCallee();
+ continue;
+ }
+
+ // Otherwise, we must have reached the top without finding any siblings.
+ MOZ_ASSERT(tree->isOutermostCaller());
+ break;
+ }
+
+ // Allocate array for list.
+ JSScript** data = cx->pod_malloc<JSScript*>(scriptList.length());
+ if (!data) {
+ return false;
+ }
+
+ for (uint32_t i = 0; i < scriptList.length(); i++) {
+ data[i] = scriptList[i];
+ }
+
+ // Success.
+ nativeToBytecodeScriptListLength_ = scriptList.length();
+ nativeToBytecodeScriptList_ = data;
+ return true;
+}
+
+bool CodeGeneratorShared::generateCompactNativeToBytecodeMap(JSContext* cx,
+ JitCode* code) {
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ == 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ == nullptr);
+ MOZ_ASSERT(nativeToBytecodeMap_ == nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ == 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ == 0);
+ MOZ_ASSERT(nativeToBytecodeNumRegions_ == 0);
+
+ if (!createNativeToBytecodeScriptList(cx)) {
+ return false;
+ }
+
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
+
+ CompactBufferWriter writer;
+ uint32_t tableOffset = 0;
+ uint32_t numRegions = 0;
+
+ if (!JitcodeIonTable::WriteIonTable(
+ writer, nativeToBytecodeScriptList_,
+ nativeToBytecodeScriptListLength_, &nativeToBytecodeList_[0],
+ &nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
+ &tableOffset, &numRegions)) {
+ js_free(nativeToBytecodeScriptList_);
+ return false;
+ }
+
+ MOZ_ASSERT(tableOffset > 0);
+ MOZ_ASSERT(numRegions > 0);
+
+  // The writer is done; copy its output to a sized buffer.
+ uint8_t* data = cx->pod_malloc<uint8_t>(writer.length());
+ if (!data) {
+ js_free(nativeToBytecodeScriptList_);
+ return false;
+ }
+
+ memcpy(data, writer.buffer(), writer.length());
+ nativeToBytecodeMap_ = data;
+ nativeToBytecodeMapSize_ = writer.length();
+ nativeToBytecodeTableOffset_ = tableOffset;
+ nativeToBytecodeNumRegions_ = numRegions;
+
+ verifyCompactNativeToBytecodeMap(code);
+
+ JitSpew(JitSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]", data,
+ data + nativeToBytecodeMapSize_);
+
+ return true;
+}
+
+void CodeGeneratorShared::verifyCompactNativeToBytecodeMap(JitCode* code) {
+#ifdef DEBUG
+ MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
+ MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
+ MOZ_ASSERT(nativeToBytecodeMap_ != nullptr);
+ MOZ_ASSERT(nativeToBytecodeMapSize_ > 0);
+ MOZ_ASSERT(nativeToBytecodeTableOffset_ > 0);
+ MOZ_ASSERT(nativeToBytecodeNumRegions_ > 0);
+
+ // The pointer to the table must be 4-byte aligned
+ const uint8_t* tablePtr = nativeToBytecodeMap_ + nativeToBytecodeTableOffset_;
+ MOZ_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);
+
+ // Verify that numRegions was encoded correctly.
+ const JitcodeIonTable* ionTable =
+ reinterpret_cast<const JitcodeIonTable*>(tablePtr);
+ MOZ_ASSERT(ionTable->numRegions() == nativeToBytecodeNumRegions_);
+
+  // The region offset for the first region should be at the start of the
+  // payload region. Since the offsets are backward from the start of the
+  // table, the first entry's back-offset should be equal to the forward
+  // table offset from the start of the allocated data.
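+  // For example, if the region payload occupied the first 120 bytes of the
+  // map and the table started right after it, nativeToBytecodeTableOffset_
+  // and regionOffset(0) would both be 120, and later regions would have
+  // strictly smaller back-offsets.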
+ MOZ_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);
+
+ // Verify each region.
+ for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
+ // Back-offset must point into the payload region preceding the table, not
+ // before it.
+ MOZ_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);
+
+    // The back-offset must point to a later area in the payload region than
+    // the previous back-offset. This means that back-offsets decrease
+    // monotonically.
+ MOZ_ASSERT_IF(i > 0,
+ ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));
+
+ JitcodeRegionEntry entry = ionTable->regionEntry(i);
+
+ // Ensure native code offset for region falls within jitcode.
+ MOZ_ASSERT(entry.nativeOffset() <= code->instructionsSize());
+
+ // Read out script/pc stack and verify.
+ JitcodeRegionEntry::ScriptPcIterator scriptPcIter =
+ entry.scriptPcIterator();
+ while (scriptPcIter.hasMore()) {
+ uint32_t scriptIdx = 0, pcOffset = 0;
+ scriptPcIter.readNext(&scriptIdx, &pcOffset);
+
+ // Ensure scriptIdx refers to a valid script in the list.
+ MOZ_ASSERT(scriptIdx < nativeToBytecodeScriptListLength_);
+ JSScript* script = nativeToBytecodeScriptList_[scriptIdx];
+
+ // Ensure pcOffset falls within the script.
+ MOZ_ASSERT(pcOffset < script->length());
+ }
+
+ // Obtain the original nativeOffset and pcOffset and script.
+ uint32_t curNativeOffset = entry.nativeOffset();
+ JSScript* script = nullptr;
+ uint32_t curPcOffset = 0;
+ {
+ uint32_t scriptIdx = 0;
+ scriptPcIter.reset();
+ scriptPcIter.readNext(&scriptIdx, &curPcOffset);
+ script = nativeToBytecodeScriptList_[scriptIdx];
+ }
+
+ // Read out nativeDeltas and pcDeltas and verify.
+ JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
+ while (deltaIter.hasMore()) {
+ uint32_t nativeDelta = 0;
+ int32_t pcDelta = 0;
+ deltaIter.readNext(&nativeDelta, &pcDelta);
+
+ curNativeOffset += nativeDelta;
+ curPcOffset = uint32_t(int32_t(curPcOffset) + pcDelta);
+
+ // Ensure that nativeOffset still falls within jitcode after delta.
+ MOZ_ASSERT(curNativeOffset <= code->instructionsSize());
+
+ // Ensure that pcOffset still falls within bytecode after delta.
+ MOZ_ASSERT(curPcOffset < script->length());
+ }
+ }
+#endif // DEBUG
+}
+
+void CodeGeneratorShared::markSafepoint(LInstruction* ins) {
+ markSafepointAt(masm.currentOffset(), ins);
+}
+
+void CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction* ins) {
+ MOZ_ASSERT_IF(
+ !safepointIndices_.empty() && !masm.oom(),
+ offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
+ masm.propagateOOM(safepointIndices_.append(
+ CodegenSafepointIndex(offset, ins->safepoint())));
+}
+
+void CodeGeneratorShared::ensureOsiSpace() {
+ // For a refresher, an invalidation point is of the form:
+ // 1: call <target>
+ // 2: ...
+ // 3: <osipoint>
+ //
+ // The four bytes *before* instruction 2 are overwritten with an offset.
+ // Callers must ensure that the instruction itself has enough bytes to
+ // support this.
+ //
+  // The bytes *at* instruction 3 are overwritten with an invalidation
+  // jump. These bytes may be in a completely different IR sequence, but
+ // represent the join point of the call out of the function.
+ //
+ // At points where we want to ensure that invalidation won't corrupt an
+ // important instruction, we make sure to pad with nops.
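+  //
+  // For example, if PatchWrite_NearCallSize() were 5 and only 2 bytes of
+  // code had been emitted since the last OSI point, 3 nops would be emitted
+  // below so that patching a near call at that OSI point cannot clobber the
+  // code that follows.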
+ if (masm.currentOffset() - lastOsiPointOffset_ <
+ Assembler::PatchWrite_NearCallSize()) {
+ int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
+ paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
+ for (int32_t i = 0; i < paddingSize; ++i) {
+ masm.nop();
+ }
+ }
+ MOZ_ASSERT_IF(!masm.oom(), masm.currentOffset() - lastOsiPointOffset_ >=
+ Assembler::PatchWrite_NearCallSize());
+ lastOsiPointOffset_ = masm.currentOffset();
+}
+
+uint32_t CodeGeneratorShared::markOsiPoint(LOsiPoint* ins) {
+ encode(ins->snapshot());
+ ensureOsiSpace();
+
+ uint32_t offset = masm.currentOffset();
+ SnapshotOffset so = ins->snapshot()->snapshotOffset();
+ masm.propagateOOM(osiIndices_.append(OsiIndex(offset, so)));
+
+ return offset;
+}
+
+class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared> {
+ FloatRegister src_;
+ Register dest_;
+ bool widenFloatToDouble_;
+ wasm::BytecodeOffset bytecodeOffset_;
+ bool preserveTls_;
+
+ public:
+ OutOfLineTruncateSlow(
+ FloatRegister src, Register dest, bool widenFloatToDouble = false,
+ wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset(),
+ bool preserveTls = false)
+ : src_(src),
+ dest_(dest),
+ widenFloatToDouble_(widenFloatToDouble),
+ bytecodeOffset_(bytecodeOffset),
+ preserveTls_(preserveTls) {}
+
+ void accept(CodeGeneratorShared* codegen) override {
+ codegen->visitOutOfLineTruncateSlow(this);
+ }
+ FloatRegister src() const { return src_; }
+ Register dest() const { return dest_; }
+ bool widenFloatToDouble() const { return widenFloatToDouble_; }
+ bool preserveTls() const { return preserveTls_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+OutOfLineCode* CodeGeneratorShared::oolTruncateDouble(
+ FloatRegister src, Register dest, MInstruction* mir,
+ wasm::BytecodeOffset bytecodeOffset, bool preserveTls) {
+ MOZ_ASSERT_IF(IsCompilingWasm(), bytecodeOffset.isValid());
+
+ OutOfLineTruncateSlow* ool = new (alloc()) OutOfLineTruncateSlow(
+ src, dest, /* float32 */ false, bytecodeOffset, preserveTls);
+ addOutOfLineCode(ool, mir);
+ return ool;
+}
+
+void CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest,
+ MInstruction* mir) {
+ MOZ_ASSERT(mir->isTruncateToInt32() || mir->isWasmBuiltinTruncateToInt32());
+ wasm::BytecodeOffset bytecodeOffset =
+ mir->isTruncateToInt32()
+ ? mir->toTruncateToInt32()->bytecodeOffset()
+ : mir->toWasmBuiltinTruncateToInt32()->bytecodeOffset();
+ OutOfLineCode* ool = oolTruncateDouble(src, dest, mir, bytecodeOffset);
+
+ masm.branchTruncateDoubleMaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest,
+ MInstruction* mir) {
+ MOZ_ASSERT(mir->isTruncateToInt32() || mir->isWasmBuiltinTruncateToInt32());
+ wasm::BytecodeOffset bytecodeOffset =
+ mir->isTruncateToInt32()
+ ? mir->toTruncateToInt32()->bytecodeOffset()
+ : mir->toWasmBuiltinTruncateToInt32()->bytecodeOffset();
+ OutOfLineTruncateSlow* ool = new (alloc())
+ OutOfLineTruncateSlow(src, dest, /* float32 */ true, bytecodeOffset);
+ addOutOfLineCode(ool, mir);
+
+ masm.branchTruncateFloat32MaybeModUint32(src, dest, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGeneratorShared::visitOutOfLineTruncateSlow(
+ OutOfLineTruncateSlow* ool) {
+ FloatRegister src = ool->src();
+ Register dest = ool->dest();
+
+ saveVolatile(dest);
+ masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(),
+ gen->compilingWasm(), ool->bytecodeOffset());
+ restoreVolatile(dest);
+
+ masm.jump(ool->rejoin());
+}
+
+bool CodeGeneratorShared::omitOverRecursedCheck() const {
+ // If the current function makes no calls (which means it isn't recursive)
+ // and it uses only a small amount of stack space, it doesn't need a
+ // stack overflow check. Note that the actual number here is somewhat
+ // arbitrary, and codegen actually uses small bounded amounts of
+ // additional stack space in some cases too.
+ return frameSize() < MAX_UNCHECKED_LEAF_FRAME_SIZE &&
+ !gen->needsOverrecursedCheck();
+}
+
+void CodeGeneratorShared::emitPreBarrier(Register elements,
+ const LAllocation* index) {
+ if (index->isConstant()) {
+ Address address(elements, ToInt32(index) * sizeof(Value));
+ masm.guardedCallPreBarrier(address, MIRType::Value);
+ } else {
+ BaseObjectElementIndex address(elements, ToRegister(index));
+ masm.guardedCallPreBarrier(address, MIRType::Value);
+ }
+}
+
+void CodeGeneratorShared::emitPreBarrier(Address address) {
+ masm.guardedCallPreBarrier(address, MIRType::Value);
+}
+
+void CodeGeneratorShared::jumpToBlock(MBasicBlock* mir) {
+ // Skip past trivial blocks.
+ mir = skipTrivialBlocks(mir);
+
+ // No jump necessary if we can fall through to the next block.
+ if (isNextBlock(mir->lir())) {
+ return;
+ }
+
+ masm.jump(mir->lir()->label());
+}
+
+Label* CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block) {
+ // Skip past trivial blocks.
+ return skipTrivialBlocks(block)->lir()->label();
+}
+
+// This function is not used for MIPS/MIPS64. MIPS has branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
+void CodeGeneratorShared::jumpToBlock(MBasicBlock* mir,
+ Assembler::Condition cond) {
+ // Skip past trivial blocks.
+ masm.j(cond, skipTrivialBlocks(mir)->lir()->label());
+}
+#endif
+
+ReciprocalMulConstants CodeGeneratorShared::computeDivisionConstants(
+ uint32_t d, int maxLog) {
+ MOZ_ASSERT(maxLog >= 2 && maxLog <= 32);
+ // In what follows, 0 < d < 2^maxLog and d is not a power of 2.
+ MOZ_ASSERT(d < (uint64_t(1) << maxLog) && (d & (d - 1)) != 0);
+
+ // Speeding up division by non power-of-2 constants is possible by
+ // calculating, during compilation, a value M such that high-order
+ // bits of M*n correspond to the result of the division of n by d.
+ // No value of M can serve this purpose for arbitrarily big values
+ // of n but, for optimizing integer division, we're just concerned
+ // with values of n whose absolute value is bounded (by fitting in
+ // an integer type, say). With this in mind, we'll find a constant
+ // M as above that works for -2^maxLog <= n < 2^maxLog; maxLog can
+ // then be 31 for signed division or 32 for unsigned division.
+ //
+  // The original presentation of this technique appears in Hacker's
+  // Delight, a book by Henry S. Warren, Jr. A proof of correctness
+ // for our version follows; we'll denote maxLog by L in the proof,
+ // for conciseness.
+ //
+ // Formally, for |d| < 2^L, we'll compute two magic values M and s
+ // in the ranges 0 <= M < 2^(L+1) and 0 <= s <= L such that
+ // (M * n) >> (32 + s) = floor(n/d) if 0 <= n < 2^L
+ // (M * n) >> (32 + s) = ceil(n/d) - 1 if -2^L <= n < 0.
+ //
+ // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
+ // M - 2^p/d <= 2^(p-L)/d. (1)
+ // (Observe that p = CeilLog32(d) + L satisfies this, as the right
+ // side of (1) is at least one in this case). Then,
+ //
+ // a) If p <= CeilLog32(d) + L, then M < 2^(L+1) - 1.
+ // Proof: Indeed, M is monotone in p and, for p equal to the above
+ // value, the bounds 2^L > d >= 2^(p-L-1) + 1 readily imply that
+ // 2^p / d < 2^p/(d - 1) * (d - 1)/d
+ // <= 2^(L+1) * (1 - 1/d) < 2^(L+1) - 2.
+ // The claim follows by applying the ceiling function.
+ //
+ // b) For any 0 <= n < 2^L, floor(Mn/2^p) = floor(n/d).
+ // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
+ // Mn/2^p - 1 < x <= Mn/2^p. (2)
+ // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
+ // n/d - 1 < x <= n/d + n/(2^L d) < n/d + 1/d.
+ // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
+ // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
+ //
+ // c) For any -2^L <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
+ // Proof: The proof is similar. Equation (2) holds as above. Using
+ // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
+ // n/d + n/(2^L d) - 1 < x < n/d.
+ // Using n >= -2^L and summing 1,
+ // n/d - 1/d < x + 1 < n/d + 1.
+ // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
+ // In other words, x + 1 = ceil(n/d).
+ //
+ // Condition (1) isn't necessary for the existence of M and s with
+ // the properties above. Hacker's Delight provides a slightly less
+ // restrictive condition when d >= 196611, at the cost of a 3-page
+ // proof of correctness, for the case L = 31.
+ //
+ // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
+ // 2^(p-L) >= d - (2^p)%d.
+ // In order to avoid overflow in the (2^p) % d calculation, we can
+ // compute it as (2^p-1) % d + 1, where 2^p-1 can then be computed
+ // without overflow as UINT64_MAX >> (64-p).
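+  //
+  // Worked example: for d = 7 and maxLog = 32 (unsigned division), the loop
+  // below stops at p = 35, since 2^35 % 7 == 4 and 2^(35-32) >= 7 - 4. Then
+  // M = ceil(2^35 / 7) = 4908534053 and s = 3, and indeed
+  // (4908534053 * 1000) >> 35 == 142 == floor(1000 / 7). Note that this M
+  // exceeds 2^32; that is the "too large" case discussed below.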
+
+ // We now compute the least p >= 32 with the property above...
+ int32_t p = 32;
+ while ((uint64_t(1) << (p - maxLog)) + (UINT64_MAX >> (64 - p)) % d + 1 < d) {
+ p++;
+ }
+
+ // ...and the corresponding M. For either the signed (L=31) or the
+ // unsigned (L=32) case, this value can be too large (cf. item a).
+ // Codegen can still multiply by M by multiplying by (M - 2^L) and
+ // adjusting the value afterwards, if this is the case.
+ ReciprocalMulConstants rmc;
+ rmc.multiplier = (UINT64_MAX >> (64 - p)) / d + 1;
+ rmc.shiftAmount = p - 32;
+
+ return rmc;
+}
+
+#ifdef JS_TRACE_LOGGING
+
+void CodeGeneratorShared::emitTracelogScript(bool isStart) {
+ if (!TraceLogTextIdEnabled(TraceLogger_Scripts)) {
+ return;
+ }
+
+ Label done;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register logger = regs.takeAnyGeneral();
+ Register script = regs.takeAnyGeneral();
+
+ masm.Push(logger);
+
+ masm.loadTraceLogger(logger);
+ masm.branchTestPtr(Assembler::Zero, logger, logger, &done);
+
+ Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ masm.Push(script);
+
+ CodeOffset patchScript = masm.movWithPatch(ImmWord(0), script);
+ masm.propagateOOM(patchableTLScripts_.append(patchScript));
+
+ if (isStart) {
+ masm.tracelogStartId(logger, script);
+ } else {
+ masm.tracelogStopId(logger, script);
+ }
+
+ masm.Pop(script);
+
+ masm.bind(&done);
+
+ masm.Pop(logger);
+}
+
+void CodeGeneratorShared::emitTracelogTree(bool isStart, uint32_t textId) {
+ if (!TraceLogTextIdEnabled(textId)) {
+ return;
+ }
+
+ Label done;
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register logger = regs.takeAnyGeneral();
+
+ masm.Push(logger);
+
+ masm.loadTraceLogger(logger);
+ masm.branchTestPtr(Assembler::Zero, logger, logger, &done);
+
+ Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ if (isStart) {
+ masm.tracelogStartId(logger, textId);
+ } else {
+ masm.tracelogStopId(logger, textId);
+ }
+
+ masm.bind(&done);
+
+ masm.Pop(logger);
+}
+
+void CodeGeneratorShared::emitTracelogTree(bool isStart, const char* text,
+ TraceLoggerTextId enabledTextId) {
+ if (!TraceLogTextIdEnabled(enabledTextId)) {
+ return;
+ }
+
+ Label done;
+
+ AllocatableRegisterSet regs(RegisterSet::Volatile());
+ Register loggerReg = regs.takeAnyGeneral();
+ Register eventReg = regs.takeAnyGeneral();
+
+ masm.Push(loggerReg);
+
+ masm.loadTraceLogger(loggerReg);
+ masm.branchTestPtr(Assembler::Zero, loggerReg, loggerReg, &done);
+
+ Address enabledAddress(loggerReg, TraceLoggerThread::offsetOfEnabled());
+ masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
+
+ masm.Push(eventReg);
+
+ PatchableTLEvent patchEvent(masm.movWithPatch(ImmWord(0), eventReg), text);
+ masm.propagateOOM(patchableTLEvents_.append(std::move(patchEvent)));
+
+ if (isStart) {
+ masm.tracelogStartId(loggerReg, eventReg);
+ } else {
+ masm.tracelogStopId(loggerReg, eventReg);
+ }
+
+ masm.Pop(eventReg);
+
+ masm.bind(&done);
+
+ masm.Pop(loggerReg);
+}
+#endif
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/shared/CodeGenerator-shared.h b/js/src/jit/shared/CodeGenerator-shared.h
new file mode 100644
index 0000000000..1f02f49c30
--- /dev/null
+++ b/js/src/jit/shared/CodeGenerator-shared.h
@@ -0,0 +1,579 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_CodeGenerator_shared_h
+#define jit_shared_CodeGenerator_shared_h
+
+#include "mozilla/Alignment.h"
+
+#include <utility>
+
+#include "jit/InlineScriptTree.h"
+#include "jit/JitcodeMap.h"
+#include "jit/LIR.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIRGenerator.h"
+#include "jit/MIRGraph.h"
+#include "jit/SafepointIndex.h"
+#include "jit/Safepoints.h"
+#include "jit/Snapshots.h"
+#include "vm/TraceLoggingTypes.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineCode;
+class CodeGenerator;
+class MacroAssembler;
+class IonIC;
+
+class OutOfLineTruncateSlow;
+
+struct ReciprocalMulConstants {
+ int64_t multiplier;
+ int32_t shiftAmount;
+};
+
+class CodeGeneratorShared : public LElementVisitor {
+ js::Vector<OutOfLineCode*, 0, SystemAllocPolicy> outOfLineCode_;
+
+ MacroAssembler& ensureMasm(MacroAssembler* masm);
+ mozilla::Maybe<IonHeapMacroAssembler> maybeMasm_;
+
+ bool useWasmStackArgumentAbi_;
+
+ public:
+ MacroAssembler& masm;
+
+ protected:
+ MIRGenerator* gen;
+ LIRGraph& graph;
+ LBlock* current;
+ SnapshotWriter snapshots_;
+ RecoverWriter recovers_;
+ mozilla::Maybe<TrampolinePtr> deoptTable_;
+#ifdef DEBUG
+ uint32_t pushedArgs_;
+#endif
+ uint32_t lastOsiPointOffset_;
+ SafepointWriter safepoints_;
+ Label invalidate_;
+ CodeOffset invalidateEpilogueData_;
+
+ // Label for the common return path.
+ NonAssertingLabel returnLabel_;
+
+ js::Vector<CodegenSafepointIndex, 0, SystemAllocPolicy> safepointIndices_;
+ js::Vector<OsiIndex, 0, SystemAllocPolicy> osiIndices_;
+
+ // Mapping from bailout table ID to an offset in the snapshot buffer.
+ js::Vector<SnapshotOffset, 0, SystemAllocPolicy> bailouts_;
+
+ // Allocated data space needed at runtime.
+ js::Vector<uint8_t, 0, SystemAllocPolicy> runtimeData_;
+
+ // Vector mapping each IC index to its offset in runtimeData_.
+ js::Vector<uint32_t, 0, SystemAllocPolicy> icList_;
+
+ // IC data we need at compile-time. Discarded after creating the IonScript.
+ struct CompileTimeICInfo {
+ CodeOffset icOffsetForJump;
+ CodeOffset icOffsetForPush;
+ };
+ js::Vector<CompileTimeICInfo, 0, SystemAllocPolicy> icInfo_;
+
+#ifdef JS_TRACE_LOGGING
+ struct PatchableTLEvent {
+ CodeOffset offset;
+ const char* event;
+ PatchableTLEvent(CodeOffset offset, const char* event)
+ : offset(offset), event(event) {}
+ };
+ js::Vector<PatchableTLEvent, 0, SystemAllocPolicy> patchableTLEvents_;
+ js::Vector<CodeOffset, 0, SystemAllocPolicy> patchableTLScripts_;
+#endif
+
+ protected:
+ js::Vector<NativeToBytecode, 0, SystemAllocPolicy> nativeToBytecodeList_;
+ uint8_t* nativeToBytecodeMap_;
+ uint32_t nativeToBytecodeMapSize_;
+ uint32_t nativeToBytecodeTableOffset_;
+ uint32_t nativeToBytecodeNumRegions_;
+
+ JSScript** nativeToBytecodeScriptList_;
+ uint32_t nativeToBytecodeScriptListLength_;
+
+ bool isProfilerInstrumentationEnabled() {
+ return gen->isProfilerInstrumentationEnabled();
+ }
+
+ bool stringsCanBeInNursery() const { return gen->stringsCanBeInNursery(); }
+
+ bool bigIntsCanBeInNursery() const { return gen->bigIntsCanBeInNursery(); }
+
+ protected:
+ // The offset of the first instruction of the OSR entry block from the
+ // beginning of the code buffer.
+ mozilla::Maybe<size_t> osrEntryOffset_ = {};
+
+ TempAllocator& alloc() const { return graph.mir().alloc(); }
+
+ void setOsrEntryOffset(size_t offset) { osrEntryOffset_.emplace(offset); }
+
+ size_t getOsrEntryOffset() const {
+ MOZ_RELEASE_ASSERT(osrEntryOffset_.isSome());
+ return *osrEntryOffset_;
+ }
+
+ typedef js::Vector<CodegenSafepointIndex, 8, SystemAllocPolicy>
+ SafepointIndices;
+
+ protected:
+#ifdef CHECK_OSIPOINT_REGISTERS
+ // See JitOptions.checkOsiPointRegisters. We set this here to avoid
+ // races when enableOsiPointRegisterChecks is called while we're generating
+ // code off-thread.
+ bool checkOsiPointRegisters;
+#endif
+
+ // The initial size of the frame in bytes. These are bytes beyond the
+ // constant header present for every Ion frame, used for pre-determined
+ // spills.
+ int32_t frameDepth_;
+
+ // Frame class this frame's size falls into (see IonFrame.h).
+ FrameSizeClass frameClass_;
+
+ // For arguments to the current function.
+ inline int32_t ArgToStackOffset(int32_t slot) const;
+
+ inline int32_t SlotToStackOffset(int32_t slot) const;
+ inline int32_t StackOffsetToSlot(int32_t offset) const;
+
+ // For argument construction for calls. Argslots are Value-sized.
+ inline int32_t StackOffsetOfPassedArg(int32_t slot) const;
+
+ inline int32_t ToStackOffset(LAllocation a) const;
+ inline int32_t ToStackOffset(const LAllocation* a) const;
+
+ inline Address ToAddress(const LAllocation& a) const;
+ inline Address ToAddress(const LAllocation* a) const;
+
+  // Returns the offset from FP at which incoming stack arguments are
+  // addressed when we use the wasm stack argument ABI
+  // (useWasmStackArgumentAbi()).
+ inline int32_t ToFramePointerOffset(LAllocation a) const;
+ inline int32_t ToFramePointerOffset(const LAllocation* a) const;
+
+ uint32_t frameSize() const {
+ return frameClass_ == FrameSizeClass::None() ? frameDepth_
+ : frameClass_.frameSize();
+ }
+
+ protected:
+ bool addNativeToBytecodeEntry(const BytecodeSite* site);
+ void dumpNativeToBytecodeEntries();
+ void dumpNativeToBytecodeEntry(uint32_t idx);
+
+ void setUseWasmStackArgumentAbi() { useWasmStackArgumentAbi_ = true; }
+
+ bool useWasmStackArgumentAbi() const { return useWasmStackArgumentAbi_; }
+
+ public:
+ MIRGenerator& mirGen() const { return *gen; }
+
+  // When appending to runtimeData_, the vector might realloc, leaving pointers
+  // into the original vector stale and unusable. DataPtr acts like a pointer,
+  // but provides safety in the face of potentially realloc'ing vector appends.
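+  //
+  // For example, a caller can hold |DataPtr<IonIC> ic(this, icIndex)| and
+  // keep dereferencing |ic| across later appends to runtimeData_, where a
+  // raw |IonIC*| could have been left dangling by a realloc.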
+ friend class DataPtr;
+ template <typename T>
+ class DataPtr {
+ CodeGeneratorShared* cg_;
+ size_t index_;
+
+ T* lookup() { return reinterpret_cast<T*>(&cg_->runtimeData_[index_]); }
+
+ public:
+ DataPtr(CodeGeneratorShared* cg, size_t index) : cg_(cg), index_(index) {}
+
+ T* operator->() { return lookup(); }
+ T* operator*() { return lookup(); }
+ };
+
+ protected:
+ [[nodiscard]] bool allocateData(size_t size, size_t* offset) {
+ MOZ_ASSERT(size % sizeof(void*) == 0);
+ *offset = runtimeData_.length();
+ masm.propagateOOM(runtimeData_.appendN(0, size));
+ return !masm.oom();
+ }
+
+ template <typename T>
+ inline size_t allocateIC(const T& cache) {
+ static_assert(std::is_base_of_v<IonIC, T>, "T must inherit from IonIC");
+ size_t index;
+ masm.propagateOOM(
+ allocateData(sizeof(mozilla::AlignedStorage2<T>), &index));
+ masm.propagateOOM(icList_.append(index));
+ masm.propagateOOM(icInfo_.append(CompileTimeICInfo()));
+ if (masm.oom()) {
+ return SIZE_MAX;
+ }
+ // Use the copy constructor on the allocated space.
+ MOZ_ASSERT(index == icList_.back());
+ new (&runtimeData_[index]) T(cache);
+ return index;
+ }
+
+ protected:
+ // Encodes an LSnapshot into the compressed snapshot buffer.
+ void encode(LRecoverInfo* recover);
+ void encode(LSnapshot* snapshot);
+ void encodeAllocation(LSnapshot* snapshot, MDefinition* def,
+ uint32_t* startIndex);
+
+ // Attempts to assign a BailoutId to a snapshot, if one isn't already set.
+ // If the bailout table is full, this returns false, which is not a fatal
+ // error (the code generator may use a slower bailout mechanism).
+ bool assignBailoutId(LSnapshot* snapshot);
+
+ // Encode all encountered safepoints in CG-order, and resolve |indices| for
+ // safepoint offsets.
+ bool encodeSafepoints();
+
+ // Fixup offsets of native-to-bytecode map.
+ bool createNativeToBytecodeScriptList(JSContext* cx);
+ bool generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code);
+ void verifyCompactNativeToBytecodeMap(JitCode* code);
+
+ // Mark the safepoint on |ins| as corresponding to the current assembler
+ // location. The location should be just after a call.
+ void markSafepoint(LInstruction* ins);
+ void markSafepointAt(uint32_t offset, LInstruction* ins);
+
+ // Mark the OSI point |ins| as corresponding to the current
+ // assembler location inside the |osiIndices_|. Return the assembler
+ // location for the OSI point return location.
+ uint32_t markOsiPoint(LOsiPoint* ins);
+
+ // Ensure that there is enough room between the last OSI point and the
+ // current instruction, such that:
+ // (1) Invalidation will not overwrite the current instruction, and
+ // (2) Overwriting the current instruction will not overwrite
+ // an invalidation marker.
+ void ensureOsiSpace();
+
+ OutOfLineCode* oolTruncateDouble(
+ FloatRegister src, Register dest, MInstruction* mir,
+ wasm::BytecodeOffset callOffset = wasm::BytecodeOffset(),
+ bool preserveTls = false);
+ void emitTruncateDouble(FloatRegister src, Register dest, MInstruction* mir);
+ void emitTruncateFloat32(FloatRegister src, Register dest, MInstruction* mir);
+
+ void emitPreBarrier(Register elements, const LAllocation* index);
+ void emitPreBarrier(Address address);
+
+ // We don't emit code for trivial blocks, so if we want to branch to the
+ // given block, and it's trivial, return the ultimate block we should
+ // actually branch directly to.
+ MBasicBlock* skipTrivialBlocks(MBasicBlock* block) {
+ while (block->lir()->isTrivial()) {
+ LGoto* ins = block->lir()->rbegin()->toGoto();
+ MOZ_ASSERT(ins->numSuccessors() == 1);
+ block = ins->getSuccessor(0);
+ }
+ return block;
+ }
+
+ // Test whether the given block can be reached via fallthrough from the
+ // current block.
+ inline bool isNextBlock(LBlock* block) {
+ uint32_t target = skipTrivialBlocks(block->mir())->id();
+ uint32_t i = current->mir()->id() + 1;
+ if (target < i) {
+ return false;
+ }
+ // Trivial blocks can be crossed via fallthrough.
+ for (; i != target; ++i) {
+ if (!graph.getBlock(i)->isTrivial()) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ protected:
+ // Save and restore all volatile registers to/from the stack, excluding the
+ // specified register(s), before a function call made using callWithABI and
+ // after storing the function call's return value to an output register.
+ // (The only registers that don't need to be saved/restored are 1) the
+ // temporary register used to store the return value of the function call,
+ // if there is one [otherwise that stored value would be overwritten]; and
+ // 2) temporary registers whose values aren't needed in the rest of the LIR
+ // instruction [this is purely an optimization]. All other volatiles must
+ // be saved and restored in case future LIR instructions need those values.)
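+  //
+  // An illustrative sequence (sketch only):
+  //   saveVolatile(output);
+  //   masm.callWithABI(...);          // may clobber all volatile registers
+  //   storePointerResultTo(output);   // |output| was excluded from the save
+  //   restoreVolatile(output);
+  // visitOutOfLineTruncateSlow() in CodeGenerator-shared.cpp follows this
+  // pattern around MacroAssembler::outOfLineTruncateSlow().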
+ void saveVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(Register output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PushRegsInMask(regs);
+ }
+ void restoreVolatile(FloatRegister output) {
+ LiveRegisterSet regs(RegisterSet::Volatile());
+ regs.takeUnchecked(output);
+ masm.PopRegsInMask(regs);
+ }
+ void saveVolatile(LiveRegisterSet temps) {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void restoreVolatile(LiveRegisterSet temps) {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::VolatileNot(temps.set())));
+ }
+ void saveVolatile() {
+ masm.PushRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
+ void restoreVolatile() {
+ masm.PopRegsInMask(LiveRegisterSet(RegisterSet::Volatile()));
+ }
+
+  // These functions have to be called before and after any callVM and before
+  // any modifications of the stack. Modifications of the stack made after
+  // these calls should update the framePushed variable, which is needed by
+  // the exit frame produced by callVM.
+ inline void saveLive(LInstruction* ins);
+ inline void restoreLive(LInstruction* ins);
+ inline void restoreLiveIgnore(LInstruction* ins, LiveRegisterSet reg);
+
+ // Get/save/restore all registers that are both live and volatile.
+ inline LiveRegisterSet liveVolatileRegs(LInstruction* ins);
+ inline void saveLiveVolatile(LInstruction* ins);
+ inline void restoreLiveVolatile(LInstruction* ins);
+
+ public:
+ template <typename T>
+ void pushArg(const T& t) {
+ masm.Push(t);
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ }
+
+ void pushArg(jsid id, Register temp) {
+ masm.Push(id, temp);
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ }
+
+ template <typename T>
+ CodeOffset pushArgWithPatch(const T& t) {
+#ifdef DEBUG
+ pushedArgs_++;
+#endif
+ return masm.PushWithPatch(t);
+ }
+
+ void storePointerResultTo(Register reg) { masm.storeCallPointerResult(reg); }
+
+ void storeFloatResultTo(FloatRegister reg) { masm.storeCallFloatResult(reg); }
+
+ template <typename T>
+ void storeResultValueTo(const T& t) {
+ masm.storeCallResultValue(t);
+ }
+
+ protected:
+ void addIC(LInstruction* lir, size_t cacheIndex);
+
+ ReciprocalMulConstants computeDivisionConstants(uint32_t d, int maxLog);
+
+ protected:
+ bool generatePrologue();
+ bool generateEpilogue();
+
+ void addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir);
+ void addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site);
+ bool generateOutOfLineCode();
+
+ Label* getJumpLabelForBranch(MBasicBlock* block);
+
+ // Generate a jump to the start of the specified block. Use this in place of
+ // jumping directly to mir->lir()->label(), or use getJumpLabelForBranch()
+ // if a label to use directly is needed.
+ void jumpToBlock(MBasicBlock* mir);
+
+// This function is not used for MIPS. MIPS has branchToBlock.
+#if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
+ void jumpToBlock(MBasicBlock* mir, Assembler::Condition cond);
+#endif
+
+ private:
+ void generateInvalidateEpilogue();
+
+ public:
+ CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ public:
+ void visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool);
+
+ bool omitOverRecursedCheck() const;
+
+#ifdef JS_TRACE_LOGGING
+ protected:
+ void emitTracelogScript(bool isStart);
+ void emitTracelogTree(bool isStart, uint32_t textId);
+ void emitTracelogTree(bool isStart, const char* text,
+ TraceLoggerTextId enabledTextId);
+#endif
+
+ public:
+#ifdef JS_TRACE_LOGGING
+ void emitTracelogScriptStart() { emitTracelogScript(/* isStart =*/true); }
+ void emitTracelogScriptStop() { emitTracelogScript(/* isStart =*/false); }
+ void emitTracelogStartEvent(uint32_t textId) {
+ emitTracelogTree(/* isStart =*/true, textId);
+ }
+ void emitTracelogStopEvent(uint32_t textId) {
+ emitTracelogTree(/* isStart =*/false, textId);
+ }
+  // Log arbitrary text. The TraceLoggerTextId is used to toggle the
+  // logging on and off.
+  // Note: the text is not copied and needs to be kept alive until linking.
+ void emitTracelogStartEvent(const char* text,
+ TraceLoggerTextId enabledTextId) {
+ emitTracelogTree(/* isStart =*/true, text, enabledTextId);
+ }
+ void emitTracelogStopEvent(const char* text,
+ TraceLoggerTextId enabledTextId) {
+ emitTracelogTree(/* isStart =*/false, text, enabledTextId);
+ }
+ void emitTracelogIonStart() {
+ emitTracelogScriptStart();
+ emitTracelogStartEvent(TraceLogger_IonMonkey);
+ }
+ void emitTracelogIonStop() {
+ emitTracelogStopEvent(TraceLogger_IonMonkey);
+ emitTracelogScriptStop();
+ }
+#else
+ void emitTracelogScriptStart() {}
+ void emitTracelogScriptStop() {}
+ void emitTracelogStartEvent(uint32_t textId) {}
+ void emitTracelogStopEvent(uint32_t textId) {}
+ void emitTracelogStartEvent(const char* text,
+ TraceLoggerTextId enabledTextId) {}
+ void emitTracelogStopEvent(const char* text,
+ TraceLoggerTextId enabledTextId) {}
+ void emitTracelogIonStart() {}
+ void emitTracelogIonStop() {}
+#endif
+
+ bool isGlobalObject(JSObject* object);
+};
+
+// An out-of-line path is generated at the end of the function.
+class OutOfLineCode : public TempObject {
+ Label entry_;
+ Label rejoin_;
+ uint32_t framePushed_;
+ const BytecodeSite* site_;
+
+ public:
+ OutOfLineCode() : framePushed_(0), site_() {}
+
+ virtual void generate(CodeGeneratorShared* codegen) = 0;
+
+ Label* entry() { return &entry_; }
+ virtual void bind(MacroAssembler* masm) { masm->bind(entry()); }
+ Label* rejoin() { return &rejoin_; }
+ void setFramePushed(uint32_t framePushed) { framePushed_ = framePushed; }
+ uint32_t framePushed() const { return framePushed_; }
+ void setBytecodeSite(const BytecodeSite* site) { site_ = site; }
+ const BytecodeSite* bytecodeSite() const { return site_; }
+ jsbytecode* pc() const { return site_->pc(); }
+ JSScript* script() const { return site_->script(); }
+};
+
+// For OOL paths that want a specific-typed code generator.
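+// OutOfLineTruncateSlow in CodeGenerator-shared.cpp is a typical subclass: it
+// captures its operands in the constructor, and its accept() forwards to
+// CodeGeneratorShared::visitOutOfLineTruncateSlow(), which emits the slow
+// path and jumps back to rejoin().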
+template <typename T>
+class OutOfLineCodeBase : public OutOfLineCode {
+ public:
+ virtual void generate(CodeGeneratorShared* codegen) override {
+ accept(static_cast<T*>(codegen));
+ }
+
+ public:
+ virtual void accept(T* codegen) = 0;
+};
+
+template <class CodeGen>
+class OutOfLineWasmTruncateCheckBase : public OutOfLineCodeBase<CodeGen> {
+ MIRType fromType_;
+ MIRType toType_;
+ FloatRegister input_;
+ Register output_;
+ Register64 output64_;
+ TruncFlags flags_;
+ wasm::BytecodeOffset bytecodeOffset_;
+
+ public:
+ OutOfLineWasmTruncateCheckBase(MWasmTruncateToInt32* mir, FloatRegister input,
+ Register output)
+ : fromType_(mir->input()->type()),
+ toType_(MIRType::Int32),
+ input_(input),
+ output_(output),
+ output64_(Register64::Invalid()),
+ flags_(mir->flags()),
+ bytecodeOffset_(mir->bytecodeOffset()) {}
+
+ OutOfLineWasmTruncateCheckBase(MWasmBuiltinTruncateToInt64* mir,
+ FloatRegister input, Register64 output)
+ : fromType_(mir->input()->type()),
+ toType_(MIRType::Int64),
+ input_(input),
+ output_(Register::Invalid()),
+ output64_(output),
+ flags_(mir->flags()),
+ bytecodeOffset_(mir->bytecodeOffset()) {}
+
+ OutOfLineWasmTruncateCheckBase(MWasmTruncateToInt64* mir, FloatRegister input,
+ Register64 output)
+ : fromType_(mir->input()->type()),
+ toType_(MIRType::Int64),
+ input_(input),
+ output_(Register::Invalid()),
+ output64_(output),
+ flags_(mir->flags()),
+ bytecodeOffset_(mir->bytecodeOffset()) {}
+
+ void accept(CodeGen* codegen) override {
+ codegen->visitOutOfLineWasmTruncateCheck(this);
+ }
+
+ FloatRegister input() const { return input_; }
+ Register output() const { return output_; }
+ Register64 output64() const { return output64_; }
+ MIRType toType() const { return toType_; }
+ MIRType fromType() const { return fromType_; }
+ bool isUnsigned() const { return flags_ & TRUNC_UNSIGNED; }
+ bool isSaturating() const { return flags_ & TRUNC_SATURATING; }
+ TruncFlags flags() const { return flags_; }
+ wasm::BytecodeOffset bytecodeOffset() const { return bytecodeOffset_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_CodeGenerator_shared_h */
diff --git a/js/src/jit/shared/Disassembler-shared.cpp b/js/src/jit/shared/Disassembler-shared.cpp
new file mode 100644
index 0000000000..11d062dd53
--- /dev/null
+++ b/js/src/jit/shared/Disassembler-shared.cpp
@@ -0,0 +1,248 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/Disassembler-shared.h"
+
+#include "jit/JitSpewer.h"
+#include "jit/Label.h"
+#include "vm/Printer.h"
+
+using namespace js::jit;
+
+using js::Sprinter;
+
+#ifdef JS_DISASM_SUPPORTED
+// Concurrent assemblers are disambiguated by prefixing every disassembly with a
+// tag that is quasi-unique, and certainly unique enough in realistic cases
+// where we are debugging and looking at disassembler output. The tag is a
+// letter or digit between brackets prefixing the disassembly, e.g., [X]. This
+// wraps around every 62 assemblers.
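+// For example, the eleventh assembler created in a process gets tag 10, so
+// its output is prefixed with [a].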
+//
+// When running with --no-threads we can still have concurrent assemblers in the
+// form of nested assemblers, as when an IC stub is created by one assembler
+// while a JS compilation is going on and producing output in another assembler.
+//
+// We generate the tag for an assembler by incrementing a global mod-2^32
+// counter every time a new disassembler is created.
+
+mozilla::Atomic<uint32_t> DisassemblerSpew::counter_(0);
+#endif
+
+DisassemblerSpew::DisassemblerSpew()
+ : printer_(nullptr)
+#ifdef JS_DISASM_SUPPORTED
+ ,
+ labelIndent_(""),
+ targetIndent_(""),
+ spewNext_(1000),
+ nodes_(nullptr),
+ tag_(0)
+#endif
+{
+#ifdef JS_DISASM_SUPPORTED
+ tag_ = counter_++;
+#endif
+}
+
+DisassemblerSpew::~DisassemblerSpew() {
+#ifdef JS_DISASM_SUPPORTED
+ Node* p = nodes_;
+ while (p) {
+ Node* victim = p;
+ p = p->next;
+ js_free(victim);
+ }
+#endif
+}
+
+void DisassemblerSpew::setPrinter(Sprinter* printer) { printer_ = printer; }
+
+bool DisassemblerSpew::isDisabled() {
+ return !(JitSpewEnabled(JitSpew_Codegen) || printer_);
+}
+
+void DisassemblerSpew::spew(const char* fmt, ...) {
+#ifdef JS_DISASM_SUPPORTED
+ static const char prefix_chars[] =
+ "0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ static const char prefix_fmt[] = "[%c] ";
+
+ char fmt2[1024];
+ if (sizeof(fmt2) >= strlen(fmt) + sizeof(prefix_fmt)) {
+ snprintf(fmt2, sizeof(prefix_fmt), prefix_fmt,
+ prefix_chars[tag_ % (sizeof(prefix_chars) - 1)]);
+ strcat(fmt2, fmt);
+ fmt = fmt2;
+ }
+#endif
+
+ va_list args;
+ va_start(args, fmt);
+ spewVA(fmt, args);
+ va_end(args);
+}
+
+void DisassemblerSpew::spewVA(const char* fmt, va_list va) {
+ if (printer_) {
+ printer_->vprintf(fmt, va);
+ printer_->put("\n");
+ }
+ js::jit::JitSpewVA(js::jit::JitSpew_Codegen, fmt, va);
+}
+
+#ifdef JS_DISASM_SUPPORTED
+
+void DisassemblerSpew::setLabelIndent(const char* s) { labelIndent_ = s; }
+
+void DisassemblerSpew::setTargetIndent(const char* s) { targetIndent_ = s; }
+
+DisassemblerSpew::LabelDoc DisassemblerSpew::refLabel(const Label* l) {
+ return l ? LabelDoc(internalResolve(l), l->bound()) : LabelDoc();
+}
+
+void DisassemblerSpew::spewRef(const LabelDoc& target) {
+ if (isDisabled()) {
+ return;
+ }
+ if (!target.valid) {
+ return;
+ }
+ spew("%s-> %d%s", targetIndent_, target.doc, !target.bound ? "f" : "");
+}
+
+void DisassemblerSpew::spewBind(const Label* label) {
+ if (isDisabled()) {
+ return;
+ }
+ uint32_t v = internalResolve(label);
+ Node* probe = lookup(label);
+ if (probe) {
+ probe->bound = true;
+ }
+ spew("%s%d:", labelIndent_, v);
+}
+
+void DisassemblerSpew::spewRetarget(const Label* label, const Label* target) {
+ if (isDisabled()) {
+ return;
+ }
+ LabelDoc labelDoc = LabelDoc(internalResolve(label), label->bound());
+ LabelDoc targetDoc = LabelDoc(internalResolve(target), target->bound());
+ Node* probe = lookup(label);
+ if (probe) {
+ probe->bound = true;
+ }
+ spew("%s%d: .retarget -> %d%s", labelIndent_, labelDoc.doc, targetDoc.doc,
+ !targetDoc.bound ? "f" : "");
+}
+
+void DisassemblerSpew::formatLiteral(const LiteralDoc& doc, char* buffer,
+ size_t bufsize) {
+ switch (doc.type) {
+ case LiteralDoc::Type::Patchable:
+ snprintf(buffer, bufsize, "patchable");
+ break;
+ case LiteralDoc::Type::I32:
+ snprintf(buffer, bufsize, "%d", doc.value.i32);
+ break;
+ case LiteralDoc::Type::U32:
+ snprintf(buffer, bufsize, "%u", doc.value.u32);
+ break;
+ case LiteralDoc::Type::I64:
+ snprintf(buffer, bufsize, "%" PRIi64, doc.value.i64);
+ break;
+ case LiteralDoc::Type::U64:
+ snprintf(buffer, bufsize, "%" PRIu64, doc.value.u64);
+ break;
+ case LiteralDoc::Type::F32:
+ snprintf(buffer, bufsize, "%g", doc.value.f32);
+ break;
+ case LiteralDoc::Type::F64:
+ snprintf(buffer, bufsize, "%g", doc.value.f64);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+}
+
+void DisassemblerSpew::spewOrphans() {
+ for (Node* p = nodes_; p; p = p->next) {
+ if (!p->bound) {
+ spew("%s%d: ; .orphan", labelIndent_, p->value);
+ }
+ }
+}
+
+uint32_t DisassemblerSpew::internalResolve(const Label* l) {
+ // Note, internalResolve will sometimes return 0 when it is triggered by the
+ // profiler and not by a full disassembly, since in that case a label can be
+ // used or bound but not previously have been defined. In that case,
+ // internalResolve(l) will not necessarily create a binding for l!
+ // Consequently a subsequent lookup(l) may still return null.
+ return l->used() || l->bound() ? probe(l) : define(l);
+}
+
+uint32_t DisassemblerSpew::probe(const Label* l) {
+ Node* n = lookup(l);
+ return n ? n->value : 0;
+}
+
+uint32_t DisassemblerSpew::define(const Label* l) {
+ remove(l);
+ uint32_t value = spewNext_++;
+ if (!add(l, value)) {
+ return 0;
+ }
+ return value;
+}
+
+DisassemblerSpew::Node* DisassemblerSpew::lookup(const Label* key) {
+ Node* p;
+ for (p = nodes_; p && p->key != key; p = p->next) {
+ ;
+ }
+ return p;
+}
+
+DisassemblerSpew::Node* DisassemblerSpew::add(const Label* key,
+ uint32_t value) {
+ MOZ_ASSERT(!lookup(key));
+ Node* node = js_new<Node>();
+ if (node) {
+ node->key = key;
+ node->value = value;
+ node->bound = false;
+ node->next = nodes_;
+ nodes_ = node;
+ }
+ return node;
+}
+
+bool DisassemblerSpew::remove(const Label* key) {
+ // We do not require that there is a node matching the key.
+ for (Node *p = nodes_, *pp = nullptr; p; pp = p, p = p->next) {
+ if (p->key == key) {
+ if (pp) {
+ pp->next = p->next;
+ } else {
+ nodes_ = p->next;
+ }
+ js_free(p);
+ return true;
+ }
+ }
+ return false;
+}
+
+#else
+
+DisassemblerSpew::LabelDoc DisassemblerSpew::refLabel(const Label* l) {
+ return LabelDoc();
+}
+
+#endif
diff --git a/js/src/jit/shared/Disassembler-shared.h b/js/src/jit/shared/Disassembler-shared.h
new file mode 100644
index 0000000000..6bd82c48f5
--- /dev/null
+++ b/js/src/jit/shared/Disassembler-shared.h
@@ -0,0 +1,182 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Disassembler_shared_h
+#define jit_shared_Disassembler_shared_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#if defined(JS_DISASM_ARM) || defined(JS_DISASM_ARM64)
+# define JS_DISASM_SUPPORTED
+#endif
+
+namespace js {
+
+class Sprinter;
+
+namespace jit {
+
+class Label;
+
+// A wrapper around spew/disassembly functionality. The disassembler is built
+// on a per-instruction disassembler (as in our ARM, ARM64 back-ends) and
+// formats labels with meaningful names and literals with meaningful values, if
+// the assembler creates documentation (with provided helpers) at appropriate
+// points.
+
+class DisassemblerSpew {
+#ifdef JS_DISASM_SUPPORTED
+ struct Node {
+ const Label* key; // Never dereferenced, only used for its value
+ uint32_t value; // The printable label value
+ bool bound; // If the label has been seen by spewBind()
+ Node* next;
+ };
+
+ Node* lookup(const Label* key);
+ Node* add(const Label* key, uint32_t value);
+ bool remove(const Label* key);
+
+ uint32_t probe(const Label* l);
+ uint32_t define(const Label* l);
+ uint32_t internalResolve(const Label* l);
+#endif
+
+ void spewVA(const char* fmt, va_list args) MOZ_FORMAT_PRINTF(2, 0);
+
+ public:
+ DisassemblerSpew();
+ ~DisassemblerSpew();
+
+#ifdef JS_DISASM_SUPPORTED
+ // Set indentation strings. The spewer retains a reference to s.
+ void setLabelIndent(const char* s);
+ void setTargetIndent(const char* s);
+#endif
+
+ // Set the spew printer, which will always be used if it is set, regardless
+ // of whether the system spew channel is enabled or not. The spewer retains
+ // a reference to sp.
+ void setPrinter(Sprinter* sp);
+
+ // Return true if disassembly spew is disabled and no additional printer is
+ // set.
+ bool isDisabled();
+
+ // Format and print text on the spew channel; output is suppressed if spew
+ // is disabled. The output is not indented, and is terminated by a newline.
+ void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3);
+
+ // Documentation for a label reference.
+ struct LabelDoc {
+#ifdef JS_DISASM_SUPPORTED
+ LabelDoc() : doc(0), bound(false), valid(false) {}
+ LabelDoc(uint32_t doc, bool bound) : doc(doc), bound(bound), valid(true) {}
+ const uint32_t doc;
+ const bool bound;
+ const bool valid;
+#else
+ LabelDoc() = default;
+ LabelDoc(uint32_t, bool) {}
+#endif
+ };
+
+ // Documentation for a literal load.
+ struct LiteralDoc {
+#ifdef JS_DISASM_SUPPORTED
+ enum class Type { Patchable, I32, U32, I64, U64, F32, F64 };
+ const Type type;
+ union {
+ int32_t i32;
+ uint32_t u32;
+ int64_t i64;
+ uint64_t u64;
+ float f32;
+ double f64;
+ } value;
+ LiteralDoc() : type(Type::Patchable) {}
+ explicit LiteralDoc(int32_t v) : type(Type::I32) { value.i32 = v; }
+ explicit LiteralDoc(uint32_t v) : type(Type::U32) { value.u32 = v; }
+ explicit LiteralDoc(int64_t v) : type(Type::I64) { value.i64 = v; }
+ explicit LiteralDoc(uint64_t v) : type(Type::U64) { value.u64 = v; }
+ explicit LiteralDoc(float v) : type(Type::F32) { value.f32 = v; }
+ explicit LiteralDoc(double v) : type(Type::F64) { value.f64 = v; }
+#else
+ LiteralDoc() = default;
+ explicit LiteralDoc(int32_t) {}
+ explicit LiteralDoc(uint32_t) {}
+ explicit LiteralDoc(int64_t) {}
+ explicit LiteralDoc(uint64_t) {}
+ explicit LiteralDoc(float) {}
+ explicit LiteralDoc(double) {}
+#endif
+ };
+
+ // Reference a label, resolving it to a printable representation.
+ //
+ // NOTE: The printable representation depends on the state of the label, so
+  // if we call refLabel() when emitting & disassembling a branch instruction
+ // then it should be called before the label becomes Used, if emitting the
+ // branch can change the label's state.
+ //
+ // If the disassembler is not defined this returns a structure that is
+ // marked not valid.
+ LabelDoc refLabel(const Label* l);
+
+#ifdef JS_DISASM_SUPPORTED
+ // Spew the label information previously gathered by refLabel(), at a point
+ // where the label is referenced. The output is indented by targetIndent_
+ // and terminated by a newline.
+ void spewRef(const LabelDoc& target);
+
+ // Spew the label at the point where the label is bound. The output is
+ // indented by labelIndent_ and terminated by a newline.
+ void spewBind(const Label* label);
+
+ // Spew a retarget directive at the point where the retarget is recorded.
+ // The output is indented by labelIndent_ and terminated by a newline.
+ void spewRetarget(const Label* label, const Label* target);
+
+ // Format a literal value into the buffer. The buffer is always
+ // NUL-terminated even if this chops the formatted value.
+ void formatLiteral(const LiteralDoc& doc, char* buffer, size_t bufsize);
+
+ // Print any unbound labels, one per line, with normal label indent and with
+ // a comment indicating the label is not defined. Labels can be referenced
+ // but unbound in some legitimate cases, normally for traps. Printing them
+ // reduces confusion.
+ void spewOrphans();
+#endif
+
+ private:
+ Sprinter* printer_;
+#ifdef JS_DISASM_SUPPORTED
+ const char* labelIndent_;
+ const char* targetIndent_;
+ uint32_t spewNext_;
+ Node* nodes_;
+ uint32_t tag_;
+
+ // This global is used to disambiguate concurrently live assemblers, see
+ // comments in Disassembler-shared.cpp for why this is desirable.
+ //
+ // The variable is atomic to avoid any kind of complaint from thread
+ // sanitizers etc. However, trying to look at disassembly without using
+ // --no-threads is basically insane, so you can ignore the multi-threading
+ // implications here.
+ static mozilla::Atomic<uint32_t> counter_;
+#endif
+};
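+
+// A minimal usage sketch (an illustrative addition, not normative API
+// documentation): how a back-end assembler is assumed to drive the spewer
+// around a branch, given JS_DISASM_SUPPORTED. The emitBranchTo() helper is
+// hypothetical.
+//
+//   DisassemblerSpew spew;
+//   spew.setLabelIndent("          ");
+//   spew.setTargetIndent("                    ");
+//
+//   // Capture the label doc before emitting, since emission may change the
+//   // label's state (see refLabel() above).
+//   DisassemblerSpew::LabelDoc doc = spew.refLabel(label);
+//   emitBranchTo(label);  // hypothetical branch emitter
+//   spew.spew("  b.eq (to label)");
+//   spew.spewRef(doc);
+//
+//   // Later, when the label is bound:
+//   spew.spewBind(label);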
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_Disassembler_shared_h
diff --git a/js/src/jit/shared/IonAssemblerBuffer.h b/js/src/jit/shared/IonAssemblerBuffer.h
new file mode 100644
index 0000000000..cf19ef1a6a
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBuffer.h
@@ -0,0 +1,437 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBuffer_h
+#define jit_shared_IonAssemblerBuffer_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "jit/shared/Assembler-shared.h"
+
+namespace js {
+namespace jit {
+
+// The offset into a buffer, in bytes.
+class BufferOffset {
+ int offset;
+
+ public:
+ friend BufferOffset nextOffset();
+
+ BufferOffset() : offset(INT_MIN) {}
+
+ explicit BufferOffset(int offset_) : offset(offset_) {
+ MOZ_ASSERT(offset >= 0);
+ }
+
+ explicit BufferOffset(Label* l) : offset(l->offset()) {
+ MOZ_ASSERT(offset >= 0);
+ }
+
+ int getOffset() const { return offset; }
+ bool assigned() const { return offset != INT_MIN; }
+
+ // A BOffImm is a Branch Offset Immediate. It is an architecture-specific
+ // structure that holds the immediate for a pc relative branch. diffB takes
+ // the label for the destination of the branch, and encodes the immediate
+ // for the branch. This will need to be fixed up later, since A pool may be
+ // inserted between the branch and its destination.
+ template <class BOffImm>
+ BOffImm diffB(BufferOffset other) const {
+ if (!BOffImm::IsInRange(offset - other.offset)) {
+ return BOffImm();
+ }
+ return BOffImm(offset - other.offset);
+ }
+
+ template <class BOffImm>
+ BOffImm diffB(Label* other) const {
+ MOZ_ASSERT(other->bound());
+ if (!BOffImm::IsInRange(offset - other->offset())) {
+ return BOffImm();
+ }
+ return BOffImm(offset - other->offset());
+ }
+};
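+
+// Illustrative sketch (an exposition-only assumption, not an actual back-end
+// type): the minimal interface that diffB<BOffImm>() above relies on. A
+// default-constructed BOffImm acts as the "not encodable" marker.
+//
+//   class BOffImm {
+//    public:
+//     static bool IsInRange(int byteOffset);  // fits the branch encoding?
+//     BOffImm();                              // invalid / out-of-range value
+//     explicit BOffImm(int byteOffset);       // encode the pc-relative delta
+//   };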
+
+inline bool operator<(BufferOffset a, BufferOffset b) {
+ return a.getOffset() < b.getOffset();
+}
+
+inline bool operator>(BufferOffset a, BufferOffset b) {
+ return a.getOffset() > b.getOffset();
+}
+
+inline bool operator<=(BufferOffset a, BufferOffset b) {
+ return a.getOffset() <= b.getOffset();
+}
+
+inline bool operator>=(BufferOffset a, BufferOffset b) {
+ return a.getOffset() >= b.getOffset();
+}
+
+inline bool operator==(BufferOffset a, BufferOffset b) {
+ return a.getOffset() == b.getOffset();
+}
+
+inline bool operator!=(BufferOffset a, BufferOffset b) {
+ return a.getOffset() != b.getOffset();
+}
+
+template <int SliceSize>
+class BufferSlice {
+ protected:
+ BufferSlice<SliceSize>* prev_;
+ BufferSlice<SliceSize>* next_;
+
+ size_t bytelength_;
+
+ public:
+ mozilla::Array<uint8_t, SliceSize> instructions;
+
+ public:
+ explicit BufferSlice() : prev_(nullptr), next_(nullptr), bytelength_(0) {}
+
+ size_t length() const { return bytelength_; }
+ static inline size_t Capacity() { return SliceSize; }
+
+ BufferSlice* getNext() const { return next_; }
+ BufferSlice* getPrev() const { return prev_; }
+
+ void setNext(BufferSlice<SliceSize>* next) {
+ MOZ_ASSERT(next_ == nullptr);
+ MOZ_ASSERT(next->prev_ == nullptr);
+ next_ = next;
+ next->prev_ = this;
+ }
+
+ void putBytes(size_t numBytes, const void* source) {
+ MOZ_ASSERT(bytelength_ + numBytes <= SliceSize);
+ if (source) {
+ memcpy(&instructions[length()], source, numBytes);
+ }
+ bytelength_ += numBytes;
+ }
+
+ MOZ_ALWAYS_INLINE
+ void putU32Aligned(uint32_t value) {
+ MOZ_ASSERT(bytelength_ + 4 <= SliceSize);
+ MOZ_ASSERT((bytelength_ & 3) == 0);
+ MOZ_ASSERT((uintptr_t(&instructions[0]) & 3) == 0);
+ *reinterpret_cast<uint32_t*>(&instructions[bytelength_]) = value;
+ bytelength_ += 4;
+ }
+};
+
+template <int SliceSize, class Inst>
+class AssemblerBuffer {
+ protected:
+ typedef BufferSlice<SliceSize> Slice;
+
+ // Doubly-linked list of BufferSlices, with the most recent in tail position.
+ Slice* head;
+ Slice* tail;
+
+ bool m_oom;
+
+  // How many bytes have been committed to the buffer thus far.
+ // Does not include tail.
+ uint32_t bufferSize;
+
+ // How many bytes can be in the buffer. Normally this is
+ // MaxCodeBytesPerBuffer, but for pasteup buffers where we handle far jumps
+ // explicitly it can be larger.
+ uint32_t maxSize;
+
+ // Finger for speeding up accesses.
+ Slice* finger;
+ int finger_offset;
+
+ LifoAlloc lifoAlloc_;
+
+ public:
+ explicit AssemblerBuffer()
+ : head(nullptr),
+ tail(nullptr),
+ m_oom(false),
+ bufferSize(0),
+ maxSize(MaxCodeBytesPerBuffer),
+ finger(nullptr),
+ finger_offset(0),
+ lifoAlloc_(8192) {}
+
+ public:
+ bool isAligned(size_t alignment) const {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ return !(size() & (alignment - 1));
+ }
+
+ void setUnlimited() { maxSize = MaxCodeBytesPerProcess; }
+
+ private:
+ Slice* newSlice(LifoAlloc& a) {
+ if (size() > maxSize - sizeof(Slice)) {
+ fail_oom();
+ return nullptr;
+ }
+ Slice* tmp = static_cast<Slice*>(a.alloc(sizeof(Slice)));
+ if (!tmp) {
+ fail_oom();
+ return nullptr;
+ }
+ return new (tmp) Slice;
+ }
+
+ public:
+ bool ensureSpace(size_t size) {
+ // Space can exist in the most recent Slice.
+ if (tail && tail->length() + size <= tail->Capacity()) {
+ // Simulate allocation failure even when we don't need a new slice.
+ if (js::oom::ShouldFailWithOOM()) {
+ return fail_oom();
+ }
+
+ return true;
+ }
+
+ // Otherwise, a new Slice must be added.
+ Slice* slice = newSlice(lifoAlloc_);
+ if (slice == nullptr) {
+ return fail_oom();
+ }
+
+ // If this is the first Slice in the buffer, add to head position.
+ if (!head) {
+ head = slice;
+ finger = slice;
+ finger_offset = 0;
+ }
+
+ // Finish the last Slice and add the new Slice to the linked list.
+ if (tail) {
+ bufferSize += tail->length();
+ tail->setNext(slice);
+ }
+ tail = slice;
+
+ return true;
+ }
+
+ BufferOffset putByte(uint8_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putShort(uint16_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ BufferOffset putInt(uint32_t value) {
+ return putBytes(sizeof(value), &value);
+ }
+
+ MOZ_ALWAYS_INLINE
+ BufferOffset putU32Aligned(uint32_t value) {
+ if (!ensureSpace(sizeof(value))) {
+ return BufferOffset();
+ }
+
+ BufferOffset ret = nextOffset();
+ tail->putU32Aligned(value);
+ return ret;
+ }
+
+ // Add numBytes bytes to this buffer.
+ // The data must fit in a single slice.
+ BufferOffset putBytes(size_t numBytes, const void* inst) {
+ if (!ensureSpace(numBytes)) {
+ return BufferOffset();
+ }
+
+ BufferOffset ret = nextOffset();
+ tail->putBytes(numBytes, inst);
+ return ret;
+ }
+
+ // Add a potentially large amount of data to this buffer.
+  // The data may be distributed across multiple slices.
+ // Return the buffer offset of the first added byte.
+ BufferOffset putBytesLarge(size_t numBytes, const void* data) {
+ BufferOffset ret = nextOffset();
+ while (numBytes > 0) {
+ if (!ensureSpace(1)) {
+ return BufferOffset();
+ }
+ size_t avail = tail->Capacity() - tail->length();
+ size_t xfer = numBytes < avail ? numBytes : avail;
+ MOZ_ASSERT(xfer > 0, "ensureSpace should have allocated a slice");
+ tail->putBytes(xfer, data);
+ data = (const uint8_t*)data + xfer;
+ numBytes -= xfer;
+ }
+ return ret;
+ }
+
+ unsigned int size() const {
+ if (tail) {
+ return bufferSize + tail->length();
+ }
+ return bufferSize;
+ }
+ BufferOffset nextOffset() const { return BufferOffset(size()); }
+
+ bool oom() const { return m_oom; }
+
+ bool fail_oom() {
+ m_oom = true;
+#ifdef DEBUG
+ JitContext* context = MaybeGetJitContext();
+ if (context) {
+ context->setOOM();
+ }
+#endif
+ return false;
+ }
+
+ private:
+ void update_finger(Slice* finger_, int fingerOffset_) {
+ finger = finger_;
+ finger_offset = fingerOffset_;
+ }
+
+ static const unsigned SliceDistanceRequiringFingerUpdate = 3;
+
+ Inst* getInstForwards(BufferOffset off, Slice* start, int startOffset,
+ bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset;
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset >= cursor);
+
+ for (Slice* slice = start; slice != nullptr; slice = slice->getNext()) {
+ const int slicelen = slice->length();
+
+ // Is the offset within the bounds of this slice?
+ if (offset < cursor + slicelen) {
+ if (updateFinger ||
+ slicesSkipped >= SliceDistanceRequiringFingerUpdate) {
+ update_finger(slice, cursor);
+ }
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ cursor += slicelen;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ Inst* getInstBackwards(BufferOffset off, Slice* start, int startOffset,
+ bool updateFinger = false) {
+ const int offset = off.getOffset();
+
+ int cursor = startOffset; // First (lowest) offset in the start Slice.
+ unsigned slicesSkipped = 0;
+
+ MOZ_ASSERT(offset < int(cursor + start->length()));
+
+ for (Slice* slice = start; slice != nullptr;) {
+ // Is the offset within the bounds of this slice?
+ if (offset >= cursor) {
+ if (updateFinger ||
+ slicesSkipped >= SliceDistanceRequiringFingerUpdate) {
+ update_finger(slice, cursor);
+ }
+
+ MOZ_ASSERT(offset - cursor < (int)slice->length());
+ return (Inst*)&slice->instructions[offset - cursor];
+ }
+
+ // Move the cursor to the start of the previous slice.
+ Slice* prev = slice->getPrev();
+ cursor -= prev->length();
+
+ slice = prev;
+ slicesSkipped++;
+ }
+
+ MOZ_CRASH("Invalid instruction cursor.");
+ }
+
+ public:
+ Inst* getInstOrNull(BufferOffset off) {
+ if (!off.assigned()) {
+ return nullptr;
+ }
+ return getInst(off);
+ }
+
+ // Get a pointer to the instruction at offset |off| which must be within the
+ // bounds of the buffer. Use |getInstOrNull()| if |off| may be unassigned.
+ Inst* getInst(BufferOffset off) {
+ const int offset = off.getOffset();
+ // This function is hot, do not make the next line a RELEASE_ASSERT.
+ MOZ_ASSERT(off.assigned() && offset >= 0 && unsigned(offset) < size());
+
+ // Is the instruction in the last slice?
+ if (offset >= int(bufferSize)) {
+ return (Inst*)&tail->instructions[offset - bufferSize];
+ }
+
+ // How close is this offset to the previous one we looked up?
+ // If it is sufficiently far from the start and end of the buffer,
+ // use the finger to start midway through the list.
+ int finger_dist = abs(offset - finger_offset);
+ if (finger_dist < std::min(offset, int(bufferSize - offset))) {
+ if (finger_offset < offset) {
+ return getInstForwards(off, finger, finger_offset, true);
+ }
+ return getInstBackwards(off, finger, finger_offset, true);
+ }
+
+ // Is the instruction closer to the start or to the end?
+ if (offset < int(bufferSize - offset)) {
+ return getInstForwards(off, head, 0);
+ }
+
+ // The last slice was already checked above, so start at the
+ // second-to-last.
+ Slice* prev = tail->getPrev();
+ return getInstBackwards(off, prev, bufferSize - prev->length());
+ }
+
+ typedef AssemblerBuffer<SliceSize, Inst> ThisClass;
+
+ class AssemblerBufferInstIterator {
+ BufferOffset bo_;
+ ThisClass* buffer_;
+
+ public:
+ explicit AssemblerBufferInstIterator(BufferOffset bo, ThisClass* buffer)
+ : bo_(bo), buffer_(buffer) {}
+ void advance(int offset) { bo_ = BufferOffset(bo_.getOffset() + offset); }
+ Inst* next() {
+ advance(cur()->size());
+ return cur();
+ }
+ Inst* peek() {
+ return buffer_->getInst(BufferOffset(bo_.getOffset() + cur()->size()));
+ }
+ Inst* cur() const { return buffer_->getInst(bo_); }
+ };
+};
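+
+// Illustrative usage sketch (exposition only; Inst32 stands in for a
+// back-end's 4-byte instruction type): appending fixed-width instructions and
+// reading one back through its BufferOffset.
+//
+//   AssemblerBuffer<1024, Inst32> buf;
+//   BufferOffset first = buf.putInt(0xd503201f);  // AArch64 NOP encoding
+//   buf.putInt(0xd65f03c0);                       // AArch64 RET encoding
+//   if (!buf.oom()) {
+//     Inst32* nop = buf.getInst(first);  // pointer into the owning slice
+//   }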
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBuffer_h
diff --git a/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
new file mode 100644
index 0000000000..d21bb7f35b
--- /dev/null
+++ b/js/src/jit/shared/IonAssemblerBufferWithConstantPools.h
@@ -0,0 +1,1215 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_IonAssemblerBufferWithConstantPools_h
+#define jit_shared_IonAssemblerBufferWithConstantPools_h
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+
+#include "jit/JitSpewer.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+
+// [SMDOC] JIT AssemblerBuffer constant pooling (ARM/ARM64/MIPS)
+//
+// This code extends the AssemblerBuffer to support the pooling of values loaded
+// using program-counter relative addressing modes. This is necessary with the
+// ARM instruction set because it has a fixed instruction size that can not
+// encode all values as immediate arguments in instructions. Pooling the values
+// allows the values to be placed in large chunks which minimizes the number of
+// forced branches around them in the code. This is used for loading floating
+// point constants, for loading 32 bit constants on the ARMv6, for absolute
+// branch targets, and in future will be needed for large branches on the ARMv6.
+//
+// For simplicity of the implementation, the constant pools are always placed
+// after the loads referencing them. When a new constant pool load is added to
+// the assembler buffer, a corresponding pool entry is added to the current
+// pending pool. The finishPool() method copies the current pending pool entries
+// into the assembler buffer at the current offset and patches the pending
+// constant pool load instructions.
+//
+// Before inserting instructions or pool entries, it is necessary to determine
+// if doing so would place a pending pool entry out of reach of an instruction,
+// and if so then the pool must firstly be dumped. With the allocation algorithm
+// used below, the recalculation of all the distances between instructions and
+// their pool entries can be avoided by noting that there will be a limiting
+// instruction and pool entry pair that does not change when inserting more
+// instructions. Adding more instructions makes the same increase to the
+// distance, between instructions and their pool entries, for all such
+// pairs. This pair is recorded as the limiter, and it is updated when new pool
+// entries are added, see updateLimiter()
+//
+// The pools consist of: a guard instruction that branches around the pool, a
+// header word that helps identify a pool in the instruction stream, and then
+// the pool entries allocated in units of words. The guard instruction could be
+// omitted if control does not reach the pool, and this is referred to as a
+// natural guard below, but for simplicity the guard branch is always
+// emitted. The pool header is an identifiable word that in combination with the
+// guard uniquely identifies a pool in the instruction stream. The header also
+// encodes the pool size and a flag indicating if the guard is natural. It is
+// possible to iterate through the code instructions skipping or examining the
+// pools. E.g. it might be necessary to skip pools when searching for, or
+// patching, an instruction sequence.
+//
+// It is often required to keep a reference to a pool entry, to patch it after
+// the buffer is finished. Each pool entry is assigned a unique index, counting
+// up from zero (see the poolEntryCount slot below). These can be mapped back to
+// the offset of the pool entry in the finished buffer, see poolEntryOffset().
+//
+// The code supports no-pool regions, and for these the size of the region, in
+// instructions, must be supplied. This size is used to determine if inserting
+// the instructions would place a pool entry out of range, and if so then a pool
+// is firstly flushed. The DEBUG code checks that the emitted code is within the
+// supplied size to detect programming errors. See enterNoPool() and
+// leaveNoPool().
+
+// The only planned instruction sets that require inline constant pools are the
+// ARM, ARM64, and MIPS, and these all have fixed 32-bit sized instructions so
+// for simplicity the code below is specialized for fixed 32-bit sized
+// instructions and makes no attempt to support variable length
+// instructions. The base assembler buffer which supports variable width
+// instruction is used by the x86 and x64 backends.
+
+// The AssemblerBufferWithConstantPools template class uses static callbacks to
+// the provided Asm template argument class:
+//
+// void Asm::InsertIndexIntoTag(uint8_t* load_, uint32_t index)
+//
+// When allocEntry() is called to add a constant pool load with an associated
+// constant pool entry, this callback is called to encode the index of the
+// allocated constant pool entry into the load instruction.
+//
+// After the constant pool has been placed, PatchConstantPoolLoad() is called
+// to update the load instruction with the right load offset.
+//
+// void Asm::WritePoolGuard(BufferOffset branch,
+// Instruction* dest,
+// BufferOffset afterPool)
+//
+// Write out the constant pool guard branch before emitting the pool.
+//
+// branch
+// Offset of the guard branch in the buffer.
+//
+// dest
+// Pointer into the buffer where the guard branch should be emitted. (Same
+// as getInst(branch)). Space for guardSize_ instructions has been reserved.
+//
+// afterPool
+// Offset of the first instruction after the constant pool. This includes
+// both pool entries and branch veneers added after the pool data.
+//
+// void Asm::WritePoolHeader(uint8_t* start, Pool* p, bool isNatural)
+//
+// Write out the pool header which follows the guard branch.
+//
+// void Asm::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
+//
+// Re-encode a load of a constant pool entry after the location of the
+// constant pool is known.
+//
+// The load instruction at loadAddr was previously passed to
+// InsertIndexIntoTag(). The constPoolAddr is the final address of the
+// constant pool in the assembler buffer.
+//
+// void Asm::PatchShortRangeBranchToVeneer(AssemblerBufferWithConstantPools*,
+// unsigned rangeIdx,
+// BufferOffset deadline,
+// BufferOffset veneer)
+//
+// Patch a short-range branch to jump through a veneer before it goes out of
+// range.
+//
+// rangeIdx, deadline
+// These arguments were previously passed to registerBranchDeadline(). It is
+// assumed that PatchShortRangeBranchToVeneer() knows how to compute the
+// offset of the short-range branch from this information.
+//
+// veneer
+// Space for a branch veneer, guaranteed to be <= deadline. At this
+// position, guardSize_ * InstSize bytes are allocated. They should be
+// initialized to the proper unconditional branch instruction.
+//
+// Unbound branches to the same unbound label are organized as a linked list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Branch3 -> nil
+//
+// This callback should insert a new veneer branch into the list:
+//
+// Label::offset -> Branch1 -> Branch2 -> Veneer -> Branch3 -> nil
+//
+// When Assembler::bind() rewrites the branches with the real label offset, it
+// probably has to bind Branch2 to target the veneer branch instead of jumping
+// straight to the label.
+
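+// Putting the callbacks together, an Asm policy class is assumed to look
+// roughly like the sketch below (illustrative only; Instruction is the
+// back-end's own instruction type):
+//
+//   struct ExampleAsm {
+//     static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+//     static void PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+//     static void WritePoolGuard(BufferOffset branch, Instruction* dest,
+//                                BufferOffset afterPool);
+//     static void WritePoolHeader(uint8_t* start, Pool* p, bool isNatural);
+//     static void PatchShortRangeBranchToVeneer(
+//         AssemblerBufferWithConstantPools* buffer, unsigned rangeIdx,
+//         BufferOffset deadline, BufferOffset veneer);
+//   };
+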
+namespace js {
+namespace jit {
+
+// BranchDeadlineSet - Keep track of pending branch deadlines.
+//
+// Some architectures like arm and arm64 have branch instructions with limited
+// range. When assembling a forward branch, it is not always known if the final
+// target label will be in range of the branch instruction.
+//
+// The BranchDeadlineSet data structure is used to keep track of the set of
+// pending forward branches. It supports the following fast operations:
+//
+// 1. Get the earliest deadline in the set.
+// 2. Add a new branch deadline.
+// 3. Remove a branch deadline.
+//
+// Architectures may have different branch encodings with different ranges. Each
+// supported range is assigned a small integer starting at 0. This data
+// structure does not care about the actual range of branch instructions, just
+// the latest buffer offset that can be reached - the deadline offset.
+//
+// Branches are stored as (rangeIdx, deadline) tuples. The target-specific code
+// can compute the location of the branch itself from this information. This
+// data structure does not need to know.
+//
+template <unsigned NumRanges>
+class BranchDeadlineSet {
+ // Maintain a list of pending deadlines for each range separately.
+ //
+ // The offsets in each vector are always kept in ascending order.
+ //
+ // Because we have a separate vector for different ranges, as forward
+ // branches are added to the assembler buffer, their deadlines will
+ // always be appended to the vector corresponding to their range.
+ //
+ // When binding labels, we expect a more-or-less LIFO order of branch
+ // resolutions. This would always hold if we had strictly structured control
+ // flow.
+ //
+ // We allow branch deadlines to be added and removed in any order, but
+ // performance is best in the expected case of near LIFO order.
+ //
+ typedef Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> RangeVector;
+
+ // We really just want "RangeVector deadline_[NumRanges];", but each vector
+ // needs to be initialized with a LifoAlloc, and C++ doesn't bend that way.
+ //
+ // Use raw aligned storage instead and explicitly construct NumRanges
+ // vectors in our constructor.
+ mozilla::AlignedStorage2<RangeVector[NumRanges]> deadlineStorage_;
+
+ // Always access the range vectors through this method.
+ RangeVector& vectorForRange(unsigned rangeIdx) {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ const RangeVector& vectorForRange(unsigned rangeIdx) const {
+ MOZ_ASSERT(rangeIdx < NumRanges, "Invalid branch range index");
+ return (*deadlineStorage_.addr())[rangeIdx];
+ }
+
+ // Maintain a precomputed earliest deadline at all times.
+ // This is unassigned only when all deadline vectors are empty.
+ BufferOffset earliest_;
+
+ // The range vector owning earliest_. Uninitialized when empty.
+ unsigned earliestRange_;
+
+ // Recompute the earliest deadline after it's been invalidated.
+ void recomputeEarliest() {
+ earliest_ = BufferOffset();
+ for (unsigned r = 0; r < NumRanges; r++) {
+ auto& vec = vectorForRange(r);
+ if (!vec.empty() && (!earliest_.assigned() || vec[0] < earliest_)) {
+ earliest_ = vec[0];
+ earliestRange_ = r;
+ }
+ }
+ }
+
+ // Update the earliest deadline if needed after inserting (rangeIdx,
+ // deadline). Always return true for convenience:
+ // return insert() && updateEarliest().
+ bool updateEarliest(unsigned rangeIdx, BufferOffset deadline) {
+ if (!earliest_.assigned() || deadline < earliest_) {
+ earliest_ = deadline;
+ earliestRange_ = rangeIdx;
+ }
+ return true;
+ }
+
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc) : earliestRange_(0) {
+ // Manually construct vectors in the uninitialized aligned storage.
+ // This is because C++ arrays can otherwise only be constructed with
+ // the default constructor.
+ for (unsigned r = 0; r < NumRanges; r++) {
+ new (&vectorForRange(r)) RangeVector(alloc);
+ }
+ }
+
+ ~BranchDeadlineSet() {
+ // Aligned storage doesn't destruct its contents automatically.
+ for (unsigned r = 0; r < NumRanges; r++) {
+ vectorForRange(r).~RangeVector();
+ }
+ }
+
+ // Is this set completely empty?
+ bool empty() const { return !earliest_.assigned(); }
+
+ // Get the total number of deadlines in the set.
+ size_t size() const {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++) {
+ count += vectorForRange(r).length();
+ }
+ return count;
+ }
+
+ // Get the number of deadlines for the range with the most elements.
+ size_t maxRangeSize() const {
+ size_t count = 0;
+ for (unsigned r = 0; r < NumRanges; r++) {
+ count = std::max(count, vectorForRange(r).length());
+ }
+ return count;
+ }
+
+ // Get the first deadline that is still in the set.
+ BufferOffset earliestDeadline() const {
+ MOZ_ASSERT(!empty());
+ return earliest_;
+ }
+
+  // Get the range index corresponding to earliestDeadline().
+ unsigned earliestDeadlineRange() const {
+ MOZ_ASSERT(!empty());
+ return earliestRange_;
+ }
+
+ // Add a (rangeIdx, deadline) tuple to the set.
+ //
+ // It is assumed that this tuple is not already in the set.
+  // This function performs best if the added deadline is later than any
+ // existing deadline for the same range index.
+ //
+ // Return true if the tuple was added, false if the tuple could not be added
+ // because of an OOM error.
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ MOZ_ASSERT(deadline.assigned(), "Can only store assigned buffer offsets");
+ // This is the vector where deadline should be saved.
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Fast case: Simple append to the relevant array. This never affects
+ // the earliest deadline.
+ if (!vec.empty() && vec.back() < deadline) {
+ return vec.append(deadline);
+ }
+
+ // Fast case: First entry to the vector. We need to update earliest_.
+ if (vec.empty()) {
+ return vec.append(deadline) && updateEarliest(rangeIdx, deadline);
+ }
+
+ return addDeadlineSlow(rangeIdx, deadline);
+ }
+
+ private:
+ // General case of addDeadline. This is split into two functions such that
+ // the common case in addDeadline can be inlined while this part probably
+ // won't inline.
+ bool addDeadlineSlow(unsigned rangeIdx, BufferOffset deadline) {
+ auto& vec = vectorForRange(rangeIdx);
+
+ // Inserting into the middle of the vector. Use a log time binary search
+ // and a linear time insert().
+ // Is it worthwhile special-casing the empty vector?
+ auto at = std::lower_bound(vec.begin(), vec.end(), deadline);
+ MOZ_ASSERT(at == vec.end() || *at != deadline,
+ "Cannot insert duplicate deadlines");
+ return vec.insert(at, deadline) && updateEarliest(rangeIdx, deadline);
+ }
+
+ public:
+ // Remove a deadline from the set.
+ // If (rangeIdx, deadline) is not in the set, nothing happens.
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ auto& vec = vectorForRange(rangeIdx);
+
+ if (vec.empty()) {
+ return;
+ }
+
+ if (deadline == vec.back()) {
+ // Expected fast case: Structured control flow causes forward
+ // branches to be bound in reverse order.
+ vec.popBack();
+ } else {
+ // Slow case: Binary search + linear erase.
+ auto where = std::lower_bound(vec.begin(), vec.end(), deadline);
+ if (where == vec.end() || *where != deadline) {
+ return;
+ }
+ vec.erase(where);
+ }
+ if (deadline == earliest_) {
+ recomputeEarliest();
+ }
+ }
+};
+
+// Specialization for architectures that don't need to track short-range
+// branches.
+template <>
+class BranchDeadlineSet<0u> {
+ public:
+ explicit BranchDeadlineSet(LifoAlloc& alloc) {}
+ bool empty() const { return true; }
+ size_t size() const { return 0; }
+ size_t maxRangeSize() const { return 0; }
+ BufferOffset earliestDeadline() const { MOZ_CRASH(); }
+ unsigned earliestDeadlineRange() const { MOZ_CRASH(); }
+ bool addDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+ void removeDeadline(unsigned rangeIdx, BufferOffset deadline) { MOZ_CRASH(); }
+};
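+
+// Illustrative sketch (exposition only; lifoAlloc, someLimit, and the offsets
+// are assumed for the example): the expected add/query/remove cycle around a
+// short-range forward branch.
+//
+//   BranchDeadlineSet<2> deadlines(lifoAlloc);
+//   // A range-0 branch emitted such that it can reach at most offset 1124:
+//   deadlines.addDeadline(0, BufferOffset(1124));
+//   // Before emitting more code, check whether a veneer will be needed soon:
+//   if (!deadlines.empty() && deadlines.earliestDeadline() < someLimit) {
+//     // emit a veneer for deadlines.earliestDeadlineRange() here
+//   }
+//   // Once the branch is bound directly to its target:
+//   deadlines.removeDeadline(0, BufferOffset(1124));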
+
+// The allocation unit size for pools.
+typedef int32_t PoolAllocUnit;
+
+// Hysteresis given to short-range branches.
+//
+// If any short-range branches will go out of range in the next N bytes,
+// generate a veneer for them in the current pool. The hysteresis prevents the
+// creation of many tiny constant pools for branch veneers.
+const size_t ShortRangeBranchHysteresis = 128;
+
+struct Pool {
+ private:
+  // The maximum program-counter relative offset that the instruction set can
+  // encode. Different classes of instructions might support different
+ // ranges but for simplicity the minimum is used here, and for the ARM this
+ // is constrained to 1024 by the float load instructions.
+ const size_t maxOffset_;
+ // An offset to apply to program-counter relative offsets. The ARM has a
+ // bias of 8.
+ const unsigned bias_;
+
+ // The content of the pool entries.
+ Vector<PoolAllocUnit, 8, LifoAllocPolicy<Fallible>> poolData_;
+
+ // Flag that tracks OOM conditions. This is set after any append failed.
+ bool oom_;
+
+ // The limiting instruction and pool-entry pair. The instruction program
+ // counter relative offset of this limiting instruction will go out of range
+ // first as the pool position moves forward. It is more efficient to track
+ // just this limiting pair than to recheck all offsets when testing if the
+ // pool needs to be dumped.
+ //
+ // 1. The actual offset of the limiting instruction referencing the limiting
+ // pool entry.
+ BufferOffset limitingUser;
+ // 2. The pool entry index of the limiting pool entry.
+ unsigned limitingUsee;
+
+ public:
+ // A record of the code offset of instructions that reference pool
+ // entries. These instructions need to be patched when the actual position
+ // of the instructions and pools are known, and for the code below this
+ // occurs when each pool is finished, see finishPool().
+ Vector<BufferOffset, 8, LifoAllocPolicy<Fallible>> loadOffsets;
+
+  // Create a Pool. Don't allocate anything from lifoAlloc, just capture its
+ // reference.
+ explicit Pool(size_t maxOffset, unsigned bias, LifoAlloc& lifoAlloc)
+ : maxOffset_(maxOffset),
+ bias_(bias),
+ poolData_(lifoAlloc),
+ oom_(false),
+ limitingUser(),
+ limitingUsee(INT_MIN),
+ loadOffsets(lifoAlloc) {}
+
+ // If poolData() returns nullptr then oom_ will also be true.
+ const PoolAllocUnit* poolData() const { return poolData_.begin(); }
+
+ unsigned numEntries() const { return poolData_.length(); }
+
+ size_t getPoolSize() const { return numEntries() * sizeof(PoolAllocUnit); }
+
+ bool oom() const { return oom_; }
+
+ // Update the instruction/pool-entry pair that limits the position of the
+ // pool. The nextInst is the actual offset of the new instruction being
+ // allocated.
+ //
+ // This is comparing the offsets, see checkFull() below for the equation,
+ // but common expressions on both sides have been canceled from the ranges
+ // being compared. Notably, the poolOffset cancels out, so the limiting pair
+ // does not depend on where the pool is placed.
+ void updateLimiter(BufferOffset nextInst) {
+ ptrdiff_t oldRange =
+ limitingUsee * sizeof(PoolAllocUnit) - limitingUser.getOffset();
+ ptrdiff_t newRange = getPoolSize() - nextInst.getOffset();
+ if (!limitingUser.assigned() || newRange > oldRange) {
+ // We have a new largest range!
+ limitingUser = nextInst;
+ limitingUsee = numEntries();
+ }
+ }
+
+ // Check if inserting a pool at the actual offset poolOffset would place
+ // pool entries out of reach. This is called before inserting instructions
+ // to check that doing so would not push pool entries out of reach, and if
+ // so then the pool would need to be firstly dumped. The poolOffset is the
+ // first word of the pool, after the guard and header and alignment fill.
+ bool checkFull(size_t poolOffset) const {
+ // Not full if there are no uses.
+ if (!limitingUser.assigned()) {
+ return false;
+ }
+ size_t offset = poolOffset + limitingUsee * sizeof(PoolAllocUnit) -
+ (limitingUser.getOffset() + bias_);
+ return offset >= maxOffset_;
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+
+ unsigned insertEntry(unsigned num, uint8_t* data, BufferOffset off,
+ LifoAlloc& lifoAlloc) {
+ if (oom_) {
+ return OOM_FAIL;
+ }
+ unsigned ret = numEntries();
+ if (!poolData_.append((PoolAllocUnit*)data, num) ||
+ !loadOffsets.append(off)) {
+ oom_ = true;
+ return OOM_FAIL;
+ }
+ return ret;
+ }
+
+ void reset() {
+ poolData_.clear();
+ loadOffsets.clear();
+
+ limitingUser = BufferOffset();
+ limitingUsee = -1;
+ }
+};
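+
+// Worked example for checkFull() (illustrative numbers only, assuming ARM-like
+// parameters maxOffset_ = 1024 and bias_ = 8): suppose the limiting load sits
+// at buffer offset 100 and references pool entry index 2, i.e. the entry
+// 2 * sizeof(PoolAllocUnit) = 8 bytes into the pool data. For a candidate
+// poolOffset of 1100,
+//   offset = 1100 + 8 - (100 + 8) = 1000 < 1024
+// so the pool is not yet full. Once poolOffset reaches 1140 the distance
+// becomes 1040 >= 1024, checkFull() returns true, and the pool must be dumped
+// before inserting more instructions.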
+
+// Template arguments:
+//
+// SliceSize
+// Number of bytes in each allocated BufferSlice. See
+// AssemblerBuffer::SliceSize.
+//
+// InstSize
+// Size in bytes of the fixed-size instructions. This should be equal to
+// sizeof(Inst). This is only needed here because the buffer is defined before
+// the Instruction.
+//
+// Inst
+// The actual type used to represent instructions. This is only really used as
+// the return type of the getInst() method.
+//
+// Asm
+// Class defining the needed static callback functions. See documentation of
+// the Asm::* callbacks above.
+//
+// NumShortBranchRanges
+// The number of short branch ranges to support. This can be 0 if no support
+// for tracking short range branches is needed. The
+// AssemblerBufferWithConstantPools class does not need to know what the range
+// of branches is - it deals in branch 'deadlines' which is the last buffer
+// position that a short-range forward branch can reach. It is assumed that
+// the Asm class is able to find the actual branch instruction given a
+// (range-index, deadline) pair.
+//
+//
+template <size_t SliceSize, size_t InstSize, class Inst, class Asm,
+ unsigned NumShortBranchRanges = 0>
+struct AssemblerBufferWithConstantPools
+ : public AssemblerBuffer<SliceSize, Inst> {
+ private:
+ // The PoolEntry index counter. Each PoolEntry is given a unique index,
+ // counting up from zero, and these can be mapped back to the actual pool
+ // entry offset after finishing the buffer, see poolEntryOffset().
+ size_t poolEntryCount;
+
+ public:
+ class PoolEntry {
+ size_t index_;
+
+ public:
+ explicit PoolEntry(size_t index) : index_(index) {}
+
+ PoolEntry() : index_(-1) {}
+
+ size_t index() const { return index_; }
+ };
+
+ private:
+ typedef AssemblerBuffer<SliceSize, Inst> Parent;
+ using typename Parent::Slice;
+
+ // The size of a pool guard, in instructions. A branch around the pool.
+ const unsigned guardSize_;
+ // The size of the header that is put at the beginning of a full pool, in
+ // instruction sized units.
+ const unsigned headerSize_;
+
+ // The maximum pc relative offset encoded in instructions that reference
+ // pool entries. This is generally set to the maximum offset that can be
+ // encoded by the instructions, but for testing can be lowered to affect the
+ // pool placement and frequency of pool placement.
+ const size_t poolMaxOffset_;
+
+ // The bias on pc relative addressing mode offsets, in units of bytes. The
+ // ARM has a bias of 8 bytes.
+ const unsigned pcBias_;
+
+ // The current working pool. Copied out as needed before resetting.
+ Pool pool_;
+
+ // The buffer should be aligned to this address.
+ const size_t instBufferAlign_;
+
+ struct PoolInfo {
+ // The index of the first entry in this pool.
+ // Pool entries are numbered uniquely across all pools, starting from 0.
+ unsigned firstEntryIndex;
+
+ // The location of this pool's first entry in the main assembler buffer.
+ // Note that the pool guard and header come before this offset which
+ // points directly at the data.
+ BufferOffset offset;
+
+ explicit PoolInfo(unsigned index, BufferOffset data)
+ : firstEntryIndex(index), offset(data) {}
+ };
+
+ // Info for each pool that has already been dumped. This does not include
+ // any entries in pool_.
+ Vector<PoolInfo, 8, LifoAllocPolicy<Fallible>> poolInfo_;
+
+ // Set of short-range forward branches that have not yet been bound.
+ // We may need to insert veneers if the final label turns out to be out of
+ // range.
+ //
+ // This set stores (rangeIdx, deadline) pairs instead of the actual branch
+ // locations.
+ BranchDeadlineSet<NumShortBranchRanges> branchDeadlines_;
+
+  // When true, dumping pools is inhibited.
+ bool canNotPlacePool_;
+
+#ifdef DEBUG
+ // State for validating the 'maxInst' argument to enterNoPool().
+ // The buffer offset when entering the no-pool region.
+ size_t canNotPlacePoolStartOffset_;
+ // The maximum number of word sized instructions declared for the no-pool
+ // region.
+ size_t canNotPlacePoolMaxInst_;
+#endif
+
+ // Instruction to use for alignment fill.
+ const uint32_t alignFillInst_;
+
+ // Insert a number of NOP instructions between each requested instruction at
+ // all locations at which a pool can potentially spill. This is useful for
+ // checking that instruction locations are correctly referenced and/or
+ // followed.
+ const uint32_t nopFillInst_;
+ const unsigned nopFill_;
+
+ // For inhibiting the insertion of fill NOPs in the dynamic context in which
+ // they are being inserted.
+ bool inhibitNops_;
+
+ public:
+ // A unique id within each JitContext, to identify pools in the debug
+ // spew. Set by the MacroAssembler, see getNextAssemblerId().
+ int id;
+
+ private:
+ // The buffer slices are in a double linked list.
+ Slice* getHead() const { return this->head; }
+ Slice* getTail() const { return this->tail; }
+
+ public:
+ // Create an assembler buffer.
+ // Note that this constructor is not allowed to actually allocate memory from
+ // this->lifoAlloc_ because the MacroAssembler constructor has not yet created
+ // an AutoJitContextAlloc.
+ AssemblerBufferWithConstantPools(unsigned guardSize, unsigned headerSize,
+ size_t instBufferAlign, size_t poolMaxOffset,
+ unsigned pcBias, uint32_t alignFillInst,
+ uint32_t nopFillInst, unsigned nopFill = 0)
+ : poolEntryCount(0),
+ guardSize_(guardSize),
+ headerSize_(headerSize),
+ poolMaxOffset_(poolMaxOffset),
+ pcBias_(pcBias),
+ pool_(poolMaxOffset, pcBias, this->lifoAlloc_),
+ instBufferAlign_(instBufferAlign),
+ poolInfo_(this->lifoAlloc_),
+ branchDeadlines_(this->lifoAlloc_),
+ canNotPlacePool_(false),
+#ifdef DEBUG
+ canNotPlacePoolStartOffset_(0),
+ canNotPlacePoolMaxInst_(0),
+#endif
+ alignFillInst_(alignFillInst),
+ nopFillInst_(nopFillInst),
+ nopFill_(nopFill),
+ inhibitNops_(false),
+ id(-1) {
+ }
+
+ // We need to wait until an AutoJitContextAlloc is created by the
+ // MacroAssembler before allocating any space.
+ void initWithAllocator() {
+ // We hand out references to lifoAlloc_ in the constructor.
+ // Check that no allocations were made then.
+ MOZ_ASSERT(this->lifoAlloc_.isEmpty(),
+ "Illegal LIFO allocations before AutoJitContextAlloc");
+ }
+
+ private:
+ size_t sizeExcludingCurrentPool() const {
+ // Return the actual size of the buffer, excluding the current pending
+ // pool.
+ return this->nextOffset().getOffset();
+ }
+
+ public:
+ size_t size() const {
+ // Return the current actual size of the buffer. This is only accurate
+ // if there are no pending pool entries to dump, check.
+ MOZ_ASSERT_IF(!this->oom(), pool_.numEntries() == 0);
+ return sizeExcludingCurrentPool();
+ }
+
+ private:
+ void insertNopFill() {
+ // Insert fill for testing.
+ if (nopFill_ > 0 && !inhibitNops_ && !canNotPlacePool_) {
+ inhibitNops_ = true;
+
+ // Fill using a branch-nop rather than a NOP so this can be
+ // distinguished and skipped.
+ for (size_t i = 0; i < nopFill_; i++) {
+ putInt(nopFillInst_);
+ }
+
+ inhibitNops_ = false;
+ }
+ }
+
+ static const unsigned OOM_FAIL = unsigned(-1);
+ static const unsigned DUMMY_INDEX = unsigned(-2);
+
+ // Check if it is possible to add numInst instructions and numPoolEntries
+ // constant pool entries without needing to flush the current pool.
+ bool hasSpaceForInsts(unsigned numInsts, unsigned numPoolEntries) const {
+ size_t nextOffset = sizeExcludingCurrentPool();
+ // Earliest starting offset for the current pool after adding numInsts.
+ // This is the beginning of the pool entries proper, after inserting a
+ // guard branch + pool header.
+ size_t poolOffset =
+ nextOffset + (numInsts + guardSize_ + headerSize_) * InstSize;
+
+ // Any constant pool loads that would go out of range?
+ if (pool_.checkFull(poolOffset)) {
+ return false;
+ }
+
+ // Any short-range branch that would go out of range?
+ if (!branchDeadlines_.empty()) {
+ size_t deadline = branchDeadlines_.earliestDeadline().getOffset();
+ size_t poolEnd = poolOffset + pool_.getPoolSize() +
+ numPoolEntries * sizeof(PoolAllocUnit);
+
+      // When NumShortBranchRanges > 1, it is possible for branch deadlines to
+ // expire faster than we can insert veneers. Suppose branches are 4 bytes
+ // each, we could have the following deadline set:
+ //
+ // Range 0: 40, 44, 48
+ // Range 1: 44, 48
+ //
+ // It is not good enough to start inserting veneers at the 40 deadline; we
+ // would not be able to create veneers for the second 44 deadline.
+ // Instead, we need to start at 32:
+ //
+ // 32: veneer(40)
+ // 36: veneer(44)
+ // 40: veneer(44)
+ // 44: veneer(48)
+ // 48: veneer(48)
+ //
+ // This is a pretty conservative solution to the problem: If we begin at
+ // the earliest deadline, we can always emit all veneers for the range
+ // that currently has the most pending deadlines. That may not leave room
+ // for veneers for the remaining ranges, so reserve space for those
+ // secondary range veneers assuming the worst case deadlines.
+
+ // Total pending secondary range veneer size.
+ size_t secondaryVeneers = guardSize_ * (branchDeadlines_.size() -
+ branchDeadlines_.maxRangeSize());
+
+ if (deadline < poolEnd + secondaryVeneers) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ unsigned insertEntryForwards(unsigned numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data) {
+ // If inserting pool entries then find a new limiter before we do the
+ // range check.
+ if (numPoolEntries) {
+ pool_.updateLimiter(BufferOffset(sizeExcludingCurrentPool()));
+ }
+
+ if (!hasSpaceForInsts(numInst, numPoolEntries)) {
+ if (numPoolEntries) {
+ JitSpew(JitSpew_Pools, "[%d] Inserting pool entry caused a spill", id);
+ } else {
+ JitSpew(JitSpew_Pools, "[%d] Inserting instruction(%zu) caused a spill",
+ id, sizeExcludingCurrentPool());
+ }
+
+ finishPool(numInst * InstSize);
+ if (this->oom()) {
+ return OOM_FAIL;
+ }
+ return insertEntryForwards(numInst, numPoolEntries, inst, data);
+ }
+ if (numPoolEntries) {
+ unsigned result = pool_.insertEntry(numPoolEntries, data,
+ this->nextOffset(), this->lifoAlloc_);
+ if (result == Pool::OOM_FAIL) {
+ this->fail_oom();
+ return OOM_FAIL;
+ }
+ return result;
+ }
+
+ // The pool entry index is returned above when allocating an entry, but
+ // when not allocating an entry a dummy value is returned - it is not
+ // expected to be used by the caller.
+ return DUMMY_INDEX;
+ }
+
+ public:
+ // Get the next buffer offset where an instruction would be inserted.
+ // This may flush the current constant pool before returning nextOffset().
+ BufferOffset nextInstrOffset() {
+ if (!hasSpaceForInsts(/* numInsts= */ 1, /* numPoolEntries= */ 0)) {
+ JitSpew(JitSpew_Pools,
+ "[%d] nextInstrOffset @ %d caused a constant pool spill", id,
+ this->nextOffset().getOffset());
+ finishPool(ShortRangeBranchHysteresis);
+ }
+ return this->nextOffset();
+ }
+
+ MOZ_NEVER_INLINE
+ BufferOffset allocEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data,
+ PoolEntry* pe = nullptr) {
+ // The allocation of pool entries is not supported in a no-pool region,
+ // check.
+ MOZ_ASSERT_IF(numPoolEntries, !canNotPlacePool_);
+
+ if (this->oom()) {
+ return BufferOffset();
+ }
+
+ insertNopFill();
+
+#ifdef JS_JITSPEW
+ if (numPoolEntries && JitSpewEnabled(JitSpew_Pools)) {
+ JitSpew(JitSpew_Pools, "[%d] Inserting %d entries into pool", id,
+ numPoolEntries);
+ JitSpewStart(JitSpew_Pools, "[%d] data is: 0x", id);
+ size_t length = numPoolEntries * sizeof(PoolAllocUnit);
+ for (unsigned idx = 0; idx < length; idx++) {
+ JitSpewCont(JitSpew_Pools, "%02x", data[length - idx - 1]);
+ if (((idx & 3) == 3) && (idx + 1 != length)) {
+ JitSpewCont(JitSpew_Pools, "_");
+ }
+ }
+ JitSpewFin(JitSpew_Pools);
+ }
+#endif
+
+ // Insert the pool value.
+ unsigned index = insertEntryForwards(numInst, numPoolEntries, inst, data);
+ if (this->oom()) {
+ return BufferOffset();
+ }
+
+ // Now to get an instruction to write.
+ PoolEntry retPE;
+ if (numPoolEntries) {
+ JitSpew(JitSpew_Pools, "[%d] Entry has index %u, offset %zu", id, index,
+ sizeExcludingCurrentPool());
+ Asm::InsertIndexIntoTag(inst, index);
+ // Figure out the offset within the pool entries.
+ retPE = PoolEntry(poolEntryCount);
+ poolEntryCount += numPoolEntries;
+ }
+ // Now inst is a valid thing to insert into the instruction stream.
+ if (pe != nullptr) {
+ *pe = retPE;
+ }
+ return this->putBytes(numInst * InstSize, inst);
+ }
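+
+  // Illustrative call (exposition only; the load encoding is a placeholder):
+  // emitting one pc-relative load instruction together with one 32-bit pool
+  // entry, keeping the returned PoolEntry for later patching.
+  //
+  //   uint32_t loadInst = 0 /* back-end encoding of a literal load */;
+  //   uint32_t value = 0x12345678;
+  //   PoolEntry pe;
+  //   BufferOffset load = buffer.allocEntry(/* numInst= */ 1,
+  //                                         /* numPoolEntries= */ 1,
+  //                                         (uint8_t*)&loadInst,
+  //                                         (uint8_t*)&value, &pe);
+  //   // pe.index() identifies the entry; see poolEntryOffset().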
+
+ // putInt is the workhorse for the assembler and higher-level buffer
+ // abstractions: it places one instruction into the instruction stream.
+ // Under normal circumstances putInt should just check that the constant
+ // pool does not need to be flushed, that there is space for the single word
+ // of the instruction, and write that word and update the buffer pointer.
+ //
+ // To do better here we need a status variable that handles both nopFill_
+ // and capacity, so that we can quickly know whether to go the slow path.
+ // That could be a variable that has the remaining number of simple
+ // instructions that can be inserted before a more expensive check,
+ // which is set to zero when nopFill_ is set.
+ //
+ // We assume that we don't have to check this->oom() if there is space to
+ // insert a plain instruction; there will always come a later time when it
+ // will be checked anyway.
+
+ MOZ_ALWAYS_INLINE
+ BufferOffset putInt(uint32_t value) {
+ if (nopFill_ ||
+ !hasSpaceForInsts(/* numInsts= */ 1, /* numPoolEntries= */ 0)) {
+ return allocEntry(1, 0, (uint8_t*)&value, nullptr, nullptr);
+ }
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ return this->putU32Aligned(value);
+#else
+ return this->AssemblerBuffer<SliceSize, Inst>::putInt(value);
+#endif
+ }
+
+ // Register a short-range branch deadline.
+ //
+ // After inserting a short-range forward branch, call this method to
+ // register the branch 'deadline' which is the last buffer offset that the
+ // branch instruction can reach.
+ //
+ // When the branch is bound to a destination label, call
+  // unregisterBranchDeadline() to stop tracking this branch.
+ //
+ // If the assembled code is about to exceed the registered branch deadline,
+ // and unregisterBranchDeadline() has not yet been called, an
+ // instruction-sized constant pool entry is allocated before the branch
+ // deadline.
+ //
+ // rangeIdx
+ // A number < NumShortBranchRanges identifying the range of the branch.
+ //
+ // deadline
+  //   The highest buffer offset that the short-range branch can reach
+ // directly.
+ //
+ void registerBranchDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ if (!this->oom() && !branchDeadlines_.addDeadline(rangeIdx, deadline)) {
+ this->fail_oom();
+ }
+ }
+
+ // Un-register a short-range branch deadline.
+ //
+ // When a short-range branch has been successfully bound to its destination
+  // label, call this function to stop tracking the branch.
+ //
+ // The (rangeIdx, deadline) pair must be previously registered.
+ //
+ void unregisterBranchDeadline(unsigned rangeIdx, BufferOffset deadline) {
+ if (!this->oom()) {
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+ }
+ }
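+
+  // Usage sketch of the deadline protocol (MaxRange and branchOffset are
+  // hypothetical names; rangeIdx must be < NumShortBranchRanges as described
+  // above):
+  //
+  //   BufferOffset branchOffset = /* offset of the emitted short branch */;
+  //   BufferOffset deadline(branchOffset.getOffset() + MaxRange);
+  //   buffer.registerBranchDeadline(/* rangeIdx = */ 0, deadline);
+  //   // ... later, once the branch is bound within range:
+  //   buffer.unregisterBranchDeadline(/* rangeIdx = */ 0, deadline);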
+
+ private:
+ // Are any short-range branches about to expire?
+ bool hasExpirableShortRangeBranches(size_t reservedBytes) const {
+ if (branchDeadlines_.empty()) {
+ return false;
+ }
+
+ // Include branches that would expire in the next N bytes. The reservedBytes
+ // argument avoids the needless creation of many tiny constant pools.
+ //
+    // Since reservedBytes can be any size, including SIZE_MAX in the case of
+    // flushPool, we have to check for overflow when comparing the deadline
+    // with our expected reserved bytes.
+ size_t deadline = branchDeadlines_.earliestDeadline().getOffset();
+ using CheckedSize = mozilla::CheckedInt<size_t>;
+ CheckedSize current(this->nextOffset().getOffset());
+ CheckedSize poolFreeSpace(reservedBytes);
+ auto future = current + poolFreeSpace;
+ return !future.isValid() || deadline < future.value();
+ }
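+
+  // Worked example: flushPool() passes SIZE_MAX as reservedBytes, so the
+  // check treats every outstanding deadline as expirable (the CheckedInt sum
+  // either overflows or spans the whole buffer). With a finite reservation,
+  // say current == 0x1000 and reservedBytes == 0x80, only deadlines below
+  // 0x1080 force a veneer.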
+
+ bool isPoolEmptyFor(size_t bytes) const {
+ return pool_.numEntries() == 0 && !hasExpirableShortRangeBranches(bytes);
+ }
+ void finishPool(size_t reservedBytes) {
+ JitSpew(JitSpew_Pools,
+ "[%d] Attempting to finish pool %zu with %u entries.", id,
+ poolInfo_.length(), pool_.numEntries());
+
+ if (reservedBytes < ShortRangeBranchHysteresis) {
+ reservedBytes = ShortRangeBranchHysteresis;
+ }
+
+ if (isPoolEmptyFor(reservedBytes)) {
+ // If there is no data in the pool being dumped, don't dump anything.
+ JitSpew(JitSpew_Pools, "[%d] Aborting because the pool is empty", id);
+ return;
+ }
+
+ // Should not be placing a pool in a no-pool region, check.
+ MOZ_ASSERT(!canNotPlacePool_);
+
+ // Dump the pool with a guard branch around the pool.
+ BufferOffset guard = this->putBytes(guardSize_ * InstSize, nullptr);
+ BufferOffset header = this->putBytes(headerSize_ * InstSize, nullptr);
+ BufferOffset data = this->putBytesLarge(pool_.getPoolSize(),
+ (const uint8_t*)pool_.poolData());
+ if (this->oom()) {
+ return;
+ }
+
+ // Now generate branch veneers for any short-range branches that are
+ // about to expire.
+ while (hasExpirableShortRangeBranches(reservedBytes)) {
+ unsigned rangeIdx = branchDeadlines_.earliestDeadlineRange();
+ BufferOffset deadline = branchDeadlines_.earliestDeadline();
+
+ // Stop tracking this branch. The Asm callback below may register
+ // new branches to track.
+ branchDeadlines_.removeDeadline(rangeIdx, deadline);
+
+ // Make room for the veneer. Same as a pool guard branch.
+ BufferOffset veneer = this->putBytes(guardSize_ * InstSize, nullptr);
+ if (this->oom()) {
+ return;
+ }
+
+ // Fix the branch so it targets the veneer.
+ // The Asm class knows how to find the original branch given the
+ // (rangeIdx, deadline) pair.
+ Asm::PatchShortRangeBranchToVeneer(this, rangeIdx, deadline, veneer);
+ }
+
+ // We only reserved space for the guard branch and pool header.
+ // Fill them in.
+ BufferOffset afterPool = this->nextOffset();
+ Asm::WritePoolGuard(guard, this->getInst(guard), afterPool);
+ Asm::WritePoolHeader((uint8_t*)this->getInst(header), &pool_, false);
+
+ // With the pool's final position determined it is now possible to patch
+ // the instructions that reference entries in this pool, and this is
+ // done incrementally as each pool is finished.
+ size_t poolOffset = data.getOffset();
+
+ unsigned idx = 0;
+ for (BufferOffset* iter = pool_.loadOffsets.begin();
+ iter != pool_.loadOffsets.end(); ++iter, ++idx) {
+ // All entries should be before the pool.
+ MOZ_ASSERT(iter->getOffset() < guard.getOffset());
+
+ // Everything here is known so we can safely do the necessary
+ // substitutions.
+ Inst* inst = this->getInst(*iter);
+ size_t codeOffset = poolOffset - iter->getOffset();
+
+ // That is, PatchConstantPoolLoad wants to be handed the address of
+ // the pool entry that is being loaded. We need to do a non-trivial
+ // amount of math here, since the pool that we've made does not
+ // actually reside there in memory.
+ JitSpew(JitSpew_Pools, "[%d] Fixing entry %d offset to %zu", id, idx,
+ codeOffset);
+ Asm::PatchConstantPoolLoad(inst, (uint8_t*)inst + codeOffset);
+ }
+
+ // Record the pool info.
+ unsigned firstEntry = poolEntryCount - pool_.numEntries();
+ if (!poolInfo_.append(PoolInfo(firstEntry, data))) {
+ this->fail_oom();
+ return;
+ }
+
+ // Reset everything to the state that it was in when we started.
+ pool_.reset();
+ }
+
+ public:
+ void flushPool() {
+ if (this->oom()) {
+ return;
+ }
+ JitSpew(JitSpew_Pools, "[%d] Requesting a pool flush", id);
+ finishPool(SIZE_MAX);
+ }
+
+ void enterNoPool(size_t maxInst) {
+ if (this->oom()) {
+ return;
+ }
+ // Don't allow re-entry.
+ MOZ_ASSERT(!canNotPlacePool_);
+ insertNopFill();
+
+ // Check if the pool will spill by adding maxInst instructions, and if
+ // so then finish the pool before entering the no-pool region. It is
+ // assumed that no pool entries are allocated in a no-pool region and
+ // this is asserted when allocating entries.
+ if (!hasSpaceForInsts(maxInst, 0)) {
+ JitSpew(JitSpew_Pools, "[%d] No-Pool instruction(%zu) caused a spill.",
+ id, sizeExcludingCurrentPool());
+ finishPool(maxInst * InstSize);
+ MOZ_ASSERT(hasSpaceForInsts(maxInst, 0));
+ }
+
+#ifdef DEBUG
+ // Record the buffer position to allow validating maxInst when leaving
+ // the region.
+ canNotPlacePoolStartOffset_ = this->nextOffset().getOffset();
+ canNotPlacePoolMaxInst_ = maxInst;
+#endif
+
+ canNotPlacePool_ = true;
+ }
+
+ void leaveNoPool() {
+ if (this->oom()) {
+ canNotPlacePool_ = false;
+ return;
+ }
+ MOZ_ASSERT(canNotPlacePool_);
+ canNotPlacePool_ = false;
+
+ // Validate the maxInst argument supplied to enterNoPool().
+ MOZ_ASSERT(this->nextOffset().getOffset() - canNotPlacePoolStartOffset_ <=
+ canNotPlacePoolMaxInst_ * InstSize);
+ }
+
+ void enterNoNops() {
+ MOZ_ASSERT(!inhibitNops_);
+ inhibitNops_ = true;
+ }
+ void leaveNoNops() {
+ MOZ_ASSERT(inhibitNops_);
+ inhibitNops_ = false;
+ }
+ void assertNoPoolAndNoNops() {
+ MOZ_ASSERT(inhibitNops_);
+ MOZ_ASSERT_IF(!this->oom(), isPoolEmptyFor(InstSize) || canNotPlacePool_);
+ }
+
+ void align(unsigned alignment) { align(alignment, alignFillInst_); }
+
+ void align(unsigned alignment, uint32_t pattern) {
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(alignment));
+ MOZ_ASSERT(alignment >= InstSize);
+
+    // A pool may need to be dumped at this point, so insert NOP fill here.
+ insertNopFill();
+
+ // Check if the code position can be aligned without dumping a pool.
+ unsigned requiredFill = sizeExcludingCurrentPool() & (alignment - 1);
+ if (requiredFill == 0) {
+ return;
+ }
+ requiredFill = alignment - requiredFill;
+
+ // Add an InstSize because it is probably not useful for a pool to be
+ // dumped at the aligned code position.
+ if (!hasSpaceForInsts(requiredFill / InstSize + 1, 0)) {
+ // Alignment would cause a pool dump, so dump the pool now.
+ JitSpew(JitSpew_Pools, "[%d] Alignment of %d at %zu caused a spill.", id,
+ alignment, sizeExcludingCurrentPool());
+ finishPool(requiredFill);
+ }
+
+ bool prevInhibitNops = inhibitNops_;
+ inhibitNops_ = true;
+ while ((sizeExcludingCurrentPool() & (alignment - 1)) && !this->oom()) {
+ putInt(pattern);
+ }
+ inhibitNops_ = prevInhibitNops;
+ }
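+
+  // Worked example: with InstSize == 4, alignment == 16, and
+  // sizeExcludingCurrentPool() == 0x108, requiredFill is 16 - (0x108 & 15)
+  // == 8 bytes, i.e. two fill words of `pattern`. hasSpaceForInsts() is
+  // asked for 8 / 4 + 1 == 3 instructions so that a pool is not dumped at
+  // the freshly aligned position.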
+
+ public:
+ void executableCopy(uint8_t* dest) {
+ if (this->oom()) {
+ return;
+ }
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ for (Slice* cur = getHead(); cur != nullptr; cur = cur->getNext()) {
+ memcpy(dest, &cur->instructions[0], cur->length());
+ dest += cur->length();
+ }
+ }
+
+ bool appendRawCode(const uint8_t* code, size_t numBytes) {
+ if (this->oom()) {
+ return false;
+ }
+ // The pools should have all been flushed, check.
+ MOZ_ASSERT(pool_.numEntries() == 0);
+ while (numBytes > SliceSize) {
+ this->putBytes(SliceSize, code);
+ numBytes -= SliceSize;
+ code += SliceSize;
+ }
+ this->putBytes(numBytes, code);
+ return !this->oom();
+ }
+
+ public:
+ size_t poolEntryOffset(PoolEntry pe) const {
+ MOZ_ASSERT(pe.index() < poolEntryCount - pool_.numEntries(),
+ "Invalid pool entry, or not flushed yet.");
+ // Find the pool containing pe.index().
+ // The array is sorted, so we can use a binary search.
+ auto b = poolInfo_.begin(), e = poolInfo_.end();
+ // A note on asymmetric types in the upper_bound comparator:
+ // http://permalink.gmane.org/gmane.comp.compilers.clang.devel/10101
+ auto i = std::upper_bound(b, e, pe.index(),
+ [](size_t value, const PoolInfo& entry) {
+ return value < entry.firstEntryIndex;
+ });
+ // Since upper_bound finds the first pool greater than pe,
+ // we want the previous one which is the last one less than or equal.
+ MOZ_ASSERT(i != b, "PoolInfo not sorted or empty?");
+ --i;
+ // The i iterator now points to the pool containing pe.index.
+ MOZ_ASSERT(i->firstEntryIndex <= pe.index() &&
+ (i + 1 == e || (i + 1)->firstEntryIndex > pe.index()));
+ // Compute the byte offset into the pool.
+ unsigned relativeIndex = pe.index() - i->firstEntryIndex;
+ return i->offset.getOffset() + relativeIndex * sizeof(PoolAllocUnit);
+ }
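+
+  // Worked example: with flushed pools whose firstEntryIndex values are
+  // {0, 10, 25}, poolEntryOffset(PoolEntry(12)) has upper_bound() return the
+  // pool starting at entry 25; stepping back one pool selects firstEntryIndex
+  // 10, so relativeIndex == 2 and the result is that pool's data offset plus
+  // 2 * sizeof(PoolAllocUnit).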
+};
+
+} // namespace jit
+} // namespace js
+
+#endif // jit_shared_IonAssemblerBufferWithConstantPools_h
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
new file mode 100644
index 0000000000..54e6feb7c0
--- /dev/null
+++ b/js/src/jit/shared/LIR-shared.h
@@ -0,0 +1,8878 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_LIR_shared_h
+#define jit_shared_LIR_shared_h
+
+#include "jit/AtomicOp.h"
+#include "jit/shared/Assembler-shared.h"
+#include "util/Memory.h"
+
+// This file declares LIR instructions that are common to every platform.
+
+namespace js {
+namespace jit {
+
+class LBox : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(Box);
+
+ LBox(const LAllocation& payload, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setOperand(0, payload);
+ }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+template <size_t Temps, size_t ExtraUses = 0>
+class LBinaryMath : public LInstructionHelper<1, 2 + ExtraUses, Temps> {
+ protected:
+ explicit LBinaryMath(LNode::Opcode opcode)
+ : LInstructionHelper<1, 2 + ExtraUses, Temps>(opcode) {}
+
+ public:
+ const LAllocation* lhs() { return this->getOperand(0); }
+ const LAllocation* rhs() { return this->getOperand(1); }
+};
+
+template <size_t Temps, size_t ExtraUses = 0>
+class LUnaryMath : public LInstructionHelper<1, 1 + ExtraUses, Temps> {
+ protected:
+ explicit LUnaryMath(LNode::Opcode opcode)
+ : LInstructionHelper<1, 1 + ExtraUses, Temps>(opcode) {}
+
+ public:
+ const LAllocation* input() { return this->getOperand(0); }
+};
+
+// An LOsiPoint captures a snapshot after a call and ensures enough space to
+// patch in a call to the invalidation mechanism.
+//
+// Note: LSafepoints are 1:1 with LOsiPoints, so it holds a reference to the
+// corresponding LSafepoint to inform it of the LOsiPoint's masm offset when it
+// gets GC'd.
+class LOsiPoint : public LInstructionHelper<0, 0, 0> {
+ LSafepoint* safepoint_;
+
+ public:
+ LOsiPoint(LSafepoint* safepoint, LSnapshot* snapshot)
+ : LInstructionHelper(classOpcode), safepoint_(safepoint) {
+ MOZ_ASSERT(safepoint && snapshot);
+ assignSnapshot(snapshot);
+ }
+
+ LSafepoint* associatedSafepoint() { return safepoint_; }
+
+ LIR_HEADER(OsiPoint)
+};
+
+class LMove {
+ LAllocation from_;
+ LAllocation to_;
+ LDefinition::Type type_;
+
+ public:
+ LMove(LAllocation from, LAllocation to, LDefinition::Type type)
+ : from_(from), to_(to), type_(type) {}
+
+ LAllocation from() const { return from_; }
+ LAllocation to() const { return to_; }
+ LDefinition::Type type() const { return type_; }
+};
+
+class LMoveGroup : public LInstructionHelper<0, 0, 0> {
+ js::Vector<LMove, 2, JitAllocPolicy> moves_;
+
+#ifdef JS_CODEGEN_X86
+ // Optional general register available for use when executing moves.
+ LAllocation scratchRegister_;
+#endif
+
+ explicit LMoveGroup(TempAllocator& alloc)
+ : LInstructionHelper(classOpcode), moves_(alloc) {}
+
+ public:
+ LIR_HEADER(MoveGroup)
+
+ static LMoveGroup* New(TempAllocator& alloc) {
+ return new (alloc) LMoveGroup(alloc);
+ }
+
+ void printOperands(GenericPrinter& out);
+
+ // Add a move which takes place simultaneously with all others in the group.
+ bool add(LAllocation from, LAllocation to, LDefinition::Type type);
+
+ // Add a move which takes place after existing moves in the group.
+ bool addAfter(LAllocation from, LAllocation to, LDefinition::Type type);
+
+ size_t numMoves() const { return moves_.length(); }
+ const LMove& getMove(size_t i) const { return moves_[i]; }
+
+#ifdef JS_CODEGEN_X86
+ void setScratchRegister(Register reg) { scratchRegister_ = LGeneralReg(reg); }
+ LAllocation maybeScratchRegister() { return scratchRegister_; }
+#endif
+
+ bool uses(Register reg) {
+ for (size_t i = 0; i < numMoves(); i++) {
+ LMove move = getMove(i);
+ if (move.from() == LGeneralReg(reg) || move.to() == LGeneralReg(reg)) {
+ return true;
+ }
+ }
+ return false;
+ }
+};
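+
+// Usage sketch (r0 and r1 are hypothetical registers): moves added with
+// add() resolve as if all sources are read before any destination is
+// written, so the pair below expresses a swap; addAfter() would instead
+// sequence its move after the existing ones.
+//
+//   LMoveGroup* group = LMoveGroup::New(alloc);
+//   group->add(LGeneralReg(r0), LGeneralReg(r1), LDefinition::GENERAL);
+//   group->add(LGeneralReg(r1), LGeneralReg(r0), LDefinition::GENERAL);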
+
+// Constant 32-bit integer.
+class LInteger : public LInstructionHelper<1, 0, 0> {
+ int32_t i32_;
+
+ public:
+ LIR_HEADER(Integer)
+
+ explicit LInteger(int32_t i32) : LInstructionHelper(classOpcode), i32_(i32) {}
+
+ int32_t getValue() const { return i32_; }
+};
+
+// Constant 64-bit integer.
+class LInteger64 : public LInstructionHelper<INT64_PIECES, 0, 0> {
+ int64_t i64_;
+
+ public:
+ LIR_HEADER(Integer64)
+
+ explicit LInteger64(int64_t i64)
+ : LInstructionHelper(classOpcode), i64_(i64) {}
+
+ int64_t getValue() const { return i64_; }
+};
+
+// Constant pointer.
+class LPointer : public LInstructionHelper<1, 0, 0> {
+ gc::Cell* ptr_;
+
+ public:
+ LIR_HEADER(Pointer)
+
+ explicit LPointer(gc::Cell* ptr)
+ : LInstructionHelper(classOpcode), ptr_(ptr) {}
+
+ gc::Cell* gcptr() const { return ptr_; }
+};
+
+// Constant double.
+class LDouble : public LInstructionHelper<1, 0, 0> {
+ double d_;
+
+ public:
+ LIR_HEADER(Double);
+
+ explicit LDouble(double d) : LInstructionHelper(classOpcode), d_(d) {}
+
+ const double& getDouble() const { return d_; }
+};
+
+// Constant float32.
+class LFloat32 : public LInstructionHelper<1, 0, 0> {
+ float f_;
+
+ public:
+ LIR_HEADER(Float32);
+
+ explicit LFloat32(float f) : LInstructionHelper(classOpcode), f_(f) {}
+
+ const float& getFloat() const { return f_; }
+};
+
+// A constant Value.
+class LValue : public LInstructionHelper<BOX_PIECES, 0, 0> {
+ Value v_;
+
+ public:
+ LIR_HEADER(Value)
+
+ explicit LValue(const Value& v) : LInstructionHelper(classOpcode), v_(v) {}
+
+ Value value() const { return v_; }
+};
+
+class LNurseryObject : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(NurseryObject);
+
+ LNurseryObject() : LInstructionHelper(classOpcode) {}
+
+ MNurseryObject* mir() const { return mir_->toNurseryObject(); }
+};
+
+// Formal argument for a function, returning a box. Formal arguments are
+// initially read from the stack.
+class LParameter : public LInstructionHelper<BOX_PIECES, 0, 0> {
+ public:
+ LIR_HEADER(Parameter)
+
+ LParameter() : LInstructionHelper(classOpcode) {}
+};
+
+// Stack offset for a word-sized immutable input value to a frame.
+class LCallee : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(Callee)
+
+ LCallee() : LInstructionHelper(classOpcode) {}
+};
+
+class LIsConstructing : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(IsConstructing)
+
+ LIsConstructing() : LInstructionHelper(classOpcode) {}
+};
+
+// Base class for control instructions (goto, branch, etc.)
+template <size_t Succs, size_t Operands, size_t Temps>
+class LControlInstructionHelper
+ : public LInstructionHelper<0, Operands, Temps> {
+ mozilla::Array<MBasicBlock*, Succs> successors_;
+
+ protected:
+ explicit LControlInstructionHelper(LNode::Opcode opcode)
+ : LInstructionHelper<0, Operands, Temps>(opcode) {}
+
+ public:
+ size_t numSuccessors() const { return Succs; }
+ MBasicBlock* getSuccessor(size_t i) const { return successors_[i]; }
+
+ void setSuccessor(size_t i, MBasicBlock* successor) {
+ successors_[i] = successor;
+ }
+};
+
+// Jumps to the start of a basic block.
+class LGoto : public LControlInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(Goto)
+
+ explicit LGoto(MBasicBlock* block) : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, block);
+ }
+
+ MBasicBlock* target() const { return getSuccessor(0); }
+};
+
+class LNewArray : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(NewArray)
+
+ explicit LNewArray(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewArray* mir() const { return mir_->toNewArray(); }
+};
+
+class LNewArrayDynamicLength : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(NewArrayDynamicLength)
+
+ explicit LNewArrayDynamicLength(const LAllocation& length,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* length() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewArrayDynamicLength* mir() const {
+ return mir_->toNewArrayDynamicLength();
+ }
+};
+
+class LNewIterator : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(NewIterator)
+
+ explicit LNewIterator(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewIterator* mir() const { return mir_->toNewIterator(); }
+};
+
+class LNewTypedArray : public LInstructionHelper<1, 0, 2> {
+ public:
+ LIR_HEADER(NewTypedArray)
+
+ LNewTypedArray(const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MNewTypedArray* mir() const { return mir_->toNewTypedArray(); }
+};
+
+class LNewTypedArrayDynamicLength : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(NewTypedArrayDynamicLength)
+
+ LNewTypedArrayDynamicLength(const LAllocation& length,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, length);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* length() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewTypedArrayDynamicLength* mir() const {
+ return mir_->toNewTypedArrayDynamicLength();
+ }
+};
+
+class LNewTypedArrayFromArray : public LCallInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NewTypedArrayFromArray)
+
+ explicit LNewTypedArrayFromArray(const LAllocation& array)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, array);
+ }
+
+ const LAllocation* array() { return getOperand(0); }
+
+ MNewTypedArrayFromArray* mir() const {
+ return mir_->toNewTypedArrayFromArray();
+ }
+};
+
+class LNewTypedArrayFromArrayBuffer
+ : public LCallInstructionHelper<1, 1 + 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(NewTypedArrayFromArrayBuffer)
+
+ LNewTypedArrayFromArrayBuffer(const LAllocation& arrayBuffer,
+ const LBoxAllocation& byteOffset,
+ const LBoxAllocation& length)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, arrayBuffer);
+ setBoxOperand(ByteOffsetIndex, byteOffset);
+ setBoxOperand(LengthIndex, length);
+ }
+
+ static const size_t ByteOffsetIndex = 1;
+ static const size_t LengthIndex = 1 + BOX_PIECES;
+
+ const LAllocation* arrayBuffer() { return getOperand(0); }
+
+ MNewTypedArrayFromArrayBuffer* mir() const {
+ return mir_->toNewTypedArrayFromArrayBuffer();
+ }
+};
+
+class LNewObject : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(NewObject)
+
+ explicit LNewObject(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ const char* extraName() const {
+ return mir()->isVMCall() ? "VMCall" : nullptr;
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewObject* mir() const { return mir_->toNewObject(); }
+};
+
+// Allocates a new NamedLambdaObject.
+//
+// This instruction generates two possible instruction sets:
+// (1) An inline allocation of the call object is attempted.
+//   (2) Otherwise, a callVM creates a new object.
+//
+class LNewNamedLambdaObject : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(NewNamedLambdaObject);
+
+ explicit LNewNamedLambdaObject(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewNamedLambdaObject* mir() const { return mir_->toNewNamedLambdaObject(); }
+};
+
+// Allocates a new CallObject.
+//
+// This instruction generates two possible instruction sets:
+// (1) If the call object is extensible, this is a callVM to create the
+// call object.
+// (2) Otherwise, an inline allocation of the call object is attempted.
+//
+class LNewCallObject : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(NewCallObject)
+
+ explicit LNewCallObject(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MNewCallObject* mir() const { return mir_->toNewCallObject(); }
+};
+
+class LNewStringObject : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(NewStringObject)
+
+ LNewStringObject(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ MNewStringObject* mir() const { return mir_->toNewStringObject(); }
+};
+
+class LInitElemGetterSetter
+ : public LCallInstructionHelper<0, 2 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(InitElemGetterSetter)
+
+ LInitElemGetterSetter(const LAllocation& object, const LBoxAllocation& id,
+ const LAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, value);
+ setBoxOperand(IdIndex, id);
+ }
+
+ static const size_t IdIndex = 2;
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ MInitElemGetterSetter* mir() const { return mir_->toInitElemGetterSetter(); }
+};
+
+// Takes in an Object and a Value.
+class LMutateProto : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(MutateProto)
+
+ LMutateProto(const LAllocation& object, const LBoxAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t ValueIndex = 1;
+
+ const LAllocation* getObject() { return getOperand(0); }
+ const LAllocation* getValue() { return getOperand(1); }
+};
+
+class LInitPropGetterSetter : public LCallInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(InitPropGetterSetter)
+
+ LInitPropGetterSetter(const LAllocation& object, const LAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, value);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+
+ MInitPropGetterSetter* mir() const { return mir_->toInitPropGetterSetter(); }
+};
+
+class LCheckOverRecursed : public LInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(CheckOverRecursed)
+
+ LCheckOverRecursed() : LInstructionHelper(classOpcode) {}
+
+ MCheckOverRecursed* mir() const { return mir_->toCheckOverRecursed(); }
+};
+
+class LWasmTrap : public LInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(WasmTrap);
+
+ LWasmTrap() : LInstructionHelper(classOpcode) {}
+
+ const MWasmTrap* mir() const { return mir_->toWasmTrap(); }
+};
+
+template <size_t Defs, size_t Ops>
+class LWasmReinterpretBase : public LInstructionHelper<Defs, Ops, 0> {
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+
+ protected:
+ explicit LWasmReinterpretBase(LNode::Opcode opcode) : Base(opcode) {}
+
+ public:
+ const LAllocation* input() { return Base::getOperand(0); }
+ MWasmReinterpret* mir() const { return Base::mir_->toWasmReinterpret(); }
+};
+
+class LWasmReinterpret : public LWasmReinterpretBase<1, 1> {
+ public:
+ LIR_HEADER(WasmReinterpret);
+ explicit LWasmReinterpret(const LAllocation& input)
+ : LWasmReinterpretBase(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LWasmReinterpretFromI64 : public LWasmReinterpretBase<1, INT64_PIECES> {
+ public:
+ LIR_HEADER(WasmReinterpretFromI64);
+ explicit LWasmReinterpretFromI64(const LInt64Allocation& input)
+ : LWasmReinterpretBase(classOpcode) {
+ setInt64Operand(0, input);
+ }
+};
+
+class LWasmReinterpretToI64 : public LWasmReinterpretBase<INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(WasmReinterpretToI64);
+ explicit LWasmReinterpretToI64(const LAllocation& input)
+ : LWasmReinterpretBase(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+namespace details {
+template <size_t Defs, size_t Ops, size_t Temps>
+class RotateBase : public LInstructionHelper<Defs, Ops, Temps> {
+ typedef LInstructionHelper<Defs, Ops, Temps> Base;
+
+ protected:
+ explicit RotateBase(LNode::Opcode opcode) : Base(opcode) {}
+
+ public:
+ MRotate* mir() { return Base::mir_->toRotate(); }
+};
+} // namespace details
+
+class LRotate : public details::RotateBase<1, 2, 0> {
+ public:
+ LIR_HEADER(Rotate);
+
+ LRotate() : RotateBase(classOpcode) {}
+
+ const LAllocation* input() { return getOperand(0); }
+ LAllocation* count() { return getOperand(1); }
+};
+
+class LRotateI64
+ : public details::RotateBase<INT64_PIECES, INT64_PIECES + 1, 1> {
+ public:
+ LIR_HEADER(RotateI64);
+
+ LRotateI64() : RotateBase(classOpcode) {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ static const size_t Input = 0;
+ static const size_t Count = INT64_PIECES;
+
+ const LInt64Allocation input() { return getInt64Operand(Input); }
+ const LDefinition* temp() { return getTemp(0); }
+ LAllocation* count() { return getOperand(Count); }
+};
+
+class LInterruptCheck : public LInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(InterruptCheck)
+
+ LInterruptCheck() : LInstructionHelper(classOpcode) {}
+ MInterruptCheck* mir() const { return mir_->toInterruptCheck(); }
+};
+
+class LWasmInterruptCheck : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(WasmInterruptCheck)
+
+ explicit LWasmInterruptCheck(const LAllocation& tlsData)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, tlsData);
+ }
+ MWasmInterruptCheck* mir() const { return mir_->toWasmInterruptCheck(); }
+ const LAllocation* tlsPtr() { return getOperand(0); }
+};
+
+class LTypeOfV : public LInstructionHelper<1, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(TypeOfV)
+
+ LTypeOfV(const LBoxAllocation& input, const LDefinition& tempToUnbox)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, tempToUnbox);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempToUnbox() { return getTemp(0); }
+
+ MTypeOf* mir() const { return mir_->toTypeOf(); }
+};
+
+class LTypeOfO : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(TypeOfO)
+
+ explicit LTypeOfO(const LAllocation& obj) : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ MTypeOf* mir() const { return mir_->toTypeOf(); }
+};
+
+class LToAsyncIter : public LCallInstructionHelper<1, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ToAsyncIter)
+
+ explicit LToAsyncIter(const LAllocation& iterator,
+ const LBoxAllocation& nextMethod)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, iterator);
+ setBoxOperand(NextMethodIndex, nextMethod);
+ }
+
+ static const size_t NextMethodIndex = 1;
+
+ const LAllocation* iterator() { return getOperand(0); }
+};
+
+class LToPropertyKeyCache
+ : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ToPropertyKeyCache)
+
+ explicit LToPropertyKeyCache(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ const MToPropertyKeyCache* mir() const {
+ return mir_->toToPropertyKeyCache();
+ }
+
+ const LAllocation* input() { return getOperand(Input); }
+
+ static const size_t Input = 0;
+};
+
+// Allocate an object for |new| on the caller-side,
+// when no templateObject or prototype is known.
+class LCreateThis : public LCallInstructionHelper<BOX_PIECES, 2, 0> {
+ public:
+ LIR_HEADER(CreateThis)
+
+ LCreateThis(const LAllocation& callee, const LAllocation& newTarget)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, callee);
+ setOperand(1, newTarget);
+ }
+
+ const LAllocation* getCallee() { return getOperand(0); }
+ const LAllocation* getNewTarget() { return getOperand(1); }
+
+ MCreateThis* mir() const { return mir_->toCreateThis(); }
+};
+
+// Allocate an object for |new| on the caller-side.
+// Always performs object initialization with a fast path.
+class LCreateThisWithTemplate : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(CreateThisWithTemplate)
+
+ explicit LCreateThisWithTemplate(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ MCreateThisWithTemplate* mir() const {
+ return mir_->toCreateThisWithTemplate();
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Allocate a new arguments object for the frame.
+class LCreateArgumentsObject : public LCallInstructionHelper<1, 1, 3> {
+ public:
+ LIR_HEADER(CreateArgumentsObject)
+
+ LCreateArgumentsObject(const LAllocation& callObj, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, callObj);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LDefinition* temp0() { return getTemp(0); }
+ const LDefinition* temp1() { return getTemp(1); }
+ const LDefinition* temp2() { return getTemp(2); }
+
+ const LAllocation* getCallObject() { return getOperand(0); }
+
+ MCreateArgumentsObject* mir() const {
+ return mir_->toCreateArgumentsObject();
+ }
+};
+
+// Get argument from arguments object.
+class LGetArgumentsObjectArg : public LInstructionHelper<BOX_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(GetArgumentsObjectArg)
+
+ LGetArgumentsObjectArg(const LAllocation& argsObj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, argsObj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() { return getOperand(0); }
+
+ MGetArgumentsObjectArg* mir() const {
+ return mir_->toGetArgumentsObjectArg();
+ }
+};
+
+// Set argument on arguments object.
+class LSetArgumentsObjectArg : public LInstructionHelper<0, 1 + BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(SetArgumentsObjectArg)
+
+ LSetArgumentsObjectArg(const LAllocation& argsObj,
+ const LBoxAllocation& value, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, argsObj);
+ setBoxOperand(ValueIndex, value);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() { return getOperand(0); }
+
+ MSetArgumentsObjectArg* mir() const {
+ return mir_->toSetArgumentsObjectArg();
+ }
+
+ static const size_t ValueIndex = 1;
+};
+
+// Load an element from an arguments object.
+class LLoadArgumentsObjectArg : public LInstructionHelper<BOX_PIECES, 2, 1> {
+ public:
+ LIR_HEADER(LoadArgumentsObjectArg)
+
+ LLoadArgumentsObjectArg(const LAllocation& argsObj, const LAllocation& index,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, argsObj);
+ setOperand(1, index);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return this->getTemp(0); }
+};
+
+// Return |arguments.length| unless it has been overridden.
+class LArgumentsObjectLength : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ArgumentsObjectLength)
+
+ explicit LArgumentsObjectLength(const LAllocation& argsObj)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, argsObj);
+ }
+
+ const LAllocation* getArgsObject() { return getOperand(0); }
+};
+
+// Guard that the arguments object has no overridden iterator.
+class LGuardArgumentsObjectNotOverriddenIterator
+ : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardArgumentsObjectNotOverriddenIterator)
+
+ explicit LGuardArgumentsObjectNotOverriddenIterator(
+ const LAllocation& argsObj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, argsObj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* getArgsObject() { return getOperand(0); }
+ const LDefinition* temp() { return this->getTemp(0); }
+};
+
+// If the Value is an Object, return unbox(Value).
+// Otherwise, return the other Object.
+class LReturnFromCtor : public LInstructionHelper<1, BOX_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(ReturnFromCtor)
+
+ LReturnFromCtor(const LBoxAllocation& value, const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(ValueIndex, value);
+ setOperand(ObjectIndex, object);
+ }
+
+ const LAllocation* getObject() { return getOperand(ObjectIndex); }
+
+ static const size_t ValueIndex = 0;
+ static const size_t ObjectIndex = BOX_PIECES;
+};
+
+class LBoxNonStrictThis : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(BoxNonStrictThis)
+
+ static const size_t ValueIndex = 0;
+
+ explicit LBoxNonStrictThis(const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(ValueIndex, value);
+ }
+
+ MBoxNonStrictThis* mir() const { return mir_->toBoxNonStrictThis(); }
+};
+
+class LImplicitThis : public LCallInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(ImplicitThis)
+
+ explicit LImplicitThis(const LAllocation& env)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, env);
+ }
+
+ const LAllocation* env() { return getOperand(0); }
+
+ MImplicitThis* mir() const { return mir_->toImplicitThis(); }
+};
+
+// Writes a typed argument for a function call to the frame's argument vector.
+class LStackArgT : public LInstructionHelper<0, 1, 0> {
+ uint32_t argslot_; // Index into frame-scope argument vector.
+ MIRType type_;
+
+ public:
+ LIR_HEADER(StackArgT)
+
+ LStackArgT(uint32_t argslot, MIRType type, const LAllocation& arg)
+ : LInstructionHelper(classOpcode), argslot_(argslot), type_(type) {
+ setOperand(0, arg);
+ }
+ uint32_t argslot() const { return argslot_; }
+ MIRType type() const { return type_; }
+ const LAllocation* getArgument() { return getOperand(0); }
+};
+
+// Writes an untyped argument for a function call to the frame's argument
+// vector.
+class LStackArgV : public LInstructionHelper<0, BOX_PIECES, 0> {
+ uint32_t argslot_; // Index into frame-scope argument vector.
+
+ public:
+ LIR_HEADER(StackArgV)
+
+ LStackArgV(uint32_t argslot, const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode), argslot_(argslot) {
+ setBoxOperand(0, value);
+ }
+
+ uint32_t argslot() const { return argslot_; }
+};
+
+// Common code for LIR descended from MCall.
+template <size_t Defs, size_t Operands, size_t Temps>
+class LJSCallInstructionHelper
+ : public LCallInstructionHelper<Defs, Operands, Temps> {
+ protected:
+ explicit LJSCallInstructionHelper(LNode::Opcode opcode)
+ : LCallInstructionHelper<Defs, Operands, Temps>(opcode) {}
+
+ public:
+ uint32_t argslot() const {
+ if (JitStackValueAlignment > 1) {
+ return AlignBytes(mir()->numStackArgs(), JitStackValueAlignment);
+ }
+ return mir()->numStackArgs();
+ }
+ MCall* mir() const { return this->mir_->toCall(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ // Does not include |this|.
+ uint32_t numActualArgs() const { return mir()->numActualArgs(); }
+
+ bool isConstructing() const { return mir()->isConstructing(); }
+ bool ignoresReturnValue() const { return mir()->ignoresReturnValue(); }
+};
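+
+// Worked example for argslot(): with JitStackValueAlignment == 2 and
+// mir()->numStackArgs() == 5, AlignBytes(5, 2) == 6, so six argument slots
+// are reserved to keep the stack arguments Value-aligned.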
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LCallGeneric : public LJSCallInstructionHelper<BOX_PIECES, 1, 2> {
+ public:
+ LIR_HEADER(CallGeneric)
+
+ LCallGeneric(const LAllocation& func, const LDefinition& nargsreg,
+ const LDefinition& tmpobjreg)
+ : LJSCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setTemp(0, nargsreg);
+ setTemp(1, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LDefinition* getNargsReg() { return getTemp(0); }
+ const LDefinition* getTempObject() { return getTemp(1); }
+};
+
+// Generates a hardcoded callsite for a known, non-native target.
+class LCallKnown : public LJSCallInstructionHelper<BOX_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(CallKnown)
+
+ LCallKnown(const LAllocation& func, const LDefinition& tmpobjreg)
+ : LJSCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setTemp(0, tmpobjreg);
+ }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LDefinition* getTempObject() { return getTemp(0); }
+};
+
+// Generates a hardcoded callsite for a known, native target.
+class LCallNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4> {
+ public:
+ LIR_HEADER(CallNative)
+
+ LCallNative(const LDefinition& argContext, const LDefinition& argUintN,
+ const LDefinition& argVp, const LDefinition& tmpreg)
+ : LJSCallInstructionHelper(classOpcode) {
+ // Registers used for callWithABI().
+ setTemp(0, argContext);
+ setTemp(1, argUintN);
+ setTemp(2, argVp);
+
+ // Temporary registers.
+ setTemp(3, tmpreg);
+ }
+
+ const LDefinition* getArgContextReg() { return getTemp(0); }
+ const LDefinition* getArgUintNReg() { return getTemp(1); }
+ const LDefinition* getArgVpReg() { return getTemp(2); }
+ const LDefinition* getTempReg() { return getTemp(3); }
+};
+
+// Generates a hardcoded callsite for a known, DOM-native target.
+class LCallDOMNative : public LJSCallInstructionHelper<BOX_PIECES, 0, 4> {
+ public:
+ LIR_HEADER(CallDOMNative)
+
+ LCallDOMNative(const LDefinition& argJSContext, const LDefinition& argObj,
+ const LDefinition& argPrivate, const LDefinition& argArgs)
+ : LJSCallInstructionHelper(classOpcode) {
+ setTemp(0, argJSContext);
+ setTemp(1, argObj);
+ setTemp(2, argPrivate);
+ setTemp(3, argArgs);
+ }
+
+ const LDefinition* getArgJSContext() { return getTemp(0); }
+ const LDefinition* getArgObj() { return getTemp(1); }
+ const LDefinition* getArgPrivate() { return getTemp(2); }
+ const LDefinition* getArgArgs() { return getTemp(3); }
+};
+
+class LBail : public LInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(Bail)
+
+ LBail() : LInstructionHelper(classOpcode) {}
+};
+
+class LUnreachable : public LControlInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(Unreachable)
+
+ LUnreachable() : LControlInstructionHelper(classOpcode) {}
+};
+
+class LEncodeSnapshot : public LInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(EncodeSnapshot)
+
+ LEncodeSnapshot() : LInstructionHelper(classOpcode) {}
+};
+
+class LUnreachableResultV : public LInstructionHelper<BOX_PIECES, 0, 0> {
+ public:
+ LIR_HEADER(UnreachableResultV)
+
+ LUnreachableResultV() : LInstructionHelper(classOpcode) {}
+};
+
+class LUnreachableResultT : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(UnreachableResultT)
+
+ LUnreachableResultT() : LInstructionHelper(classOpcode) {}
+};
+
+template <size_t defs, size_t ops>
+class LDOMPropertyInstructionHelper
+ : public LCallInstructionHelper<defs, 1 + ops, 3> {
+ protected:
+ LDOMPropertyInstructionHelper(LNode::Opcode opcode,
+ const LDefinition& JSContextReg,
+ const LAllocation& ObjectReg,
+ const LDefinition& PrivReg,
+ const LDefinition& ValueReg)
+ : LCallInstructionHelper<defs, 1 + ops, 3>(opcode) {
+ this->setOperand(0, ObjectReg);
+ this->setTemp(0, JSContextReg);
+ this->setTemp(1, PrivReg);
+ this->setTemp(2, ValueReg);
+ }
+
+ public:
+ const LDefinition* getJSContextReg() { return this->getTemp(0); }
+ const LAllocation* getObjectReg() { return this->getOperand(0); }
+ const LDefinition* getPrivReg() { return this->getTemp(1); }
+ const LDefinition* getValueReg() { return this->getTemp(2); }
+};
+
+class LGetDOMProperty : public LDOMPropertyInstructionHelper<BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GetDOMProperty)
+
+ LGetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LDefinition& PrivReg, const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<BOX_PIECES, 0>(
+ classOpcode, JSContextReg, ObjectReg, PrivReg, ValueReg) {}
+
+ MGetDOMProperty* mir() const { return mir_->toGetDOMProperty(); }
+};
+
+class LGetDOMMemberV : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(GetDOMMemberV);
+ explicit LGetDOMMemberV(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ MGetDOMMember* mir() const { return mir_->toGetDOMMember(); }
+};
+
+class LGetDOMMemberT : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(GetDOMMemberT);
+ explicit LGetDOMMemberT(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ MGetDOMMember* mir() const { return mir_->toGetDOMMember(); }
+};
+
+class LSetDOMProperty : public LDOMPropertyInstructionHelper<0, BOX_PIECES> {
+ public:
+ LIR_HEADER(SetDOMProperty)
+
+ LSetDOMProperty(const LDefinition& JSContextReg, const LAllocation& ObjectReg,
+ const LBoxAllocation& value, const LDefinition& PrivReg,
+ const LDefinition& ValueReg)
+ : LDOMPropertyInstructionHelper<0, BOX_PIECES>(
+ classOpcode, JSContextReg, ObjectReg, PrivReg, ValueReg) {
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ MSetDOMProperty* mir() const { return mir_->toSetDOMProperty(); }
+};
+
+class LLoadDOMExpandoValue : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(LoadDOMExpandoValue)
+
+ explicit LLoadDOMExpandoValue(const LAllocation& proxy)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ }
+
+ const LAllocation* proxy() { return getOperand(0); }
+
+ const MLoadDOMExpandoValue* mir() const {
+ return mir_->toLoadDOMExpandoValue();
+ }
+};
+
+class LLoadDOMExpandoValueGuardGeneration
+ : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(LoadDOMExpandoValueGuardGeneration)
+
+ explicit LLoadDOMExpandoValueGuardGeneration(const LAllocation& proxy)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ }
+
+ const LAllocation* proxy() { return getOperand(0); }
+
+ const MLoadDOMExpandoValueGuardGeneration* mir() const {
+ return mir_->toLoadDOMExpandoValueGuardGeneration();
+ }
+};
+
+class LLoadDOMExpandoValueIgnoreGeneration
+ : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(LoadDOMExpandoValueIgnoreGeneration)
+
+ explicit LLoadDOMExpandoValueIgnoreGeneration(const LAllocation& proxy)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ }
+
+ const LAllocation* proxy() { return getOperand(0); }
+};
+
+class LGuardDOMExpandoMissingOrGuardShape
+ : public LInstructionHelper<0, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(GuardDOMExpandoMissingOrGuardShape)
+
+ explicit LGuardDOMExpandoMissingOrGuardShape(const LBoxAllocation& input,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ static const size_t Input = 0;
+
+ MGuardDOMExpandoMissingOrGuardShape* mir() {
+ return mir_->toGuardDOMExpandoMissingOrGuardShape();
+ }
+};
+
+// Generates a polymorphic callsite, wherein the function being called is
+// unknown and anticipated to vary.
+class LApplyArgsGeneric
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArgsGeneric)
+
+ LApplyArgsGeneric(const LAllocation& func, const LAllocation& argc,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, argc);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArgs* mir() const { return mir_->toApplyArgs(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getArgc() { return getOperand(1); }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempStackCounter() { return getTemp(1); }
+};
+
+class LApplyArrayGeneric
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
+ public:
+ LIR_HEADER(ApplyArrayGeneric)
+
+ LApplyArrayGeneric(const LAllocation& func, const LAllocation& elements,
+ const LBoxAllocation& thisv, const LDefinition& tmpobjreg,
+ const LDefinition& tmpcopy)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ setTemp(1, tmpcopy);
+ }
+
+ MApplyArray* mir() const { return mir_->toApplyArray(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+ // argc is mapped to the same register as elements: argc becomes
+ // live as elements is dying, all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+ static const size_t ThisIndex = 2;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+ const LDefinition* getTempStackCounter() { return getTemp(1); }
+};
+
+class LConstructArrayGeneric
+ : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 3, 1> {
+ public:
+ LIR_HEADER(ConstructArrayGeneric)
+
+ LConstructArrayGeneric(const LAllocation& func, const LAllocation& elements,
+ const LAllocation& newTarget,
+ const LBoxAllocation& thisv,
+ const LDefinition& tmpobjreg)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, func);
+ setOperand(1, elements);
+ setOperand(2, newTarget);
+ setBoxOperand(ThisIndex, thisv);
+ setTemp(0, tmpobjreg);
+ }
+
+ MConstructArray* mir() const { return mir_->toConstructArray(); }
+
+ bool hasSingleTarget() const { return getSingleTarget() != nullptr; }
+ WrappedFunction* getSingleTarget() const { return mir()->getSingleTarget(); }
+
+ const LAllocation* getFunction() { return getOperand(0); }
+ const LAllocation* getElements() { return getOperand(1); }
+ const LAllocation* getNewTarget() { return getOperand(2); }
+
+ static const size_t ThisIndex = 3;
+
+ const LDefinition* getTempObject() { return getTemp(0); }
+
+ // argc is mapped to the same register as elements: argc becomes
+ // live as elements is dying, all registers are calltemps.
+ const LAllocation* getArgc() { return getOperand(1); }
+
+ // tempStackCounter is mapped to the same register as newTarget:
+ // tempStackCounter becomes live as newTarget is dying, all registers are
+ // calltemps.
+ const LAllocation* getTempStackCounter() { return getOperand(2); }
+};
+
+// Takes in either an integer or boolean input and tests it for truthiness.
+class LTestIAndBranch : public LControlInstructionHelper<2, 1, 0> {
+ public:
+ LIR_HEADER(TestIAndBranch)
+
+ LTestIAndBranch(const LAllocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes in an int64 input and tests it for truthiness.
+class LTestI64AndBranch : public LControlInstructionHelper<2, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(TestI64AndBranch)
+
+ LTestI64AndBranch(const LInt64Allocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes in a double input and tests it for truthiness.
+class LTestDAndBranch : public LControlInstructionHelper<2, 1, 0> {
+ public:
+ LIR_HEADER(TestDAndBranch)
+
+ LTestDAndBranch(const LAllocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes in a float32 input and tests it for truthiness.
+class LTestFAndBranch : public LControlInstructionHelper<2, 1, 0> {
+ public:
+ LIR_HEADER(TestFAndBranch)
+
+ LTestFAndBranch(const LAllocation& in, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+// Takes an object and tests it for truthiness. An object is falsy iff it
+// emulates |undefined|; see js::EmulatesUndefined.
+class LTestOAndBranch : public LControlInstructionHelper<2, 1, 1> {
+ public:
+ LIR_HEADER(TestOAndBranch)
+
+ LTestOAndBranch(const LAllocation& input, MBasicBlock* ifTruthy,
+ MBasicBlock* ifFalsy, const LDefinition& temp)
+ : LControlInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MBasicBlock* ifTruthy() { return getSuccessor(0); }
+ MBasicBlock* ifFalsy() { return getSuccessor(1); }
+
+ MTest* mir() { return mir_->toTest(); }
+};
+
+// Takes in a boxed value and tests it for truthiness.
+class LTestVAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TestVAndBranch)
+
+ LTestVAndBranch(MBasicBlock* ifTruthy, MBasicBlock* ifFalsy,
+ const LBoxAllocation& input, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, ifTruthy);
+ setSuccessor(1, ifFalsy);
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+
+ const LDefinition* temp1() { return getTemp(1); }
+
+ const LDefinition* temp2() { return getTemp(2); }
+
+ MBasicBlock* ifTruthy() { return getSuccessor(0); }
+ MBasicBlock* ifFalsy() { return getSuccessor(1); }
+
+ MTest* mir() const { return mir_->toTest(); }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompare : public LInstructionHelper<1, 2, 0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(Compare)
+ LCompare(JSOp jsop, const LAllocation& left, const LAllocation& right)
+ : LInstructionHelper(classOpcode), jsop_(jsop) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MCompare* mir() { return mir_->toCompare(); }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LCompareI64 : public LInstructionHelper<1, 2 * INT64_PIECES, 0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64(JSOp jsop, const LInt64Allocation& left,
+ const LInt64Allocation& right)
+ : LInstructionHelper(classOpcode), jsop_(jsop) {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ MCompare* mir() { return mir_->toCompare(); }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LCompareI64AndBranch
+ : public LControlInstructionHelper<2, 2 * INT64_PIECES, 0> {
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareI64AndBranch)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LCompareI64AndBranch(MCompare* cmpMir, JSOp jsop,
+ const LInt64Allocation& left,
+ const LInt64Allocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir), jsop_(jsop) {
+ setInt64Operand(Lhs, left);
+ setInt64Operand(Rhs, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+// Compares two integral values of the same JS type, either integer or object.
+// For objects, both operands are in registers.
+class LCompareAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ MCompare* cmpMir_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(CompareAndBranch)
+ LCompareAndBranch(MCompare* cmpMir, JSOp jsop, const LAllocation& left,
+ const LAllocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir), jsop_(jsop) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ JSOp jsop() const { return jsop_; }
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LCompareD : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(CompareD)
+ LCompareD(const LAllocation& left, const LAllocation& right)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MCompare* mir() { return mir_->toCompare(); }
+};
+
+class LCompareF : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(CompareF)
+
+ LCompareF(const LAllocation& left, const LAllocation& right)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MCompare* mir() { return mir_->toCompare(); }
+};
+
+class LCompareDAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareDAndBranch)
+
+ LCompareDAndBranch(MCompare* cmpMir, const LAllocation& left,
+ const LAllocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+};
+
+class LCompareFAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(CompareFAndBranch)
+ LCompareFAndBranch(MCompare* cmpMir, const LAllocation& left,
+ const LAllocation& right, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+};
+
+class LCompareS : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(CompareS)
+
+ LCompareS(const LAllocation& left, const LAllocation& right)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MCompare* mir() { return mir_->toCompare(); }
+};
+
+class LCompareBigInt : public LInstructionHelper<1, 2, 3> {
+ public:
+ LIR_HEADER(CompareBigInt)
+
+ LCompareBigInt(const LAllocation& left, const LAllocation& right,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+ MCompare* mir() { return mir_->toCompare(); }
+};
+
+class LCompareBigIntInt32 : public LInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(CompareBigIntInt32)
+
+ LCompareBigIntInt32(const LAllocation& left, const LAllocation& right,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ MCompare* mir() { return mir_->toCompare(); }
+};
+
+class LCompareBigIntDouble : public LCallInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(CompareBigIntDouble)
+
+ LCompareBigIntDouble(const LAllocation& left, const LAllocation& right,
+ const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ MCompare* mir() { return mir_->toCompare(); }
+};
+
+class LCompareBigIntString : public LCallInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(CompareBigIntString)
+
+ LCompareBigIntString(const LAllocation& left, const LAllocation& right)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ MCompare* mir() { return mir_->toCompare(); }
+};
+
+class LBitAndAndBranch : public LControlInstructionHelper<2, 2, 0> {
+ Assembler::Condition cond_;
+
+ public:
+ LIR_HEADER(BitAndAndBranch)
+ LBitAndAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ Assembler::Condition cond = Assembler::NonZero)
+ : LControlInstructionHelper(classOpcode), cond_(cond) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ Assembler::Condition cond() const {
+ MOZ_ASSERT(cond_ == Assembler::Zero || cond_ == Assembler::NonZero);
+ return cond_;
+ }
+};
+
+// Takes a value and tests whether it is null, undefined, or an object that
+// emulates |undefined|, as determined by the JSCLASS_EMULATES_UNDEFINED class
+// flag on unwrapped objects. See also js::EmulatesUndefined.
+class LIsNullOrLikeUndefinedV : public LInstructionHelper<1, BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedV)
+
+ LIsNullOrLikeUndefinedV(const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempToUnbox)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnbox);
+ }
+
+ static const size_t Value = 0;
+
+ MCompare* mir() { return mir_->toCompare(); }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ const LDefinition* tempToUnbox() { return getTemp(1); }
+};
+
+// Takes an object pointer and tests whether it is an object that emulates
+// |undefined|, as above.
+class LIsNullOrLikeUndefinedT : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedT)
+
+ explicit LIsNullOrLikeUndefinedT(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ MCompare* mir() { return mir_->toCompare(); }
+};
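+
+// For context (added note): the canonical emulates-undefined object is
+// document.all, which the JSCLASS_EMULATES_UNDEFINED class flag makes compare
+// loosely equal (==) to both null and undefined; these instructions back that
+// comparison once the operand types are known.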
+
+class LIsNullOrLikeUndefinedAndBranchV
+ : public LControlInstructionHelper<2, BOX_PIECES, 2> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchV)
+
+ LIsNullOrLikeUndefinedAndBranchV(MCompare* cmpMir, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse,
+ const LBoxAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& tempToUnbox)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempToUnbox);
+ }
+
+ static const size_t Value = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* tempToUnbox() { return getTemp(1); }
+};
+
+class LIsNullOrLikeUndefinedAndBranchT
+ : public LControlInstructionHelper<2, 1, 1> {
+ MCompare* cmpMir_;
+
+ public:
+ LIR_HEADER(IsNullOrLikeUndefinedAndBranchT)
+
+ LIsNullOrLikeUndefinedAndBranchT(MCompare* cmpMir, const LAllocation& input,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LDefinition& temp)
+ : LControlInstructionHelper(classOpcode), cmpMir_(cmpMir) {
+ setOperand(0, input);
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setTemp(0, temp);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+ MTest* mir() const { return mir_->toTest(); }
+ MCompare* cmpMir() const { return cmpMir_; }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LSameValueD : public LInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(SameValueD)
+ LSameValueD(const LAllocation& left, const LAllocation& right,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, left);
+ setOperand(1, right);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* left() { return getOperand(0); }
+ const LAllocation* right() { return getOperand(1); }
+ const LDefinition* tempFloat() { return getTemp(0); }
+};
+
+// Not operation on an integer.
+class LNotI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NotI)
+
+ explicit LNotI(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Not operation on an int64.
+class LNotI64 : public LInstructionHelper<1, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(NotI64)
+
+ explicit LNotI64(const LInt64Allocation& input)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, input);
+ }
+};
+
+// Not operation on a double.
+class LNotD : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NotD)
+
+ explicit LNotD(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ MNot* mir() { return mir_->toNot(); }
+};
+
+// Not operation on a float32.
+class LNotF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NotF)
+
+ explicit LNotF(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ MNot* mir() { return mir_->toNot(); }
+};
+
+// Boolean complement operation on an object.
+class LNotO : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NotO)
+
+ explicit LNotO(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ MNot* mir() { return mir_->toNot(); }
+};
+
+// Boolean complement operation on a value.
+class LNotV : public LInstructionHelper<1, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(NotV)
+
+ static const size_t Input = 0;
+ LNotV(const LBoxAllocation& input, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+
+ const LDefinition* temp1() { return getTemp(1); }
+
+ const LDefinition* temp2() { return getTemp(2); }
+
+ MNot* mir() { return mir_->toNot(); }
+};
+
+// Bitwise not operation, taking a 32-bit integer as input and returning
+// a 32-bit integer result as an output.
+class LBitNotI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(BitNotI)
+
+ LBitNotI() : LInstructionHelper(classOpcode) {}
+};
+
+// Binary bitwise operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LBitOpI : public LInstructionHelper<1, 2, 0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI)
+
+ explicit LBitOpI(JSOp op) : LInstructionHelper(classOpcode), op_(op) {}
+
+ const char* extraName() const {
+ if (bitop() == JSOp::Ursh && mir_->toUrsh()->bailoutsDisabled()) {
+ return "ursh:BailoutsDisabled";
+ }
+ return CodeName(op_);
+ }
+
+ JSOp bitop() const { return op_; }
+};
+
+class LBitOpI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(BitOpI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ explicit LBitOpI64(JSOp op) : LInstructionHelper(classOpcode), op_(op) {}
+
+ const char* extraName() const { return CodeName(op_); }
+
+ JSOp bitop() const { return op_; }
+};
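+
+// Note (added for clarity): INT64_PIECES is 1 on 64-bit targets and 2 on
+// 32-bit targets, so each int64 operand occupies INT64_PIECES allocation
+// slots; Lhs lives at index 0 and Rhs at index INT64_PIECES. A sketch of how
+// the operands would be read back, assuming the usual int64 accessor:
+//   LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+//   LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);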
+
+// Shift operation, taking two 32-bit integers as inputs and returning
+// a 32-bit integer result as an output.
+class LShiftI : public LBinaryMath<0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI)
+
+ explicit LShiftI(JSOp op) : LBinaryMath(classOpcode), op_(op) {}
+
+ JSOp bitop() { return op_; }
+
+ MInstruction* mir() { return mir_->toInstruction(); }
+
+ const char* extraName() const { return CodeName(op_); }
+};
+
+class LShiftI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0> {
+ JSOp op_;
+
+ public:
+ LIR_HEADER(ShiftI64)
+
+ explicit LShiftI64(JSOp op) : LInstructionHelper(classOpcode), op_(op) {}
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ JSOp bitop() { return op_; }
+
+ MInstruction* mir() { return mir_->toInstruction(); }
+
+ const char* extraName() const { return CodeName(op_); }
+};
+
+// Sign extension
+class LSignExtendInt32 : public LInstructionHelper<1, 1, 0> {
+ MSignExtendInt32::Mode mode_;
+
+ public:
+ LIR_HEADER(SignExtendInt32);
+
+ explicit LSignExtendInt32(const LAllocation& num, MSignExtendInt32::Mode mode)
+ : LInstructionHelper(classOpcode), mode_(mode) {
+ setOperand(0, num);
+ }
+
+ MSignExtendInt32::Mode mode() { return mode_; }
+};
+
+class LSignExtendInt64
+ : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(SignExtendInt64)
+
+ explicit LSignExtendInt64(const LInt64Allocation& input)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, input);
+ }
+
+ const MSignExtendInt64* mir() const { return mir_->toSignExtendInt64(); }
+
+ MSignExtendInt64::Mode mode() const { return mir()->mode(); }
+};
+
+class LUrshD : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UrshD)
+
+ LUrshD(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Returns from the function being compiled (not used in inlined frames). The
+// input must be a box.
+class LReturn : public LInstructionHelper<0, BOX_PIECES, 0> {
+ bool isGenerator_;
+
+ public:
+ LIR_HEADER(Return)
+
+ explicit LReturn(bool isGenerator)
+ : LInstructionHelper(classOpcode), isGenerator_(isGenerator) {}
+
+ bool isGenerator() { return isGenerator_; }
+};
+
+class LThrow : public LCallInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(Throw)
+
+ static const size_t Value = 0;
+
+ explicit LThrow(const LBoxAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ }
+};
+
+class LMinMaxBase : public LInstructionHelper<1, 2, 0> {
+ protected:
+ LMinMaxBase(LNode::Opcode opcode, const LAllocation& first,
+ const LAllocation& second)
+ : LInstructionHelper(opcode) {
+ setOperand(0, first);
+ setOperand(1, second);
+ }
+
+ public:
+ const LAllocation* first() { return this->getOperand(0); }
+ const LAllocation* second() { return this->getOperand(1); }
+ const LDefinition* output() { return this->getDef(0); }
+ MMinMax* mir() const { return mir_->toMinMax(); }
+ const char* extraName() const { return mir()->isMax() ? "Max" : "Min"; }
+};
+
+class LMinMaxI : public LMinMaxBase {
+ public:
+ LIR_HEADER(MinMaxI)
+ LMinMaxI(const LAllocation& first, const LAllocation& second)
+ : LMinMaxBase(classOpcode, first, second) {}
+};
+
+class LMinMaxD : public LMinMaxBase {
+ public:
+ LIR_HEADER(MinMaxD)
+ LMinMaxD(const LAllocation& first, const LAllocation& second)
+ : LMinMaxBase(classOpcode, first, second) {}
+};
+
+class LMinMaxF : public LMinMaxBase {
+ public:
+ LIR_HEADER(MinMaxF)
+ LMinMaxF(const LAllocation& first, const LAllocation& second)
+ : LMinMaxBase(classOpcode, first, second) {}
+};
+
+// Negative of an integer.
+class LNegI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NegI);
+ explicit LNegI(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Negative of a double.
+class LNegD : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NegD)
+ explicit LNegD(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Negative of a float32.
+class LNegF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NegF)
+ explicit LNegF(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of an integer.
+class LAbsI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(AbsI)
+ explicit LAbsI(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+
+ MAbs* mir() const { return mir_->toAbs(); }
+};
+
+// Absolute value of a double.
+class LAbsD : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(AbsD)
+ explicit LAbsD(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Absolute value of a float32.
+class LAbsF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(AbsF)
+ explicit LAbsF(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Copysign for doubles.
+class LCopySignD : public LInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(CopySignD)
+ explicit LCopySignD() : LInstructionHelper(classOpcode) {}
+};
+
+// Copysign for float32.
+class LCopySignF : public LInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(CopySignF)
+ explicit LCopySignF() : LInstructionHelper(classOpcode) {}
+};
+
+// Count leading zeroes on an int32.
+class LClzI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ClzI)
+ explicit LClzI(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+
+ MClz* mir() const { return mir_->toClz(); }
+};
+
+// Count leading zeroes on an int64.
+class LClzI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(ClzI64)
+ explicit LClzI64(const LInt64Allocation& num)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, num);
+ }
+
+ MClz* mir() const { return mir_->toClz(); }
+};
+
+// Count trailing zeroes on an int32.
+class LCtzI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(CtzI)
+ explicit LCtzI(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+
+ MCtz* mir() const { return mir_->toCtz(); }
+};
+
+// Count trailing zeroes on an int64.
+class LCtzI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(CtzI64)
+ explicit LCtzI64(const LInt64Allocation& num)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, num);
+ }
+
+ MCtz* mir() const { return mir_->toCtz(); }
+};
+
+// Count population on an int32.
+class LPopcntI : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(PopcntI)
+ explicit LPopcntI(const LAllocation& num, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ MPopcnt* mir() const { return mir_->toPopcnt(); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Count population on an int64.
+class LPopcntI64 : public LInstructionHelper<INT64_PIECES, INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(PopcntI64)
+ explicit LPopcntI64(const LInt64Allocation& num, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, num);
+ setTemp(0, temp);
+ }
+
+ MPopcnt* mir() const { return mir_->toPopcnt(); }
+};
+
+// Square root of a double.
+class LSqrtD : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(SqrtD)
+ explicit LSqrtD(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Square root of a float32.
+class LSqrtF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(SqrtF)
+ explicit LSqrtF(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+class LAtan2D : public LCallInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(Atan2D)
+ LAtan2D(const LAllocation& y, const LAllocation& x, const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, y);
+ setOperand(1, x);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* y() { return getOperand(0); }
+
+ const LAllocation* x() { return getOperand(1); }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ const LDefinition* output() { return getDef(0); }
+};
+
+class LHypot : public LCallInstructionHelper<1, 4, 1> {
+ uint32_t numOperands_;
+
+ public:
+ LIR_HEADER(Hypot)
+ LHypot(const LAllocation& x, const LAllocation& y, const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode), numOperands_(2) {
+ setOperand(0, x);
+ setOperand(1, y);
+ setTemp(0, temp);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z,
+ const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode), numOperands_(3) {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setTemp(0, temp);
+ }
+
+ LHypot(const LAllocation& x, const LAllocation& y, const LAllocation& z,
+ const LAllocation& w, const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode), numOperands_(4) {
+ setOperand(0, x);
+ setOperand(1, y);
+ setOperand(2, z);
+ setOperand(3, w);
+ setTemp(0, temp);
+ }
+
+ uint32_t numArgs() const { return numOperands_; }
+
+ const LAllocation* x() { return getOperand(0); }
+
+ const LAllocation* y() { return getOperand(1); }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ const LDefinition* output() { return getDef(0); }
+};
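+
+// Illustrative sketch (an assumption, not taken from the tree): the lowering
+// would pick the LHypot constructor matching Math.hypot's argument count,
+// e.g. for two arguments roughly
+//   auto* lir = new (alloc()) LHypot(useRegisterAtStart(x),
+//                                    useRegisterAtStart(y),
+//                                    tempFixed(CallTempReg0));
+//   defineReturn(lir, mir);
+// The register and temp choices shown here are illustrative only.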
+
+// Double raised to an integer power.
+class LPowI : public LCallInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(PowI)
+ LPowI(const LAllocation& value, const LAllocation& power,
+ const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, value);
+ setOperand(1, power);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* value() { return getOperand(0); }
+ const LAllocation* power() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Integer raised to an integer power.
+class LPowII : public LInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(PowII)
+ LPowII(const LAllocation& value, const LAllocation& power,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, value);
+ setOperand(1, power);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* value() { return getOperand(0); }
+ const LAllocation* power() { return getOperand(1); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MPow* mir() const { return mir_->toPow(); }
+};
+
+// Double raised to a double power.
+class LPowD : public LCallInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(PowD)
+ LPowD(const LAllocation& value, const LAllocation& power,
+ const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, value);
+ setOperand(1, power);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* value() { return getOperand(0); }
+ const LAllocation* power() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// A constant power-of-two base raised to an integer power.
+class LPowOfTwoI : public LInstructionHelper<1, 1, 0> {
+ uint32_t base_;
+
+ public:
+ LIR_HEADER(PowOfTwoI)
+ LPowOfTwoI(uint32_t base, const LAllocation& power)
+ : LInstructionHelper(classOpcode), base_(base) {
+ setOperand(0, power);
+ }
+
+ uint32_t base() const { return base_; }
+ const LAllocation* power() { return getOperand(0); }
+};
+
+// Sign value of an integer.
+class LSignI : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(SignI)
+ explicit LSignI(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Sign value of a double.
+class LSignD : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(SignD)
+ explicit LSignD(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Sign value of a double with expected int32 result.
+class LSignDI : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(SignDI)
+ explicit LSignDI(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LMathFunctionD : public LCallInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(MathFunctionD)
+ LMathFunctionD(const LAllocation& input, const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+ MMathFunction* mir() const { return mir_->toMathFunction(); }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+class LMathFunctionF : public LCallInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(MathFunctionF)
+ LMathFunctionF(const LAllocation& input, const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+ MMathFunction* mir() const { return mir_->toMathFunction(); }
+ const char* extraName() const {
+ return MMathFunction::FunctionName(mir()->function());
+ }
+};
+
+// Adds two integers, returning an integer value.
+class LAddI : public LBinaryMath<0> {
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(AddI)
+
+ LAddI() : LBinaryMath(classOpcode), recoversInput_(false) {}
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ bool recoversInput() const { return recoversInput_; }
+ void setRecoversInput() { recoversInput_ = true; }
+
+ MAdd* mir() const { return mir_->toAdd(); }
+};
+
+class LAddI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(AddI64)
+
+ LAddI64() : LInstructionHelper(classOpcode) {}
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Subtracts two integers, returning an integer value.
+class LSubI : public LBinaryMath<0> {
+ bool recoversInput_;
+
+ public:
+ LIR_HEADER(SubI)
+
+ LSubI() : LBinaryMath(classOpcode), recoversInput_(false) {}
+
+ const char* extraName() const {
+ return snapshot() ? "OverflowCheck" : nullptr;
+ }
+
+ bool recoversInput() const { return recoversInput_; }
+ void setRecoversInput() { recoversInput_ = true; }
+ MSub* mir() const { return mir_->toSub(); }
+};
+
+inline bool LNode::recoversInput() const {
+ switch (op()) {
+ case Opcode::AddI:
+ return toAddI()->recoversInput();
+ case Opcode::SubI:
+ return toSubI()->recoversInput();
+ default:
+ return false;
+ }
+}
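+
+// Note (added for clarity): "recovers input" means the bailout machinery can
+// rebuild one operand of the instruction from its result rather than keeping
+// that operand alive across the instruction; e.g. for an LAddI that took a
+// snapshot, the left-hand side can be recomputed as result - rhs.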
+
+class LSubI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(SubI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LSubI64() : LInstructionHelper(classOpcode) {}
+};
+
+class LMulI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(MulI64)
+
+ explicit LMulI64() : LInstructionHelper(classOpcode) {
+ setTemp(0, LDefinition());
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+};
+
+// Performs an add, sub, mul, or div on two double values.
+class LMathD : public LBinaryMath<0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathD)
+
+ explicit LMathD(JSOp jsop) : LBinaryMath(classOpcode), jsop_(jsop) {}
+
+ JSOp jsop() const { return jsop_; }
+
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+// Performs an add, sub, mul, or div on two float32 values.
+class LMathF : public LBinaryMath<0> {
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(MathF)
+
+ explicit LMathF(JSOp jsop) : LBinaryMath(classOpcode), jsop_(jsop) {}
+
+ JSOp jsop() const { return jsop_; }
+
+ const char* extraName() const { return CodeName(jsop_); }
+};
+
+class LModD : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModD)
+
+ LModD(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ setIsCall();
+ }
+ const LDefinition* temp() { return getTemp(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoD : public LInstructionHelper<1, 1, 0> {
+ const uint32_t divisor_;
+
+ public:
+ LIR_HEADER(ModPowTwoD)
+
+ LModPowTwoD(const LAllocation& lhs, uint32_t divisor)
+ : LInstructionHelper(classOpcode), divisor_(divisor) {
+ setOperand(0, lhs);
+ }
+
+ uint32_t divisor() const { return divisor_; }
+ const LAllocation* lhs() { return getOperand(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LWasmBuiltinModD : public LInstructionHelper<1, 3, 0> {
+ static const size_t LhsIndex = 0;
+ static const size_t RhsIndex = 1;
+ static const size_t TlsIndex = 2;
+
+ public:
+ LIR_HEADER(WasmBuiltinModD)
+
+ LWasmBuiltinModD(const LAllocation& lhs, const LAllocation& rhs,
+ const LAllocation& tls)
+ : LInstructionHelper(classOpcode) {
+ setOperand(LhsIndex, lhs);
+ setOperand(RhsIndex, rhs);
+ setOperand(TlsIndex, tls);
+ setIsCall();
+ }
+
+ const LAllocation* lhs() { return this->getOperand(LhsIndex); }
+ const LAllocation* rhs() { return this->getOperand(RhsIndex); }
+ const LAllocation* tls() { return this->getOperand(TlsIndex); }
+
+ MWasmBuiltinModD* mir() const { return mir_->toWasmBuiltinModD(); }
+};
+
+class LBigIntAdd : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntAdd)
+
+ LBigIntAdd(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntSub : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntSub)
+
+ LBigIntSub(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntMul : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntMul)
+
+ LBigIntMul(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntDiv : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntDiv)
+
+ LBigIntDiv(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ const MBigIntDiv* mir() const { return mirRaw()->toBigIntDiv(); }
+};
+
+class LBigIntMod : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntMod)
+
+ LBigIntMod(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ const MBigIntMod* mir() const { return mirRaw()->toBigIntMod(); }
+};
+
+class LBigIntPow : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntPow)
+
+ LBigIntPow(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ const MBigIntPow* mir() const { return mirRaw()->toBigIntPow(); }
+};
+
+class LBigIntBitAnd : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitAnd)
+
+ LBigIntBitAnd(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntBitOr : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitOr)
+
+ LBigIntBitOr(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntBitXor : public LBinaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitXor)
+
+ LBigIntBitXor(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntLsh : public LBinaryMath<3> {
+ public:
+ LIR_HEADER(BigIntLsh)
+
+ LBigIntLsh(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+};
+
+class LBigIntRsh : public LBinaryMath<3> {
+ public:
+ LIR_HEADER(BigIntRsh)
+
+ LBigIntRsh(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+};
+
+class LBigIntIncrement : public LUnaryMath<2> {
+ public:
+ LIR_HEADER(BigIntIncrement)
+
+ LBigIntIncrement(const LAllocation& input, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntDecrement : public LUnaryMath<2> {
+ public:
+ LIR_HEADER(BigIntDecrement)
+
+ LBigIntDecrement(const LAllocation& input, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntNegate : public LUnaryMath<1> {
+ public:
+ LIR_HEADER(BigIntNegate)
+
+ LBigIntNegate(const LAllocation& input, const LDefinition& temp)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LBigIntBitNot : public LUnaryMath<2> {
+ public:
+ LIR_HEADER(BigIntBitNot)
+
+ LBigIntBitNot(const LAllocation& input, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LUnaryMath(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+// Adds two strings, returning a string.
+class LConcat : public LInstructionHelper<1, 2, 5> {
+ public:
+ LIR_HEADER(Concat)
+
+ LConcat(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3, const LDefinition& temp4,
+ const LDefinition& temp5)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ setTemp(3, temp4);
+ setTemp(4, temp5);
+ }
+
+ const LAllocation* lhs() { return this->getOperand(0); }
+ const LAllocation* rhs() { return this->getOperand(1); }
+ const LDefinition* temp1() { return this->getTemp(0); }
+ const LDefinition* temp2() { return this->getTemp(1); }
+ const LDefinition* temp3() { return this->getTemp(2); }
+ const LDefinition* temp4() { return this->getTemp(3); }
+ const LDefinition* temp5() { return this->getTemp(4); }
+};
+
+// Get uint16 character code from a string.
+class LCharCodeAt : public LInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(CharCodeAt)
+
+ LCharCodeAt(const LAllocation& str, const LAllocation& index,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, str);
+ setOperand(1, index);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* str() { return this->getOperand(0); }
+ const LAllocation* index() { return this->getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Convert uint16 character code to a string.
+class LFromCharCode : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(FromCharCode)
+
+ explicit LFromCharCode(const LAllocation& code)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, code);
+ }
+
+ const LAllocation* code() { return this->getOperand(0); }
+};
+
+// Convert uint32 code point to a string.
+class LFromCodePoint : public LInstructionHelper<1, 1, 2> {
+ public:
+ LIR_HEADER(FromCodePoint)
+
+ explicit LFromCodePoint(const LAllocation& codePoint,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, codePoint);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* codePoint() { return this->getOperand(0); }
+
+ const LDefinition* temp1() { return this->getTemp(0); }
+
+ const LDefinition* temp2() { return this->getTemp(1); }
+};
+
+// Calls the ToLowerCase or ToUpperCase case conversion function.
+class LStringConvertCase : public LCallInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(StringConvertCase)
+
+ explicit LStringConvertCase(const LAllocation& string)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, string);
+ }
+
+ const MStringConvertCase* mir() const { return mir_->toStringConvertCase(); }
+
+ const LAllocation* string() { return this->getOperand(0); }
+};
+
+class LStringSplit : public LCallInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(StringSplit)
+
+ LStringSplit(const LAllocation& string, const LAllocation& separator)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, string);
+ setOperand(1, separator);
+ }
+ const LAllocation* string() { return getOperand(0); }
+ const LAllocation* separator() { return getOperand(1); }
+ const MStringSplit* mir() const { return mir_->toStringSplit(); }
+};
+
+class LSubstr : public LInstructionHelper<1, 3, 3> {
+ public:
+ LIR_HEADER(Substr)
+
+ LSubstr(const LAllocation& string, const LAllocation& begin,
+ const LAllocation& length, const LDefinition& temp,
+ const LDefinition& temp2, const LDefinition& temp3)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, string);
+ setOperand(1, begin);
+ setOperand(2, length);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* string() { return getOperand(0); }
+ const LAllocation* begin() { return getOperand(1); }
+ const LAllocation* length() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+  const MSubstr* mir() const { return mir_->toSubstr(); }
+};
+
+// Convert a 32-bit integer to a double.
+class LInt32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int32ToDouble)
+
+ explicit LInt32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit float to a double.
+class LFloat32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Float32ToDouble)
+
+ explicit LFloat32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a double to a 32-bit float.
+class LDoubleToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(DoubleToFloat32)
+
+ explicit LDoubleToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit integer to a float32.
+class LInt32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int32ToFloat32)
+
+ explicit LInt32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a value to a double.
+class LValueToDouble : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ValueToDouble)
+ static const size_t Input = 0;
+
+ explicit LValueToDouble(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ MToDouble* mir() { return mir_->toToDouble(); }
+};
+
+// Convert a value to a float32.
+class LValueToFloat32 : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ValueToFloat32)
+ static const size_t Input = 0;
+
+ explicit LValueToFloat32(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ MToFloat32* mir() { return mir_->toToFloat32(); }
+};
+
+// Convert a value to an int32.
+// Input: components of a Value
+// Output: 32-bit integer
+// Bailout: undefined, string, object, or non-int32 double
+// Temps: one float register, one GP register
+//
+// This instruction requires a temporary float register.
+class LValueToInt32 : public LInstructionHelper<1, BOX_PIECES, 2> {
+ public:
+ enum Mode { NORMAL, TRUNCATE, TRUNCATE_NOWRAP };
+
+ private:
+ Mode mode_;
+
+ public:
+ LIR_HEADER(ValueToInt32)
+
+ LValueToInt32(const LBoxAllocation& input, const LDefinition& temp0,
+ const LDefinition& temp1, Mode mode)
+ : LInstructionHelper(classOpcode), mode_(mode) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mode() == NORMAL ? "Normal"
+ : mode() == TRUNCATE ? "Truncate"
+ : "TruncateNoWrap";
+ }
+
+ static const size_t Input = 0;
+
+ Mode mode() const { return mode_; }
+ const LDefinition* tempFloat() { return getTemp(0); }
+ const LDefinition* temp() { return getTemp(1); }
+ MToNumberInt32* mirNormal() const {
+ MOZ_ASSERT(mode_ == NORMAL);
+ return mir_->toToNumberInt32();
+ }
+ MTruncateToInt32* mirTruncate() const {
+ MOZ_ASSERT(mode_ == TRUNCATE);
+ return mir_->toTruncateToInt32();
+ }
+ MToIntegerInt32* mirTruncateNoWrap() const {
+ MOZ_ASSERT(mode_ == TRUNCATE_NOWRAP);
+ return mir_->toToIntegerInt32();
+ }
+ MInstruction* mir() const { return mir_->toInstruction(); }
+};
+
+// Convert a value to a BigInt.
+class LValueToBigInt : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ValueToBigInt)
+ static const size_t Input = 0;
+
+ explicit LValueToBigInt(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ MToBigInt* mir() const { return mir_->toToBigInt(); }
+};
+
+// Convert a double to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the double cannot be converted to an integer.
+class LDoubleToInt32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(DoubleToInt32)
+
+ explicit LDoubleToInt32(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MToNumberInt32* mir() const { return mir_->toToNumberInt32(); }
+};
+
+// Convert a float32 to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the float32 cannot be converted to an integer.
+class LFloat32ToInt32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Float32ToInt32)
+
+ explicit LFloat32ToInt32(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MToNumberInt32* mir() const { return mir_->toToNumberInt32(); }
+};
+
+// Truncates a double to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the double, once converted to an integer, exceeds the int32
+// range. No bailout for NaN or negative zero.
+class LDoubleToIntegerInt32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(DoubleToIntegerInt32)
+
+ explicit LDoubleToIntegerInt32(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MToIntegerInt32* mir() const { return mir_->toToIntegerInt32(); }
+};
+
+// Truncates a float32 to an int32.
+// Input: floating-point register
+// Output: 32-bit integer
+// Bailout: if the float32, once converted to an integer, exceeds the int32
+// range. No bailout for NaN or negative zero.
+class LFloat32ToIntegerInt32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Float32ToIntegerInt32)
+
+ explicit LFloat32ToIntegerInt32(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MToIntegerInt32* mir() const { return mir_->toToIntegerInt32(); }
+};
+
+// Convert a double to a truncated int32.
+// Input: floating-point register
+// Output: 32-bit integer
+class LTruncateDToInt32 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(TruncateDToInt32)
+
+ LTruncateDToInt32(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+
+ MTruncateToInt32* mir() const { return mir_->toTruncateToInt32(); }
+};
+
+// Convert a double to a truncated int32, carrying the wasm tls pointer as an
+// extra operand because the slow out-of-line (OOL) path needs it.
+class LWasmBuiltinTruncateDToInt32 : public LInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(WasmBuiltinTruncateDToInt32)
+
+ LWasmBuiltinTruncateDToInt32(const LAllocation& in, const LAllocation& tls,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setOperand(1, tls);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+
+ MWasmBuiltinTruncateToInt32* mir() const {
+ return mir_->toWasmBuiltinTruncateToInt32();
+ }
+};
+
+// Convert a float32 to a truncated int32.
+// Input: floating-point register
+// Output: 32-bit integer
+class LTruncateFToInt32 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(TruncateFToInt32)
+
+ LTruncateFToInt32(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+
+ MTruncateToInt32* mir() const { return mir_->toTruncateToInt32(); }
+};
+
+// Convert a float32 to a truncated int32, carrying the wasm tls pointer as an
+// extra operand because the slow out-of-line (OOL) path needs it.
+class LWasmBuiltinTruncateFToInt32 : public LInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(WasmBuiltinTruncateFToInt32)
+
+ LWasmBuiltinTruncateFToInt32(const LAllocation& in, const LAllocation& tls,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setOperand(1, tls);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+
+ MWasmBuiltinTruncateToInt32* mir() const {
+ return mir_->toWasmBuiltinTruncateToInt32();
+ }
+};
+
+class LWasmTruncateToInt32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt32)
+
+ explicit LWasmTruncateToInt32(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt32* mir() const { return mir_->toWasmTruncateToInt32(); }
+};
+
+class LWrapInt64ToInt32 : public LInstructionHelper<1, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WrapInt64ToInt32)
+
+ static const size_t Input = 0;
+
+ explicit LWrapInt64ToInt32(const LInt64Allocation& input)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(Input, input);
+ }
+
+ const MWrapInt64ToInt32* mir() { return mir_->toWrapInt64ToInt32(); }
+};
+
+class LExtendInt32ToInt64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(ExtendInt32ToInt64)
+
+ explicit LExtendInt32ToInt64(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const MExtendInt32ToInt64* mir() { return mir_->toExtendInt32ToInt64(); }
+};
+
+// Convert a boolean value to a string.
+class LBooleanToString : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(BooleanToString)
+
+ explicit LBooleanToString(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const MToString* mir() { return mir_->toToString(); }
+};
+
+// Convert an integer hosted on one definition to a string with a function call.
+class LIntToString : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IntToString)
+
+ explicit LIntToString(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const MToString* mir() { return mir_->toToString(); }
+};
+
+// Convert a double hosted on one definition to a string with a function call.
+class LDoubleToString : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(DoubleToString)
+
+ LDoubleToString(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const MToString* mir() { return mir_->toToString(); }
+};
+
+// Convert a primitive to a string with a function call.
+class LValueToString : public LInstructionHelper<1, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(ValueToString)
+
+ LValueToString(const LBoxAllocation& input, const LDefinition& tempToUnbox)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, tempToUnbox);
+ }
+
+ static const size_t Input = 0;
+
+ const MToString* mir() { return mir_->toToString(); }
+
+ const LDefinition* tempToUnbox() { return getTemp(0); }
+};
+
+// Double raised to a half power.
+class LPowHalfD : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(PowHalfD);
+ explicit LPowHalfD(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* output() { return getDef(0); }
+ MPowHalf* mir() const { return mir_->toPowHalf(); }
+};
+
+class LNaNToZero : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(NaNToZero)
+
+ explicit LNaNToZero(const LAllocation& input, const LDefinition& tempDouble)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, tempDouble);
+ }
+
+ const MNaNToZero* mir() { return mir_->toNaNToZero(); }
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* output() { return getDef(0); }
+ const LDefinition* tempDouble() { return getTemp(0); }
+};
+
+// Receives the BaselineFrame address in the OsrFrameReg via the IonOsrTempData
+// populated by PrepareOsrTempData.
+//
+// Forwards this object to the LOsrValues for Value materialization.
+class LOsrEntry : public LInstructionHelper<1, 0, 1> {
+ protected:
+ Label label_;
+ uint32_t frameDepth_;
+
+ public:
+ LIR_HEADER(OsrEntry)
+
+ explicit LOsrEntry(const LDefinition& temp)
+ : LInstructionHelper(classOpcode), frameDepth_(0) {
+ setTemp(0, temp);
+ }
+
+ void setFrameDepth(uint32_t depth) { frameDepth_ = depth; }
+ uint32_t getFrameDepth() { return frameDepth_; }
+ Label* label() { return &label_; }
+ const LDefinition* temp() { return getTemp(0); }
+};
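+
+// Note (added for clarity): LOsrValue, LOsrEnvironmentChain, LOsrReturnValue,
+// and LOsrArgumentsObject below each take the LOsrEntry result as their
+// operand and materialize the corresponding slot of the BaselineFrame it
+// points at.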
+
+// Materialize a Value stored in an interpreter frame for OSR.
+class LOsrValue : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(OsrValue)
+
+ explicit LOsrValue(const LAllocation& entry)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, entry);
+ }
+
+ const MOsrValue* mir() { return mir_->toOsrValue(); }
+};
+
+// Materialize a JSObject env chain stored in an interpreter frame for OSR.
+class LOsrEnvironmentChain : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(OsrEnvironmentChain)
+
+ explicit LOsrEnvironmentChain(const LAllocation& entry)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, entry);
+ }
+
+ const MOsrEnvironmentChain* mir() { return mir_->toOsrEnvironmentChain(); }
+};
+
+// Materialize the return value stored in an interpreter frame for OSR.
+class LOsrReturnValue : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(OsrReturnValue)
+
+ explicit LOsrReturnValue(const LAllocation& entry)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, entry);
+ }
+
+ const MOsrReturnValue* mir() { return mir_->toOsrReturnValue(); }
+};
+
+// Materialize an ArgumentsObject stored in an interpreter frame for OSR.
+class LOsrArgumentsObject : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(OsrArgumentsObject)
+
+ explicit LOsrArgumentsObject(const LAllocation& entry)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, entry);
+ }
+
+ const MOsrArgumentsObject* mir() { return mir_->toOsrArgumentsObject(); }
+};
+
+class LRegExp : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(RegExp)
+
+ explicit LRegExp(const LDefinition& temp) : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+ const MRegExp* mir() const { return mir_->toRegExp(); }
+};
+
+class LRegExpMatcher : public LCallInstructionHelper<BOX_PIECES, 3, 0> {
+ public:
+ LIR_HEADER(RegExpMatcher)
+
+ LRegExpMatcher(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() { return getOperand(0); }
+ const LAllocation* string() { return getOperand(1); }
+ const LAllocation* lastIndex() { return getOperand(2); }
+
+ const MRegExpMatcher* mir() const { return mir_->toRegExpMatcher(); }
+};
+
+class LRegExpSearcher : public LCallInstructionHelper<1, 3, 0> {
+ public:
+ LIR_HEADER(RegExpSearcher)
+
+ LRegExpSearcher(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() { return getOperand(0); }
+ const LAllocation* string() { return getOperand(1); }
+ const LAllocation* lastIndex() { return getOperand(2); }
+
+ const MRegExpSearcher* mir() const { return mir_->toRegExpSearcher(); }
+};
+
+class LRegExpTester : public LCallInstructionHelper<1, 3, 0> {
+ public:
+ LIR_HEADER(RegExpTester)
+
+ LRegExpTester(const LAllocation& regexp, const LAllocation& string,
+ const LAllocation& lastIndex)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, regexp);
+ setOperand(1, string);
+ setOperand(2, lastIndex);
+ }
+
+ const LAllocation* regexp() { return getOperand(0); }
+ const LAllocation* string() { return getOperand(1); }
+ const LAllocation* lastIndex() { return getOperand(2); }
+
+ const MRegExpTester* mir() const { return mir_->toRegExpTester(); }
+};
+
+class LRegExpPrototypeOptimizable : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(RegExpPrototypeOptimizable);
+ LRegExpPrototypeOptimizable(const LAllocation& object,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ MRegExpPrototypeOptimizable* mir() const {
+ return mir_->toRegExpPrototypeOptimizable();
+ }
+};
+
+class LRegExpInstanceOptimizable : public LInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(RegExpInstanceOptimizable);
+ LRegExpInstanceOptimizable(const LAllocation& object,
+ const LAllocation& proto, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, proto);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* proto() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ MRegExpInstanceOptimizable* mir() const {
+ return mir_->toRegExpInstanceOptimizable();
+ }
+};
+
+class LGetFirstDollarIndex : public LInstructionHelper<1, 1, 3> {
+ public:
+ LIR_HEADER(GetFirstDollarIndex);
+ LGetFirstDollarIndex(const LAllocation& str, const LDefinition& temp0,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, str);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const LAllocation* str() { return getOperand(0); }
+ const LDefinition* temp0() { return getTemp(0); }
+ const LDefinition* temp1() { return getTemp(1); }
+ const LDefinition* temp2() { return getTemp(2); }
+};
+
+class LStringReplace : public LCallInstructionHelper<1, 3, 0> {
+ public:
+ LIR_HEADER(StringReplace);
+
+ LStringReplace(const LAllocation& string, const LAllocation& pattern,
+ const LAllocation& replacement)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, string);
+ setOperand(1, pattern);
+ setOperand(2, replacement);
+ }
+
+ const MStringReplace* mir() const { return mir_->toStringReplace(); }
+
+ const LAllocation* string() { return getOperand(0); }
+ const LAllocation* pattern() { return getOperand(1); }
+ const LAllocation* replacement() { return getOperand(2); }
+};
+
+class LBinaryValueCache
+ : public LInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(BinaryValueCache)
+
+  // Takes two temps: these are intended to be FloatReg0 and FloatReg1, so
+  // that the actual cache code can safely clobber those registers without
+  // saving and restoring them.
+ LBinaryValueCache(const LBoxAllocation& lhs, const LBoxAllocation& rhs,
+ const LDefinition& temp0, const LDefinition& temp1)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const MBinaryCache* mir() const { return mir_->toBinaryCache(); }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
+
+class LBinaryBoolCache : public LInstructionHelper<1, 2 * BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(BinaryBoolCache)
+
+  // Takes two temps: these are intended to be FloatReg0 and FloatReg1, to
+  // allow the actual cache code to safely clobber those values without
+  // save and restore.
+ LBinaryBoolCache(const LBoxAllocation& lhs, const LBoxAllocation& rhs,
+ const LDefinition& temp0, const LDefinition& temp1)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(LhsInput, lhs);
+ setBoxOperand(RhsInput, rhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const MBinaryCache* mir() const { return mir_->toBinaryCache(); }
+
+ static const size_t LhsInput = 0;
+ static const size_t RhsInput = BOX_PIECES;
+};
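+
+// A note on the operand layout of the two value caches above: LhsInput and
+// RhsInput are indices into the box operands, so the rhs starts BOX_PIECES
+// operands after the lhs. BOX_PIECES is normally 1 on 64-bit targets (a boxed
+// Value fits in one register) and 2 on 32-bit targets (type tag + payload).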
+
+class LUnaryCache : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(UnaryCache)
+
+ explicit LUnaryCache(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ const MUnaryCache* mir() const { return mir_->toUnaryCache(); }
+
+ const LAllocation* input() { return getOperand(Input); }
+
+ static const size_t Input = 0;
+};
+
+class LClassConstructor : public LCallInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(ClassConstructor)
+
+ const MClassConstructor* mir() const { return mir_->toClassConstructor(); }
+
+ LClassConstructor() : LCallInstructionHelper(classOpcode) {}
+};
+
+class LDerivedClassConstructor : public LCallInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(DerivedClassConstructor)
+
+ explicit LDerivedClassConstructor(const LAllocation& prototype)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, prototype);
+ }
+
+ const LAllocation* prototype() { return getOperand(0); }
+
+ const MDerivedClassConstructor* mir() const {
+ return mir_->toDerivedClassConstructor();
+ }
+};
+
+class LModuleMetadata : public LCallInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(ModuleMetadata)
+
+ const MModuleMetadata* mir() const { return mir_->toModuleMetadata(); }
+
+ LModuleMetadata() : LCallInstructionHelper(classOpcode) {}
+};
+
+class LDynamicImport : public LCallInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(DynamicImport)
+
+ static const size_t SpecifierIndex = 0;
+
+ explicit LDynamicImport(const LBoxAllocation& specifier)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(SpecifierIndex, specifier);
+ }
+
+ const MDynamicImport* mir() const { return mir_->toDynamicImport(); }
+};
+
+class LLambda : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(Lambda)
+
+ LLambda(const LAllocation& envChain, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, envChain);
+ setTemp(0, temp);
+ }
+ const LAllocation* environmentChain() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ const MLambda* mir() const { return mir_->toLambda(); }
+};
+
+class LLambdaArrow : public LInstructionHelper<1, 1 + BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(LambdaArrow)
+
+ static const size_t NewTargetValue = 1;
+
+ LLambdaArrow(const LAllocation& envChain, const LBoxAllocation& newTarget,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, envChain);
+ setBoxOperand(NewTargetValue, newTarget);
+ setTemp(0, temp);
+ }
+ const LAllocation* environmentChain() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ const MLambdaArrow* mir() const { return mir_->toLambdaArrow(); }
+};
+
+class LFunctionWithProto : public LCallInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(FunctionWithProto)
+
+ LFunctionWithProto(const LAllocation& envChain, const LAllocation& prototype)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, envChain);
+ setOperand(1, prototype);
+ }
+ const LAllocation* environmentChain() { return getOperand(0); }
+ const LAllocation* prototype() { return getOperand(1); }
+ const MFunctionWithProto* mir() const { return mir_->toFunctionWithProto(); }
+};
+
+class LSetFunName : public LCallInstructionHelper<1, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(SetFunName)
+
+ static const size_t NameValue = 1;
+
+ LSetFunName(const LAllocation& fun, const LBoxAllocation& name)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, fun);
+ setBoxOperand(NameValue, name);
+ }
+ const LAllocation* fun() { return getOperand(0); }
+ const MSetFunName* mir() const { return mir_->toSetFunName(); }
+};
+
+class LKeepAliveObject : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(KeepAliveObject)
+
+ explicit LKeepAliveObject(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+// Load the "slots" member out of a JSObject.
+// Input: JSObject pointer
+// Output: slots pointer
+class LSlots : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Slots)
+
+ explicit LSlots(const LAllocation& object) : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+// Load the "elements" member out of a JSObject.
+// Input: JSObject pointer
+// Output: elements pointer
+class LElements : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Elements)
+
+ explicit LElements(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ const MElements* mir() const { return mir_->toElements(); }
+};
+
+// Load the initialized length from an elements header.
+class LInitializedLength : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(InitializedLength)
+
+ explicit LInitializedLength(const LAllocation& elements)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+};
+
+// Store to the initialized length in an elements header. Note the input is an
+// *index*, one less than the desired initialized length.
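+// For example, an initialized length of 5 is requested by passing 4 as the
+// index operand.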
+class LSetInitializedLength : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(SetInitializedLength)
+
+ LSetInitializedLength(const LAllocation& elements, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+// Load the length from an elements header.
+class LArrayLength : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ArrayLength)
+
+ explicit LArrayLength(const LAllocation& elements)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+};
+
+// Store to the length in an elements header. Note the input is an *index*,
+// one less than the desired length.
+class LSetArrayLength : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(SetArrayLength)
+
+ LSetArrayLength(const LAllocation& elements, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+// Load the "length" property of a function.
+class LFunctionLength : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(FunctionLength)
+
+ explicit LFunctionLength(const LAllocation& function)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, function);
+ }
+
+ const LAllocation* function() { return getOperand(0); }
+};
+
+// Load the "name" property of a function.
+class LFunctionName : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(FunctionName)
+
+ explicit LFunctionName(const LAllocation& function)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, function);
+ }
+
+ const LAllocation* function() { return getOperand(0); }
+};
+
+class LGetNextEntryForIterator : public LInstructionHelper<1, 2, 3> {
+ public:
+ LIR_HEADER(GetNextEntryForIterator)
+
+ explicit LGetNextEntryForIterator(const LAllocation& iter,
+ const LAllocation& result,
+ const LDefinition& temp0,
+ const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, iter);
+ setOperand(1, result);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ setTemp(2, temp2);
+ }
+
+ const MGetNextEntryForIterator* mir() const {
+ return mir_->toGetNextEntryForIterator();
+ }
+ const LAllocation* iter() { return getOperand(0); }
+ const LAllocation* result() { return getOperand(1); }
+ const LDefinition* temp0() { return getTemp(0); }
+ const LDefinition* temp1() { return getTemp(1); }
+ const LDefinition* temp2() { return getTemp(2); }
+};
+
+class LArrayBufferByteLengthInt32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ArrayBufferByteLengthInt32)
+
+ explicit LArrayBufferByteLengthInt32(const LAllocation& obj)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+// Read the length of an array buffer view.
+class LArrayBufferViewLength : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ArrayBufferViewLength)
+
+ explicit LArrayBufferViewLength(const LAllocation& obj)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+// Read the byteOffset of an array buffer view.
+class LArrayBufferViewByteOffset : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ArrayBufferViewByteOffset)
+
+ explicit LArrayBufferViewByteOffset(const LAllocation& obj)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+// Load an array buffer view's elements vector.
+class LArrayBufferViewElements : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ArrayBufferViewElements)
+
+ explicit LArrayBufferViewElements(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() { return getOperand(0); }
+};
+
+// Return the element shift of a typed array.
+class LTypedArrayElementShift : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(TypedArrayElementShift)
+
+ explicit LTypedArrayElementShift(const LAllocation& obj)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+// Convert a double to an Int32 suitable for indexing into a TypedArray. If the
+// index isn't exactly representable as an Int32, produce any Int32 which is
+// equivalent to an OOB access into a TypedArray.
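+// For example, an index of 3.5 has no exact Int32 representation, so the
+// output may be any value that is guaranteed to fail the bounds check, such as
+// a negative number.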
+class LTypedArrayIndexToInt32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(TypedArrayIndexToInt32)
+
+ explicit LTypedArrayIndexToInt32(const LAllocation& obj)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ }
+
+ const LAllocation* index() { return getOperand(0); }
+ const MTypedArrayIndexToInt32* mir() const {
+ return mir_->toTypedArrayIndexToInt32();
+ }
+};
+
+// Bailout if index >= length.
+class LBoundsCheck : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(BoundsCheck)
+
+ LBoundsCheck(const LAllocation& index, const LAllocation& length)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, index);
+ setOperand(1, length);
+ }
+ const MBoundsCheck* mir() const { return mir_->toBoundsCheck(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LAllocation* length() { return getOperand(1); }
+};
+
+// Bailout if index + minimum < 0 or index + maximum >= length.
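+// For example, with minimum = -2 and maximum = 3, execution continues only
+// when index >= 2 and index + 3 < length.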
+class LBoundsCheckRange : public LInstructionHelper<0, 2, 1> {
+ public:
+ LIR_HEADER(BoundsCheckRange)
+
+ LBoundsCheckRange(const LAllocation& index, const LAllocation& length,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, index);
+ setOperand(1, length);
+ setTemp(0, temp);
+ }
+ const MBoundsCheck* mir() const { return mir_->toBoundsCheck(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LAllocation* length() { return getOperand(1); }
+};
+
+// Bailout if index < minimum.
+class LBoundsCheckLower : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(BoundsCheckLower)
+
+ explicit LBoundsCheckLower(const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, index);
+ }
+ MBoundsCheckLower* mir() const { return mir_->toBoundsCheckLower(); }
+ const LAllocation* index() { return getOperand(0); }
+};
+
+class LSpectreMaskIndex : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(SpectreMaskIndex)
+
+ LSpectreMaskIndex(const LAllocation& index, const LAllocation& length)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, index);
+ setOperand(1, length);
+ }
+ const LAllocation* index() { return getOperand(0); }
+ const LAllocation* length() { return getOperand(1); }
+};
+
+// Load a value from a dense array's elements vector. Bail out if it's the hole
+// value.
+class LLoadElementV : public LInstructionHelper<BOX_PIECES, 2, 0> {
+ public:
+ LIR_HEADER(LoadElementV)
+
+ LLoadElementV(const LAllocation& elements, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MLoadElement* mir() const { return mir_->toLoadElement(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+class LInArray : public LInstructionHelper<1, 3, 0> {
+ public:
+ LIR_HEADER(InArray)
+
+ LInArray(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& initLength)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, initLength);
+ }
+ const MInArray* mir() const { return mir_->toInArray(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* initLength() { return getOperand(2); }
+};
+
+class LGuardElementNotHole : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(GuardElementNotHole)
+
+ LGuardElementNotHole(const LAllocation& elements, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+// Load a value from an array's elements vector, loading |undefined| if we hit a
+// hole. Bail out if we get a negative index.
+class LLoadElementHole : public LInstructionHelper<BOX_PIECES, 3, 0> {
+ public:
+ LIR_HEADER(LoadElementHole)
+
+ LLoadElementHole(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& initLength)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, initLength);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MLoadElementHole* mir() const { return mir_->toLoadElementHole(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* initLength() { return getOperand(2); }
+};
+
+// Store a boxed value to a dense array's element vector.
+class LStoreElementV : public LInstructionHelper<0, 2 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(StoreElementV)
+
+ LStoreElementV(const LAllocation& elements, const LAllocation& index,
+ const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setBoxOperand(Value, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ static const size_t Value = 2;
+
+ const MStoreElement* mir() const { return mir_->toStoreElement(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+// Store a typed value to a dense array's elements vector. Compared to
+// LStoreElementV, this instruction can store doubles and constants directly,
+// and does not store the type tag if the array is monomorphic and known to
+// be packed.
+class LStoreElementT : public LInstructionHelper<0, 3, 0> {
+ public:
+ LIR_HEADER(StoreElementT)
+
+ LStoreElementT(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ const char* extraName() const {
+ return mir()->needsHoleCheck() ? "HoleCheck" : nullptr;
+ }
+
+ const MStoreElement* mir() const { return mir_->toStoreElement(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+};
+
+class LStoreHoleValueElement : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(StoreHoleValueElement)
+
+ LStoreHoleValueElement(const LAllocation& elements, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+// Like LStoreElementV, but supports indexes >= initialized length.
+class LStoreElementHoleV : public LInstructionHelper<0, 3 + BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(StoreElementHoleV)
+
+ LStoreElementHoleV(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LBoxAllocation& value,
+ const LDefinition& spectreTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setBoxOperand(Value, value);
+ setTemp(0, spectreTemp);
+ }
+
+ static const size_t Value = 3;
+
+ const MStoreElementHole* mir() const { return mir_->toStoreElementHole(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* elements() { return getOperand(1); }
+ const LAllocation* index() { return getOperand(2); }
+ const LDefinition* spectreTemp() { return getTemp(0); }
+};
+
+// Like LStoreElementT, but supports indexes >= initialized length.
+class LStoreElementHoleT : public LInstructionHelper<0, 4, 1> {
+ public:
+ LIR_HEADER(StoreElementHoleT)
+
+ LStoreElementHoleT(const LAllocation& object, const LAllocation& elements,
+ const LAllocation& index, const LAllocation& value,
+ const LDefinition& spectreTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, elements);
+ setOperand(2, index);
+ setOperand(3, value);
+ setTemp(0, spectreTemp);
+ }
+
+ const MStoreElementHole* mir() const { return mir_->toStoreElementHole(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* elements() { return getOperand(1); }
+ const LAllocation* index() { return getOperand(2); }
+ const LAllocation* value() { return getOperand(3); }
+ const LDefinition* spectreTemp() { return getTemp(0); }
+};
+
+class LArrayPopShift : public LInstructionHelper<BOX_PIECES, 1, 2> {
+ public:
+ LIR_HEADER(ArrayPopShift)
+
+ LArrayPopShift(const LAllocation& object, const LDefinition& temp0,
+ const LDefinition& temp1)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const char* extraName() const {
+ return mir()->mode() == MArrayPopShift::Pop ? "Pop" : "Shift";
+ }
+
+ const MArrayPopShift* mir() const { return mir_->toArrayPopShift(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp0() { return getTemp(0); }
+ const LDefinition* temp1() { return getTemp(1); }
+};
+
+class LArrayPush : public LInstructionHelper<1, 1 + BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(ArrayPush)
+
+ LArrayPush(const LAllocation& object, const LBoxAllocation& value,
+ const LDefinition& temp, const LDefinition& spectreTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, spectreTemp);
+ }
+
+ static const size_t Value = 1;
+
+ const MArrayPush* mir() const { return mir_->toArrayPush(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* spectreTemp() { return getTemp(1); }
+};
+
+class LArraySlice : public LCallInstructionHelper<1, 3, 2> {
+ public:
+ LIR_HEADER(ArraySlice)
+
+ LArraySlice(const LAllocation& obj, const LAllocation& begin,
+ const LAllocation& end, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, begin);
+ setOperand(2, end);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ const MArraySlice* mir() const { return mir_->toArraySlice(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* begin() { return getOperand(1); }
+ const LAllocation* end() { return getOperand(2); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LArrayJoin : public LCallInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(ArrayJoin)
+
+ LArrayJoin(const LAllocation& array, const LAllocation& sep,
+ const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, array);
+ setOperand(1, sep);
+ setTemp(0, temp);
+ }
+
+ const MArrayJoin* mir() const { return mir_->toArrayJoin(); }
+ const LDefinition* output() { return getDef(0); }
+ const LAllocation* array() { return getOperand(0); }
+ const LAllocation* separator() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LLoadUnboxedScalar : public LInstructionHelper<1, 2, 1> {
+ public:
+ LIR_HEADER(LoadUnboxedScalar)
+
+ LLoadUnboxedScalar(const LAllocation& elements, const LAllocation& index,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setTemp(0, temp);
+ }
+ const MLoadUnboxedScalar* mir() const { return mir_->toLoadUnboxedScalar(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LLoadUnboxedBigInt : public LInstructionHelper<1, 2, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(LoadUnboxedBigInt)
+
+ LLoadUnboxedBigInt(const LAllocation& elements, const LAllocation& index,
+ const LDefinition& temp, const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+ const MLoadUnboxedScalar* mir() const { return mir_->toLoadUnboxedScalar(); }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LLoadDataViewElement : public LInstructionHelper<1, 3, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(LoadDataViewElement)
+
+ LLoadDataViewElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& littleEndian, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, littleEndian);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+ const MLoadDataViewElement* mir() const {
+ return mir_->toLoadDataViewElement();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* littleEndian() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LLoadTypedArrayElementHole : public LInstructionHelper<BOX_PIECES, 2, 1> {
+ public:
+ LIR_HEADER(LoadTypedArrayElementHole)
+
+ LLoadTypedArrayElementHole(const LAllocation& object,
+ const LAllocation& index, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ setTemp(0, temp);
+ }
+ const MLoadTypedArrayElementHole* mir() const {
+ return mir_->toLoadTypedArrayElementHole();
+ }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LLoadTypedArrayElementHoleBigInt
+ : public LInstructionHelper<BOX_PIECES, 2, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(LoadTypedArrayElementHoleBigInt)
+
+ LLoadTypedArrayElementHoleBigInt(const LAllocation& object,
+ const LAllocation& index,
+ const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+ const MLoadTypedArrayElementHole* mir() const {
+ return mir_->toLoadTypedArrayElementHole();
+ }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LStoreUnboxedScalar : public LInstructionHelper<0, 3, 0> {
+ public:
+ LIR_HEADER(StoreUnboxedScalar)
+
+ LStoreUnboxedScalar(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ }
+
+ const MStoreUnboxedScalar* mir() const {
+ return mir_->toStoreUnboxedScalar();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+};
+
+class LStoreUnboxedBigInt : public LInstructionHelper<0, 3, INT64_PIECES> {
+ public:
+ LIR_HEADER(StoreUnboxedBigInt)
+
+ LStoreUnboxedBigInt(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value, const LInt64Definition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setInt64Temp(0, temp);
+ }
+
+ const MStoreUnboxedScalar* mir() const {
+ return mir_->toStoreUnboxedScalar();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ LInt64Definition temp() { return getInt64Temp(0); }
+};
+
+class LStoreDataViewElement
+ : public LInstructionHelper<0, 4, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(StoreDataViewElement)
+
+ LStoreDataViewElement(const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value,
+ const LAllocation& littleEndian,
+ const LDefinition& temp, const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setOperand(3, littleEndian);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const MStoreDataViewElement* mir() const {
+ return mir_->toStoreDataViewElement();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ const LAllocation* littleEndian() { return getOperand(3); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LStoreTypedArrayElementHole : public LInstructionHelper<0, 4, 1> {
+ public:
+ LIR_HEADER(StoreTypedArrayElementHole)
+
+ LStoreTypedArrayElementHole(const LAllocation& elements,
+ const LAllocation& length,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& spectreTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, length);
+ setOperand(2, index);
+ setOperand(3, value);
+ setTemp(0, spectreTemp);
+ }
+
+ const MStoreTypedArrayElementHole* mir() const {
+ return mir_->toStoreTypedArrayElementHole();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* length() { return getOperand(1); }
+ const LAllocation* index() { return getOperand(2); }
+ const LAllocation* value() { return getOperand(3); }
+ const LDefinition* spectreTemp() { return getTemp(0); }
+};
+
+class LStoreTypedArrayElementHoleBigInt
+ : public LInstructionHelper<0, 4, INT64_PIECES> {
+ public:
+ LIR_HEADER(StoreTypedArrayElementHoleBigInt)
+
+ LStoreTypedArrayElementHoleBigInt(const LAllocation& elements,
+ const LAllocation& length,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LInt64Definition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, length);
+ setOperand(2, index);
+ setOperand(3, value);
+ setInt64Temp(0, temp);
+ }
+
+ const MStoreTypedArrayElementHole* mir() const {
+ return mir_->toStoreTypedArrayElementHole();
+ }
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* length() { return getOperand(1); }
+ const LAllocation* index() { return getOperand(2); }
+ const LAllocation* value() { return getOperand(3); }
+ LInt64Definition temp() { return getInt64Temp(0); }
+};
+
+class LAtomicIsLockFree : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(AtomicIsLockFree)
+
+ explicit LAtomicIsLockFree(const LAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, value);
+ }
+ const LAllocation* value() { return getOperand(0); }
+};
+
+class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 4> {
+ public:
+ LIR_HEADER(CompareExchangeTypedArrayElement)
+
+ // ARM, ARM64, x86, x64
+ LCompareExchangeTypedArrayElement(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& oldval,
+ const LAllocation& newval,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ }
+ // MIPS32, MIPS64
+ LCompareExchangeTypedArrayElement(
+ const LAllocation& elements, const LAllocation& index,
+ const LAllocation& oldval, const LAllocation& newval,
+ const LDefinition& temp, const LDefinition& valueTemp,
+ const LDefinition& offsetTemp, const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, oldval);
+ setOperand(3, newval);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* oldval() { return getOperand(2); }
+ const LAllocation* newval() { return getOperand(3); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ const MCompareExchangeTypedArrayElement* mir() const {
+ return mir_->toCompareExchangeTypedArrayElement();
+ }
+};
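+
+// On LL/SC platforms (the MIPS32/MIPS64 constructors above), sub-word elements
+// are updated by operating on the containing aligned word. As a sketch of the
+// intended roles (inferred from the temp names, not spelled out in this
+// header): valueTemp holds the element value shifted into place, offsetTemp
+// the bit offset of the element within the word, and maskTemp the mask used
+// to insert it.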
+
+class LAtomicExchangeTypedArrayElement : public LInstructionHelper<1, 3, 4> {
+ public:
+ LIR_HEADER(AtomicExchangeTypedArrayElement)
+
+ // ARM, ARM64, x86, x64
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ }
+ // MIPS32, MIPS64
+ LAtomicExchangeTypedArrayElement(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp);
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ const MAtomicExchangeTypedArrayElement* mir() const {
+ return mir_->toAtomicExchangeTypedArrayElement();
+ }
+};
+
+class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 5> {
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinop)
+
+ static const int32_t valueOp = 2;
+
+ // ARM, ARM64, x86, x64
+ LAtomicTypedArrayElementBinop(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ // MIPS32, MIPS64
+ LAtomicTypedArrayElementBinop(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& temp2,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, temp2);
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 2);
+ return getOperand(2);
+ }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(2); }
+ const LDefinition* offsetTemp() { return getTemp(3); }
+ const LDefinition* maskTemp() { return getTemp(4); }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+// Atomic binary operation where the result is discarded.
+class LAtomicTypedArrayElementBinopForEffect
+ : public LInstructionHelper<0, 3, 4> {
+ public:
+ LIR_HEADER(AtomicTypedArrayElementBinopForEffect)
+
+ // ARM, ARM64, x86, x64
+ LAtomicTypedArrayElementBinopForEffect(
+ const LAllocation& elements, const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, flagTemp);
+ }
+ // MIPS32, MIPS64
+ LAtomicTypedArrayElementBinopForEffect(const LAllocation& elements,
+ const LAllocation& index,
+ const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ setOperand(2, value);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() { return getTemp(0); }
+ // Temp that may be used on LL/SC platforms for extract/insert bits of word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ const MAtomicTypedArrayElementBinop* mir() const {
+ return mir_->toAtomicTypedArrayElementBinop();
+ }
+};
+
+class LEffectiveAddress : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(EffectiveAddress);
+
+ LEffectiveAddress(const LAllocation& base, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, base);
+ setOperand(1, index);
+ }
+ const MEffectiveAddress* mir() const { return mir_->toEffectiveAddress(); }
+ const LAllocation* base() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+class LClampIToUint8 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ClampIToUint8)
+
+ explicit LClampIToUint8(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+};
+
+class LClampDToUint8 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(ClampDToUint8)
+
+ LClampDToUint8(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+};
+
+class LClampVToUint8 : public LInstructionHelper<1, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(ClampVToUint8)
+
+ LClampVToUint8(const LBoxAllocation& input, const LDefinition& tempFloat)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, tempFloat);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* tempFloat() { return getTemp(0); }
+ const MClampToUint8* mir() const { return mir_->toClampToUint8(); }
+};
+
+// Load a boxed value from an object's fixed slot.
+class LLoadFixedSlotV : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(LoadFixedSlotV)
+
+ explicit LLoadFixedSlotV(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+ const MLoadFixedSlot* mir() const { return mir_->toLoadFixedSlot(); }
+};
+
+// Load a typed value from an object's fixed slot.
+class LLoadFixedSlotT : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(LoadFixedSlotT)
+
+ explicit LLoadFixedSlotT(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+ const MLoadFixedSlot* mir() const { return mir_->toLoadFixedSlot(); }
+};
+
+class LLoadFixedSlotAndUnbox : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(LoadFixedSlotAndUnbox)
+
+ explicit LLoadFixedSlotAndUnbox(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ const MLoadFixedSlotAndUnbox* mir() const {
+ return mir_->toLoadFixedSlotAndUnbox();
+ }
+};
+
+class LLoadDynamicSlotAndUnbox : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(LoadDynamicSlotAndUnbox)
+
+ explicit LLoadDynamicSlotAndUnbox(const LAllocation& slots)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, slots);
+ }
+
+ const LAllocation* slots() { return getOperand(0); }
+
+ const MLoadDynamicSlotAndUnbox* mir() const {
+ return mir_->toLoadDynamicSlotAndUnbox();
+ }
+};
+
+class LLoadElementAndUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(LoadElementAndUnbox)
+
+ LLoadElementAndUnbox(const LAllocation& elements, const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, elements);
+ setOperand(1, index);
+ }
+
+ const LAllocation* elements() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+
+ const MLoadElementAndUnbox* mir() const {
+ return mir_->toLoadElementAndUnbox();
+ }
+};
+
+class LAddAndStoreSlot : public LInstructionHelper<0, 1 + BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(AddAndStoreSlot)
+
+ LAddAndStoreSlot(const LAllocation& obj, const LBoxAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Value = 1;
+
+ const MAddAndStoreSlot* mir() const { return mir_->toAddAndStoreSlot(); }
+};
+
+class LAllocateAndStoreSlot
+ : public LCallInstructionHelper<0, 1 + BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(AllocateAndStoreSlot)
+
+ LAllocateAndStoreSlot(const LAllocation& obj, const LBoxAllocation& value,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ static const size_t Value = 1;
+
+ const MAllocateAndStoreSlot* mir() const {
+ return mir_->toAllocateAndStoreSlot();
+ }
+};
+
+// Store a boxed value to an object's fixed slot.
+class LStoreFixedSlotV : public LInstructionHelper<0, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(StoreFixedSlotV)
+
+ LStoreFixedSlotV(const LAllocation& obj, const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MStoreFixedSlot* mir() const { return mir_->toStoreFixedSlot(); }
+ const LAllocation* obj() { return getOperand(0); }
+};
+
+// Store a typed value to an object's fixed slot.
+class LStoreFixedSlotT : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(StoreFixedSlotT)
+
+ LStoreFixedSlotT(const LAllocation& obj, const LAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ }
+ const MStoreFixedSlot* mir() const { return mir_->toStoreFixedSlot(); }
+ const LAllocation* obj() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+};
+
+// Note: Name ICs always return a Value. There are no V/T variants.
+class LGetNameCache : public LInstructionHelper<BOX_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(GetNameCache)
+
+ LGetNameCache(const LAllocation& envObj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, envObj);
+ setTemp(0, temp);
+ }
+ const LAllocation* envObj() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ const MGetNameCache* mir() const { return mir_->toGetNameCache(); }
+};
+
+class LCallGetIntrinsicValue : public LCallInstructionHelper<BOX_PIECES, 0, 0> {
+ public:
+ LIR_HEADER(CallGetIntrinsicValue)
+
+ const MCallGetIntrinsicValue* mir() const {
+ return mir_->toCallGetIntrinsicValue();
+ }
+
+ LCallGetIntrinsicValue() : LCallInstructionHelper(classOpcode) {}
+};
+
+class LGetPropSuperCache
+ : public LInstructionHelper<BOX_PIECES, 1 + 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GetPropSuperCache)
+
+ static const size_t Receiver = 1;
+ static const size_t Id = Receiver + BOX_PIECES;
+
+ LGetPropSuperCache(const LAllocation& obj, const LBoxAllocation& receiver,
+ const LBoxAllocation& id)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(Receiver, receiver);
+ setBoxOperand(Id, id);
+ }
+ const LAllocation* obj() { return getOperand(0); }
+ const MGetPropSuperCache* mir() const { return mir_->toGetPropSuperCache(); }
+};
+
+// Patchable jump to stubs generated for a GetProperty cache, which loads a
+// boxed value.
+class LGetPropertyCache
+ : public LInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GetPropertyCache)
+
+ static const size_t Value = 0;
+ static const size_t Id = BOX_PIECES;
+
+ LGetPropertyCache(const LBoxAllocation& value, const LBoxAllocation& id)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ setBoxOperand(Id, id);
+ }
+ const MGetPropertyCache* mir() const { return mir_->toGetPropertyCache(); }
+};
+
+class LBindNameCache : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(BindNameCache)
+
+ LBindNameCache(const LAllocation& envChain, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, envChain);
+ setTemp(0, temp);
+ }
+ const LAllocation* environmentChain() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ const MBindNameCache* mir() const { return mir_->toBindNameCache(); }
+};
+
+class LCallBindVar : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(CallBindVar)
+
+ explicit LCallBindVar(const LAllocation& envChain)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, envChain);
+ }
+ const LAllocation* environmentChain() { return getOperand(0); }
+ const MCallBindVar* mir() const { return mir_->toCallBindVar(); }
+};
+
+// Load a value from an object's dslots or a slots vector.
+class LLoadDynamicSlotV : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(LoadDynamicSlotV)
+
+ explicit LLoadDynamicSlotV(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+ const MLoadDynamicSlot* mir() const { return mir_->toLoadDynamicSlot(); }
+};
+
+// Store a value to an object's dslots or a slots vector.
+class LStoreDynamicSlotV : public LInstructionHelper<0, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(StoreDynamicSlotV)
+
+ LStoreDynamicSlotV(const LAllocation& slots, const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, slots);
+ setBoxOperand(Value, value);
+ }
+
+ static const size_t Value = 1;
+
+ const MStoreDynamicSlot* mir() const { return mir_->toStoreDynamicSlot(); }
+ const LAllocation* slots() { return getOperand(0); }
+};
+
+// Store a typed value to an object's dslots or a slots vector. This has a
+// few advantages over LStoreDynamicSlotV:
+// 1) We can bypass storing the type tag if the slot has the same type as
+// the value.
+// 2) Better register allocation: we can store constants and FP regs directly
+// without requiring a second register for the value.
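+// For example, storing an Int32 constant into a slot already known to hold an
+// Int32 can skip the type tag entirely and encode the constant directly in the
+// store, with no extra register needed for the value.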
+class LStoreDynamicSlotT : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(StoreDynamicSlotT)
+
+ LStoreDynamicSlotT(const LAllocation& slots, const LAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, slots);
+ setOperand(1, value);
+ }
+ const MStoreDynamicSlot* mir() const { return mir_->toStoreDynamicSlot(); }
+ const LAllocation* slots() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+};
+
+// Read length field of a JSString*.
+class LStringLength : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(StringLength)
+
+ explicit LStringLength(const LAllocation& string)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, string);
+ }
+
+ const LAllocation* string() { return getOperand(0); }
+};
+
+// Takes the floor of a double precision number and converts it to an int32.
+// Implements Math.floor().
+class LFloor : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Floor)
+
+ explicit LFloor(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Takes the floor of a single precision number and converts it to an int32.
+// Implements Math.floor().
+class LFloorF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(FloorF)
+
+ explicit LFloorF(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Takes the ceiling of a double precision number and converts it to an int32.
+// Implements Math.ceil().
+class LCeil : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Ceil)
+
+ explicit LCeil(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Takes the ceiling of a single precision number and converts it to an int32.
+// Implements Math.ceil().
+class LCeilF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(CeilF)
+
+ explicit LCeilF(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Rounds a double precision number and converts it to an int32.
+// Implements Math.round().
+class LRound : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(Round)
+
+ LRound(const LAllocation& num, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+ MRound* mir() const { return mir_->toRound(); }
+};
+
+// Rounds a single precision number and converts it to an int32.
+// Implements Math.round().
+class LRoundF : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(RoundF)
+
+ LRoundF(const LAllocation& num, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+ MRound* mir() const { return mir_->toRound(); }
+};
+
+// Truncates a double precision number and converts it to an int32.
+// Implements Math.trunc().
+class LTrunc : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Trunc)
+
+ explicit LTrunc(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Truncates a single precision number and converts it to an int32.
+// Implements Math.trunc().
+class LTruncF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(TruncF)
+
+ explicit LTruncF(const LAllocation& num) : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+};
+
+// Rounds a double precision number according to mir()->roundingMode(),
+// and keeps a double output.
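+// For example, with an upward (ceiling) rounding mode, -1.5 rounds to -1.0 and
+// the result remains a double, unlike LCeil, which produces an int32.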
+class LNearbyInt : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NearbyInt)
+
+ explicit LNearbyInt(const LAllocation& num)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+ MNearbyInt* mir() const { return mir_->toNearbyInt(); }
+};
+
+// Rounds a single precision number according to mir()->roundingMode(),
+// and keeps a single output.
+class LNearbyIntF : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NearbyIntF)
+
+ explicit LNearbyIntF(const LAllocation& num)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, num);
+ }
+ MNearbyInt* mir() const { return mir_->toNearbyInt(); }
+};
+
+// Load a function's call environment.
+class LFunctionEnvironment : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(FunctionEnvironment)
+
+ explicit LFunctionEnvironment(const LAllocation& function)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, function);
+ }
+ const LAllocation* function() { return getOperand(0); }
+};
+
+class LHomeObject : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(HomeObject)
+
+ explicit LHomeObject(const LAllocation& function)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, function);
+ }
+ const LAllocation* function() { return getOperand(0); }
+};
+
+class LHomeObjectSuperBase : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(HomeObjectSuperBase)
+
+ explicit LHomeObjectSuperBase(const LAllocation& homeObject)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, homeObject);
+ }
+
+ const LAllocation* homeObject() { return getOperand(0); }
+};
+
+// Allocate a new LexicalEnvironmentObject.
+class LNewLexicalEnvironmentObject : public LCallInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(NewLexicalEnvironmentObject)
+
+ explicit LNewLexicalEnvironmentObject(const LAllocation& enclosing)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, enclosing);
+ }
+ const LAllocation* enclosing() { return getOperand(0); }
+
+ MNewLexicalEnvironmentObject* mir() const {
+ return mir_->toNewLexicalEnvironmentObject();
+ }
+};
+
+// Copy a LexicalEnvironmentObject.
+class LCopyLexicalEnvironmentObject : public LCallInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(CopyLexicalEnvironmentObject)
+
+ explicit LCopyLexicalEnvironmentObject(const LAllocation& env)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, env);
+ }
+ const LAllocation* env() { return getOperand(0); }
+
+ MCopyLexicalEnvironmentObject* mir() const {
+ return mir_->toCopyLexicalEnvironmentObject();
+ }
+};
+
+class LCallSetElement
+ : public LCallInstructionHelper<0, 1 + 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CallSetElement)
+
+ static const size_t Index = 1;
+ static const size_t Value = 1 + BOX_PIECES;
+
+ LCallSetElement(const LAllocation& obj, const LBoxAllocation& index,
+ const LBoxAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(Index, index);
+ setBoxOperand(Value, value);
+ }
+
+ const MCallSetElement* mir() const { return mir_->toCallSetElement(); }
+};
+
+class LCallDeleteProperty : public LCallInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CallDeleteProperty)
+
+ static const size_t Value = 0;
+
+ explicit LCallDeleteProperty(const LBoxAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ }
+
+ MDeleteProperty* mir() const { return mir_->toDeleteProperty(); }
+};
+
+class LCallDeleteElement : public LCallInstructionHelper<1, 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CallDeleteElement)
+
+ static const size_t Value = 0;
+ static const size_t Index = BOX_PIECES;
+
+ LCallDeleteElement(const LBoxAllocation& value, const LBoxAllocation& index)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ setBoxOperand(Index, index);
+ }
+
+ MDeleteElement* mir() const { return mir_->toDeleteElement(); }
+};
+
+// Patchable jump to stubs generated for a SetProperty cache.
+class LSetPropertyCache : public LInstructionHelper<0, 1 + 2 * BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(SetPropertyCache)
+
+  // Takes an additional temp: this is intended to be FloatReg0, to allow the
+  // actual cache code to safely clobber that value without save and restore.
+ LSetPropertyCache(const LAllocation& object, const LBoxAllocation& id,
+ const LBoxAllocation& value, const LDefinition& temp,
+ const LDefinition& tempDouble)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setBoxOperand(Id, id);
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ setTemp(1, tempDouble);
+ }
+
+ static const size_t Id = 1;
+ static const size_t Value = 1 + BOX_PIECES;
+
+ const MSetPropertyCache* mir() const { return mir_->toSetPropertyCache(); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGetIteratorCache : public LInstructionHelper<1, BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(GetIteratorCache)
+
+ static const size_t Value = 0;
+
+ LGetIteratorCache(const LBoxAllocation& value, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+ const MGetIteratorCache* mir() const { return mir_->toGetIteratorCache(); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LOptimizeSpreadCallCache : public LInstructionHelper<1, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(OptimizeSpreadCallCache)
+
+ static const size_t Value = 0;
+
+ LOptimizeSpreadCallCache(const LBoxAllocation& value, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ setTemp(0, temp);
+ }
+ const MOptimizeSpreadCallCache* mir() const {
+ return mir_->toOptimizeSpreadCallCache();
+ }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LIteratorMore : public LInstructionHelper<BOX_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(IteratorMore)
+
+ LIteratorMore(const LAllocation& iterator, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, iterator);
+ setTemp(0, temp);
+ }
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ MIteratorMore* mir() const { return mir_->toIteratorMore(); }
+};
+
+class LIsNoIterAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(IsNoIterAndBranch)
+
+ LIsNoIterAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& input)
+ : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+class LIteratorEnd : public LInstructionHelper<0, 1, 3> {
+ public:
+ LIR_HEADER(IteratorEnd)
+
+ LIteratorEnd(const LAllocation& iterator, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, iterator);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+ MIteratorEnd* mir() const { return mir_->toIteratorEnd(); }
+};
+
+// Read the number of actual arguments.
+class LArgumentsLength : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(ArgumentsLength)
+
+ LArgumentsLength() : LInstructionHelper(classOpcode) {}
+};
+
+// Load a value from the actual arguments.
+class LGetFrameArgument : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(GetFrameArgument)
+
+ explicit LGetFrameArgument(const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, index);
+ }
+ const LAllocation* index() { return getOperand(0); }
+};
+
+// Create the rest parameter.
+class LRest : public LCallInstructionHelper<1, 1, 3> {
+ public:
+ LIR_HEADER(Rest)
+
+ LRest(const LAllocation& numActuals, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, numActuals);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+ const LAllocation* numActuals() { return getOperand(0); }
+ MRest* mir() const { return mir_->toRest(); }
+};
+
+// Convert a Boolean to an Int64, following ToBigInt.
+class LBooleanToInt64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(BooleanToInt64)
+
+ explicit LBooleanToInt64(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ const MToInt64* mir() const { return mir_->toToInt64(); }
+};
+
+// Convert a String to an Int64, following ToBigInt.
+class LStringToInt64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(StringToInt64)
+
+ explicit LStringToInt64(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ const MToInt64* mir() const { return mir_->toToInt64(); }
+};
+
+// Simulate ToBigInt on a Value and produce a matching Int64.
+class LValueToInt64 : public LInstructionHelper<INT64_PIECES, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(ValueToInt64)
+
+ explicit LValueToInt64(const LBoxAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ const MToInt64* mir() const { return mir_->toToInt64(); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Truncate a BigInt to an unboxed int64.
+class LTruncateBigIntToInt64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(TruncateBigIntToInt64)
+
+ explicit LTruncateBigIntToInt64(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ const MTruncateBigIntToInt64* mir() const {
+ return mir_->toTruncateBigIntToInt64();
+ }
+};
+
+// Create a new BigInt* from an unboxed int64.
+class LInt64ToBigInt : public LInstructionHelper<1, INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(Int64ToBigInt)
+
+ LInt64ToBigInt(const LInt64Allocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(Input, input);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 0;
+
+ const MInt64ToBigInt* mir() const { return mir_->toInt64ToBigInt(); }
+ const LInt64Allocation input() { return getInt64Operand(Input); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
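+// [Editorial sketch, not part of this patch.] The BigInt/Int64 conversion
+// nodes above are typically built with the Int64 operand/definition helpers
+// (defineInt64, useInt64Register and friends are assumed here), e.g.:
+//
+//   // BigInt -> unboxed int64.
+//   defineInt64(
+//       new (alloc()) LTruncateBigIntToInt64(useRegister(ins->input())), ins);
+//
+//   // Unboxed int64 -> BigInt*; this allocates, so it also gets a safepoint.
+//   auto* lir =
+//       new (alloc()) LInt64ToBigInt(useInt64Register(ins->input()), temp());
+//   define(lir, ins);
+//   assignSafepoint(lir, ins);
+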
+// Generational write barrier used when writing an object to another object.
+class LPostWriteBarrierO : public LInstructionHelper<0, 2, 1> {
+ public:
+ LIR_HEADER(PostWriteBarrierO)
+
+ LPostWriteBarrierO(const LAllocation& obj, const LAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteBarrier* mir() const { return mir_->toPostWriteBarrier(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Generational write barrier used when writing a string to an object.
+class LPostWriteBarrierS : public LInstructionHelper<0, 2, 1> {
+ public:
+ LIR_HEADER(PostWriteBarrierS)
+
+ LPostWriteBarrierS(const LAllocation& obj, const LAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteBarrier* mir() const { return mir_->toPostWriteBarrier(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Generational write barrier used when writing a BigInt to an object.
+class LPostWriteBarrierBI : public LInstructionHelper<0, 2, 1> {
+ public:
+ LIR_HEADER(PostWriteBarrierBI)
+
+ LPostWriteBarrierBI(const LAllocation& obj, const LAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteBarrier* mir() const { return mir_->toPostWriteBarrier(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Generational write barrier used when writing a value to another object.
+class LPostWriteBarrierV : public LInstructionHelper<0, 1 + BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(PostWriteBarrierV)
+
+ LPostWriteBarrierV(const LAllocation& obj, const LBoxAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(Input, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 1;
+
+ const MPostWriteBarrier* mir() const { return mir_->toPostWriteBarrier(); }
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
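+// [Editorial sketch, not part of this patch.] The LPostWriteBarrier*
+// variants above are chosen in lowering according to the statically known
+// type of the written value; when the type is unknown, the boxed-Value form
+// is used, roughly as follows (helper names assumed):
+//
+//   // MIRType::Object/String/BigInt pick the O/S/BI variants analogously.
+//   auto* lir = new (alloc()) LPostWriteBarrierV(
+//       useRegister(ins->object()), useBox(ins->value()), temp());
+//   add(lir, ins);
+//   assignSafepoint(lir, ins);
+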
+// Generational write barrier used when writing an object to another object's
+// elements.
+class LPostWriteElementBarrierO : public LInstructionHelper<0, 3, 1> {
+ public:
+ LIR_HEADER(PostWriteElementBarrierO)
+
+ LPostWriteElementBarrierO(const LAllocation& obj, const LAllocation& value,
+ const LAllocation& index, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setOperand(2, index);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ const LAllocation* value() { return getOperand(1); }
+
+ const LAllocation* index() { return getOperand(2); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Generational write barrier used when writing a string to an object's
+// elements.
+class LPostWriteElementBarrierS : public LInstructionHelper<0, 3, 1> {
+ public:
+ LIR_HEADER(PostWriteElementBarrierS)
+
+ LPostWriteElementBarrierS(const LAllocation& obj, const LAllocation& value,
+ const LAllocation& index, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setOperand(2, index);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ const LAllocation* value() { return getOperand(1); }
+
+ const LAllocation* index() { return getOperand(2); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Generational write barrier used when writing a BigInt to an object's
+// elements.
+class LPostWriteElementBarrierBI : public LInstructionHelper<0, 3, 1> {
+ public:
+ LIR_HEADER(PostWriteElementBarrierBI)
+
+ LPostWriteElementBarrierBI(const LAllocation& obj, const LAllocation& value,
+ const LAllocation& index, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, value);
+ setOperand(2, index);
+ setTemp(0, temp);
+ }
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ const LAllocation* value() { return getOperand(1); }
+
+ const LAllocation* index() { return getOperand(2); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Generational write barrier used when writing a value to another object's
+// elements.
+class LPostWriteElementBarrierV
+ : public LInstructionHelper<0, 2 + BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(PostWriteElementBarrierV)
+
+ LPostWriteElementBarrierV(const LAllocation& obj, const LAllocation& index,
+ const LBoxAllocation& value,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, index);
+ setBoxOperand(Input, value);
+ setTemp(0, temp);
+ }
+
+ static const size_t Input = 2;
+
+ const MPostWriteElementBarrier* mir() const {
+ return mir_->toPostWriteElementBarrier();
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+
+ const LAllocation* index() { return getOperand(1); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Guard against an object's identity.
+class LGuardObjectIdentity : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(GuardObjectIdentity)
+
+ LGuardObjectIdentity(const LAllocation& in, const LAllocation& expected)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setOperand(1, expected);
+ }
+ const LAllocation* input() { return getOperand(0); }
+ const LAllocation* expected() { return getOperand(1); }
+ const MGuardObjectIdentity* mir() const {
+ return mir_->toGuardObjectIdentity();
+ }
+};
+
+// Guard against a function's identity.
+class LGuardSpecificFunction : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(GuardSpecificFunction)
+
+ LGuardSpecificFunction(const LAllocation& in, const LAllocation& expected)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setOperand(1, expected);
+ }
+ const LAllocation* input() { return getOperand(0); }
+ const LAllocation* expected() { return getOperand(1); }
+};
+
+class LGuardSpecificAtom : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardSpecificAtom)
+
+ LGuardSpecificAtom(const LAllocation& str, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, str);
+ setTemp(0, temp);
+ }
+ const LAllocation* str() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ const MGuardSpecificAtom* mir() const { return mir_->toGuardSpecificAtom(); }
+};
+
+class LGuardSpecificSymbol : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(GuardSpecificSymbol)
+
+ explicit LGuardSpecificSymbol(const LAllocation& symbol)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, symbol);
+ }
+ const LAllocation* symbol() { return getOperand(0); }
+ const MGuardSpecificSymbol* mir() const {
+ return mir_->toGuardSpecificSymbol();
+ }
+};
+
+class LGuardStringToIndex : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(GuardStringToIndex)
+
+ explicit LGuardStringToIndex(const LAllocation& str)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, str);
+ }
+
+ const LAllocation* string() { return getOperand(0); }
+};
+
+class LGuardStringToInt32 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(GuardStringToInt32)
+
+ LGuardStringToInt32(const LAllocation& str, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, str);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* string() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardStringToDouble : public LInstructionHelper<1, 1, 2> {
+ public:
+ LIR_HEADER(GuardStringToDouble)
+
+ LGuardStringToDouble(const LAllocation& str, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, str);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* string() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LGuardShape : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(GuardShape)
+
+ LGuardShape(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+ const MGuardShape* mir() const { return mir_->toGuardShape(); }
+};
+
+class LGuardProto : public LInstructionHelper<0, 2, 1> {
+ public:
+ LIR_HEADER(GuardProto)
+
+ LGuardProto(const LAllocation& obj, const LAllocation& expected,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setOperand(1, expected);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* expected() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardNullProto : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardNullProto)
+
+ LGuardNullProto(const LAllocation& obj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardIsNativeObject : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardIsNativeObject)
+
+ LGuardIsNativeObject(const LAllocation& obj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardIsProxy : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardIsProxy)
+
+ LGuardIsProxy(const LAllocation& obj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardIsNotProxy : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardIsNotProxy)
+
+ LGuardIsNotProxy(const LAllocation& obj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardIsNotDOMProxy : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardIsNotDOMProxy)
+
+ LGuardIsNotDOMProxy(const LAllocation& proxy, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* proxy() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LProxyGet : public LCallInstructionHelper<BOX_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(ProxyGet)
+
+ LProxyGet(const LAllocation& proxy, const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* proxy() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ MProxyGet* mir() const { return mir_->toProxyGet(); }
+};
+
+class LProxyGetByValue
+ : public LCallInstructionHelper<BOX_PIECES, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ProxyGetByValue)
+
+ LProxyGetByValue(const LAllocation& proxy, const LBoxAllocation& idVal)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ setBoxOperand(IdIndex, idVal);
+ }
+
+ static const size_t IdIndex = 1;
+
+ const LAllocation* proxy() { return getOperand(0); }
+};
+
+class LProxyHasProp
+ : public LCallInstructionHelper<BOX_PIECES, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ProxyHasProp)
+
+ LProxyHasProp(const LAllocation& proxy, const LBoxAllocation& idVal)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ setBoxOperand(IdIndex, idVal);
+ }
+
+ static const size_t IdIndex = 1;
+
+ const LAllocation* proxy() { return getOperand(0); }
+
+ MProxyHasProp* mir() const { return mir_->toProxyHasProp(); }
+};
+
+class LProxySet : public LCallInstructionHelper<0, 1 + BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(ProxySet)
+
+ LProxySet(const LAllocation& proxy, const LBoxAllocation& rhs,
+ const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ setBoxOperand(RhsIndex, rhs);
+ setTemp(0, temp);
+ }
+
+ static const size_t RhsIndex = 1;
+
+ const LAllocation* proxy() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ MProxySet* mir() const { return mir_->toProxySet(); }
+};
+
+class LProxySetByValue
+ : public LCallInstructionHelper<0, 1 + 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ProxySetByValue)
+
+ LProxySetByValue(const LAllocation& proxy, const LBoxAllocation& idVal,
+ const LBoxAllocation& rhs)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, proxy);
+ setBoxOperand(IdIndex, idVal);
+ setBoxOperand(RhsIndex, rhs);
+ }
+
+ static const size_t IdIndex = 1;
+ static const size_t RhsIndex = 1 + BOX_PIECES;
+
+ const LAllocation* proxy() { return getOperand(0); }
+
+ MProxySetByValue* mir() const { return mir_->toProxySetByValue(); }
+};
+
+class LCallSetArrayLength
+ : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CallSetArrayLength)
+
+ LCallSetArrayLength(const LAllocation& obj, const LBoxAllocation& rhs)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(RhsIndex, rhs);
+ }
+
+ static const size_t RhsIndex = 1;
+
+ const LAllocation* obj() { return getOperand(0); }
+
+ MCallSetArrayLength* mir() const { return mir_->toCallSetArrayLength(); }
+};
+
+class LMegamorphicLoadSlot : public LCallInstructionHelper<BOX_PIECES, 1, 3> {
+ public:
+ LIR_HEADER(MegamorphicLoadSlot)
+
+ LMegamorphicLoadSlot(const LAllocation& obj, const LDefinition& temp1,
+ const LDefinition& temp2, const LDefinition& temp3)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+
+ MMegamorphicLoadSlot* mir() const { return mir_->toMegamorphicLoadSlot(); }
+};
+
+class LMegamorphicLoadSlotByValue
+ : public LCallInstructionHelper<BOX_PIECES, 1 + BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(MegamorphicLoadSlotByValue)
+
+ LMegamorphicLoadSlotByValue(const LAllocation& obj,
+ const LBoxAllocation& idVal,
+ const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(IdIndex, idVal);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ static const size_t IdIndex = 1;
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MMegamorphicLoadSlotByValue* mir() const {
+ return mir_->toMegamorphicLoadSlotByValue();
+ }
+};
+
+class LMegamorphicStoreSlot
+ : public LCallInstructionHelper<BOX_PIECES, 1 + BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(MegamorphicStoreSlot)
+
+ LMegamorphicStoreSlot(const LAllocation& obj, const LBoxAllocation& rhs,
+ const LDefinition& temp1, const LDefinition& temp2,
+ const LDefinition& temp3)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(RhsIndex, rhs);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ setTemp(2, temp3);
+ }
+
+ static const size_t RhsIndex = 1;
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+ const LDefinition* temp3() { return getTemp(2); }
+
+ MMegamorphicStoreSlot* mir() const { return mir_->toMegamorphicStoreSlot(); }
+};
+
+class LMegamorphicHasProp
+ : public LCallInstructionHelper<1, 1 + BOX_PIECES, 2> {
+ public:
+ LIR_HEADER(MegamorphicHasProp)
+
+ LMegamorphicHasProp(const LAllocation& obj, const LBoxAllocation& idVal,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setBoxOperand(IdIndex, idVal);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ static const size_t IdIndex = 1;
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MMegamorphicHasProp* mir() const { return mir_->toMegamorphicHasProp(); }
+};
+
+class LGuardIsNotArrayBufferMaybeShared : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardIsNotArrayBufferMaybeShared)
+
+ LGuardIsNotArrayBufferMaybeShared(const LAllocation& obj,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardIsTypedArray : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardIsTypedArray)
+
+ LGuardIsTypedArray(const LAllocation& obj, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, obj);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardObjectGroup : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(GuardObjectGroup)
+
+ LGuardObjectGroup(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+ const MGuardObjectGroup* mir() const { return mir_->toGuardObjectGroup(); }
+};
+
+class LGuardNoDenseElements : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardNoDenseElements)
+
+ LGuardNoDenseElements(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LInCache : public LInstructionHelper<1, BOX_PIECES + 1, 1> {
+ public:
+ LIR_HEADER(InCache)
+ LInCache(const LBoxAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* lhs() { return getOperand(LHS); }
+ const LAllocation* rhs() { return getOperand(RHS); }
+ const LDefinition* temp() { return getTemp(0); }
+ const MInCache* mir() const { return mir_->toInCache(); }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LHasOwnCache : public LInstructionHelper<1, 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(HasOwnCache)
+
+ static const size_t Value = 0;
+ static const size_t Id = BOX_PIECES;
+
+ LHasOwnCache(const LBoxAllocation& value, const LBoxAllocation& id)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ setBoxOperand(Id, id);
+ }
+
+ const MHasOwnCache* mir() const { return mir_->toHasOwnCache(); }
+};
+
+class LCheckPrivateFieldCache
+ : public LInstructionHelper<1, 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CheckPrivateFieldCache)
+
+ static const size_t Value = 0;
+ static const size_t Id = BOX_PIECES;
+
+ LCheckPrivateFieldCache(const LBoxAllocation& value, const LBoxAllocation& id)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ setBoxOperand(Id, id);
+ }
+
+ const MCheckPrivateFieldCache* mir() const {
+ return mir_->toCheckPrivateFieldCache();
+ }
+};
+
+class LInstanceOfO : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(InstanceOfO)
+ explicit LInstanceOfO(const LAllocation& lhs, const LAllocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ MInstanceOf* mir() const { return mir_->toInstanceOf(); }
+
+ const LAllocation* lhs() { return getOperand(0); }
+ const LAllocation* rhs() { return getOperand(1); }
+};
+
+class LInstanceOfV : public LInstructionHelper<1, BOX_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(InstanceOfV)
+ explicit LInstanceOfV(const LBoxAllocation& lhs, const LAllocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ }
+
+ MInstanceOf* mir() const { return mir_->toInstanceOf(); }
+
+ const LAllocation* rhs() { return getOperand(RHS); }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LInstanceOfCache : public LInstructionHelper<1, BOX_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(InstanceOfCache)
+ LInstanceOfCache(const LBoxAllocation& lhs, const LAllocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(LHS, lhs);
+ setOperand(RHS, rhs);
+ }
+
+ const LDefinition* output() { return this->getDef(0); }
+ const LAllocation* rhs() { return getOperand(RHS); }
+
+ static const size_t LHS = 0;
+ static const size_t RHS = BOX_PIECES;
+};
+
+class LIsCallableO : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IsCallableO);
+ explicit LIsCallableO(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ MIsCallable* mir() const { return mir_->toIsCallable(); }
+};
+
+class LIsCallableV : public LInstructionHelper<1, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(IsCallableV);
+ static const size_t Value = 0;
+
+ LIsCallableV(const LBoxAllocation& value, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(0, value);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+ MIsCallable* mir() const { return mir_->toIsCallable(); }
+};
+
+class LIsConstructor : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IsConstructor);
+ explicit LIsConstructor(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ MIsConstructor* mir() const { return mir_->toIsConstructor(); }
+};
+
+class LIsCrossRealmArrayConstructor : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IsCrossRealmArrayConstructor);
+ explicit LIsCrossRealmArrayConstructor(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+class LIsArrayO : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IsArrayO);
+
+ explicit LIsArrayO(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() { return getOperand(0); }
+ MIsArray* mir() const { return mir_->toIsArray(); }
+};
+
+class LIsArrayV : public LInstructionHelper<1, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(IsArrayV);
+ static const size_t Value = 0;
+
+ LIsArrayV(const LBoxAllocation& value, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(0, value);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+ MIsArray* mir() const { return mir_->toIsArray(); }
+};
+
+class LIsTypedArray : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IsTypedArray);
+
+ explicit LIsTypedArray(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+ const LAllocation* object() { return getOperand(0); }
+ MIsTypedArray* mir() const { return mir_->toIsTypedArray(); }
+};
+
+class LIsObject : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(IsObject);
+ static const size_t Input = 0;
+
+ explicit LIsObject(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ MIsObject* mir() const { return mir_->toIsObject(); }
+};
+
+class LIsObjectAndBranch : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(IsObjectAndBranch)
+
+ LIsObjectAndBranch(MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& input)
+ : LControlInstructionHelper(classOpcode) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+};
+
+class LIsNullOrUndefined : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(IsNullOrUndefined);
+ static const size_t Input = 0;
+
+ explicit LIsNullOrUndefined(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ MIsNullOrUndefined* mir() const { return mir_->toIsNullOrUndefined(); }
+};
+
+class LIsNullOrUndefinedAndBranch
+ : public LControlInstructionHelper<2, BOX_PIECES, 0> {
+ MIsNullOrUndefined* isNullOrUndefined_;
+
+ public:
+ LIR_HEADER(IsNullOrUndefinedAndBranch)
+ static const size_t Input = 0;
+
+ LIsNullOrUndefinedAndBranch(MIsNullOrUndefined* isNullOrUndefined,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse,
+ const LBoxAllocation& input)
+ : LControlInstructionHelper(classOpcode),
+ isNullOrUndefined_(isNullOrUndefined) {
+ setSuccessor(0, ifTrue);
+ setSuccessor(1, ifFalse);
+ setBoxOperand(Input, input);
+ }
+
+ MBasicBlock* ifTrue() const { return getSuccessor(0); }
+ MBasicBlock* ifFalse() const { return getSuccessor(1); }
+
+ MIsNullOrUndefined* isNullOrUndefinedMir() const {
+ return isNullOrUndefined_;
+ }
+};
+
+class LHasClass : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(HasClass);
+ explicit LHasClass(const LAllocation& lhs) : LInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ }
+
+ const LAllocation* lhs() { return getOperand(0); }
+ MHasClass* mir() const { return mir_->toHasClass(); }
+};
+
+class LGuardToClass : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(GuardToClass);
+ explicit LGuardToClass(const LAllocation& lhs, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* lhs() { return getOperand(0); }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MGuardToClass* mir() const { return mir_->toGuardToClass(); }
+};
+
+class LObjectClassToString : public LCallInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(ObjectClassToString);
+
+ LObjectClassToString(const LAllocation& lhs, const LDefinition& temp)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ MObjectClassToString* mir() const { return mir_->toObjectClassToString(); }
+};
+
+template <size_t Defs, size_t Ops>
+class LWasmSelectBase : public LInstructionHelper<Defs, Ops, 0> {
+ typedef LInstructionHelper<Defs, Ops, 0> Base;
+
+ protected:
+ explicit LWasmSelectBase(LNode::Opcode opcode) : Base(opcode) {}
+
+ public:
+ MWasmSelect* mir() const { return Base::mir_->toWasmSelect(); }
+};
+
+class LWasmSelect : public LWasmSelectBase<1, 3> {
+ public:
+ LIR_HEADER(WasmSelect);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = 1;
+ static const size_t CondIndex = 2;
+
+ LWasmSelect(const LAllocation& trueExpr, const LAllocation& falseExpr,
+ const LAllocation& cond)
+ : LWasmSelectBase(classOpcode) {
+ setOperand(TrueExprIndex, trueExpr);
+ setOperand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LAllocation* trueExpr() { return getOperand(TrueExprIndex); }
+ const LAllocation* falseExpr() { return getOperand(FalseExprIndex); }
+ const LAllocation* condExpr() { return getOperand(CondIndex); }
+};
+
+class LWasmSelectI64
+ : public LWasmSelectBase<INT64_PIECES, 2 * INT64_PIECES + 1> {
+ public:
+ LIR_HEADER(WasmSelectI64);
+
+ static const size_t TrueExprIndex = 0;
+ static const size_t FalseExprIndex = INT64_PIECES;
+ static const size_t CondIndex = INT64_PIECES * 2;
+
+ LWasmSelectI64(const LInt64Allocation& trueExpr,
+ const LInt64Allocation& falseExpr, const LAllocation& cond)
+ : LWasmSelectBase(classOpcode) {
+ setInt64Operand(TrueExprIndex, trueExpr);
+ setInt64Operand(FalseExprIndex, falseExpr);
+ setOperand(CondIndex, cond);
+ }
+
+ const LInt64Allocation trueExpr() { return getInt64Operand(TrueExprIndex); }
+ const LInt64Allocation falseExpr() { return getInt64Operand(FalseExprIndex); }
+ const LAllocation* condExpr() { return getOperand(CondIndex); }
+};
+
+class LWasmCompareAndSelect : public LWasmSelectBase<1, 4> {
+ MCompare::CompareType compareType_;
+ JSOp jsop_;
+
+ public:
+ LIR_HEADER(WasmCompareAndSelect);
+
+ static const size_t LeftExprIndex = 0;
+ static const size_t RightExprIndex = 1;
+ static const size_t IfTrueExprIndex = 2;
+ static const size_t IfFalseExprIndex = 3;
+
+ LWasmCompareAndSelect(const LAllocation& leftExpr,
+ const LAllocation& rightExpr,
+ MCompare::CompareType compareType, JSOp jsop,
+ const LAllocation& ifTrueExpr,
+ const LAllocation& ifFalseExpr)
+ : LWasmSelectBase(classOpcode), compareType_(compareType), jsop_(jsop) {
+ setOperand(LeftExprIndex, leftExpr);
+ setOperand(RightExprIndex, rightExpr);
+ setOperand(IfTrueExprIndex, ifTrueExpr);
+ setOperand(IfFalseExprIndex, ifFalseExpr);
+ }
+
+ const LAllocation* leftExpr() { return getOperand(LeftExprIndex); }
+ const LAllocation* rightExpr() { return getOperand(RightExprIndex); }
+ const LAllocation* ifTrueExpr() { return getOperand(IfTrueExprIndex); }
+ const LAllocation* ifFalseExpr() { return getOperand(IfFalseExprIndex); }
+
+ MCompare::CompareType compareType() { return compareType_; }
+ JSOp jsop() { return jsop_; }
+};
+
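+// [Editorial sketch, not part of this patch.] LWasmCompareAndSelect lets a
+// lowering fold a single-use integer MCompare directly into the select,
+// roughly as follows (helper names assumed):
+//
+//   MCompare* cmp = ins->condExpr()->toCompare();
+//   auto* lir = new (alloc()) LWasmCompareAndSelect(
+//       useRegister(cmp->lhs()), useRegister(cmp->rhs()),
+//       cmp->compareType(), cmp->jsop(),
+//       useRegister(ins->trueExpr()), useRegister(ins->falseExpr()));
+//   define(lir, ins);
+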
+class LWasmAddOffset : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmAddOffset);
+ explicit LWasmAddOffset(const LAllocation& base)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, base);
+ }
+ MWasmAddOffset* mir() const { return mir_->toWasmAddOffset(); }
+ const LAllocation* base() { return getOperand(0); }
+};
+
+class LWasmBoundsCheck : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(WasmBoundsCheck);
+ explicit LWasmBoundsCheck(const LAllocation& ptr,
+ const LAllocation& boundsCheckLimit = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, boundsCheckLimit);
+ }
+ MWasmBoundsCheck* mir() const { return mir_->toWasmBoundsCheck(); }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* boundsCheckLimit() { return getOperand(1); }
+};
+
+class LWasmAlignmentCheck : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(WasmAlignmentCheck);
+ explicit LWasmAlignmentCheck(const LAllocation& ptr)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ }
+ MWasmAlignmentCheck* mir() const { return mir_->toWasmAlignmentCheck(); }
+ const LAllocation* ptr() { return getOperand(0); }
+};
+
+class LWasmLoadTls : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmLoadTls);
+ explicit LWasmLoadTls(const LAllocation& tlsPtr)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, tlsPtr);
+ }
+ MWasmLoadTls* mir() const { return mir_->toWasmLoadTls(); }
+ const LAllocation* tlsPtr() { return getOperand(0); }
+};
+
+class LWasmHeapBase : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmHeapBase);
+ explicit LWasmHeapBase(const LAllocation& tlsPtr)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, tlsPtr);
+ }
+ MWasmHeapBase* mir() const { return mir_->toWasmHeapBase(); }
+ const LAllocation* tlsPtr() { return getOperand(0); }
+};
+
+namespace details {
+
+// This is a base class for LWasmLoad/LWasmLoadI64.
+template <size_t Defs, size_t Temp>
+class LWasmLoadBase : public LInstructionHelper<Defs, 2, Temp> {
+ public:
+ typedef LInstructionHelper<Defs, 2, Temp> Base;
+ explicit LWasmLoadBase(LNode::Opcode opcode, const LAllocation& ptr,
+ const LAllocation& memoryBase)
+ : Base(opcode) {
+ Base::setOperand(0, ptr);
+ Base::setOperand(1, memoryBase);
+ }
+ MWasmLoad* mir() const { return Base::mir_->toWasmLoad(); }
+ const LAllocation* ptr() { return Base::getOperand(0); }
+ const LAllocation* memoryBase() { return Base::getOperand(1); }
+};
+
+} // namespace details
+
+class LWasmLoad : public details::LWasmLoadBase<1, 1> {
+ public:
+ explicit LWasmLoad(const LAllocation& ptr,
+ const LAllocation& memoryBase = LAllocation())
+ : LWasmLoadBase(classOpcode, ptr, memoryBase) {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ const LDefinition* ptrCopy() { return Base::getTemp(0); }
+
+ LIR_HEADER(WasmLoad);
+};
+
+class LWasmLoadI64 : public details::LWasmLoadBase<INT64_PIECES, 1> {
+ public:
+ explicit LWasmLoadI64(const LAllocation& ptr,
+ const LAllocation& memoryBase = LAllocation())
+ : LWasmLoadBase(classOpcode, ptr, memoryBase) {
+ setTemp(0, LDefinition::BogusTemp());
+ }
+
+ const LDefinition* ptrCopy() { return Base::getTemp(0); }
+
+ LIR_HEADER(WasmLoadI64);
+};
+
+class LWasmStore : public LInstructionHelper<0, 3, 1> {
+ public:
+ LIR_HEADER(WasmStore);
+
+ static const size_t PtrIndex = 0;
+ static const size_t ValueIndex = 1;
+ static const size_t MemoryBaseIndex = 2;
+
+ LWasmStore(const LAllocation& ptr, const LAllocation& value,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(PtrIndex, ptr);
+ setOperand(ValueIndex, value);
+ setOperand(MemoryBaseIndex, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const { return mir_->toWasmStore(); }
+ const LAllocation* ptr() { return getOperand(PtrIndex); }
+ const LDefinition* ptrCopy() { return getTemp(0); }
+ const LAllocation* value() { return getOperand(ValueIndex); }
+ const LAllocation* memoryBase() { return getOperand(MemoryBaseIndex); }
+};
+
+class LWasmStoreI64 : public LInstructionHelper<0, INT64_PIECES + 2, 1> {
+ public:
+ LIR_HEADER(WasmStoreI64);
+
+ static const size_t PtrIndex = 0;
+ static const size_t MemoryBaseIndex = 1;
+ static const size_t ValueIndex = 2;
+
+ LWasmStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(PtrIndex, ptr);
+ setOperand(MemoryBaseIndex, memoryBase);
+ setInt64Operand(ValueIndex, value);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ MWasmStore* mir() const { return mir_->toWasmStore(); }
+ const LAllocation* ptr() { return getOperand(PtrIndex); }
+ const LAllocation* memoryBase() { return getOperand(MemoryBaseIndex); }
+ const LDefinition* ptrCopy() { return getTemp(0); }
+ const LInt64Allocation value() { return getInt64Operand(ValueIndex); }
+};
+
+class LAsmJSLoadHeap : public LInstructionHelper<1, 3, 0> {
+ public:
+ LIR_HEADER(AsmJSLoadHeap);
+ explicit LAsmJSLoadHeap(const LAllocation& ptr,
+ const LAllocation& boundsCheckLimit,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, boundsCheckLimit);
+ setOperand(2, memoryBase);
+ }
+ MAsmJSLoadHeap* mir() const { return mir_->toAsmJSLoadHeap(); }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* boundsCheckLimit() { return getOperand(1); }
+ const LAllocation* memoryBase() { return getOperand(2); }
+};
+
+class LAsmJSStoreHeap : public LInstructionHelper<0, 4, 0> {
+ public:
+ LIR_HEADER(AsmJSStoreHeap);
+ LAsmJSStoreHeap(const LAllocation& ptr, const LAllocation& value,
+ const LAllocation& boundsCheckLimit,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, boundsCheckLimit);
+ setOperand(3, memoryBase);
+ }
+ MAsmJSStoreHeap* mir() const { return mir_->toAsmJSStoreHeap(); }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LAllocation* boundsCheckLimit() { return getOperand(2); }
+ const LAllocation* memoryBase() { return getOperand(3); }
+};
+
+class LWasmCompareExchangeHeap : public LInstructionHelper<1, 4, 4> {
+ public:
+ LIR_HEADER(WasmCompareExchangeHeap);
+
+ // ARM, ARM64, x86, x64
+ LWasmCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setOperand(3, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ // MIPS32, MIPS64
+ LWasmCompareExchangeHeap(const LAllocation& ptr, const LAllocation& oldValue,
+ const LAllocation& newValue,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, oldValue);
+ setOperand(2, newValue);
+ setOperand(3, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* oldValue() { return getOperand(1); }
+ const LAllocation* newValue() { return getOperand(2); }
+ const LAllocation* memoryBase() { return getOperand(3); }
+ const LDefinition* addrTemp() { return getTemp(0); }
+
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(0, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms to extract/insert bits of a word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+};
+
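+// [Editorial sketch, not part of this patch.] The two constructors above
+// mirror the two platform lowerings: ARM/ARM64/x86/x64 need at most an
+// address temp (installed later via setAddrTemp), while the MIPS LL/SC path
+// supplies value/offset/mask temps so sub-word accesses can be emulated on
+// word-sized loads. A MIPS-style lowering might look roughly like this
+// (helper names assumed; the extra temps are only needed for narrow
+// accesses):
+//
+//   auto* lir = new (alloc()) LWasmCompareExchangeHeap(
+//       useRegister(ins->base()), useRegister(ins->oldValue()),
+//       useRegister(ins->newValue()), temp(), temp(), temp());
+//   define(lir, ins);
+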
+class LWasmFence : public LInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(WasmFence);
+ explicit LWasmFence() : LInstructionHelper(classOpcode) {}
+};
+
+class LWasmAtomicExchangeHeap : public LInstructionHelper<1, 3, 4> {
+ public:
+ LIR_HEADER(WasmAtomicExchangeHeap);
+
+ // ARM, ARM64, x86, x64
+ LWasmAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value,
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ }
+ // MIPS32, MIPS64
+ LWasmAtomicExchangeHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, valueTemp);
+ setTemp(2, offsetTemp);
+ setTemp(3, maskTemp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LAllocation* memoryBase() { return getOperand(2); }
+ const LDefinition* addrTemp() { return getTemp(0); }
+
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(0, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms to extract/insert bits of a word.
+ const LDefinition* valueTemp() { return getTemp(1); }
+ const LDefinition* offsetTemp() { return getTemp(2); }
+ const LDefinition* maskTemp() { return getTemp(3); }
+
+ MWasmAtomicExchangeHeap* mir() const {
+ return mir_->toWasmAtomicExchangeHeap();
+ }
+};
+
+class LWasmAtomicBinopHeap : public LInstructionHelper<1, 3, 6> {
+ public:
+ LIR_HEADER(WasmAtomicBinopHeap);
+
+ static const int32_t valueOp = 1;
+
+ // ARM, ARM64, x86, x64
+ LWasmAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& temp,
+ const LDefinition& flagTemp = LDefinition::BogusTemp(),
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, memoryBase);
+ setTemp(0, temp);
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, flagTemp);
+ }
+ // MIPS32, MIPS64
+ LWasmAtomicBinopHeap(const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, LDefinition::BogusTemp());
+ setTemp(3, valueTemp);
+ setTemp(4, offsetTemp);
+ setTemp(5, maskTemp);
+ }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* value() {
+ MOZ_ASSERT(valueOp == 1);
+ return getOperand(1);
+ }
+ const LAllocation* memoryBase() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() { return getTemp(1); }
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(1, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() { return getTemp(2); }
+ // Temp that may be used on LL/SC platforms to extract/insert bits of a word.
+ const LDefinition* valueTemp() { return getTemp(3); }
+ const LDefinition* offsetTemp() { return getTemp(4); }
+ const LDefinition* maskTemp() { return getTemp(5); }
+
+ MWasmAtomicBinopHeap* mir() const { return mir_->toWasmAtomicBinopHeap(); }
+};
+
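+// [Editorial sketch, not part of this patch.] On x86-style platforms the
+// extra address temp is requested after construction via setAddrTemp,
+// roughly as follows (helper names assumed):
+//
+//   auto* lir = new (alloc()) LWasmAtomicBinopHeap(
+//       useRegister(ins->base()), useRegister(ins->value()), temp());
+//   lir->setAddrTemp(temp());
+//   define(lir, ins);
+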
+// Atomic binary operation where the result is discarded.
+class LWasmAtomicBinopHeapForEffect : public LInstructionHelper<0, 3, 5> {
+ public:
+ LIR_HEADER(WasmAtomicBinopHeapForEffect);
+ // ARM, ARM64, x86, x64
+ LWasmAtomicBinopHeapForEffect(
+ const LAllocation& ptr, const LAllocation& value,
+ const LDefinition& flagTemp = LDefinition::BogusTemp(),
+ const LAllocation& memoryBase = LAllocation())
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, memoryBase);
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, flagTemp);
+ }
+ // MIPS32, MIPS64
+ LWasmAtomicBinopHeapForEffect(const LAllocation& ptr,
+ const LAllocation& value,
+ const LDefinition& valueTemp,
+ const LDefinition& offsetTemp,
+ const LDefinition& maskTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setOperand(1, value);
+ setOperand(2, LAllocation());
+ setTemp(0, LDefinition::BogusTemp());
+ setTemp(1, LDefinition::BogusTemp());
+ setTemp(2, valueTemp);
+ setTemp(3, offsetTemp);
+ setTemp(4, maskTemp);
+ }
+ const LAllocation* ptr() { return getOperand(0); }
+ const LAllocation* value() { return getOperand(1); }
+ const LAllocation* memoryBase() { return getOperand(2); }
+
+ // Temp that may be used on some platforms to hold a computed address.
+ const LDefinition* addrTemp() { return getTemp(0); }
+ void setAddrTemp(const LDefinition& addrTemp) { setTemp(0, addrTemp); }
+
+ // Temp that may be used on LL/SC platforms for the flag result of the store.
+ const LDefinition* flagTemp() { return getTemp(1); }
+ // Temp that may be used on LL/SC platforms to extract/insert bits of a word.
+ const LDefinition* valueTemp() { return getTemp(2); }
+ const LDefinition* offsetTemp() { return getTemp(3); }
+ const LDefinition* maskTemp() { return getTemp(4); }
+
+ MWasmAtomicBinopHeap* mir() const { return mir_->toWasmAtomicBinopHeap(); }
+};
+
+class LWasmLoadSlot : public LInstructionHelper<1, 1, 0> {
+ size_t offset_;
+ MIRType type_;
+
+ public:
+ LIR_HEADER(WasmLoadSlot);
+ explicit LWasmLoadSlot(const LAllocation& containerRef, size_t offset,
+ MIRType type)
+ : LInstructionHelper(classOpcode), offset_(offset), type_(type) {
+ setOperand(0, containerRef);
+ }
+ const LAllocation* containerRef() { return getOperand(0); }
+ size_t offset() const { return offset_; }
+ MIRType type() const { return type_; }
+};
+
+class LWasmLoadSlotI64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ size_t offset_;
+
+ public:
+ LIR_HEADER(WasmLoadSlotI64);
+ explicit LWasmLoadSlotI64(const LAllocation& containerRef, size_t offset)
+ : LInstructionHelper(classOpcode), offset_(offset) {
+ setOperand(0, containerRef);
+ }
+ const LAllocation* containerRef() { return getOperand(0); }
+ size_t offset() const { return offset_; }
+};
+
+class LWasmStoreSlot : public LInstructionHelper<0, 2, 0> {
+ size_t offset_;
+ MIRType type_;
+
+ public:
+ LIR_HEADER(WasmStoreSlot);
+ LWasmStoreSlot(const LAllocation& value, const LAllocation& containerRef,
+ size_t offset, MIRType type)
+ : LInstructionHelper(classOpcode), offset_(offset), type_(type) {
+ setOperand(0, value);
+ setOperand(1, containerRef);
+ }
+ const LAllocation* value() { return getOperand(0); }
+ const LAllocation* containerRef() { return getOperand(1); }
+ size_t offset() const { return offset_; }
+ MIRType type() const { return type_; }
+};
+
+class LWasmStoreSlotI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0> {
+ size_t offset_;
+
+ public:
+ LIR_HEADER(WasmStoreSlotI64);
+ LWasmStoreSlotI64(const LInt64Allocation& value,
+ const LAllocation& containerRef, size_t offset)
+ : LInstructionHelper(classOpcode), offset_(offset) {
+ setInt64Operand(0, value);
+ setOperand(INT64_PIECES, containerRef);
+ }
+ const LInt64Allocation value() { return getInt64Operand(0); }
+ const LAllocation* containerRef() { return getOperand(INT64_PIECES); }
+ size_t offset() const { return offset_; }
+};
+
+class LWasmDerivedPointer : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmDerivedPointer);
+ explicit LWasmDerivedPointer(const LAllocation& base)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, base);
+ }
+ const LAllocation* base() { return getOperand(0); }
+ size_t offset() { return mirRaw()->toWasmDerivedPointer()->offset(); }
+};
+
+class LWasmStoreRef : public LInstructionHelper<0, 3, 1> {
+ public:
+ LIR_HEADER(WasmStoreRef);
+ LWasmStoreRef(const LAllocation& tls, const LAllocation& valueAddr,
+ const LAllocation& value, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, tls);
+ setOperand(1, valueAddr);
+ setOperand(2, value);
+ setTemp(0, temp);
+ }
+ MWasmStoreRef* mir() const { return mirRaw()->toWasmStoreRef(); }
+ const LAllocation* tls() { return getOperand(0); }
+ const LAllocation* valueAddr() { return getOperand(1); }
+ const LAllocation* value() { return getOperand(2); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LWasmParameter : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(WasmParameter);
+
+ LWasmParameter() : LInstructionHelper(classOpcode) {}
+};
+
+class LWasmParameterI64 : public LInstructionHelper<INT64_PIECES, 0, 0> {
+ public:
+ LIR_HEADER(WasmParameterI64);
+
+ LWasmParameterI64() : LInstructionHelper(classOpcode) {}
+};
+
+class LWasmReturn : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(WasmReturn);
+
+ LWasmReturn() : LInstructionHelper(classOpcode) {}
+};
+
+// The extra (+1) operand is the tls pointer.
+class LWasmReturnI64 : public LInstructionHelper<0, INT64_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(WasmReturnI64)
+
+ LWasmReturnI64(const LInt64Allocation& input, const LAllocation& tls)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, input);
+ setOperand(INT64_PIECES, tls);
+ }
+};
+
+class LWasmReturnVoid : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(WasmReturnVoid);
+
+ LWasmReturnVoid() : LInstructionHelper(classOpcode) {}
+};
+
+class LWasmStackArg : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(WasmStackArg);
+ explicit LWasmStackArg(const LAllocation& arg)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, arg);
+ }
+ MWasmStackArg* mir() const { return mirRaw()->toWasmStackArg(); }
+ const LAllocation* arg() { return getOperand(0); }
+};
+
+class LWasmStackArgI64 : public LInstructionHelper<0, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmStackArgI64);
+ explicit LWasmStackArgI64(const LInt64Allocation& arg)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, arg);
+ }
+ MWasmStackArg* mir() const { return mirRaw()->toWasmStackArg(); }
+ const LInt64Allocation arg() { return getInt64Operand(0); }
+};
+
+class LWasmNullConstant : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(WasmNullConstant);
+ explicit LWasmNullConstant() : LInstructionHelper(classOpcode) {}
+};
+
+class LWasmCall : public LVariadicInstruction<0, 0> {
+ bool needsBoundsCheck_;
+
+ public:
+ LIR_HEADER(WasmCall);
+
+ LWasmCall(uint32_t numOperands, bool needsBoundsCheck)
+ : LVariadicInstruction(classOpcode, numOperands),
+ needsBoundsCheck_(needsBoundsCheck) {
+ this->setIsCall();
+ }
+
+ MWasmCall* mir() const { return mir_->toWasmCall(); }
+
+ static bool isCallPreserved(AnyRegister reg) {
+ // All MWasmCalls preserve the TLS register:
+ // - internal/indirect calls do so via the internal wasm ABI
+ // - import calls do so by explicitly saving/restoring it at the callsite
+ // - builtin calls do so because the TLS register is non-volatile
+ // See also CodeGeneratorShared::emitWasmCall.
+ return !reg.isFloat() && reg.gpr() == WasmTlsReg;
+ }
+
+ bool needsBoundsCheck() const { return needsBoundsCheck_; }
+};
+
+class LWasmRegisterResult : public LInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(WasmRegisterResult);
+
+ LWasmRegisterResult() : LInstructionHelper(classOpcode) {}
+
+ MWasmRegisterResult* mir() const {
+ if (!mir_->isWasmRegisterResult()) {
+ return nullptr;
+ }
+ return mir_->toWasmRegisterResult();
+ }
+};
+
+class LWasmRegisterPairResult : public LInstructionHelper<2, 0, 0> {
+ public:
+ LIR_HEADER(WasmRegisterPairResult);
+
+ LWasmRegisterPairResult() : LInstructionHelper(classOpcode) {}
+
+ MDefinition* mir() const { return mirRaw(); }
+};
+
+class LWasmStackResultArea : public LInstructionHelper<1, 0, 1> {
+ public:
+ LIR_HEADER(WasmStackResultArea);
+
+ explicit LWasmStackResultArea(const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp);
+ }
+
+ MWasmStackResultArea* mir() const { return mir_->toWasmStackResultArea(); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+inline uint32_t LStackArea::base() const {
+ return ins()->toWasmStackResultArea()->mir()->base();
+}
+inline void LStackArea::setBase(uint32_t base) {
+ ins()->toWasmStackResultArea()->mir()->setBase(base);
+}
+inline uint32_t LStackArea::size() const {
+ return ins()->toWasmStackResultArea()->mir()->byteSize();
+}
+
+inline bool LStackArea::ResultIterator::done() const {
+ return idx_ == alloc_.ins()->toWasmStackResultArea()->mir()->resultCount();
+}
+inline void LStackArea::ResultIterator::next() {
+ MOZ_ASSERT(!done());
+ idx_++;
+}
+inline LAllocation LStackArea::ResultIterator::alloc() const {
+ MOZ_ASSERT(!done());
+ MWasmStackResultArea* area = alloc_.ins()->toWasmStackResultArea()->mir();
+ return LStackSlot(area->base() - area->result(idx_).offset());
+}
+inline bool LStackArea::ResultIterator::isGcPointer() const {
+ MOZ_ASSERT(!done());
+ MWasmStackResultArea* area = alloc_.ins()->toWasmStackResultArea()->mir();
+ MIRType type = area->result(idx_).type();
+#ifndef JS_PUNBOX64
+ // LDefinition::TypeFrom isn't defined for MIRType::Int64 values on
+ // this platform, so we special-case Int64 here.
+ if (type == MIRType::Int64) {
+ return false;
+ }
+#endif
+ return LDefinition::TypeFrom(type) == LDefinition::OBJECT;
+}
+
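+// [Editorial sketch, not part of this patch.] A consumer such as safepoint
+// population in the register allocator would walk a stack result area
+// roughly as follows (a results() accessor and the noteGcStackSlot callback
+// are assumed/hypothetical):
+//
+//   for (LStackArea::ResultIterator it = area.results(); !it.done();
+//        it.next()) {
+//     if (it.isGcPointer()) {
+//       noteGcStackSlot(it.alloc());  // record this slot for GC tracing
+//     }
+//   }
+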
+class LWasmStackResult : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmStackResult);
+
+ LWasmStackResult() : LInstructionHelper(classOpcode) {}
+
+ MWasmStackResult* mir() const { return mir_->toWasmStackResult(); }
+ LStackSlot result(uint32_t base) const {
+ return LStackSlot(base - mir()->result().offset());
+ }
+};
+
+class LWasmStackResult64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmStackResult64);
+
+ LWasmStackResult64() : LInstructionHelper(classOpcode) {}
+
+ MWasmStackResult* mir() const { return mir_->toWasmStackResult(); }
+ LStackSlot result(uint32_t base, LDefinition* def) {
+ uint32_t offset = base - mir()->result().offset();
+#if defined(JS_NUNBOX32)
+ if (def == getDef(INT64LOW_INDEX)) {
+ offset -= INT64LOW_OFFSET;
+ } else {
+ MOZ_ASSERT(def == getDef(INT64HIGH_INDEX));
+ offset -= INT64HIGH_OFFSET;
+ }
+#else
+ MOZ_ASSERT(def == getDef(0));
+#endif
+ return LStackSlot(offset);
+ }
+};
+
+inline LStackSlot LStackArea::resultAlloc(LInstruction* lir,
+ LDefinition* def) const {
+ if (lir->isWasmStackResult64()) {
+ return lir->toWasmStackResult64()->result(base(), def);
+ }
+ MOZ_ASSERT(def == lir->getDef(0));
+ return lir->toWasmStackResult()->result(base());
+}
+
+inline bool LNode::isCallPreserved(AnyRegister reg) const {
+ return isWasmCall() && LWasmCall::isCallPreserved(reg);
+}
+
+class LAssertRangeI : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(AssertRangeI)
+
+ explicit LAssertRangeI(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LAssertRangeD : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(AssertRangeD)
+
+ LAssertRangeD(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+
+ const LDefinition* temp() { return getTemp(0); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LAssertRangeF : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(AssertRangeF)
+ LAssertRangeF(const LAllocation& input, const LDefinition& temp,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LAssertRangeV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(AssertRangeV)
+
+ LAssertRangeV(const LBoxAllocation& input, const LDefinition& temp,
+ const LDefinition& floatTemp1, const LDefinition& floatTemp2)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ setTemp(0, temp);
+ setTemp(1, floatTemp1);
+ setTemp(2, floatTemp2);
+ }
+
+ static const size_t Input = 0;
+
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* floatTemp1() { return getTemp(1); }
+ const LDefinition* floatTemp2() { return getTemp(2); }
+
+ MAssertRange* mir() { return mir_->toAssertRange(); }
+ const Range* range() { return mir()->assertedRange(); }
+};
+
+class LAssertClass : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(AssertClass)
+
+ explicit LAssertClass(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+
+ MAssertClass* mir() { return mir_->toAssertClass(); }
+};
+
+class LAssertShape : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(AssertShape)
+
+ explicit LAssertShape(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+
+ MAssertShape* mir() { return mir_->toAssertShape(); }
+};
+
+class LAssertResultT : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(AssertResultT)
+
+ explicit LAssertResultT(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+};
+
+class LAssertResultV : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(AssertResultV)
+
+ static const size_t Input = 0;
+
+ explicit LAssertResultV(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+};
+
+class LGuardValue : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GuardValue)
+
+ explicit LGuardValue(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MGuardValue* mir() { return mir_->toGuardValue(); }
+};
+
+class LGuardNotOptimizedArguments
+ : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GuardNotOptimizedArguments)
+
+ explicit LGuardNotOptimizedArguments(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+};
+
+class LGuardNullOrUndefined : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(GuardNullOrUndefined)
+
+ explicit LGuardNullOrUndefined(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+
+ MGuardNullOrUndefined* mir() { return mir_->toGuardNullOrUndefined(); }
+};
+
+class LGuardFunctionFlags : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(GuardFunctionFlags)
+
+ explicit LGuardFunctionFlags(const LAllocation& fun)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, fun);
+ }
+
+ const LAllocation* function() { return getOperand(0); }
+
+ MGuardFunctionFlags* mir() { return mir_->toGuardFunctionFlags(); }
+};
+
+class LGuardFunctionIsNonBuiltinCtor : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardFunctionIsNonBuiltinCtor)
+
+ LGuardFunctionIsNonBuiltinCtor(const LAllocation& fun,
+ const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, fun);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* function() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardFunctionKind : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardFunctionKind)
+
+ explicit LGuardFunctionKind(const LAllocation& fun, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, fun);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* function() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+
+ MGuardFunctionKind* mir() { return mir_->toGuardFunctionKind(); }
+};
+
+class LGuardFunctionScript : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(GuardFunctionScript)
+
+ explicit LGuardFunctionScript(const LAllocation& fun)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, fun);
+ }
+
+ const LAllocation* function() { return getOperand(0); }
+
+ MGuardFunctionScript* mir() { return mir_->toGuardFunctionScript(); }
+};
+
+class LIncrementWarmUpCounter : public LInstructionHelper<0, 0, 1> {
+ public:
+ LIR_HEADER(IncrementWarmUpCounter)
+
+ explicit LIncrementWarmUpCounter(const LDefinition& scratch)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, scratch);
+ }
+
+ const LDefinition* scratch() { return getTemp(0); }
+ MIncrementWarmUpCounter* mir() { return mir_->toIncrementWarmUpCounter(); }
+};
+
+class LLexicalCheck : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(LexicalCheck)
+
+ explicit LLexicalCheck(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ MLexicalCheck* mir() { return mir_->toLexicalCheck(); }
+
+ static const size_t Input = 0;
+};
+
+class LThrowRuntimeLexicalError : public LCallInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(ThrowRuntimeLexicalError)
+
+ LThrowRuntimeLexicalError() : LCallInstructionHelper(classOpcode) {}
+
+ MThrowRuntimeLexicalError* mir() {
+ return mir_->toThrowRuntimeLexicalError();
+ }
+};
+
+class LThrowMsg : public LCallInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(ThrowMsg)
+
+ LThrowMsg() : LCallInstructionHelper(classOpcode) {}
+
+ MThrowMsg* mir() { return mir_->toThrowMsg(); }
+};
+
+class LGlobalDeclInstantiation : public LInstructionHelper<0, 0, 0> {
+ public:
+ LIR_HEADER(GlobalDeclInstantiation)
+
+ LGlobalDeclInstantiation() : LInstructionHelper(classOpcode) {}
+
+ MGlobalDeclInstantiation* mir() { return mir_->toGlobalDeclInstantiation(); }
+};
+
+class LMemoryBarrier : public LInstructionHelper<0, 0, 0> {
+ private:
+ const MemoryBarrierBits type_;
+
+ public:
+ LIR_HEADER(MemoryBarrier)
+
+ // The parameter 'type' is a bitwise 'or' of the barrier types needed,
+ // see AtomicOp.h.
+ explicit LMemoryBarrier(MemoryBarrierBits type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ MOZ_ASSERT((type_ & ~MembarAllbits) == MembarNobits);
+ }
+
+ MemoryBarrierBits type() const { return type_; }
+};
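+
+// Hedged usage sketch (not in the original source): the constructor takes an
+// or-ed set of barrier bits; the Membar* constants named below are assumed to
+// come from AtomicOp.h as referenced above.
+//
+//   // Order earlier stores and earlier loads before later loads:
+//   auto* barrier =
+//       new (alloc()) LMemoryBarrier(MembarStoreLoad | MembarLoadLoad);
+//   add(barrier, ins);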
+
+class LDebugger : public LCallInstructionHelper<0, 0, 2> {
+ public:
+ LIR_HEADER(Debugger)
+
+ LDebugger(const LDefinition& temp1, const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+};
+
+class LNewTarget : public LInstructionHelper<BOX_PIECES, 0, 0> {
+ public:
+ LIR_HEADER(NewTarget)
+
+ LNewTarget() : LInstructionHelper(classOpcode) {}
+};
+
+class LArrowNewTarget : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ explicit LArrowNewTarget(const LAllocation& callee)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, callee);
+ }
+
+ LIR_HEADER(ArrowNewTarget)
+
+ const LAllocation* callee() { return getOperand(0); }
+};
+
+// Math.random().
+class LRandom : public LInstructionHelper<1, 0, 1 + 2 * INT64_PIECES> {
+ public:
+ LIR_HEADER(Random)
+ LRandom(const LDefinition& temp0, const LInt64Definition& temp1,
+ const LInt64Definition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setTemp(0, temp0);
+ setInt64Temp(1, temp1);
+ setInt64Temp(1 + INT64_PIECES, temp2);
+ }
+ const LDefinition* temp0() { return getTemp(0); }
+ LInt64Definition temp1() { return getInt64Temp(1); }
+ LInt64Definition temp2() { return getInt64Temp(1 + INT64_PIECES); }
+
+ MRandom* mir() const { return mir_->toRandom(); }
+};
+
+class LCheckReturn : public LInstructionHelper<BOX_PIECES, 2 * BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CheckReturn)
+
+ LCheckReturn(const LBoxAllocation& retVal, const LBoxAllocation& thisVal)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(ReturnValue, retVal);
+ setBoxOperand(ThisValue, thisVal);
+ }
+
+ static const size_t ReturnValue = 0;
+ static const size_t ThisValue = BOX_PIECES;
+};
+
+class LCheckIsObj : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CheckIsObj)
+
+ static const size_t ValueIndex = 0;
+
+ explicit LCheckIsObj(const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(ValueIndex, value);
+ }
+
+ MCheckIsObj* mir() const { return mir_->toCheckIsObj(); }
+};
+
+class LCheckObjCoercible : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CheckObjCoercible)
+
+ static const size_t CheckValue = 0;
+
+ explicit LCheckObjCoercible(const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(CheckValue, value);
+ }
+};
+
+class LCheckClassHeritage : public LInstructionHelper<0, BOX_PIECES, 1> {
+ public:
+ LIR_HEADER(CheckClassHeritage)
+
+ static const size_t Heritage = 0;
+
+ LCheckClassHeritage(const LBoxAllocation& value, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Heritage, value);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LCheckThis : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CheckThis)
+
+ static const size_t ThisValue = 0;
+
+ explicit LCheckThis(const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(ThisValue, value);
+ }
+};
+
+class LCheckThisReinit : public LInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CheckThisReinit)
+
+ static const size_t ThisValue = 0;
+
+ explicit LCheckThisReinit(const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(ThisValue, value);
+ }
+};
+
+class LGenerator
+ : public LCallInstructionHelper</* defs = */ 1, /* operands = */ 3,
+ /* temps = */ 0> {
+ public:
+ LIR_HEADER(Generator)
+
+ static const size_t CalleeInput = 0;
+ static const size_t EnvInput = 1;
+ static const size_t ArgsInput = 2;
+
+ LGenerator(const LAllocation& callee, const LAllocation& environmentChain,
+ const LAllocation& argsObject)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(CalleeInput, callee);
+ setOperand(EnvInput, environmentChain);
+ setOperand(ArgsInput, argsObject);
+ }
+
+ MGenerator* mir() const { return mir_->toGenerator(); }
+ const LAllocation* callee() { return getOperand(CalleeInput); }
+ const LAllocation* environmentChain() { return getOperand(EnvInput); }
+ const LAllocation* argsObject() { return getOperand(ArgsInput); }
+};
+
+class LAsyncResolve : public LCallInstructionHelper<1, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(AsyncResolve)
+
+ static const size_t GeneratorInput = 0;
+ static const size_t ValueOrReasonInput = 1;
+
+ LAsyncResolve(const LAllocation& generator,
+ const LBoxAllocation& valueOrReason)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(GeneratorInput, generator);
+ setBoxOperand(ValueOrReasonInput, valueOrReason);
+ }
+
+ MAsyncResolve* mir() const { return mir_->toAsyncResolve(); }
+ const LAllocation* generator() { return getOperand(GeneratorInput); }
+};
+
+class LAsyncAwait
+    : public LCallInstructionHelper</* defs = */ 1,
+                                    /* operands = */ BOX_PIECES + 1,
+ /* temps = */ 0> {
+ public:
+ LIR_HEADER(AsyncAwait)
+
+ static const size_t ValueInput = 0;
+ static const size_t GenInput = BOX_PIECES;
+
+ explicit LAsyncAwait(const LBoxAllocation& value,
+ const LAllocation& generator)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(ValueInput, value);
+ setOperand(GenInput, generator);
+ }
+
+ MAsyncAwait* mir() { return mir_->toAsyncAwait(); }
+ const LAllocation* generator() { return getOperand(GenInput); }
+};
+
+class LCanSkipAwait
+    : public LCallInstructionHelper</* defs = */ 1,
+                                    /* operands = */ BOX_PIECES,
+ /* temps = */ 0> {
+ public:
+ LIR_HEADER(CanSkipAwait)
+
+ static const size_t ValueInput = 0;
+
+ explicit LCanSkipAwait(const LBoxAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(ValueInput, value);
+ }
+
+ MCanSkipAwait* mir() { return mir_->toCanSkipAwait(); }
+};
+
+class LMaybeExtractAwaitValue
+    : public LCallInstructionHelper</* defs = */ BOX_PIECES,
+                                    /* operands = */ BOX_PIECES + 1,
+ /* temps = */ 0> {
+ public:
+ LIR_HEADER(MaybeExtractAwaitValue);
+
+ static const size_t ValueInput = 0;
+ static const size_t CanSkipInput = BOX_PIECES;
+
+ explicit LMaybeExtractAwaitValue(const LBoxAllocation& value,
+ const LAllocation& canSkip)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(ValueInput, value);
+ setOperand(CanSkipInput, canSkip);
+ }
+
+ MMaybeExtractAwaitValue* mir() { return mir_->toMaybeExtractAwaitValue(); }
+ const LAllocation* canSkip() { return getOperand(CanSkipInput); }
+};
+
+class LDebugCheckSelfHosted : public LCallInstructionHelper<0, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(DebugCheckSelfHosted)
+
+ static const size_t CheckValue = 0;
+
+ explicit LDebugCheckSelfHosted(const LBoxAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(CheckValue, value);
+ }
+};
+
+class LFinishBoundFunctionInit : public LInstructionHelper<0, 3, 2> {
+ public:
+ LIR_HEADER(FinishBoundFunctionInit)
+
+ LFinishBoundFunctionInit(const LAllocation& bound, const LAllocation& target,
+ const LAllocation& argCount,
+ const LDefinition& temp1, const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, bound);
+ setOperand(1, target);
+ setOperand(2, argCount);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* bound() { return getOperand(0); }
+ const LAllocation* target() { return getOperand(1); }
+ const LAllocation* argCount() { return getOperand(2); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LIsPackedArray : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(IsPackedArray)
+
+ LIsPackedArray(const LAllocation& object, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardArrayIsPacked : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(GuardArrayIsPacked)
+
+ explicit LGuardArrayIsPacked(const LAllocation& array,
+ const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, array);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* array() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MGuardArrayIsPacked* mir() { return mir_->toGuardArrayIsPacked(); }
+};
+
+class LGetPrototypeOf : public LInstructionHelper<BOX_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(GetPrototypeOf)
+
+ explicit LGetPrototypeOf(const LAllocation& target)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, target);
+ }
+
+ const LAllocation* target() { return getOperand(0); }
+};
+
+class LObjectWithProto : public LCallInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(ObjectWithProto)
+
+ static const size_t PrototypeValue = 0;
+
+ explicit LObjectWithProto(const LBoxAllocation& prototype)
+ : LCallInstructionHelper(classOpcode) {
+ setBoxOperand(PrototypeValue, prototype);
+ }
+};
+
+class LObjectStaticProto : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(ObjectStaticProto)
+
+ explicit LObjectStaticProto(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+};
+
+class LBuiltinObject : public LCallInstructionHelper<1, 0, 0> {
+ public:
+ LIR_HEADER(BuiltinObject)
+
+ LBuiltinObject() : LCallInstructionHelper(classOpcode) {}
+
+ MBuiltinObject* mir() const { return mir_->toBuiltinObject(); }
+};
+
+class LSuperFunction : public LInstructionHelper<BOX_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(SuperFunction)
+
+ explicit LSuperFunction(const LAllocation& callee, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, callee);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* callee() { return getOperand(0); }
+ const LDefinition* temp() { return this->getTemp(0); }
+};
+
+class LInitHomeObject : public LInstructionHelper<0, 1 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(InitHomeObject)
+
+ static const size_t HomeObjectValue = 1;
+
+ LInitHomeObject(const LAllocation& function, const LBoxAllocation& homeObject)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, function);
+ setBoxOperand(HomeObjectValue, homeObject);
+ }
+
+ const LAllocation* function() { return getOperand(0); }
+};
+
+class LIsTypedArrayConstructor : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(IsTypedArrayConstructor)
+
+ explicit LIsTypedArrayConstructor(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+class LLoadValueTag : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(LoadValueTag)
+
+ static const size_t Value = 0;
+
+ explicit LLoadValueTag(const LBoxAllocation& value)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Value, value);
+ }
+};
+
+class LGuardTagNotEqual : public LInstructionHelper<0, 2, 0> {
+ public:
+ LIR_HEADER(GuardTagNotEqual)
+
+ LGuardTagNotEqual(const LAllocation& lhs, const LAllocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ }
+
+ const LAllocation* lhs() { return getOperand(0); }
+ const LAllocation* rhs() { return getOperand(1); }
+};
+
+class LLoadWrapperTarget : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(LoadWrapperTarget)
+
+ explicit LLoadWrapperTarget(const LAllocation& object)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+};
+
+class LGuardHasGetterSetter : public LCallInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(GuardHasGetterSetter)
+
+ LGuardHasGetterSetter(const LAllocation& object, const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+
+ MGuardHasGetterSetter* mir() const { return mir_->toGuardHasGetterSetter(); }
+};
+
+class LGuardIsExtensible : public LInstructionHelper<0, 1, 1> {
+ public:
+ LIR_HEADER(GuardIsExtensible)
+
+ LGuardIsExtensible(const LAllocation& object, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LGuardInt32IsNonNegative : public LInstructionHelper<0, 1, 0> {
+ public:
+ LIR_HEADER(GuardInt32IsNonNegative)
+
+ explicit LGuardInt32IsNonNegative(const LAllocation& index)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, index);
+ }
+
+ const LAllocation* index() { return getOperand(0); }
+};
+
+class LGuardIndexGreaterThanDenseInitLength
+ : public LInstructionHelper<0, 2, 2> {
+ public:
+ LIR_HEADER(GuardIndexGreaterThanDenseInitLength)
+
+ LGuardIndexGreaterThanDenseInitLength(const LAllocation& object,
+ const LAllocation& index,
+ const LDefinition& temp,
+ const LDefinition& spectreTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ setTemp(0, temp);
+ setTemp(1, spectreTemp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* spectreTemp() { return getTemp(1); }
+};
+
+class LGuardIndexIsValidUpdateOrAdd : public LInstructionHelper<0, 2, 2> {
+ public:
+ LIR_HEADER(GuardIndexIsValidUpdateOrAdd)
+
+ LGuardIndexIsValidUpdateOrAdd(const LAllocation& object,
+ const LAllocation& index,
+ const LDefinition& temp,
+ const LDefinition& spectreTemp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ setTemp(0, temp);
+ setTemp(1, spectreTemp);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp() { return getTemp(0); }
+ const LDefinition* spectreTemp() { return getTemp(1); }
+};
+
+class LCallAddOrUpdateSparseElement
+ : public LCallInstructionHelper<0, 2 + BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(CallAddOrUpdateSparseElement)
+
+ LCallAddOrUpdateSparseElement(const LAllocation& object,
+ const LAllocation& index,
+ const LBoxAllocation& value)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ setBoxOperand(ValueIndex, value);
+ }
+
+ static const size_t ValueIndex = 2;
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+
+ MCallAddOrUpdateSparseElement* mir() const {
+ return mir_->toCallAddOrUpdateSparseElement();
+ }
+};
+
+class LCallGetSparseElement : public LCallInstructionHelper<BOX_PIECES, 2, 0> {
+ public:
+ LIR_HEADER(CallGetSparseElement)
+
+ LCallGetSparseElement(const LAllocation& object, const LAllocation& index)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+class LCallNativeGetElement : public LCallInstructionHelper<BOX_PIECES, 2, 0> {
+ public:
+ LIR_HEADER(CallNativeGetElement)
+
+ LCallNativeGetElement(const LAllocation& object, const LAllocation& index)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+};
+
+class LCallObjectHasSparseElement : public LCallInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(CallObjectHasSparseElement)
+
+ LCallObjectHasSparseElement(const LAllocation& object,
+ const LAllocation& index,
+ const LDefinition& temp1,
+ const LDefinition& temp2)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, object);
+ setOperand(1, index);
+ setTemp(0, temp1);
+ setTemp(1, temp2);
+ }
+
+ const LAllocation* object() { return getOperand(0); }
+ const LAllocation* index() { return getOperand(1); }
+ const LDefinition* temp1() { return getTemp(0); }
+ const LDefinition* temp2() { return getTemp(1); }
+};
+
+class LBigIntAsIntN : public LCallInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(BigIntAsIntN)
+
+ LBigIntAsIntN(const LAllocation& bits, const LAllocation& input)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, bits);
+ setOperand(1, input);
+ }
+
+ const LAllocation* bits() { return getOperand(0); }
+ const LAllocation* input() { return getOperand(1); }
+};
+
+class LBigIntAsIntN64 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsIntN64)
+
+ LBigIntAsIntN64(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LBigIntAsIntN32 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsIntN32)
+
+ LBigIntAsIntN32(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LBigIntAsUintN : public LCallInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(BigIntAsUintN)
+
+ LBigIntAsUintN(const LAllocation& bits, const LAllocation& input)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, bits);
+ setOperand(1, input);
+ }
+
+ const LAllocation* bits() { return getOperand(0); }
+ const LAllocation* input() { return getOperand(1); }
+};
+
+class LBigIntAsUintN64 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsUintN64)
+
+ LBigIntAsUintN64(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+class LBigIntAsUintN32 : public LInstructionHelper<1, 1, 1 + INT64_PIECES> {
+ public:
+ LIR_HEADER(BigIntAsUintN32)
+
+ LBigIntAsUintN32(const LAllocation& input, const LDefinition& temp,
+ const LInt64Definition& temp64)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ setInt64Temp(1, temp64);
+ }
+
+ const LAllocation* input() { return getOperand(0); }
+ const LDefinition* temp() { return getTemp(0); }
+ LInt64Definition temp64() { return getInt64Temp(1); }
+};
+
+template <size_t NumDefs>
+class LIonToWasmCallBase : public LVariadicInstruction<NumDefs, 2> {
+ using Base = LVariadicInstruction<NumDefs, 2>;
+
+ public:
+ explicit LIonToWasmCallBase(LNode::Opcode classOpcode, uint32_t numOperands,
+ const LDefinition& temp, const LDefinition& fp)
+ : Base(classOpcode, numOperands) {
+ this->setIsCall();
+ this->setTemp(0, temp);
+ this->setTemp(1, fp);
+ }
+ MIonToWasmCall* mir() const { return this->mir_->toIonToWasmCall(); }
+ const LDefinition* temp() { return this->getTemp(0); }
+};
+
+class LIonToWasmCall : public LIonToWasmCallBase<1> {
+ public:
+ LIR_HEADER(IonToWasmCall);
+ LIonToWasmCall(uint32_t numOperands, const LDefinition& temp,
+ const LDefinition& fp)
+ : LIonToWasmCallBase<1>(classOpcode, numOperands, temp, fp) {}
+};
+
+class LIonToWasmCallV : public LIonToWasmCallBase<BOX_PIECES> {
+ public:
+ LIR_HEADER(IonToWasmCallV);
+ LIonToWasmCallV(uint32_t numOperands, const LDefinition& temp,
+ const LDefinition& fp)
+ : LIonToWasmCallBase<BOX_PIECES>(classOpcode, numOperands, temp, fp) {}
+};
+
+class LIonToWasmCallI64 : public LIonToWasmCallBase<INT64_PIECES> {
+ public:
+ LIR_HEADER(IonToWasmCallI64);
+ LIonToWasmCallI64(uint32_t numOperands, const LDefinition& temp,
+ const LDefinition& fp)
+ : LIonToWasmCallBase<INT64_PIECES>(classOpcode, numOperands, temp, fp) {}
+};
+
+class LWasmBoxValue : public LInstructionHelper<1, BOX_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmBoxValue)
+
+ explicit LWasmBoxValue(const LBoxAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+};
+
+class LWasmAnyRefFromJSObject : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmAnyRefFromJSObject)
+
+ explicit LWasmAnyRefFromJSObject(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Input, input);
+ }
+
+ static const size_t Input = 0;
+};
+
+// Wasm SIMD.
+
+// Constant Simd128
+class LSimd128 : public LInstructionHelper<1, 0, 0> {
+ SimdConstant v_;
+
+ public:
+ LIR_HEADER(Simd128);
+
+ explicit LSimd128(SimdConstant v) : LInstructionHelper(classOpcode), v_(v) {}
+
+ const SimdConstant& getSimd128() const { return v_; }
+};
+
+// (v128, v128, v128) -> v128 effect-free operation.
+// temp is FPR (and always in use).
+class LWasmBitselectSimd128 : public LInstructionHelper<1, 3, 1> {
+ public:
+ LIR_HEADER(WasmBitselectSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+ static constexpr uint32_t Control = 2;
+
+ LWasmBitselectSimd128(const LAllocation& lhs, const LAllocation& rhs,
+ const LAllocation& control, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ setOperand(Control, control);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ const LAllocation* control() { return getOperand(Control); }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// (v128, v128) -> v128 effect-free operations
+// lhs and dest are the same.
+// temps (if in use) are FPR.
+// The op may differ from the MIR node's op.
+class LWasmBinarySimd128 : public LInstructionHelper<1, 2, 2> {
+ wasm::SimdOp op_;
+
+ public:
+ LIR_HEADER(WasmBinarySimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmBinarySimd128(wasm::SimdOp op, const LAllocation& lhs,
+ const LAllocation& rhs, const LDefinition& temp0,
+ const LDefinition& temp1)
+ : LInstructionHelper(classOpcode), op_(op) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ wasm::SimdOp simdOp() const { return op_; }
+
+ static bool SpecializeForConstantRhs(wasm::SimdOp op);
+};
+
+class LWasmBinarySimd128WithConstant : public LInstructionHelper<1, 1, 0> {
+ SimdConstant rhs_;
+
+ public:
+ LIR_HEADER(WasmBinarySimd128WithConstant)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+
+ LWasmBinarySimd128WithConstant(const LAllocation& lhs,
+ const SimdConstant& rhs)
+ : LInstructionHelper(classOpcode), rhs_(rhs) {
+ setOperand(Lhs, lhs);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const SimdConstant& rhs() { return rhs_; }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmBinarySimd128WithConstant()->simdOp();
+ }
+};
+
+// (v128, i32) -> v128 effect-free variable-width shift operations
+// lhs and dest are the same.
+// temp0 is a GPR (if in use).
+// temp1 is an FPR (if in use).
+class LWasmVariableShiftSimd128 : public LInstructionHelper<1, 2, 2> {
+ public:
+ LIR_HEADER(WasmVariableShiftSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmVariableShiftSimd128(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp0, const LDefinition& temp1)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmShiftSimd128()->simdOp(); }
+};
+
+// (v128, i32) -> v128 effect-free constant-width shift operations
+class LWasmConstantShiftSimd128 : public LInstructionHelper<1, 1, 1> {
+ int32_t shift_;
+
+ public:
+ LIR_HEADER(WasmConstantShiftSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ LWasmConstantShiftSimd128(const LAllocation& src, const LDefinition& temp,
+ int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(Src, src);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ const LDefinition* temp() { return getTemp(0); }
+ int32_t shift() { return shift_; }
+ wasm::SimdOp simdOp() const { return mir_->toWasmShiftSimd128()->simdOp(); }
+};
+
+// (v128, v128, imm_simd) -> v128 effect-free operation.
+// temp is FPR (and always in use).
+class LWasmShuffleSimd128 : public LInstructionHelper<1, 2, 1> {
+ public:
+ // Shuffle operations. NOTE: these may still be x86-centric, but the set can
+  // accommodate operations from other architectures.
+ enum Op {
+ // Blend bytes. control_ has the blend mask as an I8x16: 0 to select from
+ // the lhs, -1 to select from the rhs.
+ BLEND_8x16,
+
+ // Blend words. control_ has the blend mask as an I16x8: 0 to select from
+ // the lhs, -1 to select from the rhs.
+ BLEND_16x8,
+
+ // Concat the lhs in front of the rhs and shift right by bytes, extracting
+ // the low 16 bytes; control_[0] has the shift count.
+ CONCAT_RIGHT_SHIFT_8x16,
+
+ // Interleave qwords/dwords/words/bytes from high/low halves of operands.
+ // The low-order item in the result comes from the lhs, then the next from
+ // the rhs, and so on. control_ is ignored.
+ INTERLEAVE_HIGH_8x16,
+ INTERLEAVE_HIGH_16x8,
+ INTERLEAVE_HIGH_32x4,
+ INTERLEAVE_HIGH_64x2,
+ INTERLEAVE_LOW_8x16,
+ INTERLEAVE_LOW_16x8,
+ INTERLEAVE_LOW_32x4,
+ INTERLEAVE_LOW_64x2,
+
+ // Fully general shuffle+blend. control_ has the shuffle mask.
+ SHUFFLE_BLEND_8x16,
+ };
+
+ private:
+ Op op_;
+ SimdConstant control_;
+
+ public:
+ LIR_HEADER(WasmShuffleSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmShuffleSimd128(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp, Op op, SimdConstant control)
+ : LInstructionHelper(classOpcode), op_(op), control_(control) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ const LDefinition* temp() { return getTemp(0); }
+ Op op() { return op_; }
+ SimdConstant control() { return control_; }
+};
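+
+// Illustrative sketch only (not from the original source): a BLEND_8x16
+// control mask keeping even byte lanes from the lhs and taking odd byte lanes
+// from the rhs (0 selects lhs, -1 selects rhs).  The SimdConstant::CreateX16
+// factory and the lhsUse/rhsUse allocations named below are assumptions.
+//
+//   static const int8_t mask[16] = {0, -1, 0, -1, 0, -1, 0, -1,
+//                                   0, -1, 0, -1, 0, -1, 0, -1};
+//   auto* shuffle = new (alloc()) LWasmShuffleSimd128(
+//       lhsUse, rhsUse, tempSimd128(), LWasmShuffleSimd128::BLEND_8x16,
+//       SimdConstant::CreateX16(mask));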
+
+// (v128, imm_simd) -> v128 effect-free operation.
+class LWasmPermuteSimd128 : public LInstructionHelper<1, 1, 0> {
+ public:
+ // Permutation operations. NOTE: these may still be x86-centric, but the set
+  // can accommodate operations from other architectures.
+ //
+ // The "low-order" byte is in lane 0 of an 8x16 datum, the "high-order" byte
+ // in lane 15. The low-order byte is also the "rightmost". In wasm, the
+ // constant (v128.const i8x16 0 1 2 ... 15) has 0 in the low-order byte and 15
+ // in the high-order byte.
+ enum Op {
+ // A single byte lane is copied into all the other byte lanes. control_[0]
+ // has the source lane.
+ BROADCAST_8x16,
+
+ // A single word lane is copied into all the other word lanes. control_[0]
+ // has the source lane.
+ BROADCAST_16x8,
+
+ // Copy input to output.
+ MOVE,
+
+ // control_ has bytes in range 0..15 s.t. control_[i] holds the source lane
+ // for output lane i.
+ PERMUTE_8x16,
+
+ // control_ has int16s in range 0..7, as for 8x16. In addition, the high
+ // byte of control_[0] has flags detailing the operation, values taken
+ // from the Perm16x8Action enum below.
+ PERMUTE_16x8,
+
+ // control_ has int32s in range 0..3, as for 8x16.
+ PERMUTE_32x4,
+
+ // control_[0] has the number of places to rotate by.
+ ROTATE_RIGHT_8x16,
+
+ // Zeroes are shifted into high-order bytes and low-order bytes are lost.
+ // control_[0] has the number of places to shift by.
+ SHIFT_RIGHT_8x16,
+
+ // Zeroes are shifted into low-order bytes and high-order bytes are lost.
+ // control_[0] has the number of places to shift by.
+ SHIFT_LEFT_8x16,
+ };
+
+ enum Perm16x8Action {
+ SWAP_QWORDS = 1, // Swap qwords first
+ PERM_LOW = 2, // Permute low qword by control_[0..3]
+ PERM_HIGH = 4 // Permute high qword by control_[4..7]
+ };
+
+ private:
+ Op op_;
+ SimdConstant control_;
+
+ public:
+ LIR_HEADER(WasmPermuteSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ LWasmPermuteSimd128(const LAllocation& src, Op op, SimdConstant control)
+ : LInstructionHelper(classOpcode), op_(op), control_(control) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ Op op() { return op_; }
+ SimdConstant control() { return control_; }
+};
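+
+// Illustrative sketch only: a PERMUTE_8x16 control that reverses the byte
+// lanes, i.e. output lane i is taken from source lane 15 - i.  As above,
+// SimdConstant::CreateX16 and the srcUse allocation are assumptions.
+//
+//   static const int8_t lanes[16] = {15, 14, 13, 12, 11, 10, 9, 8,
+//                                    7,  6,  5,  4,  3,  2,  1, 0};
+//   auto* permute = new (alloc()) LWasmPermuteSimd128(
+//       srcUse, LWasmPermuteSimd128::PERMUTE_8x16,
+//       SimdConstant::CreateX16(lanes));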
+
+class LWasmReplaceLaneSimd128 : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(WasmReplaceLaneSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmReplaceLaneSimd128(const LAllocation& lhs, const LAllocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Lhs, lhs);
+ setOperand(Rhs, rhs);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LAllocation* rhs() { return getOperand(Rhs); }
+ uint32_t laneIndex() const {
+ return mir_->toWasmReplaceLaneSimd128()->laneIndex();
+ }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmReplaceLaneSimd128()->simdOp();
+ }
+};
+
+class LWasmReplaceInt64LaneSimd128
+ : public LInstructionHelper<1, INT64_PIECES + 1, 0> {
+ public:
+ LIR_HEADER(WasmReplaceInt64LaneSimd128)
+
+ static constexpr uint32_t Lhs = 0;
+ static constexpr uint32_t LhsDest = 0;
+ static constexpr uint32_t Rhs = 1;
+
+ LWasmReplaceInt64LaneSimd128(const LAllocation& lhs,
+ const LInt64Allocation& rhs)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ const LAllocation* lhs() { return getOperand(Lhs); }
+ const LAllocation* lhsDest() { return getOperand(LhsDest); }
+ const LInt64Allocation rhs() { return getInt64Operand(Rhs); }
+ uint32_t laneIndex() const {
+ return mir_->toWasmReplaceLaneSimd128()->laneIndex();
+ }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmReplaceLaneSimd128()->simdOp();
+ }
+};
+
+// (scalar) -> v128 effect-free operations, scalar != int64
+class LWasmScalarToSimd128 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmScalarToSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmScalarToSimd128(const LAllocation& src)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmScalarToSimd128()->simdOp();
+ }
+};
+
+// (int64) -> v128 effect-free operations
+class LWasmInt64ToSimd128 : public LInstructionHelper<1, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmInt64ToSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmInt64ToSimd128(const LInt64Allocation& src)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(Src, src);
+ }
+
+ const LInt64Allocation src() { return getInt64Operand(Src); }
+ wasm::SimdOp simdOp() const {
+ return mir_->toWasmScalarToSimd128()->simdOp();
+ }
+};
+
+// (v128) -> v128 effect-free operations
+// temp is FPR (if in use).
+class LWasmUnarySimd128 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmUnarySimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ LWasmUnarySimd128(const LAllocation& src, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ const LDefinition* temp() { return getTemp(0); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmUnarySimd128()->simdOp(); }
+};
+
+// (v128, imm) -> scalar effect-free operations.
+class LWasmReduceSimd128 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmReduceSimd128)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmReduceSimd128(const LAllocation& src)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ uint32_t imm() const { return mir_->toWasmReduceSimd128()->imm(); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmReduceSimd128()->simdOp(); }
+};
+
+// (v128, onTrue, onFalse) test-and-branch operations.
+class LWasmReduceAndBranchSimd128 : public LControlInstructionHelper<2, 1, 0> {
+ wasm::SimdOp op_;
+
+ public:
+ LIR_HEADER(WasmReduceAndBranchSimd128)
+
+ static constexpr uint32_t Src = 0;
+ static constexpr uint32_t IfTrue = 0;
+ static constexpr uint32_t IfFalse = 1;
+
+ LWasmReduceAndBranchSimd128(const LAllocation& src, wasm::SimdOp op,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse)
+ : LControlInstructionHelper(classOpcode), op_(op) {
+ setOperand(Src, src);
+ setSuccessor(IfTrue, ifTrue);
+ setSuccessor(IfFalse, ifFalse);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ wasm::SimdOp simdOp() const { return op_; }
+ MBasicBlock* ifTrue() const { return getSuccessor(IfTrue); }
+ MBasicBlock* ifFalse() const { return getSuccessor(IfFalse); }
+};
+
+// (v128, imm) -> i64 effect-free operations
+class LWasmReduceSimd128ToInt64
+ : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmReduceSimd128ToInt64)
+
+ static constexpr uint32_t Src = 0;
+
+ explicit LWasmReduceSimd128ToInt64(const LAllocation& src)
+ : LInstructionHelper(classOpcode) {
+ setOperand(Src, src);
+ }
+
+ const LAllocation* src() { return getOperand(Src); }
+ uint32_t imm() const { return mir_->toWasmReduceSimd128()->imm(); }
+ wasm::SimdOp simdOp() const { return mir_->toWasmReduceSimd128()->simdOp(); }
+};
+
+// End Wasm SIMD
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_LIR_shared_h */
diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h
new file mode 100644
index 0000000000..e4dedfd054
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -0,0 +1,805 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_inl_h
+#define jit_shared_Lowering_shared_inl_h
+
+#include "jit/shared/Lowering-shared.h"
+
+#include "jit/MIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+void LIRGeneratorShared::emitAtUses(MInstruction* mir) {
+ MOZ_ASSERT(mir->canEmitAtUses());
+ mir->setEmittedAtUses();
+ mir->setVirtualRegister(0);
+}
+
+LUse LIRGeneratorShared::use(MDefinition* mir, LUse policy) {
+ // It is illegal to call use() on an instruction with two defs.
+#if BOX_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Value);
+#endif
+#if INT64_PIECES > 1
+ MOZ_ASSERT(mir->type() != MIRType::Int64);
+#endif
+ ensureDefined(mir);
+ policy.setVirtualRegister(mir->virtualRegister());
+ return policy;
+}
+
+template <size_t X>
+void LIRGeneratorShared::define(
+ details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ LDefinition::Policy policy) {
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ define(lir, mir, LDefinition(type, policy));
+}
+
+template <size_t X>
+void LIRGeneratorShared::define(
+ details::LInstructionFixedDefsTempsHelper<1, X>* lir, MDefinition* mir,
+ const LDefinition& def) {
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ // Assign the definition and a virtual register. Then, propagate this
+ // virtual register to the MIR, so we can map MIR to LIR during lowering.
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t X, size_t Y>
+void LIRGeneratorShared::defineFixed(LInstructionHelper<1, X, Y>* lir,
+ MDefinition* mir,
+ const LAllocation& output) {
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::FIXED);
+ def.setOutput(output);
+
+ define(lir, mir, def);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineInt64Fixed(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output) {
+ uint32_t vreg = getVirtualRegister();
+
+#if JS_BITS_PER_WORD == 64
+ LDefinition def(LDefinition::GENERAL, LDefinition::FIXED);
+ def.setOutput(output.value());
+ lir->setDef(0, def);
+ lir->getDef(0)->setVirtualRegister(vreg);
+#else
+ LDefinition def0(LDefinition::GENERAL, LDefinition::FIXED);
+ def0.setOutput(output.low());
+ lir->setDef(0, def0);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+ getVirtualRegister();
+ LDefinition def1(LDefinition::GENERAL, LDefinition::FIXED);
+ def1.setOutput(output.high());
+ lir->setDef(1, def1);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineReuseInput(
+ LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand) {
+  // Note: operands other than the reused one must not be marked "atStart";
+  // the register allocator cannot handle that and may overwrite the inputs!
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+
+ LDefinition def(type, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(operand);
+
+ define(lir, mir, def);
+}
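+
+// A minimal lowering sketch (the LNegI instruction and input() accessor are
+// used here only for illustration): the reused operand must be requested
+// at-start so the register allocator is free to clobber it with the result.
+//
+//   auto* lir = new (alloc()) LNegI(useRegisterAtStart(ins->input()));
+//   defineReuseInput(lir, ins, 0);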
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineInt64ReuseInput(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand) {
+  // Note: operands other than the reused one must not be marked "atStart";
+  // the register allocator cannot handle that and may overwrite the inputs!
+
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+#if JS_BITS_PER_WORD == 32
+ MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
+#endif
+ MOZ_ASSERT(!lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ LDefinition def1(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def1.setReusedInput(operand);
+ lir->setDef(0, def1);
+ lir->getDef(0)->setVirtualRegister(vreg);
+
+#if JS_BITS_PER_WORD == 32
+ getVirtualRegister();
+ LDefinition def2(LDefinition::GENERAL, LDefinition::MUST_REUSE_INPUT);
+ def2.setReusedInput(operand + 1);
+ lir->setDef(1, def2);
+ lir->getDef(1)->setVirtualRegister(vreg + 1);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineBoxReuseInput(
+ LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand) {
+ // The input should be used at the start of the instruction, to avoid moves.
+ MOZ_ASSERT(lir->getOperand(operand)->toUse()->usedAtStart());
+#ifdef JS_NUNBOX32
+ MOZ_ASSERT(lir->getOperand(operand + 1)->toUse()->usedAtStart());
+#endif
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
+#ifdef JS_NUNBOX32
+ static_assert(VREG_TYPE_OFFSET == 0,
+ "Code below assumes VREG_TYPE_OFFSET == 0");
+ static_assert(VREG_DATA_OFFSET == 1,
+ "Code below assumes VREG_DATA_OFFSET == 1");
+
+ LDefinition def1(LDefinition::TYPE, LDefinition::MUST_REUSE_INPUT);
+ def1.setReusedInput(operand);
+ def1.setVirtualRegister(vreg);
+ lir->setDef(0, def1);
+
+ getVirtualRegister();
+ LDefinition def2(LDefinition::PAYLOAD, LDefinition::MUST_REUSE_INPUT);
+ def2.setReusedInput(operand + 1);
+ def2.setVirtualRegister(vreg + 1);
+ lir->setDef(1, def2);
+#else
+ LDefinition def(LDefinition::BOX, LDefinition::MUST_REUSE_INPUT);
+ def.setReusedInput(operand);
+ def.setVirtualRegister(vreg);
+ lir->setDef(0, def);
+#endif
+
+ lir->setMir(mir);
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Temps>
+void LIRGeneratorShared::defineBox(
+ details::LInstructionFixedDefsTempsHelper<BOX_PIECES, Temps>* lir,
+ MDefinition* mir, LDefinition::Policy policy) {
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if defined(JS_NUNBOX32)
+ lir->setDef(0,
+ LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE, policy));
+ lir->setDef(
+ 1, LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD, policy));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(0, LDefinition(vreg, LDefinition::BOX, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+template <size_t Ops, size_t Temps>
+void LIRGeneratorShared::defineInt64(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ LDefinition::Policy policy) {
+ // Call instructions should use defineReturn.
+ MOZ_ASSERT(!lir->isCall());
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ uint32_t vreg = getVirtualRegister();
+
+#if JS_BITS_PER_WORD == 32
+ lir->setDef(0,
+ LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL, policy));
+ lir->setDef(
+ 1, LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL, policy));
+ getVirtualRegister();
+#else
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL, policy));
+#endif
+ lir->setMir(mir);
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void LIRGeneratorShared::defineReturn(LInstruction* lir, MDefinition* mir) {
+ lir->setMir(mir);
+
+ MOZ_ASSERT(lir->isCall());
+
+ uint32_t vreg = getVirtualRegister();
+
+ switch (mir->type()) {
+ case MIRType::Value:
+#if defined(JS_NUNBOX32)
+ lir->setDef(TYPE_INDEX,
+ LDefinition(vreg + VREG_TYPE_OFFSET, LDefinition::TYPE,
+ LGeneralReg(JSReturnReg_Type)));
+ lir->setDef(PAYLOAD_INDEX,
+ LDefinition(vreg + VREG_DATA_OFFSET, LDefinition::PAYLOAD,
+ LGeneralReg(JSReturnReg_Data)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(
+ 0, LDefinition(vreg, LDefinition::BOX, LGeneralReg(JSReturnReg)));
+#endif
+ break;
+ case MIRType::Int64:
+#if defined(JS_NUNBOX32)
+ lir->setDef(INT64LOW_INDEX,
+ LDefinition(vreg + INT64LOW_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.low)));
+ lir->setDef(INT64HIGH_INDEX,
+ LDefinition(vreg + INT64HIGH_INDEX, LDefinition::GENERAL,
+ LGeneralReg(ReturnReg64.high)));
+ getVirtualRegister();
+#elif defined(JS_PUNBOX64)
+ lir->setDef(
+ 0, LDefinition(vreg, LDefinition::GENERAL, LGeneralReg(ReturnReg)));
+#endif
+ break;
+ case MIRType::Float32:
+ lir->setDef(0, LDefinition(vreg, LDefinition::FLOAT32,
+ LFloatReg(ReturnFloat32Reg)));
+ break;
+ case MIRType::Double:
+ lir->setDef(0, LDefinition(vreg, LDefinition::DOUBLE,
+ LFloatReg(ReturnDoubleReg)));
+ break;
+ case MIRType::Simd128:
+#ifdef ENABLE_WASM_SIMD
+ lir->setDef(0, LDefinition(vreg, LDefinition::SIMD128,
+ LFloatReg(ReturnSimd128Reg)));
+ break;
+#else
+ MOZ_CRASH("No SIMD support");
+#endif
+ default:
+ LDefinition::Type type = LDefinition::TypeFrom(mir->type());
+ switch (type) {
+ case LDefinition::GENERAL:
+ case LDefinition::INT32:
+ case LDefinition::OBJECT:
+ case LDefinition::SLOTS:
+ case LDefinition::STACKRESULTS:
+ lir->setDef(0, LDefinition(vreg, type, LGeneralReg(ReturnReg)));
+ break;
+ case LDefinition::DOUBLE:
+ case LDefinition::FLOAT32:
+ case LDefinition::SIMD128:
+ MOZ_CRASH("Float cases must have been handled earlier");
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ break;
+ }
+
+ mir->setVirtualRegister(vreg);
+ add(lir);
+}
+
+// In LIR, we treat booleans and integers as the same low-level type (INTEGER).
+// When snapshotting, we recover the actual JS type from MIR. This function
+// checks that when making redefinitions, we don't accidentally coerce two
+// incompatible types.
+static inline bool IsCompatibleLIRCoercion(MIRType to, MIRType from) {
+ if (to == from) {
+ return true;
+ }
+ if ((to == MIRType::Int32 || to == MIRType::Boolean) &&
+ (from == MIRType::Int32 || from == MIRType::Boolean)) {
+ return true;
+ }
+ return false;
+}
+
+void LIRGeneratorShared::redefine(MDefinition* def, MDefinition* as) {
+ MOZ_ASSERT(IsCompatibleLIRCoercion(def->type(), as->type()));
+
+  // Try to emit MIR marked as emitted-at-uses at, well, uses. For
+  // snapshotting reasons we only do this when the MIRTypes match, or when
+  // we are coercing between bool and int32 constants.
+ if (as->isEmittedAtUses() &&
+ (def->type() == as->type() ||
+ (as->isConstant() &&
+ (def->type() == MIRType::Int32 || def->type() == MIRType::Boolean) &&
+ (as->type() == MIRType::Int32 || as->type() == MIRType::Boolean)))) {
+ MInstruction* replacement;
+ if (def->type() != as->type()) {
+ if (as->type() == MIRType::Int32) {
+ replacement =
+ MConstant::New(alloc(), BooleanValue(as->toConstant()->toInt32()));
+ } else {
+ replacement =
+ MConstant::New(alloc(), Int32Value(as->toConstant()->toBoolean()));
+ }
+ def->block()->insertBefore(def->toInstruction(), replacement);
+ emitAtUses(replacement->toInstruction());
+ } else {
+ replacement = as->toInstruction();
+ }
+ def->replaceAllUsesWith(replacement);
+ } else {
+ ensureDefined(as);
+ def->setVirtualRegister(as->virtualRegister());
+ }
+}
+
+void LIRGeneratorShared::ensureDefined(MDefinition* mir) {
+ if (mir->isEmittedAtUses()) {
+ visitEmittedAtUses(mir->toInstruction());
+ MOZ_ASSERT(mir->isLowered());
+ }
+}
+
+template <typename LClass, typename... Args>
+LClass* LIRGeneratorShared::allocateVariadic(uint32_t numOperands,
+ Args&&... args) {
+ size_t numBytes = sizeof(LClass) + numOperands * sizeof(LAllocation);
+ void* buf = alloc().allocate(numBytes);
+ if (!buf) {
+ return nullptr;
+ }
+
+ LClass* ins = static_cast<LClass*>(buf);
+ new (ins) LClass(numOperands, std::forward<Args>(args)...);
+
+ ins->initOperandsOffset(sizeof(LClass));
+
+ for (uint32_t i = 0; i < numOperands; i++) {
+ ins->setOperand(i, LAllocation());
+ }
+
+ return ins;
+}
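+
+// Hedged usage sketch: a variadic LIR node is sized and allocated with its
+// operand count up front, then its operands are filled in.  The class name,
+// operand-count accessor, and abort helper below are illustrative.
+//
+//   uint32_t numOperands = call->numStackArgs();
+//   auto* lir = allocateVariadic<LSomeVariadicCall>(numOperands);
+//   if (!lir) {
+//     abort(AbortReason::Alloc, "OOM: allocateVariadic");
+//     return;
+//   }
+//   for (uint32_t i = 0; i < numOperands; i++) {
+//     lir->setOperand(i, useRegisterAtStart(call->getOperand(i)));
+//   }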
+
+LUse LIRGeneratorShared::useRegister(MDefinition* mir) {
+ return use(mir, LUse(LUse::REGISTER));
+}
+
+LUse LIRGeneratorShared::useRegisterAtStart(MDefinition* mir) {
+ return use(mir, LUse(LUse::REGISTER, true));
+}
+
+LUse LIRGeneratorShared::use(MDefinition* mir) {
+ return use(mir, LUse(LUse::ANY));
+}
+
+LUse LIRGeneratorShared::useAtStart(MDefinition* mir) {
+ return use(mir, LUse(LUse::ANY, true));
+}
+
+LAllocation LIRGeneratorShared::useOrConstant(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return use(mir);
+}
+
+LAllocation LIRGeneratorShared::useOrConstantAtStart(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useAtStart(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrConstant(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrConstantAtStart(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrZero(MDefinition* mir) {
+ if (mir->isConstant() && mir->toConstant()->isInt32(0)) {
+ return LAllocation();
+ }
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrZeroAtStart(MDefinition* mir) {
+ if (mir->isConstant() && mir->toConstant()->isInt32(0)) {
+ return LAllocation();
+ }
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorShared::useRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ if (mir->isConstant() && mir->type() != MIRType::Double &&
+ mir->type() != MIRType::Float32) {
+ return LAllocation(mir->toConstant());
+ }
+ return useRegister(mir);
+}
+
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
+LAllocation LIRGeneratorShared::useAnyOrConstant(MDefinition* mir) {
+ return useRegisterOrConstant(mir);
+}
+LAllocation LIRGeneratorShared::useStorable(MDefinition* mir) {
+ return useRegister(mir);
+}
+LAllocation LIRGeneratorShared::useStorableAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorShared::useAny(MDefinition* mir) {
+ return useRegister(mir);
+}
+#else
+LAllocation LIRGeneratorShared::useAnyOrConstant(MDefinition* mir) {
+ return useOrConstant(mir);
+}
+
+LAllocation LIRGeneratorShared::useAny(MDefinition* mir) { return use(mir); }
+LAllocation LIRGeneratorShared::useStorable(MDefinition* mir) {
+ return useRegisterOrConstant(mir);
+}
+LAllocation LIRGeneratorShared::useStorableAtStart(MDefinition* mir) {
+ return useRegisterOrConstantAtStart(mir);
+}
+
+#endif
+
+LAllocation LIRGeneratorShared::useKeepalive(MDefinition* mir) {
+ return use(mir, LUse(LUse::KEEPALIVE));
+}
+
+LAllocation LIRGeneratorShared::useKeepaliveOrConstant(MDefinition* mir) {
+ if (mir->isConstant()) {
+ return LAllocation(mir->toConstant());
+ }
+ return useKeepalive(mir);
+}
+
+LUse LIRGeneratorShared::useFixed(MDefinition* mir, Register reg) {
+ return use(mir, LUse(reg));
+}
+
+LUse LIRGeneratorShared::useFixedAtStart(MDefinition* mir, Register reg) {
+ return use(mir, LUse(reg, true));
+}
+
+LUse LIRGeneratorShared::useFixed(MDefinition* mir, FloatRegister reg) {
+ return use(mir, LUse(reg));
+}
+
+LUse LIRGeneratorShared::useFixed(MDefinition* mir, AnyRegister reg) {
+ return reg.isFloat() ? use(mir, LUse(reg.fpu())) : use(mir, LUse(reg.gpr()));
+}
+
+LUse LIRGeneratorShared::useFixedAtStart(MDefinition* mir, AnyRegister reg) {
+ return reg.isFloat() ? use(mir, LUse(reg.fpu(), true))
+ : use(mir, LUse(reg.gpr(), true));
+}
+
+LDefinition LIRGeneratorShared::temp(LDefinition::Type type,
+ LDefinition::Policy policy) {
+ return LDefinition(getVirtualRegister(), type, policy);
+}
+
+LInt64Definition LIRGeneratorShared::tempInt64(LDefinition::Policy policy) {
+#if JS_BITS_PER_WORD == 32
+ LDefinition high = temp(LDefinition::GENERAL, policy);
+ LDefinition low = temp(LDefinition::GENERAL, policy);
+ return LInt64Definition(high, low);
+#else
+ return LInt64Definition(temp(LDefinition::GENERAL, policy));
+#endif
+}
+
+LDefinition LIRGeneratorShared::tempFixed(Register reg) {
+ LDefinition t = temp(LDefinition::GENERAL);
+ t.setOutput(LGeneralReg(reg));
+ return t;
+}
+
+LInt64Definition LIRGeneratorShared::tempInt64Fixed(Register64 reg) {
+#if JS_BITS_PER_WORD == 32
+ LDefinition high = temp(LDefinition::GENERAL);
+ LDefinition low = temp(LDefinition::GENERAL);
+ high.setOutput(LGeneralReg(reg.high));
+ low.setOutput(LGeneralReg(reg.low));
+ return LInt64Definition(high, low);
+#else
+ LDefinition t = temp(LDefinition::GENERAL);
+ t.setOutput(LGeneralReg(reg.reg));
+ return LInt64Definition(t);
+#endif
+}
+
+LDefinition LIRGeneratorShared::tempFixed(FloatRegister reg) {
+ LDefinition t = temp(LDefinition::DOUBLE);
+ t.setOutput(LFloatReg(reg));
+ return t;
+}
+
+LDefinition LIRGeneratorShared::tempFloat32() {
+ return temp(LDefinition::FLOAT32);
+}
+
+LDefinition LIRGeneratorShared::tempDouble() {
+ return temp(LDefinition::DOUBLE);
+}
+
+#ifdef ENABLE_WASM_SIMD
+LDefinition LIRGeneratorShared::tempSimd128() {
+ return temp(LDefinition::SIMD128);
+}
+#endif
+
+LDefinition LIRGeneratorShared::tempCopy(MDefinition* input,
+ uint32_t reusedInput) {
+ MOZ_ASSERT(input->virtualRegister());
+ LDefinition t =
+ temp(LDefinition::TypeFrom(input->type()), LDefinition::MUST_REUSE_INPUT);
+ t.setReusedInput(reusedInput);
+ return t;
+}
+
+template <typename T>
+void LIRGeneratorShared::annotate(T* ins) {
+ ins->setId(lirGraph_.getInstructionId());
+}
+
+template <typename T>
+void LIRGeneratorShared::add(T* ins, MInstruction* mir) {
+ MOZ_ASSERT(!ins->isPhi());
+ current->add(ins);
+ if (mir) {
+ MOZ_ASSERT(current == mir->block()->lir());
+ ins->setMir(mir);
+ }
+ annotate(ins);
+ if (ins->isCall()) {
+ gen->setNeedsOverrecursedCheck();
+ gen->setNeedsStaticStackAlignment();
+ }
+}
+
+#ifdef JS_NUNBOX32
+// Returns the virtual register of a js::Value-defining instruction. This is
+// abstracted because MBox is a special value-returning instruction that
+// redefines its input payload if its input is not constant. Therefore, it is
+// illegal to request a box's payload by adding VREG_DATA_OFFSET to its raw id.
+static inline uint32_t VirtualRegisterOfPayload(MDefinition* mir) {
+ if (mir->isBox()) {
+ MDefinition* inner = mir->toBox()->getOperand(0);
+ if (!inner->isConstant() && inner->type() != MIRType::Double &&
+ inner->type() != MIRType::Float32) {
+ return inner->virtualRegister();
+ }
+ }
+ return mir->virtualRegister() + VREG_DATA_OFFSET;
+}
+
+// Note: always call ensureDefined before calling useType/usePayload,
+// so that emitted-at-use operands are handled correctly.
+LUse LIRGeneratorShared::useType(MDefinition* mir, LUse::Policy policy) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(mir->virtualRegister() + VREG_TYPE_OFFSET, policy);
+}
+
+LUse LIRGeneratorShared::usePayload(MDefinition* mir, LUse::Policy policy) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy);
+}
+
+LUse LIRGeneratorShared::usePayloadAtStart(MDefinition* mir,
+ LUse::Policy policy) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ return LUse(VirtualRegisterOfPayload(mir), policy, true);
+}
+
+LUse LIRGeneratorShared::usePayloadInRegisterAtStart(MDefinition* mir) {
+ return usePayloadAtStart(mir, LUse::REGISTER);
+}
+
+void LIRGeneratorShared::fillBoxUses(LInstruction* lir, size_t n,
+ MDefinition* mir) {
+ ensureDefined(mir);
+ lir->getOperand(n)->toUse()->setVirtualRegister(mir->virtualRegister() +
+ VREG_TYPE_OFFSET);
+ lir->getOperand(n + 1)->toUse()->setVirtualRegister(
+ VirtualRegisterOfPayload(mir));
+}
+#endif
+
+LUse LIRGeneratorShared::useRegisterForTypedLoad(MDefinition* mir,
+ MIRType type) {
+ MOZ_ASSERT(type != MIRType::Value && type != MIRType::None);
+ MOZ_ASSERT(mir->type() == MIRType::Object || mir->type() == MIRType::Slots);
+
+#ifdef JS_PUNBOX64
+ // On x64, masm.loadUnboxedValue emits slightly less efficient code when
+ // the input and output use the same register and we're not loading an
+ // int32/bool/double, so we just call useRegister in this case.
+ if (type != MIRType::Int32 && type != MIRType::Boolean &&
+ type != MIRType::Double) {
+ return useRegister(mir);
+ }
+#endif
+
+ return useRegisterAtStart(mir);
+}
+
+LBoxAllocation LIRGeneratorShared::useBox(MDefinition* mir, LUse::Policy policy,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(
+ LUse(mir->virtualRegister(), policy, useAtStart),
+ LUse(VirtualRegisterOfPayload(mir), policy, useAtStart));
+#else
+ return LBoxAllocation(LUse(mir->virtualRegister(), policy, useAtStart));
+#endif
+}
+
+LBoxAllocation LIRGeneratorShared::useBoxOrTyped(MDefinition* mir) {
+ if (mir->type() == MIRType::Value) {
+ return useBox(mir);
+ }
+
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(useRegister(mir), LAllocation());
+#else
+ return LBoxAllocation(useRegister(mir));
+#endif
+}
+
+LBoxAllocation LIRGeneratorShared::useBoxOrTypedOrConstant(MDefinition* mir,
+ bool useConstant) {
+ if (useConstant && mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LBoxAllocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LBoxAllocation(LAllocation(mir->toConstant()));
+#endif
+ }
+
+ return useBoxOrTyped(mir);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64(MDefinition* mir,
+ LUse::Policy policy,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(vreg + INT64HIGH_INDEX, policy, useAtStart),
+ LUse(vreg + INT64LOW_INDEX, policy, useAtStart));
+#else
+ return LInt64Allocation(LUse(vreg, policy, useAtStart));
+#endif
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64Fixed(MDefinition* mir,
+ Register64 regs,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Int64);
+
+ ensureDefined(mir);
+
+ uint32_t vreg = mir->virtualRegister();
+#if JS_BITS_PER_WORD == 32
+ return LInt64Allocation(LUse(regs.high, vreg + INT64HIGH_INDEX, useAtStart),
+ LUse(regs.low, vreg + INT64LOW_INDEX, useAtStart));
+#else
+ return LInt64Allocation(LUse(regs.reg, vreg, useAtStart));
+#endif
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64FixedAtStart(MDefinition* mir,
+ Register64 regs) {
+ return useInt64Fixed(mir, regs, true);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64(MDefinition* mir,
+ bool useAtStart) {
+ // On 32-bit platforms, always load the value in registers.
+#if JS_BITS_PER_WORD == 32
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+#else
+ return useInt64(mir, LUse::ANY, useAtStart);
+#endif
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64AtStart(MDefinition* mir) {
+ return useInt64(mir, /* useAtStart = */ true);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64Register(MDefinition* mir,
+ bool useAtStart) {
+ return useInt64(mir, LUse::REGISTER, useAtStart);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64OrConstant(MDefinition* mir,
+ bool useAtStart) {
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64(mir, useAtStart);
+}
+
+LInt64Allocation LIRGeneratorShared::useInt64RegisterOrConstant(
+ MDefinition* mir, bool useAtStart) {
+ if (mir->isConstant()) {
+#if defined(JS_NUNBOX32)
+ return LInt64Allocation(LAllocation(mir->toConstant()), LAllocation());
+#else
+ return LInt64Allocation(LAllocation(mir->toConstant()));
+#endif
+ }
+ return useInt64Register(mir, useAtStart);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_inl_h */
diff --git a/js/src/jit/shared/Lowering-shared.cpp b/js/src/jit/shared/Lowering-shared.cpp
new file mode 100644
index 0000000000..9d6faf4a8a
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.cpp
@@ -0,0 +1,1034 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+#include "jit/LIR.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+
+#include "vm/SymbolType.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+bool LIRGeneratorShared::ShouldReorderCommutative(MDefinition* lhs,
+ MDefinition* rhs,
+ MInstruction* ins) {
+ // lhs and rhs are used by the commutative operator.
+ MOZ_ASSERT(lhs->hasDefUses());
+ MOZ_ASSERT(rhs->hasDefUses());
+
+ // Ensure that if there is a constant, then it is in rhs.
+ if (rhs->isConstant()) {
+ return false;
+ }
+ if (lhs->isConstant()) {
+ return true;
+ }
+
+ // Since clobbering binary operations clobber the left operand, prefer a
+ // non-constant lhs operand with no further uses. To be fully precise, we
+ // should check whether this is the *last* use, but checking hasOneDefUse()
+ // is a decent approximation which doesn't require any extra analysis.
+ bool rhsSingleUse = rhs->hasOneDefUse();
+ bool lhsSingleUse = lhs->hasOneDefUse();
+ if (rhsSingleUse) {
+ if (!lhsSingleUse) {
+ return true;
+ }
+ } else {
+ if (lhsSingleUse) {
+ return false;
+ }
+ }
+
+ // If this is a reduction-style computation, such as
+ //
+ // sum = 0;
+ // for (...)
+ // sum += ...;
+ //
+ // put the phi on the left to promote coalescing. This is fairly specific.
+ if (rhsSingleUse && rhs->isPhi() && rhs->block()->isLoopHeader() &&
+ ins == rhs->toPhi()->getLoopBackedgeOperand()) {
+ return true;
+ }
+
+ return false;
+}
+
+void LIRGeneratorShared::ReorderCommutative(MDefinition** lhsp,
+ MDefinition** rhsp,
+ MInstruction* ins) {
+ MDefinition* lhs = *lhsp;
+ MDefinition* rhs = *rhsp;
+
+ if (ShouldReorderCommutative(lhs, rhs, ins)) {
+ *rhsp = lhs;
+ *lhsp = rhs;
+ }
+}
+
+void LIRGeneratorShared::definePhiOneRegister(MPhi* phi, size_t lirIndex) {
+ LPhi* lir = current->getPhi(lirIndex);
+
+ uint32_t vreg = getVirtualRegister();
+
+ phi->setVirtualRegister(vreg);
+ lir->setDef(0, LDefinition(vreg, LDefinition::TypeFrom(phi->type())));
+ annotate(lir);
+}
+
+#ifdef JS_NUNBOX32
+void LIRGeneratorShared::definePhiTwoRegisters(MPhi* phi, size_t lirIndex) {
+ LPhi* type = current->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = current->getPhi(lirIndex + VREG_DATA_OFFSET);
+
+ uint32_t typeVreg = getVirtualRegister();
+ phi->setVirtualRegister(typeVreg);
+
+ uint32_t payloadVreg = getVirtualRegister();
+ MOZ_ASSERT(typeVreg + 1 == payloadVreg);
+
+ type->setDef(0, LDefinition(typeVreg, LDefinition::TYPE));
+ payload->setDef(0, LDefinition(payloadVreg, LDefinition::PAYLOAD));
+ annotate(type);
+ annotate(payload);
+}
+#endif
+
+void LIRGeneratorShared::lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* lir = block->getPhi(lirIndex);
+ lir->setOperand(inputPosition, LUse(operand->virtualRegister(), LUse::ANY));
+}
+
+LRecoverInfo* LIRGeneratorShared::getRecoverInfo(MResumePoint* rp) {
+ if (cachedRecoverInfo_ && cachedRecoverInfo_->mir() == rp) {
+ return cachedRecoverInfo_;
+ }
+
+ LRecoverInfo* recoverInfo = LRecoverInfo::New(gen, rp);
+ if (!recoverInfo) {
+ return nullptr;
+ }
+
+ cachedRecoverInfo_ = recoverInfo;
+ return recoverInfo;
+}
+
+#ifdef DEBUG
+bool LRecoverInfo::OperandIter::canOptimizeOutIfUnused() {
+ MDefinition* ins = **this;
+
+ // We check ins->type() in addition to ins->isUnused() because
+ // EliminateDeadResumePointOperands may replace nodes with the constant
+ // MagicValue(JS_OPTIMIZED_OUT).
+ if ((ins->isUnused() || ins->type() == MIRType::MagicOptimizedOut) &&
+ (*it_)->isResumePoint()) {
+ return !(*it_)->toResumePoint()->isObservableOperand(op_);
+ }
+
+ return true;
+}
+#endif
+
+#ifdef JS_NUNBOX32
+LSnapshot* LIRGeneratorShared::buildSnapshot(MResumePoint* rp,
+ BailoutKind kind) {
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo) {
+ return nullptr;
+ }
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot) {
+ return nullptr;
+ }
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* ins = *it;
+
+ if (ins->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ LAllocation* type = snapshot->typeOfSlot(index);
+ LAllocation* payload = snapshot->payloadOfSlot(index);
+ ++index;
+
+ if (ins->isBox()) {
+ ins = ins->toBox()->getOperand(0);
+ }
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(ins->isUnused(), !ins->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!ins->isConstant(), !ins->isEmittedAtUses());
+
+ // The register allocation will fill these fields in with actual
+ // register/stack assignments. During code generation, we can restore
+ // interpreter state with the given information. Note that for
+ // constants, including known types, we record a dummy placeholder,
+ // since we can recover the same information, much cleaner, from MIR.
+ if (ins->isConstant() || ins->isUnused()) {
+ *type = LAllocation();
+ *payload = LAllocation();
+ } else if (ins->type() != MIRType::Value) {
+ *type = LAllocation();
+ *payload = use(ins, LUse(LUse::KEEPALIVE));
+ } else {
+ *type = useType(ins, LUse::KEEPALIVE);
+ *payload = usePayload(ins, LUse::KEEPALIVE);
+ }
+ }
+
+ return snapshot;
+}
+
+#elif JS_PUNBOX64
+
+LSnapshot* LIRGeneratorShared::buildSnapshot(MResumePoint* rp,
+ BailoutKind kind) {
+ LRecoverInfo* recoverInfo = getRecoverInfo(rp);
+ if (!recoverInfo) {
+ return nullptr;
+ }
+
+ LSnapshot* snapshot = LSnapshot::New(gen, recoverInfo, kind);
+ if (!snapshot) {
+ return nullptr;
+ }
+
+ size_t index = 0;
+ for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
+ // Check that optimized out operands are in eliminable slots.
+ MOZ_ASSERT(it.canOptimizeOutIfUnused());
+
+ MDefinition* def = *it;
+
+ if (def->isRecoveredOnBailout()) {
+ continue;
+ }
+
+ if (def->isBox()) {
+ def = def->toBox()->getOperand(0);
+ }
+
+ // Guards should never be eliminated.
+ MOZ_ASSERT_IF(def->isUnused(), !def->isGuard());
+
+ // Snapshot operands other than constants should never be
+ // emitted-at-uses. Try-catch support depends on there being no
+ // code between an instruction and the LOsiPoint that follows it.
+ MOZ_ASSERT_IF(!def->isConstant(), !def->isEmittedAtUses());
+
+ LAllocation* a = snapshot->getEntry(index++);
+
+ if (def->isUnused()) {
+ *a = LAllocation();
+ continue;
+ }
+
+ *a = useKeepaliveOrConstant(def);
+ }
+
+ return snapshot;
+}
+#endif
+
+void LIRGeneratorShared::assignSnapshot(LInstruction* ins, BailoutKind kind) {
+ // assignSnapshot must be called before define/add, since
+ // it may add new instructions for emitted-at-use operands.
+ MOZ_ASSERT(ins->id() == 0);
+ MOZ_ASSERT(kind != BailoutKind::Unknown);
+
+ LSnapshot* snapshot = buildSnapshot(lastResumePoint_, kind);
+ if (!snapshot) {
+ abort(AbortReason::Alloc, "buildSnapshot failed");
+ return;
+ }
+
+ ins->assignSnapshot(snapshot);
+}
+
+void LIRGeneratorShared::assignSafepoint(LInstruction* ins, MInstruction* mir,
+ BailoutKind kind) {
+ MOZ_ASSERT(!osiPoint_);
+ MOZ_ASSERT(!ins->safepoint());
+
+ ins->initSafepoint(alloc());
+
+ MResumePoint* mrp =
+ mir->resumePoint() ? mir->resumePoint() : lastResumePoint_;
+ LSnapshot* postSnapshot = buildSnapshot(mrp, kind);
+ if (!postSnapshot) {
+ abort(AbortReason::Alloc, "buildSnapshot failed");
+ return;
+ }
+
+ osiPoint_ = new (alloc()) LOsiPoint(ins->safepoint(), postSnapshot);
+
+ if (!lirGraph_.noteNeedsSafepoint(ins)) {
+ abort(AbortReason::Alloc, "noteNeedsSafepoint failed");
+ return;
+ }
+}
+
+void LIRGeneratorShared::assignWasmSafepoint(LInstruction* ins,
+ MInstruction* mir) {
+ MOZ_ASSERT(!osiPoint_);
+ MOZ_ASSERT(!ins->safepoint());
+
+ ins->initSafepoint(alloc());
+
+ if (!lirGraph_.noteNeedsSafepoint(ins)) {
+ abort(AbortReason::Alloc, "noteNeedsSafepoint failed");
+ return;
+ }
+}
+
+#ifdef ENABLE_WASM_SIMD
+
+// Specialization analysis for SIMD operations. This is still x86-centric but
+// generalizes fairly easily to other architectures.
+
+// Optimization of v8x16.shuffle. The general byte shuffle+blend is very
+// expensive (equivalent to at least a dozen instructions), and we want to avoid
+// that if we can. So look for special cases - there are many.
+//
+// The strategy is to sort the operation into one of three buckets depending
+// on the shuffle pattern and inputs:
+//
+// - single operand; shuffles on these values are rotations, reversals,
+// transpositions, and general permutations
+// - single-operand-with-interesting-constant (especially zero); shuffles on
+// these values are often byte shift or scatter operations
+// - dual operand; shuffles on these operations are blends, catenated
+// shifts, and (in the worst case) general shuffle+blends
+//
+// We're not trying to solve the general problem, only to lower reasonably
+// expressed patterns that express common operations. Producers that produce
+// dense and convoluted patterns will end up with the general byte shuffle.
+// Producers that produce simpler patterns that easily map to hardware will
+// get faster code.
+//
+// In particular, these matchers do not try to combine transformations, so a
+// shuffle that optimally is lowered to rotate + permute32x4 + rotate, say, is
+// usually going to end up as a general byte shuffle.
+
+// Reduce a 0..31 byte mask to a 0..15 word mask if possible and if so return
+// true, updating *control.
+static bool ByteMaskToWordMask(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int16_t controlWords[8];
+ for (int i = 0; i < 16; i += 2) {
+ if (!((lanes[i] & 1) == 0 && lanes[i + 1] == lanes[i] + 1)) {
+ return false;
+ }
+ controlWords[i / 2] = lanes[i] / 2;
+ }
+ *control = SimdConstant::CreateX8(controlWords);
+ return true;
+}
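+
+// For example, the byte mask {2,3, 0,1, 4,5, 6,7, 8,9, 10,11, 12,13, 14,15}
+// reduces to the word mask {1,0,2,3,4,5,6,7}, while a mask whose first byte is
+// odd (or whose pairs are not consecutive even/odd bytes) does not reduce.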
+
+// Reduce a 0..31 byte mask to a 0..7 dword mask if possible and if so return
+// true, updating *control.
+static bool ByteMaskToDWordMask(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int32_t controlDWords[4];
+ for (int i = 0; i < 16; i += 4) {
+ if (!((lanes[i] & 3) == 0 && lanes[i + 1] == lanes[i] + 1 &&
+ lanes[i + 2] == lanes[i] + 2 && lanes[i + 3] == lanes[i] + 3)) {
+ return false;
+ }
+ controlDWords[i / 4] = lanes[i] / 4;
+ }
+ *control = SimdConstant::CreateX4(controlDWords);
+ return true;
+}
+
+// Reduce a 0..31 byte mask to a 0..3 qword mask if possible and if so return
+// true, updating *control.
+static bool ByteMaskToQWordMask(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int64_t controlQWords[2];
+ for (int i = 0; i < 16; i += 8) {
+ if (!((lanes[i] & 7) == 0 && lanes[i + 1] == lanes[i] + 1 &&
+ lanes[i + 2] == lanes[i] + 2 && lanes[i + 3] == lanes[i] + 3 &&
+ lanes[i + 4] == lanes[i] + 4 && lanes[i + 5] == lanes[i] + 5 &&
+ lanes[i + 6] == lanes[i] + 6 && lanes[i + 7] == lanes[i] + 7)) {
+ return false;
+ }
+ controlQWords[i / 8] = lanes[i] / 8;
+ }
+ *control = SimdConstant::CreateX2(controlQWords);
+ return true;
+}
+
+// Skip across consecutive values in lanes starting at i, returning the index
+// after the last element. Lane values must be <= len-1 ("masked").
+//
+// Since every element is a 1-element run, the return value is never the same as
+// the starting i.
+template <typename T>
+static int ScanIncreasingMasked(const T* lanes, int i) {
+ int len = int(16 / sizeof(T));
+ MOZ_ASSERT(i < len);
+ MOZ_ASSERT(lanes[i] <= len - 1);
+ i++;
+ while (i < len && lanes[i] == lanes[i - 1] + 1) {
+ MOZ_ASSERT(lanes[i] <= len - 1);
+ i++;
+ }
+ return i;
+}
+
+// Skip across consecutive values in lanes starting at i, returning the index
+// after the last element. Lane values must be <= len*2-1 ("unmasked"); the
+// values len-1 and len are not considered consecutive.
+//
+// Since every element is a 1-element run, the return value is never the same as
+// the starting i.
+template <typename T>
+static int ScanIncreasingUnmasked(const T* lanes, int i) {
+ int len = int(16 / sizeof(T));
+ MOZ_ASSERT(i < len);
+ if (lanes[i] < len) {
+ i++;
+ while (i < len && lanes[i] < len && lanes[i - 1] == lanes[i] - 1) {
+ i++;
+ }
+ } else {
+ i++;
+ while (i < len && lanes[i] >= len && lanes[i - 1] == lanes[i] - 1) {
+ i++;
+ }
+ }
+ return i;
+}
+
+// Skip lanes that equal v starting at i, returning the index just beyond the
+// last of those. There is no requirement that the initial lanes[i] == v.
+template <typename T>
+static int ScanConstant(const T* lanes, int v, int i) {
+ int len = int(16 / sizeof(T));
+ MOZ_ASSERT(i <= len);
+ while (i < len && lanes[i] == v) {
+ i++;
+ }
+ return i;
+}
+
+// Mask lane values denoting rhs elements into lhs elements.
+template <typename T>
+static void MaskLanes(T* result, const T* input) {
+ int len = int(16 / sizeof(T));
+ for (int i = 0; i < len; i++) {
+ result[i] = input[i] & (len - 1);
+ }
+}
+
+// Apply a transformation to each lane value.
+template <typename T>
+static void MapLanes(T* result, const T* input, int (*f)(int)) {
+ int len = int(16 / sizeof(T));
+ for (int i = 0; i < len; i++) {
+ result[i] = f(input[i]);
+ }
+}
+
+// Recognize an identity permutation, assuming lanes is masked.
+template <typename T>
+static bool IsIdentity(const T* lanes) {
+ return ScanIncreasingMasked(lanes, 0) == int(16 / sizeof(T));
+}
+
+// Recognize part of an identity permutation starting at start, with
+// the first value of the permutation expected to be bias.
+template <typename T>
+static bool IsIdentity(const T* lanes, int start, int len, int bias) {
+ if (lanes[start] != bias) {
+ return false;
+ }
+ for (int i = start + 1; i < start + len; i++) {
+ if (lanes[i] != lanes[i - 1] + 1) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// We can permute by dwords if the mask is reducible to a dword mask, and in
+// this case a single PSHUFD is enough.
+static bool TryPermute32x4(SimdConstant* control) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToDWordMask(&tmp)) {
+ return false;
+ }
+ *control = tmp;
+ return true;
+}
+
+// Can we perform a byte rotate right? We can use PALIGNR. The shift count is
+// just lanes[0], and *control is unchanged.
+static bool TryRotateRight8x16(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ // Look for the first run of consecutive bytes.
+ int i = ScanIncreasingMasked(lanes, 0);
+
+ // If we reach the end of the vector, the vector must start at 0.
+ if (i == 16) {
+ return lanes[0] == 0;
+ }
+
+ // Second run must start at source lane zero
+ if (lanes[i] != 0) {
+ return false;
+ }
+
+ // Second run must end at the end of the lane vector.
+ return ScanIncreasingMasked(lanes, i) == 16;
+}
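+
+// For example, the mask {3,4,5,6,7,8,9,10,11,12,13,14,15,0,1,2} is a single
+// run that wraps around, so it is recognized as a byte rotate right with a
+// PALIGNR count of lanes[0] == 3.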
+
+// We can permute by words if the mask is reducible to a word mask, but the x64
+// lowering is only efficient if we can permute the high and low quadwords
+// separately, possibly after swapping quadwords.
+static bool TryPermute16x8(SimdConstant* control) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToWordMask(&tmp)) {
+ return false;
+ }
+ const SimdConstant::I16x8& lanes = tmp.asInt16x8();
+ SimdConstant::I16x8 mapped;
+ MapLanes(mapped, lanes, [](int x) -> int { return x < 4 ? 0 : 1; });
+ int i = ScanConstant(mapped, mapped[0], 0);
+ if (i != 4) {
+ return false;
+ }
+ i = ScanConstant(mapped, mapped[4], 4);
+ if (i != 8) {
+ return false;
+ }
+ // Now compute the operation bits. `mapped` holds the adjusted lane mask.
+ memcpy(mapped, lanes, sizeof(mapped));
+ int16_t op = 0;
+ if (mapped[0] > mapped[4]) {
+ op |= LWasmPermuteSimd128::SWAP_QWORDS;
+ }
+ for (int i = 0; i < 8; i++) {
+ mapped[i] &= 3;
+ }
+ if (!IsIdentity(mapped, 0, 4, 0)) {
+ op |= LWasmPermuteSimd128::PERM_LOW;
+ }
+ if (!IsIdentity(mapped, 4, 4, 0)) {
+ op |= LWasmPermuteSimd128::PERM_HIGH;
+ }
+ MOZ_ASSERT(op != 0);
+ mapped[0] |= op << 8;
+ *control = SimdConstant::CreateX8(mapped);
+ return true;
+}
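+
+// For example, the word mask {4,5,6,7,0,1,2,3} swaps the quadwords but keeps
+// each one internally in order, so only SWAP_QWORDS is set; the word mask
+// {1,0,2,3,4,5,6,7} permutes within the low quadword only and sets just
+// PERM_LOW.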
+
+// A single word lane is copied into all the other lanes: PSHUF*W + PSHUFD.
+static bool TryBroadcast16x8(SimdConstant* control) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToWordMask(&tmp)) {
+ return false;
+ }
+ const SimdConstant::I16x8& lanes = tmp.asInt16x8();
+ if (ScanConstant(lanes, lanes[0], 0) < 8) {
+ return false;
+ }
+ *control = tmp;
+ return true;
+}
+
+// A single byte lane is copied into all the other lanes:
+// PUNPCK*BW + PSHUF*W + PSHUFD.
+static bool TryBroadcast8x16(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ if (ScanConstant(lanes, lanes[0], 0) < 16) {
+ return false;
+ }
+ return true;
+}
+
+// Look for permutations of a single operand.
+static LWasmPermuteSimd128::Op AnalyzePermute(SimdConstant* control) {
+ // Lane indices are input-agnostic for single-operand permutations.
+ SimdConstant::I8x16 controlBytes;
+ MaskLanes(controlBytes, control->asInt8x16());
+
+ // Get rid of no-ops immediately, so nobody else needs to check.
+ if (IsIdentity(controlBytes)) {
+ return LWasmPermuteSimd128::MOVE;
+ }
+
+ // Default control is the masked bytes.
+ *control = SimdConstant::CreateX16(controlBytes);
+
+ // Analysis order matters here and is architecture-dependent or even
+ // microarchitecture-dependent: ideally the cheapest implementation first.
+ // The Intel manual says that the cost of a PSHUFB is about five other
+ // operations, so make that our cutoff.
+ //
+ // Word, dword, and qword reversals are handled optimally by general permutes.
+ //
+// Byte reversals are probably best left to PSHUFB; no alternative rendition
+ // seems to reliably go below five instructions. (Discuss.)
+ //
+ // Word swaps within doublewords and dword swaps within quadwords are handled
+ // optimally by general permutes.
+ //
+ // Dword and qword broadcasts are handled by dword permute.
+
+ if (TryPermute32x4(control)) {
+ return LWasmPermuteSimd128::PERMUTE_32x4;
+ }
+ if (TryRotateRight8x16(control)) {
+ return LWasmPermuteSimd128::ROTATE_RIGHT_8x16;
+ }
+ if (TryPermute16x8(control)) {
+ return LWasmPermuteSimd128::PERMUTE_16x8;
+ }
+ if (TryBroadcast16x8(control)) {
+ return LWasmPermuteSimd128::BROADCAST_16x8;
+ }
+ if (TryBroadcast8x16(control)) {
+ return LWasmPermuteSimd128::BROADCAST_8x16;
+ }
+
+ // TODO: (From v8) Unzip and transpose generally have renditions that slightly
+ // beat a general permute (three or four instructions)
+ //
+ // TODO: (From MacroAssemblerX86Shared::ShuffleX4): MOVLHPS and MOVHLPS can be
+ // used when merging two values.
+ //
+ // TODO: Byteswap is MOV + PSLLW + PSRLW + POR, a small win over PSHUFB.
+
+ // The default operation is to permute bytes with the default control.
+ return LWasmPermuteSimd128::PERMUTE_8x16;
+}
+
+// Can we shift the bytes left or right by a constant? A shift is a run of
+// lanes from the rhs (which is zero) on one end and a run of values from the
+// lhs on the other end.
+static Maybe<LWasmPermuteSimd128::Op> TryShift8x16(SimdConstant* control) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+
+ // Represent all zero lanes by 16
+ SimdConstant::I8x16 zeroesMasked;
+ MapLanes(zeroesMasked, lanes, [](int x) -> int { return x >= 16 ? 16 : x; });
+
+ int i = ScanConstant(zeroesMasked, 16, 0);
+ int shiftLeft = i;
+ if (shiftLeft > 0 && lanes[shiftLeft] != 0) {
+ return Nothing();
+ }
+
+ i = ScanIncreasingUnmasked(zeroesMasked, i);
+ int shiftRight = 16 - i;
+ if (shiftRight > 0 && lanes[i - 1] != 15) {
+ return Nothing();
+ }
+
+ i = ScanConstant(zeroesMasked, 16, i);
+ if (i < 16 || (shiftRight > 0 && shiftLeft > 0) ||
+ (shiftRight == 0 && shiftLeft == 0)) {
+ return Nothing();
+ }
+
+ if (shiftRight) {
+ *control = SimdConstant::SplatX16(shiftRight);
+ return Some(LWasmPermuteSimd128::SHIFT_RIGHT_8x16);
+ }
+ *control = SimdConstant::SplatX16(shiftLeft);
+ return Some(LWasmPermuteSimd128::SHIFT_LEFT_8x16);
+}
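+
+// For example, with a zero rhs the mask {16,16,16,16,0,1,...,11} places four
+// zero bytes below a prefix of the lhs and becomes SHIFT_LEFT_8x16 with a
+// count of 4; the mirrored mask {4,5,...,15,16,16,16,16} becomes
+// SHIFT_RIGHT_8x16 with a count of 4.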
+
+static Maybe<LWasmPermuteSimd128::Op> AnalyzeShuffleWithZero(
+ SimdConstant* control) {
+ Maybe<LWasmPermuteSimd128::Op> op;
+ op = TryShift8x16(control);
+ if (op) {
+ return op;
+ }
+
+ // TODO: Optimization opportunity? A byte-blend-with-zero is just a CONST;
+ // PAND. This may beat the general byte blend code below.
+ return Nothing();
+}
+
+// Concat: if the result is the suffix (high bytes) of the rhs in front of a
+// prefix (low bytes) of the lhs then this is PALIGNR; ditto if the operands are
+// swapped.
+static Maybe<LWasmShuffleSimd128::Op> TryConcatRightShift8x16(
+ SimdConstant* control, bool* swapOperands) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ int i = ScanIncreasingUnmasked(lanes, 0);
+ MOZ_ASSERT(i < 16, "Single-operand run should have been handled elswhere");
+ // First run must end with 15 % 16
+ if ((lanes[i - 1] & 15) != 15) {
+ return Nothing();
+ }
+ // Second run must start with 0 % 16
+ if ((lanes[i] & 15) != 0) {
+ return Nothing();
+ }
+ // The two runs must come from different inputs
+ if ((lanes[i] & 16) == (lanes[i - 1] & 16)) {
+ return Nothing();
+ }
+ int suffixLength = i;
+
+ i = ScanIncreasingUnmasked(lanes, i);
+ // Must end at the left end
+ if (i != 16) {
+ return Nothing();
+ }
+
+ // If the suffix is from the lhs then swap the operands
+ if (lanes[0] < 16) {
+ *swapOperands = !*swapOperands;
+ }
+ *control = SimdConstant::SplatX16(suffixLength);
+ return Some(LWasmShuffleSimd128::CONCAT_RIGHT_SHIFT_8x16);
+}
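+
+// For example, the mask {24,25,26,27,28,29,30,31,0,1,2,3,4,5,6,7} selects the
+// high eight bytes of the rhs followed by the low eight bytes of the lhs, so
+// it is recognized as CONCAT_RIGHT_SHIFT_8x16 with a shift of 8 and no operand
+// swap.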
+
+// Blend words: if we pick words from both operands without a pattern but all
+// the input words stay in their position then this is PBLENDW (immediate mask);
+// this also handles all larger sizes on x64.
+static Maybe<LWasmShuffleSimd128::Op> TryBlendInt16x8(SimdConstant* control) {
+ SimdConstant tmp(*control);
+ if (!ByteMaskToWordMask(&tmp)) {
+ return Nothing();
+ }
+ SimdConstant::I16x8 masked;
+ MaskLanes(masked, tmp.asInt16x8());
+ if (!IsIdentity(masked)) {
+ return Nothing();
+ }
+ SimdConstant::I16x8 mapped;
+ MapLanes(mapped, tmp.asInt16x8(),
+ [](int x) -> int { return x < 8 ? 0 : -1; });
+ *control = SimdConstant::CreateX8(mapped);
+ return Some(LWasmShuffleSimd128::BLEND_16x8);
+}
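+
+// For example, a byte mask that reduces to the word mask {0,9,2,11,4,5,6,15}
+// keeps every word in its own position but draws words 1, 3, and 7 from the
+// rhs, so it becomes BLEND_16x8 with the rewritten control
+// {0,-1,0,-1,0,0,0,-1} marking the rhs lanes with -1.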
+
+// Blend bytes: if we pick bytes ditto then this is a byte blend, which can be
+// handled with a CONST, PAND, PANDNOT, and POR.
+//
+// TODO: Optimization opportunity? If we pick all but one lane from one
+// operand and at most one from the other then it could be a MOV + PEXTRB +
+// PINSRB (also if this element is not in its source location).
+static Maybe<LWasmShuffleSimd128::Op> TryBlendInt8x16(SimdConstant* control) {
+ SimdConstant::I8x16 masked;
+ MaskLanes(masked, control->asInt8x16());
+ if (!IsIdentity(masked)) {
+ return Nothing();
+ }
+ SimdConstant::I8x16 mapped;
+ MapLanes(mapped, control->asInt8x16(),
+ [](int x) -> int { return x < 16 ? 0 : -1; });
+ *control = SimdConstant::CreateX16(mapped);
+ return Some(LWasmShuffleSimd128::BLEND_8x16);
+}
+
+template <typename T>
+static bool MatchInterleave(const T* lanes, int lhs, int rhs, int len) {
+ for (int i = 0; i < len; i++) {
+ if (lanes[i * 2] != lhs + i || lanes[i * 2 + 1] != rhs + i) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Unpack/interleave:
+// - if we interleave the low (bytes/words/doublewords) of the inputs into
+// the output then this is UNPCKL*W (possibly with a swap of operands).
+// - if we interleave the high ditto then it is UNPCKH*W (ditto)
+template <typename T>
+static Maybe<LWasmShuffleSimd128::Op> TryInterleave(
+ const T* lanes, int lhs, int rhs, bool* swapOperands,
+ LWasmShuffleSimd128::Op lowOp, LWasmShuffleSimd128::Op highOp) {
+ int len = int(32 / (sizeof(T) * 4));
+ if (MatchInterleave(lanes, lhs, rhs, len)) {
+ return Some(lowOp);
+ }
+ if (MatchInterleave(lanes, rhs, lhs, len)) {
+ *swapOperands = !*swapOperands;
+ return Some(lowOp);
+ }
+ if (MatchInterleave(lanes, lhs + len, rhs + len, len)) {
+ return Some(highOp);
+ }
+ if (MatchInterleave(lanes, rhs + len, lhs + len, len)) {
+ *swapOperands = !*swapOperands;
+ return Some(highOp);
+ }
+ return Nothing();
+}
+
+static Maybe<LWasmShuffleSimd128::Op> TryInterleave64x2(SimdConstant* control,
+ bool* swapOperands) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToQWordMask(&tmp)) {
+ return Nothing();
+ }
+ const SimdConstant::I64x2& lanes = tmp.asInt64x2();
+ return TryInterleave(lanes, 0, 2, swapOperands,
+ LWasmShuffleSimd128::INTERLEAVE_LOW_64x2,
+ LWasmShuffleSimd128::INTERLEAVE_HIGH_64x2);
+}
+
+static Maybe<LWasmShuffleSimd128::Op> TryInterleave32x4(SimdConstant* control,
+ bool* swapOperands) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToDWordMask(&tmp)) {
+ return Nothing();
+ }
+ const SimdConstant::I32x4& lanes = tmp.asInt32x4();
+ return TryInterleave(lanes, 0, 4, swapOperands,
+ LWasmShuffleSimd128::INTERLEAVE_LOW_32x4,
+ LWasmShuffleSimd128::INTERLEAVE_HIGH_32x4);
+}
+
+static Maybe<LWasmShuffleSimd128::Op> TryInterleave16x8(SimdConstant* control,
+ bool* swapOperands) {
+ SimdConstant tmp = *control;
+ if (!ByteMaskToWordMask(&tmp)) {
+ return Nothing();
+ }
+ const SimdConstant::I16x8& lanes = tmp.asInt16x8();
+ return TryInterleave(lanes, 0, 8, swapOperands,
+ LWasmShuffleSimd128::INTERLEAVE_LOW_16x8,
+ LWasmShuffleSimd128::INTERLEAVE_HIGH_16x8);
+}
+
+static Maybe<LWasmShuffleSimd128::Op> TryInterleave8x16(SimdConstant* control,
+ bool* swapOperands) {
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ return TryInterleave(lanes, 0, 16, swapOperands,
+ LWasmShuffleSimd128::INTERLEAVE_LOW_8x16,
+ LWasmShuffleSimd128::INTERLEAVE_HIGH_8x16);
+}
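+
+// For example, the byte mask {0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23}
+// interleaves the low eight bytes of the lhs and rhs and is recognized as
+// INTERLEAVE_LOW_8x16; using lanes 8..15 and 24..31 instead yields
+// INTERLEAVE_HIGH_8x16.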
+
+static LWasmShuffleSimd128::Op AnalyzeTwoArgShuffle(SimdConstant* control,
+ bool* swapOperands) {
+ Maybe<LWasmShuffleSimd128::Op> op;
+ op = TryConcatRightShift8x16(control, swapOperands);
+ if (!op) {
+ op = TryBlendInt16x8(control);
+ }
+ if (!op) {
+ op = TryBlendInt8x16(control);
+ }
+ if (!op) {
+ op = TryInterleave64x2(control, swapOperands);
+ }
+ if (!op) {
+ op = TryInterleave32x4(control, swapOperands);
+ }
+ if (!op) {
+ op = TryInterleave16x8(control, swapOperands);
+ }
+ if (!op) {
+ op = TryInterleave8x16(control, swapOperands);
+ }
+ if (!op) {
+ op = Some(LWasmShuffleSimd128::SHUFFLE_BLEND_8x16);
+ }
+ return *op;
+}
+
+// Reorder the operands if that seems useful, notably, move a constant to the
+// right hand side. Rewrites the control to account for any move.
+static bool MaybeReorderShuffleOperands(MDefinition** lhs, MDefinition** rhs,
+ SimdConstant* control) {
+ if ((*lhs)->isWasmFloatConstant()) {
+ MDefinition* tmp = *lhs;
+ *lhs = *rhs;
+ *rhs = tmp;
+
+ int8_t controlBytes[16];
+ const SimdConstant::I8x16& lanes = control->asInt8x16();
+ for (unsigned i = 0; i < 16; i++) {
+ controlBytes[i] = lanes[i] ^ 16;
+ }
+ *control = SimdConstant::CreateX16(controlBytes);
+
+ return true;
+ }
+ return false;
+}
+
+Shuffle LIRGeneratorShared::AnalyzeShuffle(MWasmShuffleSimd128* ins) {
+ // Control may be updated, but only once we commit to an operation or when we
+ // swap operands.
+ SimdConstant control = ins->control();
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ // If only one of the inputs is used, determine which.
+ bool useLeft = true;
+ bool useRight = true;
+ if (lhs == rhs) {
+ useRight = false;
+ } else {
+ bool allAbove = true;
+ bool allBelow = true;
+ const SimdConstant::I8x16& lanes = control.asInt8x16();
+ for (unsigned i = 0; i < 16; i++) {
+ allAbove = allAbove && lanes[i] >= 16;
+ allBelow = allBelow && lanes[i] < 16;
+ }
+ if (allAbove) {
+ useLeft = false;
+ } else if (allBelow) {
+ useRight = false;
+ }
+ }
+
+ // Deal with one-ignored-input.
+ if (!(useLeft && useRight)) {
+ LWasmPermuteSimd128::Op op = AnalyzePermute(&control);
+ return Shuffle::permute(
+ useLeft ? Shuffle::Operand::LEFT : Shuffle::Operand::RIGHT, control,
+ op);
+ }
+
+ // Move constants to rhs.
+ bool swapOperands = MaybeReorderShuffleOperands(&lhs, &rhs, &control);
+
+ // Deal with constant rhs.
+ if (rhs->isWasmFloatConstant()) {
+ SimdConstant rhsConstant = rhs->toWasmFloatConstant()->toSimd128();
+ if (rhsConstant.isZeroBits()) {
+ Maybe<LWasmPermuteSimd128::Op> op = AnalyzeShuffleWithZero(&control);
+ if (op) {
+ return Shuffle::permute(
+ swapOperands ? Shuffle::Operand::RIGHT : Shuffle::Operand::LEFT,
+ control, *op);
+ }
+ }
+ }
+
+ // Two operands both of which are used. If there's one constant operand it is
+ // now on the rhs.
+ LWasmShuffleSimd128::Op op = AnalyzeTwoArgShuffle(&control, &swapOperands);
+ return Shuffle::shuffle(
+ swapOperands ? Shuffle::Operand::BOTH_SWAPPED : Shuffle::Operand::BOTH,
+ control, op);
+}
+
+# ifdef DEBUG
+void LIRGeneratorShared::ReportShuffleSpecialization(const Shuffle& s) {
+ switch (s.opd) {
+ case Shuffle::Operand::BOTH:
+ case Shuffle::Operand::BOTH_SWAPPED:
+ switch (*s.shuffleOp) {
+ case LWasmShuffleSimd128::SHUFFLE_BLEND_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> shuffle+blend 8x16");
+ break;
+ case LWasmShuffleSimd128::BLEND_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> blend 8x16");
+ break;
+ case LWasmShuffleSimd128::BLEND_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> blend 16x8");
+ break;
+ case LWasmShuffleSimd128::CONCAT_RIGHT_SHIFT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> concat+shift-right 8x16");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_HIGH_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 8x16");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_HIGH_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 16x8");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_HIGH_32x4:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 32x4");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_HIGH_64x2:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-high 64x2");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_LOW_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 8x16");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_LOW_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 16x8");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_LOW_32x4:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 32x4");
+ break;
+ case LWasmShuffleSimd128::INTERLEAVE_LOW_64x2:
+ js::wasm::ReportSimdAnalysis("shuffle -> interleave-low 64x2");
+ break;
+ default:
+ MOZ_CRASH("Unexpected shuffle op");
+ }
+ break;
+ case Shuffle::Operand::LEFT:
+ case Shuffle::Operand::RIGHT:
+ switch (*s.permuteOp) {
+ case LWasmPermuteSimd128::BROADCAST_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> broadcast 8x16");
+ break;
+ case LWasmPermuteSimd128::BROADCAST_16x8:
+ js::wasm::ReportSimdAnalysis("shuffle -> broadcast 16x8");
+ break;
+ case LWasmPermuteSimd128::MOVE:
+ js::wasm::ReportSimdAnalysis("shuffle -> move");
+ break;
+ case LWasmPermuteSimd128::PERMUTE_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> permute 8x16");
+ break;
+ case LWasmPermuteSimd128::PERMUTE_16x8: {
+ int op = s.control.asInt16x8()[0] >> 8;
+ char buf[256];
+ sprintf(buf, "shuffle -> permute 16x8%s%s%s",
+ op & LWasmPermuteSimd128::SWAP_QWORDS ? " swap" : "",
+ op & LWasmPermuteSimd128::PERM_HIGH ? " high" : "",
+ op & LWasmPermuteSimd128::PERM_LOW ? " low" : "");
+ js::wasm::ReportSimdAnalysis(buf);
+ break;
+ }
+ case LWasmPermuteSimd128::PERMUTE_32x4:
+ js::wasm::ReportSimdAnalysis("shuffle -> permute 32x4");
+ break;
+ case LWasmPermuteSimd128::ROTATE_RIGHT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> rotate-right 8x16");
+ break;
+ case LWasmPermuteSimd128::SHIFT_LEFT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> shift-left 8x16");
+ break;
+ case LWasmPermuteSimd128::SHIFT_RIGHT_8x16:
+ js::wasm::ReportSimdAnalysis("shuffle -> shift-right 8x16");
+ break;
+ default:
+ MOZ_CRASH("Unexpected permute op");
+ }
+ break;
+ }
+}
+# endif // DEBUG
+
+#endif // ENABLE_WASM_SIMD
diff --git a/js/src/jit/shared/Lowering-shared.h b/js/src/jit/shared/Lowering-shared.h
new file mode 100644
index 0000000000..cb9e3884cf
--- /dev/null
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -0,0 +1,384 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_shared_Lowering_shared_h
+#define jit_shared_Lowering_shared_h
+
+// This file declares the structures that are used for attaching LIR to a
+// MIRGraph.
+
+#include "jit/LIR.h"
+#include "jit/MIRGenerator.h"
+
+namespace js {
+namespace jit {
+
+class MIRGenerator;
+class MIRGraph;
+class MDefinition;
+class MInstruction;
+class LOsiPoint;
+
+#ifdef ENABLE_WASM_SIMD
+
+// Representation of the result of the shuffle analysis. See
+// Lowering-shared.cpp for more.
+
+struct Shuffle {
+ enum class Operand {
+ // Both inputs, in the original lhs-rhs order
+ BOTH,
+ // Both inputs, but in rhs-lhs order
+ BOTH_SWAPPED,
+ // Only the lhs input
+ LEFT,
+ // Only the rhs input
+ RIGHT,
+ };
+
+ Operand opd;
+ SimdConstant control;
+ mozilla::Maybe<LWasmPermuteSimd128::Op> permuteOp; // Single operands
+ mozilla::Maybe<LWasmShuffleSimd128::Op> shuffleOp; // Double operands
+
+ static Shuffle permute(Operand opd, SimdConstant control,
+ LWasmPermuteSimd128::Op op) {
+ MOZ_ASSERT(opd == Operand::LEFT || opd == Operand::RIGHT);
+ Shuffle s{opd, control, mozilla::Some(op), mozilla::Nothing()};
+ return s;
+ }
+
+ static Shuffle shuffle(Operand opd, SimdConstant control,
+ LWasmShuffleSimd128::Op op) {
+ MOZ_ASSERT(opd == Operand::BOTH || opd == Operand::BOTH_SWAPPED);
+ Shuffle s{opd, control, mozilla::Nothing(), mozilla::Some(op)};
+ return s;
+ }
+};
+
+#endif
+
+class LIRGeneratorShared {
+ protected:
+ MIRGenerator* gen;
+ MIRGraph& graph;
+ LIRGraph& lirGraph_;
+ LBlock* current;
+ MResumePoint* lastResumePoint_;
+ LRecoverInfo* cachedRecoverInfo_;
+ LOsiPoint* osiPoint_;
+
+ LIRGeneratorShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : gen(gen),
+ graph(graph),
+ lirGraph_(lirGraph),
+ current(nullptr),
+ lastResumePoint_(nullptr),
+ cachedRecoverInfo_(nullptr),
+ osiPoint_(nullptr) {}
+
+ MIRGenerator* mir() { return gen; }
+
+ // Abort errors are caught at end of visitInstruction. It is possible for
+ // multiple errors to be detected before the end of visitInstruction. In
+ // this case, we only report the first back to the MIRGenerator.
+ bool errored() { return gen->getOffThreadStatus().isErr(); }
+ void abort(AbortReason r, const char* message, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ if (errored()) {
+ return;
+ }
+
+ va_list ap;
+ va_start(ap, message);
+ auto reason_ = gen->abortFmt(r, message, ap);
+ va_end(ap);
+ gen->setOffThreadStatus(reason_);
+ }
+ void abort(AbortReason r) {
+ if (errored()) {
+ return;
+ }
+
+ auto reason_ = gen->abort(r);
+ gen->setOffThreadStatus(reason_);
+ }
+
+ static void ReorderCommutative(MDefinition** lhsp, MDefinition** rhsp,
+ MInstruction* ins);
+ static bool ShouldReorderCommutative(MDefinition* lhs, MDefinition* rhs,
+ MInstruction* ins);
+
+#ifdef ENABLE_WASM_SIMD
+ static Shuffle AnalyzeShuffle(MWasmShuffleSimd128* ins);
+# ifdef DEBUG
+ static void ReportShuffleSpecialization(const Shuffle& s);
+# endif
+#endif
+
+ // A backend can decide that an instruction should be emitted at its uses,
+ // rather than at its definition. To communicate this, set the
+  // instruction's virtual register to 0. When using the instruction,
+ // its virtual register is temporarily reassigned. To know to clear it
+ // after constructing the use information, the worklist bit is temporarily
+ // unset.
+ //
+ // The backend can use the worklist bit to determine whether or not a
+ // definition should be created.
+ inline void emitAtUses(MInstruction* mir);
+
+  // The lowest-level calls to use(), that is, those that do not wrap another
+  // call to use(), must call ensureDefined() first, before grabbing virtual
+  // register IDs.
+ inline void ensureDefined(MDefinition* mir);
+
+ void visitEmittedAtUses(MInstruction* ins);
+
+ // These all create a use of a virtual register, with an optional
+ // allocation policy.
+ //
+ // Some of these use functions have atStart variants.
+ // - non-atStart variants will tell the register allocator that the input
+ // allocation must be different from any Temp or Definition also needed for
+ // this LInstruction.
+ // - atStart variants relax that restriction and allow the input to be in
+ // the same register as any output Definition (but not Temps) used by the
+ // LInstruction. Note that it doesn't *imply* this will actually happen,
+ // but gives a hint to the register allocator that it can do it.
+ //
+ // TL;DR: Use non-atStart variants only if you need the input value after
+ // writing to any definitions (excluding temps), during code generation of
+ // this LInstruction. Otherwise, use atStart variants, which will lower
+ // register pressure.
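+  //
+  // For example (illustrative): a two-address lowering that clobbers its lhs
+  // typically pairs useRegisterAtStart() for that input with
+  // defineReuseInput(), since the input value is not needed once the output
+  // has been written.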
+ inline LUse use(MDefinition* mir, LUse policy);
+ inline LUse use(MDefinition* mir);
+ inline LUse useAtStart(MDefinition* mir);
+ inline LUse useRegister(MDefinition* mir);
+ inline LUse useRegisterAtStart(MDefinition* mir);
+ inline LUse useFixed(MDefinition* mir, Register reg);
+ inline LUse useFixed(MDefinition* mir, FloatRegister reg);
+ inline LUse useFixed(MDefinition* mir, AnyRegister reg);
+ inline LUse useFixedAtStart(MDefinition* mir, Register reg);
+ inline LUse useFixedAtStart(MDefinition* mir, AnyRegister reg);
+ inline LAllocation useOrConstant(MDefinition* mir);
+ inline LAllocation useOrConstantAtStart(MDefinition* mir);
+ // "Any" is architecture dependent, and will include registers and stack
+ // slots on X86, and only registers on ARM.
+ inline LAllocation useAny(MDefinition* mir);
+ inline LAllocation useAnyOrConstant(MDefinition* mir);
+ // "Storable" is architecture dependend, and will include registers and
+ // constants on X86 and only registers on ARM. This is a generic "things
+ // we can expect to write into memory in 1 instruction".
+ inline LAllocation useStorable(MDefinition* mir);
+ inline LAllocation useStorableAtStart(MDefinition* mir);
+ inline LAllocation useKeepalive(MDefinition* mir);
+ inline LAllocation useKeepaliveOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstant(MDefinition* mir);
+ inline LAllocation useRegisterOrConstantAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrZeroAtStart(MDefinition* mir);
+ inline LAllocation useRegisterOrZero(MDefinition* mir);
+ inline LAllocation useRegisterOrNonDoubleConstant(MDefinition* mir);
+
+ inline LUse useRegisterForTypedLoad(MDefinition* mir, MIRType type);
+
+#ifdef JS_NUNBOX32
+ inline LUse useType(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayload(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadAtStart(MDefinition* mir, LUse::Policy policy);
+ inline LUse usePayloadInRegisterAtStart(MDefinition* mir);
+
+ // Adds a box input to an instruction, setting operand |n| to the type and
+ // |n+1| to the payload. Does not modify the operands, instead expecting a
+ // policy to already be set.
+ inline void fillBoxUses(LInstruction* lir, size_t n, MDefinition* mir);
+#endif
+
+ // These create temporary register requests.
+ inline LDefinition temp(LDefinition::Type type = LDefinition::GENERAL,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LInt64Definition tempInt64(
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ inline LDefinition tempFloat32();
+ inline LDefinition tempDouble();
+#ifdef ENABLE_WASM_SIMD
+ inline LDefinition tempSimd128();
+#endif
+ inline LDefinition tempCopy(MDefinition* input, uint32_t reusedInput);
+
+ // Note that the fixed register has a GENERAL type,
+ // unless the arg is of FloatRegister type
+ inline LDefinition tempFixed(Register reg);
+ inline LDefinition tempFixed(FloatRegister reg);
+ inline LInt64Definition tempInt64Fixed(Register64 reg);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineFixed(LInstructionHelper<1, Ops, Temps>* lir,
+ MDefinition* mir, const LAllocation& output);
+
+ template <size_t Temps>
+ inline void defineBox(
+ details::LInstructionFixedDefsTempsHelper<BOX_PIECES, Temps>* lir,
+ MDefinition* mir, LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir,
+ MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64Fixed(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ const LInt64Allocation& output);
+
+ inline void defineReturn(LInstruction* lir, MDefinition* mir);
+
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir,
+ MDefinition* mir,
+ LDefinition::Policy policy = LDefinition::REGISTER);
+ template <size_t X>
+ inline void define(details::LInstructionFixedDefsTempsHelper<1, X>* lir,
+ MDefinition* mir, const LDefinition& def);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir,
+ MDefinition* mir, uint32_t operand);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineBoxReuseInput(
+ LInstructionHelper<BOX_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand);
+
+ template <size_t Ops, size_t Temps>
+ inline void defineInt64ReuseInput(
+ LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
+ uint32_t operand);
+
+ // Returns a box allocation for a Value-typed instruction.
+ inline LBoxAllocation useBox(MDefinition* mir,
+ LUse::Policy policy = LUse::REGISTER,
+ bool useAtStart = false);
+
+ // Returns a box allocation. The use is either typed, a Value, or
+ // a constant (if useConstant is true).
+ inline LBoxAllocation useBoxOrTypedOrConstant(MDefinition* mir,
+ bool useConstant);
+ inline LBoxAllocation useBoxOrTyped(MDefinition* mir);
+
+ // Returns an int64 allocation for an Int64-typed instruction.
+ inline LInt64Allocation useInt64(MDefinition* mir, LUse::Policy policy,
+ bool useAtStart);
+ inline LInt64Allocation useInt64(MDefinition* mir, bool useAtStart = false);
+ inline LInt64Allocation useInt64AtStart(MDefinition* mir);
+ inline LInt64Allocation useInt64OrConstant(MDefinition* mir,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64Register(MDefinition* mir,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64RegisterOrConstant(MDefinition* mir,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64Fixed(MDefinition* mir, Register64 regs,
+ bool useAtStart = false);
+ inline LInt64Allocation useInt64FixedAtStart(MDefinition* mir,
+ Register64 regs);
+
+ LInt64Allocation useInt64RegisterAtStart(MDefinition* mir) {
+ return useInt64Register(mir, /* useAtStart = */ true);
+ }
+ LInt64Allocation useInt64RegisterOrConstantAtStart(MDefinition* mir) {
+ return useInt64RegisterOrConstant(mir, /* useAtStart = */ true);
+ }
+ LInt64Allocation useInt64OrConstantAtStart(MDefinition* mir) {
+ return useInt64OrConstant(mir, /* useAtStart = */ true);
+ }
+
+ // Rather than defining a new virtual register, sets |ins| to have the same
+ // virtual register as |as|.
+ inline void redefine(MDefinition* ins, MDefinition* as);
+
+ template <typename LClass, typename... Args>
+ inline LClass* allocateVariadic(uint32_t numOperands, Args&&... args);
+
+ TempAllocator& alloc() const { return graph.alloc(); }
+
+ uint32_t getVirtualRegister() {
+ uint32_t vreg = lirGraph_.getVirtualRegister();
+
+ // If we run out of virtual registers, mark code generation as having
+ // failed and return a dummy vreg. Include a + 1 here for NUNBOX32
+ // platforms that expect Value vregs to be adjacent.
+ if (vreg + 1 >= MAX_VIRTUAL_REGISTERS) {
+ abort(AbortReason::Alloc, "max virtual registers");
+ return 1;
+ }
+ return vreg;
+ }
+
+ template <typename T>
+ void annotate(T* ins);
+ template <typename T>
+ void add(T* ins, MInstruction* mir = nullptr);
+
+ void lowerTypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+
+ void definePhiOneRegister(MPhi* phi, size_t lirIndex);
+#ifdef JS_NUNBOX32
+ void definePhiTwoRegisters(MPhi* phi, size_t lirIndex);
+#endif
+
+ void defineTypedPhi(MPhi* phi, size_t lirIndex) {
+ // One register containing the payload.
+ definePhiOneRegister(phi, lirIndex);
+ }
+ void defineUntypedPhi(MPhi* phi, size_t lirIndex) {
+#ifdef JS_NUNBOX32
+ // Two registers: one for the type, one for the payload.
+ definePhiTwoRegisters(phi, lirIndex);
+#else
+ // One register containing the full Value.
+ definePhiOneRegister(phi, lirIndex);
+#endif
+ }
+
+ LOsiPoint* popOsiPoint() {
+ LOsiPoint* tmp = osiPoint_;
+ osiPoint_ = nullptr;
+ return tmp;
+ }
+
+ LRecoverInfo* getRecoverInfo(MResumePoint* rp);
+ LSnapshot* buildSnapshot(MResumePoint* rp, BailoutKind kind);
+ bool assignPostSnapshot(MInstruction* mir, LInstruction* ins);
+
+ // Marks this instruction as fallible, meaning that before it performs
+ // effects (if any), it may check pre-conditions and bailout if they do not
+ // hold. This function informs the register allocator that it will need to
+ // capture appropriate state.
+ void assignSnapshot(LInstruction* ins, BailoutKind kind);
+
+ // Marks this instruction as needing to call into either the VM or GC. This
+ // function may build a snapshot that captures the result of its own
+ // instruction, and as such, should generally be called after define*().
+ void assignSafepoint(LInstruction* ins, MInstruction* mir,
+ BailoutKind kind = BailoutKind::DuringVMCall);
+
+ // Marks this instruction as needing a wasm safepoint.
+ void assignWasmSafepoint(LInstruction* ins, MInstruction* mir);
+
+ void lowerConstantDouble(double d, MInstruction* mir) {
+ define(new (alloc()) LDouble(d), mir);
+ }
+ void lowerConstantFloat32(float f, MInstruction* mir) {
+ define(new (alloc()) LFloat32(f), mir);
+ }
+
+ public:
+ // Whether to generate typed reads for element accesses with hole checks.
+ static bool allowTypedElementHoleCheck() { return false; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_shared_Lowering_shared_h */