author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483 /js/src/jit/x86
parent     Initial commit. (diff)
Adding upstream version 124.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/jit/x86')
-rw-r--r--  js/src/jit/x86/Assembler-x86.cpp            85
-rw-r--r--  js/src/jit/x86/Assembler-x86.h            1084
-rw-r--r--  js/src/jit/x86/BaseAssembler-x86.h          190
-rw-r--r--  js/src/jit/x86/CodeGenerator-x86.cpp       1504
-rw-r--r--  js/src/jit/x86/CodeGenerator-x86.h           49
-rw-r--r--  js/src/jit/x86/LIR-x86.h                    308
-rw-r--r--  js/src/jit/x86/Lowering-x86.cpp             835
-rw-r--r--  js/src/jit/x86/Lowering-x86.h                79
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86-inl.h    1386
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86.cpp      1897
-rw-r--r--  js/src/jit/x86/MacroAssembler-x86.h        1186
-rw-r--r--  js/src/jit/x86/SharedICHelpers-x86-inl.h     77
-rw-r--r--  js/src/jit/x86/SharedICHelpers-x86.h         70
-rw-r--r--  js/src/jit/x86/SharedICRegisters-x86.h       36
-rw-r--r--  js/src/jit/x86/Trampoline-x86.cpp           735
15 files changed, 9521 insertions, 0 deletions
diff --git a/js/src/jit/x86/Assembler-x86.cpp b/js/src/jit/x86/Assembler-x86.cpp
new file mode 100644
index 0000000000..0963b855b3
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.cpp
@@ -0,0 +1,85 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Assembler-x86.h"
+
+#include "gc/Marking.h"
+#include "util/Memory.h"
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator() : stackOffset_(0), current_() {}
+
+ABIArg ABIArgGenerator::next(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Float32:
+ case MIRType::Pointer:
+ case MIRType::WasmAnyRef:
+ case MIRType::StackResults:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint32_t);
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ break;
+ case MIRType::Simd128:
+ // On Win64, >64 bit args need to be passed by reference. However, wasm
+ // doesn't allow passing SIMD values to JS, so the only way to reach this
+ // is wasm to wasm calls. Ergo we can break the native ABI here and use
+ // the Wasm ABI instead.
+ stackOffset_ = AlignBytes(stackOffset_, SimdMemoryAlignment);
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += Simd128DataSize;
+ break;
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ AssemblerX86Shared::executableCopy(buffer);
+ for (RelativePatch& rp : jumps_) {
+ X86Encoding::SetRel32(buffer + rp.offset, rp.target);
+ }
+}
+
+class RelocationIterator {
+ CompactBufferReader reader_;
+ uint32_t offset_;
+
+ public:
+ explicit RelocationIterator(CompactBufferReader& reader) : reader_(reader) {}
+
+ bool read() {
+ if (!reader_.more()) {
+ return false;
+ }
+ offset_ = reader_.readUnsigned();
+ return true;
+ }
+
+ uint32_t offset() const { return offset_; }
+};
+
+static inline JitCode* CodeFromJump(uint8_t* jump) {
+ uint8_t* target = (uint8_t*)X86Encoding::GetRel32Target(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ RelocationIterator iter(reader);
+ while (iter.read()) {
+ JitCode* child = CodeFromJump(code->raw() + iter.offset());
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ MOZ_ASSERT(child == CodeFromJump(code->raw() + iter.offset()));
+ }
+}
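
A quick illustration of the ABIArgGenerator above: this ABI passes no arguments in registers, so every next() call yields a stack slot, with Int32/Float32/pointer-sized types taking 4 bytes and Double/Int64 taking 8. A minimal sketch (the signature is a made-up example, not taken from the tree):

  ABIArgGenerator abi;
  MIRType sig[] = {MIRType::Int32, MIRType::Double, MIRType::Pointer};
  uint32_t offsets[3];
  for (size_t i = 0; i < std::size(sig); i++) {
    ABIArg arg = abi.next(sig[i]);
    MOZ_ASSERT(arg.kind() == ABIArg::Stack);  // never a register on x86
    offsets[i] = arg.offsetFromArgBase();
  }
  // offsets == {0, 4, 12}; abi.stackBytesConsumedSoFar() == 16.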
diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h
new file mode 100644
index 0000000000..cedd94de09
--- /dev/null
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -0,0 +1,1084 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Assembler_x86_h
+#define jit_x86_Assembler_x86_h
+
+#include <iterator>
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/x86-shared/Constants-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register eax{X86Encoding::rax};
+static constexpr Register ecx{X86Encoding::rcx};
+static constexpr Register edx{X86Encoding::rdx};
+static constexpr Register ebx{X86Encoding::rbx};
+static constexpr Register esp{X86Encoding::rsp};
+static constexpr Register ebp{X86Encoding::rbp};
+static constexpr Register esi{X86Encoding::rsi};
+static constexpr Register edi{X86Encoding::rdi};
+
+static constexpr FloatRegister xmm0 =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister xmm1 =
+ FloatRegister(X86Encoding::xmm1, FloatRegisters::Double);
+static constexpr FloatRegister xmm2 =
+ FloatRegister(X86Encoding::xmm2, FloatRegisters::Double);
+static constexpr FloatRegister xmm3 =
+ FloatRegister(X86Encoding::xmm3, FloatRegisters::Double);
+static constexpr FloatRegister xmm4 =
+ FloatRegister(X86Encoding::xmm4, FloatRegisters::Double);
+static constexpr FloatRegister xmm5 =
+ FloatRegister(X86Encoding::xmm5, FloatRegisters::Double);
+static constexpr FloatRegister xmm6 =
+ FloatRegister(X86Encoding::xmm6, FloatRegisters::Double);
+static constexpr FloatRegister xmm7 =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+
+// Vector registers fixed for use with some instructions, e.g. PBLENDVB.
+static constexpr FloatRegister vmm0 =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+
+static constexpr Register InvalidReg{X86Encoding::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg = FloatRegister();
+
+static constexpr Register JSReturnReg_Type = ecx;
+static constexpr Register JSReturnReg_Data = edx;
+static constexpr Register StackPointer = esp;
+static constexpr Register FramePointer = ebp;
+static constexpr Register ReturnReg = eax;
+static constexpr FloatRegister ReturnFloat32Reg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Single);
+static constexpr FloatRegister ReturnDoubleReg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+static constexpr FloatRegister ReturnSimd128Reg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Simd128);
+static constexpr FloatRegister ScratchFloat32Reg_ =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Single);
+static constexpr FloatRegister ScratchDoubleReg_ =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Double);
+static constexpr FloatRegister ScratchSimd128Reg =
+ FloatRegister(X86Encoding::xmm7, FloatRegisters::Simd128);
+
+// Note, EDX:EAX is the system ABI 64-bit return register, and it is to our
+// advantage to keep the SpiderMonkey ABI in sync with the system ABI.
+//
+// However, using EDX here means that we have to use a register that does not
+// have a word or byte part (e.g. DX/DH/DL) in some other places; notably,
+// ABINonArgReturnReg1 is EDI. If this becomes a problem and ReturnReg64 has to
+// be something other than EDX:EAX, then jitted code that calls directly to C++
+// will need to shuffle the return value from EDX:EAX into ReturnReg64 directly
+// after the call. See bug 1730161 for discussion and a patch that does that.
+static constexpr Register64 ReturnReg64(edx, eax);
+
+// Avoid ebp, which is the FramePointer and is unavailable in some modes.
+static constexpr Register CallTempReg0 = edi;
+static constexpr Register CallTempReg1 = eax;
+static constexpr Register CallTempReg2 = ebx;
+static constexpr Register CallTempReg3 = ecx;
+static constexpr Register CallTempReg4 = esi;
+static constexpr Register CallTempReg5 = edx;
+
+// We have no arg regs, so our NonArgRegs are just our CallTempReg*
+static constexpr Register CallTempNonArgRegs[] = {edi, eax, ebx, ecx, esi, edx};
+static constexpr uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+class ABIArgGenerator {
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+};
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = eax;
+static constexpr Register ABINonArgReg1 = ebx;
+static constexpr Register ABINonArgReg2 = ecx;
+static constexpr Register ABINonArgReg3 = edx;
+
+// This register may be volatile or nonvolatile. Avoid xmm7 which is the
+// ScratchDoubleReg_.
+static constexpr FloatRegister ABINonArgDoubleReg =
+ FloatRegister(X86Encoding::xmm0, FloatRegisters::Double);
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different
+static constexpr Register ABINonArgReturnReg0 = ecx;
+static constexpr Register ABINonArgReturnReg1 = edi;
+static constexpr Register ABINonVolatileReg = ebx;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call that must preserve ABI argument, return, and
+// non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = ecx;
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions.
+static constexpr Register InstanceReg = esi;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Registers used for wasm tail calls operations.
+static constexpr Register WasmTailCallInstanceScratchReg = ABINonArgReg1;
+static constexpr Register WasmTailCallRAScratchReg = ABINonArgReg2;
+static constexpr Register WasmTailCallFPScratchReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = ebx;
+
+static constexpr Register OsrFrameReg = edx;
+static constexpr Register PreBarrierReg = edx;
+
+// Not enough registers for a PC register (R0-R2 use 2 registers each).
+static constexpr Register InterpreterPCReg = InvalidReg;
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg2;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg2;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg3;
+
+// The GCC stack is aligned on 16-byte boundaries. Ion does not maintain this
+// alignment for internal calls; wasm code does.
+#if defined(__GNUC__) && !defined(__MINGW32__)
+static constexpr uint32_t ABIStackAlignment = 16;
+#else
+static constexpr uint32_t ABIStackAlignment = 4;
+#endif
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments "
+ "which are used for "
+ "the constant sections of the code buffer. Thus it should be "
+ "larger than the "
+ "alignment for SIMD constants.");
+
+static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
+ "Stack alignment should be larger than any of the alignments "
+ "which are used for "
+ "spilled values. Thus it should be larger than the alignment "
+ "for SIMD accesses.");
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static constexpr uint32_t WasmTrapInstructionLength = 2;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform. (We could make
+// the tail offset 3, but I have opted for 4 as that results in a better-aligned
+// branch target.)
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+struct ImmTag : public Imm32 {
+ explicit ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+struct ImmType : public ImmTag {
+ explicit ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
+};
+
+static constexpr Scale ScalePointer = TimesFour;
+
+} // namespace jit
+} // namespace js
+
+#include "jit/x86-shared/Assembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+static inline Operand LowWord(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(LowWord(op.toAddress()));
+ case Operand::MEM_SCALE:
+ return Operand(LowWord(op.toBaseIndex()));
+ default:
+ MOZ_CRASH("Invalid operand type");
+ }
+}
+
+static inline Operand HighWord(const Operand& op) {
+ switch (op.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(HighWord(op.toAddress()));
+ case Operand::MEM_SCALE:
+ return Operand(HighWord(op.toBaseIndex()));
+ default:
+ MOZ_CRASH("Invalid operand type");
+ }
+}
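
The Address and BaseIndex overloads that these wrappers forward to live in the shared assembler headers; on this little-endian target they reduce to a displacement adjustment. A sketch of the idea, assuming the shared INT64LOW_OFFSET/INT64HIGH_OFFSET constants (0 and 4 bytes respectively):

  static inline Address LowWordSketch(const Address& a) {
    return Address(a.base, a.offset + INT64LOW_OFFSET);   // +0: low 32 bits
  }
  static inline Address HighWordSketch(const Address& a) {
    return Address(a.base, a.offset + INT64HIGH_OFFSET);  // +4: high 32 bits
  }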
+
+// Return operand from a JS -> JS call.
+static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
+ JSReturnReg_Data};
+
+class Assembler : public AssemblerX86Shared {
+ Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ void addPendingJump(JmpSrc src, ImmPtr target, RelocationKind kind) {
+ enoughMemory_ &=
+ jumps_.append(RelativePatch(src.offset(), target.value, kind));
+ if (kind == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.offset());
+ }
+ }
+
+ public:
+ using AssemblerX86Shared::call;
+ using AssemblerX86Shared::cmpl;
+ using AssemblerX86Shared::j;
+ using AssemblerX86Shared::jmp;
+ using AssemblerX86Shared::movl;
+ using AssemblerX86Shared::pop;
+ using AssemblerX86Shared::push;
+ using AssemblerX86Shared::retarget;
+ using AssemblerX86Shared::vmovsd;
+ using AssemblerX86Shared::vmovss;
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : jumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ // Actual assembly emitting functions.
+
+ void push(ImmGCPtr ptr) {
+ masm.push_i32(int32_t(ptr.value));
+ writeDataRelocation(ptr);
+ }
+ void push(const ImmWord imm) { push(Imm32(imm.value)); }
+ void push(const ImmPtr imm) { push(ImmWord(uintptr_t(imm.value))); }
+ void push(FloatRegister src) {
+ subl(Imm32(sizeof(double)), StackPointer);
+ vmovsd(src, Address(StackPointer, 0));
+ }
+
+ CodeOffset pushWithPatch(ImmWord word) {
+ masm.push_i32(int32_t(word.value));
+ return CodeOffset(masm.currentOffset());
+ }
+
+ void pop(FloatRegister src) {
+ vmovsd(Address(StackPointer, 0), src);
+ addl(Imm32(sizeof(double)), StackPointer);
+ }
+
+ CodeOffset movWithPatch(ImmWord word, Register dest) {
+ movl(Imm32(word.value), dest);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void movl(ImmGCPtr ptr, Register dest) {
+ masm.movl_i32r(uintptr_t(ptr.value), dest.encoding());
+ writeDataRelocation(ptr);
+ }
+ void movl(ImmGCPtr ptr, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::REG:
+ masm.movl_i32r(uintptr_t(ptr.value), dest.reg());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base());
+ writeDataRelocation(ptr);
+ break;
+ case Operand::MEM_SCALE:
+ masm.movl_i32m(uintptr_t(ptr.value), dest.disp(), dest.base(),
+ dest.index(), dest.scale());
+ writeDataRelocation(ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void movl(ImmWord imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ }
+ void movl(ImmPtr imm, Register dest) {
+ movl(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(ImmWord imm, Register dest) {
+ // Use xor for setting registers to zero, as it is specially optimized
+ // for this purpose on modern hardware. Note that it does clobber FLAGS
+ // though.
+ if (imm.value == 0) {
+ xorl(dest, dest);
+ } else {
+ movl(imm, dest);
+ }
+ }
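
Because the zero case lowers to xorl, this overload must not sit between a comparison and the branch that consumes its flags. A hypothetical misuse, purely for illustration:

  masm.cmpl(rhs, lhs);
  masm.mov(ImmWord(0), dest);        // becomes xorl: clobbers FLAGS...
  masm.j(Assembler::Equal, &label);  // ...so this condition is garbage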
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(wasm::SymbolicAddress imm, Register dest) {
+ masm.movl_i32r(-1, dest.encoding());
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), imm));
+ }
+ void mov(const Operand& src, Register dest) { movl(src, dest); }
+ void mov(Register src, const Operand& dest) { movl(src, dest); }
+ void mov(Imm32 imm, const Operand& dest) { movl(imm, dest); }
+ void mov(CodeLabel* label, Register dest) {
+ // Put a placeholder value in the instruction stream.
+ masm.movl_i32r(0, dest.encoding());
+ label->patchAt()->bind(masm.size());
+ }
+ void mov(Register src, Register dest) { movl(src, dest); }
+ void xchg(Register src, Register dest) { xchgl(src, dest); }
+ void lea(const Operand& src, Register dest) { return leal(src, dest); }
+ void cmovz32(const Operand& src, Register dest) { return cmovzl(src, dest); }
+ void cmovzPtr(const Operand& src, Register dest) { return cmovzl(src, dest); }
+
+ void fstp32(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fstp32_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void faddp() { masm.faddp(); }
+
+ void cmpl(ImmWord rhs, Register lhs) {
+ masm.cmpl_ir(rhs.value, lhs.encoding());
+ }
+ void cmpl(ImmPtr rhs, Register lhs) {
+ cmpl(ImmWord(uintptr_t(rhs.value)), lhs);
+ }
+ void cmpl(ImmGCPtr rhs, Register lhs) {
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.encoding());
+ writeDataRelocation(rhs);
+ }
+ void cmpl(Register rhs, Register lhs) {
+ masm.cmpl_rr(rhs.encoding(), lhs.encoding());
+ }
+ void cmpl(ImmGCPtr rhs, const Operand& lhs) {
+ switch (lhs.kind()) {
+ case Operand::REG:
+ masm.cmpl_i32r(uintptr_t(rhs.value), lhs.reg());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_REG_DISP:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.disp(), lhs.base());
+ writeDataRelocation(rhs);
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.cmpl_i32m(uintptr_t(rhs.value), lhs.address());
+ writeDataRelocation(rhs);
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ void cmpl(Register rhs, wasm::SymbolicAddress lhs) {
+ masm.cmpl_rm_disp32(rhs.encoding(), (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(masm.currentOffset()), lhs));
+ }
+ void cmpl(Imm32 rhs, wasm::SymbolicAddress lhs) {
+ JmpSrc src = masm.cmpl_im_disp32(rhs.value, (void*)-1);
+ append(wasm::SymbolicAccess(CodeOffset(src.offset()), lhs));
+ }
+
+ void adcl(Imm32 imm, Register dest) {
+ masm.adcl_ir(imm.value, dest.encoding());
+ }
+ void adcl(Register src, Register dest) {
+ masm.adcl_rr(src.encoding(), dest.encoding());
+ }
+ void adcl(Operand src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.adcl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.adcl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void sbbl(Imm32 imm, Register dest) {
+ masm.sbbl_ir(imm.value, dest.encoding());
+ }
+ void sbbl(Register src, Register dest) {
+ masm.sbbl_rr(src.encoding(), dest.encoding());
+ }
+ void sbbl(Operand src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.sbbl_mr(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.sbbl_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void mull(Register multiplier) { masm.mull_r(multiplier.encoding()); }
+
+ void shldl(const Imm32 imm, Register src, Register dest) {
+ masm.shldl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+ void shrdl(const Imm32 imm, Register src, Register dest) {
+ masm.shrdl_irr(imm.value, src.encoding(), dest.encoding());
+ }
+
+ void vhaddpd(FloatRegister rhs, FloatRegister lhsDest) {
+ MOZ_ASSERT(HasSSE3());
+ MOZ_ASSERT(rhs.size() == 16);
+ MOZ_ASSERT(lhsDest.size() == 16);
+ masm.vhaddpd_rr(rhs.encoding(), lhsDest.encoding(), lhsDest.encoding());
+ }
+
+ void fild(const Operand& src) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.fild_m(src.disp(), src.base());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+
+ void jmp(ImmPtr target, RelocationKind reloc = RelocationKind::HARDCODED) {
+ JmpSrc src = masm.jmp();
+ addPendingJump(src, target, reloc);
+ }
+ void j(Condition cond, ImmPtr target,
+ RelocationKind reloc = RelocationKind::HARDCODED) {
+ JmpSrc src = masm.jCC(static_cast<X86Encoding::Condition>(cond));
+ addPendingJump(src, target, reloc);
+ }
+
+ void jmp(JitCode* target) {
+ jmp(ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void j(Condition cond, JitCode* target) {
+ j(cond, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void call(JitCode* target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ }
+ void call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+ void call(ImmPtr target) {
+ JmpSrc src = masm.call();
+ addPendingJump(src, target, RelocationKind::HARDCODED);
+ }
+
+ // Emit a CALL or CMP (nop) instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled) {
+ CodeOffset offset(size());
+ JmpSrc src = enabled ? masm.call() : masm.cmp_eax();
+ addPendingJump(src, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ MOZ_ASSERT_IF(!oom(), size() - offset.offset() == ToggledCallSize(nullptr));
+ return offset;
+ }
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Size of a call instruction.
+ return 5;
+ }
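
The cmp_eax form works as the disabled state because both encodings are exactly five bytes: call rel32 is E8 plus a 32-bit displacement, and cmp eax, imm32 is 3D plus a 32-bit immediate. Toggling therefore only has to rewrite the opcode byte; a sketch of that patch (the real ToggleCall lives with the shared assembler code):

  void ToggleCallSketch(uint8_t* inst, bool enabled) {
    *inst = enabled ? 0xE8   // call rel32
                    : 0x3D;  // cmp eax, imm32: flags-only, a 5-byte nop here
  }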
+
+ // Re-routes pending jumps to an external target, flushing the label in the
+ // process.
+ void retarget(Label* label, ImmPtr target, RelocationKind reloc) {
+ if (label->used()) {
+ bool more;
+ X86Encoding::JmpSrc jmp(label->offset());
+ do {
+ X86Encoding::JmpSrc next;
+ more = masm.nextJump(jmp, &next);
+ addPendingJump(jmp, target, reloc);
+ jmp = next;
+ } while (more);
+ }
+ label->reset();
+ }
+
+ // Move a 32-bit immediate into a register where the immediate can be
+ // patched.
+ CodeOffset movlWithPatch(Imm32 imm, Register dest) {
+ masm.movl_i32r(imm.value, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(base + disp32) where disp32 can be patched.
+ CodeOffset movsblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movsbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movsbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzbl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzbl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movswl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movswl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movzwl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movzwl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(const Operand& src, Register dest) {
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovss(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_mr(src.address(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovss_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovsd(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_mr(src.address(), dest.encoding());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovsd_mr(src.disp(), src.base(), src.index(), src.scale(),
+ dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovupsWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(const Operand& src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (src.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_mr_disp32(src.disp(), src.base(), dest.encoding());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_mr(src.address(), dest.encoding());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *(base + disp32) where disp32 can be patched.
+ CodeOffset movbWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movb_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movb_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movw_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movw_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.movl_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.movl_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatchLow(Register regLow, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ return movlWithPatch(regLow, LowWord(dest));
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand low(
+ PatchedAbsoluteAddress(uint32_t(dest.address()) + INT64LOW_OFFSET));
+ return movlWithPatch(regLow, low);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset movlWithPatchHigh(Register regHigh, const Operand& dest) {
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP: {
+ return movlWithPatch(regHigh, HighWord(dest));
+ }
+ case Operand::MEM_ADDRESS32: {
+ Operand high(PatchedAbsoluteAddress(uint32_t(dest.address()) +
+ INT64HIGH_OFFSET));
+ return movlWithPatch(regHigh, high);
+ }
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovq_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovq_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovss(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovss_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovss_rm(src.encoding(), dest.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovss_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ void vmovsd(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovsd_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovsd_rm(src.encoding(), dest.address());
+ break;
+ case Operand::MEM_SCALE:
+ masm.vmovsd_rm(src.encoding(), dest.disp(), dest.base(), dest.index(),
+ dest.scale());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovups_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovups_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, const Operand& dest) {
+ MOZ_ASSERT(HasSSE2());
+ switch (dest.kind()) {
+ case Operand::MEM_REG_DISP:
+ masm.vmovdqu_rm_disp32(src.encoding(), dest.disp(), dest.base());
+ break;
+ case Operand::MEM_ADDRESS32:
+ masm.vmovdqu_rm(src.encoding(), dest.address());
+ break;
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *(addr + index*scale) where addr can be patched.
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress addr, Register index,
+ Scale scale, Register dest) {
+ masm.movl_mr(addr.addr, index.encoding(), scale, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Load from *src where src can be patched.
+ CodeOffset movsblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movsbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzblWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzbl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movswlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movswl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movzwlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movzwl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(PatchedAbsoluteAddress src, Register dest) {
+ masm.movl_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(PatchedAbsoluteAddress src, FloatRegister dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_mr(src.addr, dest.encoding());
+ return CodeOffset(masm.currentOffset());
+ }
+
+ // Store to *dest where dest can be patched.
+ CodeOffset movbWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movb_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movwWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movw_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset movlWithPatch(Register src, PatchedAbsoluteAddress dest) {
+ masm.movl_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovssWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovss_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovqWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovq_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovsdWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovsd_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdqaWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqa_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovapsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovaps_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovdquWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovdqu_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+ CodeOffset vmovupsWithPatch(FloatRegister src, PatchedAbsoluteAddress dest) {
+ MOZ_ASSERT(HasSSE2());
+ masm.vmovups_rm(src.encoding(), dest.addr);
+ return CodeOffset(masm.currentOffset());
+ }
+};
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that, once we have no
+// more actual argument registers to use, we fall back on whichever
+// CallTempReg* don't overlap the argument registers, and fail only once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
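
A hypothetical caller, grabbing registers for three integer arguments (the counts would normally come from walking a real signature):

  uint32_t usedIntArgs = 0, usedFloatArgs = 0;
  Register regs[3];
  for (Register& r : regs) {
    if (!GetTempRegForIntArg(usedIntArgs++, usedFloatArgs, &r)) {
      MOZ_CRASH("out of temp registers");
    }
  }
  // On x86 this hands out edi, eax, ebx in order.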
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Assembler_x86_h */
diff --git a/js/src/jit/x86/BaseAssembler-x86.h b/js/src/jit/x86/BaseAssembler-x86.h
new file mode 100644
index 0000000000..a5a5f67bf2
--- /dev/null
+++ b/js/src/jit/x86/BaseAssembler-x86.h
@@ -0,0 +1,190 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_BaseAssembler_x86_h
+#define jit_x86_BaseAssembler_x86_h
+
+#include "jit/x86-shared/BaseAssembler-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+namespace X86Encoding {
+
+class BaseAssemblerX86 : public BaseAssembler {
+ public:
+ // Arithmetic operations:
+
+ void adcl_ir(int32_t imm, RegisterID dst) {
+ spew("adcl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
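
CAN_SIGN_EXTEND_8_32 picks the short group-1 encoding whenever the immediate fits in a sign-extended byte: opcode 0x83 with an imm8 rather than 0x81 with an imm32. For a register destination that is a three-byte instruction instead of a six-byte one, e.g. (byte sequences for illustration):

  // adcl $1, %ebx     ->  83 d3 01           (imm8 form, 3 bytes)
  // adcl $1000, %ebx  ->  81 d3 e8 03 00 00  (imm32 form, 6 bytes)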
+
+ void adcl_im(int32_t imm, const void* addr) {
+ spew("adcl %d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_ADC);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_ADC);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void adcl_rr(RegisterID src, RegisterID dst) {
+ spew("adcl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, src, dst);
+ }
+
+ void adcl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("adcl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, offset, base, dst);
+ }
+
+ void adcl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("adcl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_ADC_GvEv, offset, base, index, scale, dst);
+ }
+
+ void sbbl_ir(int32_t imm, RegisterID dst) {
+ spew("sbbl $%d, %s", imm, GPReg32Name(dst));
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, dst, GROUP1_OP_SBB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, dst, GROUP1_OP_SBB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void sbbl_rr(RegisterID src, RegisterID dst) {
+ spew("sbbl %s, %s", GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, src, dst);
+ }
+
+ void sbbl_mr(int32_t offset, RegisterID base, RegisterID dst) {
+ spew("sbbl " MEM_ob ", %s", ADDR_ob(offset, base), GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, offset, base, dst);
+ }
+
+ void sbbl_mr(int32_t offset, RegisterID base, RegisterID index, int scale,
+ RegisterID dst) {
+ spew("sbbl " MEM_obs ", %s", ADDR_obs(offset, base, index, scale),
+ GPReg32Name(dst));
+ m_formatter.oneByteOp(OP_SBB_GvEv, offset, base, index, scale, dst);
+ }
+
+ using BaseAssembler::andl_im;
+ void andl_im(int32_t imm, const void* addr) {
+ spew("andl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_AND);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_AND);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::orl_im;
+ void orl_im(int32_t imm, const void* addr) {
+ spew("orl $0x%x, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_OR);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_OR);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ using BaseAssembler::subl_im;
+ void subl_im(int32_t imm, const void* addr) {
+ spew("subl $%d, %p", imm, addr);
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, addr, GROUP1_OP_SUB);
+ m_formatter.immediate8s(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, addr, GROUP1_OP_SUB);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+ void shldl_irr(int32_t imm, RegisterID src, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("shldl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHLD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
+
+ void shrdl_irr(int32_t imm, RegisterID src, RegisterID dst) {
+ MOZ_ASSERT(imm < 32);
+ spew("shrdl $%d, %s, %s", imm, GPReg32Name(src), GPReg32Name(dst));
+ m_formatter.twoByteOp8(OP2_SHRD, dst, src);
+ m_formatter.immediate8u(imm);
+ }
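
These double-shift instructions exist mainly so 64-bit shifts can be synthesized on a 32-bit target. A sketch of a constant 64-bit left shift (0 < imm < 32) over hypothetical lo/hi register halves, pairing shldl with the shared assembler's plain shll:

  // hi:lo <<= imm
  shldl_irr(imm, lo, hi);  // feed the top imm bits of lo into hi
  shll_ir(imm, lo);        // then shift the low half itself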
+
+ // SSE operations:
+
+ using BaseAssembler::vcvtsi2sd_mr;
+ void vcvtsi2sd_mr(const void* address, XMMRegisterID src0,
+ XMMRegisterID dst) {
+ twoByteOpSimd("vcvtsi2sd", VEX_SD, OP2_CVTSI2SD_VsdEd, address, src0, dst);
+ }
+
+ using BaseAssembler::vmovaps_mr;
+ void vmovaps_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovaps", VEX_PS, OP2_MOVAPS_VsdWsd, address, invalid_xmm,
+ dst);
+ }
+
+ using BaseAssembler::vmovdqa_mr;
+ void vmovdqa_mr(const void* address, XMMRegisterID dst) {
+ twoByteOpSimd("vmovdqa", VEX_PD, OP2_MOVDQ_VdqWdq, address, invalid_xmm,
+ dst);
+ }
+
+ void vhaddpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vhaddpd", VEX_PD, OP2_HADDPD, src1, src0, dst);
+ }
+
+ void vsubpd_rr(XMMRegisterID src1, XMMRegisterID src0, XMMRegisterID dst) {
+ twoByteOpSimd("vsubpd", VEX_PD, OP2_SUBPS_VpsWps, src1, src0, dst);
+ }
+
+ void fild_m(int32_t offset, RegisterID base) {
+ m_formatter.oneByteOp(OP_FILD, offset, base, FILD_OP_64);
+ }
+
+ // Misc instructions:
+
+ void pusha() {
+ spew("pusha");
+ m_formatter.oneByteOp(OP_PUSHA);
+ }
+
+ void popa() {
+ spew("popa");
+ m_formatter.oneByteOp(OP_POPA);
+ }
+};
+
+typedef BaseAssemblerX86 BaseAssemblerSpecific;
+
+} // namespace X86Encoding
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_BaseAssembler_x86_h */
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
new file mode 100644
index 0000000000..f06a08d3e8
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -0,0 +1,1504 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/CodeGenerator-x86.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/DebugOnly.h"
+
+#include <iterator>
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenTypes.h"
+#include "wasm/WasmInstanceData.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::BitwiseCast;
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+
+CodeGeneratorX86::CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorX86Shared(gen, graph, masm) {}
+
+ValueOperand CodeGeneratorX86::ToValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand CodeGeneratorX86::ToTempValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ DebugOnly<const LAllocation*> a = box->getOperand(0);
+ MOZ_ASSERT(!a->isConstant());
+
+ // On x86, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.mov(ImmWord(MIRTypeToTag(box->type())), ToRegister(type));
+}
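
That single tag write is all boxing costs here because a nunbox32 Value is just two 32-bit words held in two registers. An illustrative layout (not a type from the tree):

  struct NunBox32Sketch {
    uint32_t payload;  // low word: int32 / pointer / boolean payload
    uint32_t tag;      // high word: JSValueTag describing the payload
  };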
+
+void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
+ const AnyRegister in = ToAnyRegister(box->getOperand(0));
+ const ValueOperand out = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), in), out);
+
+ if (JitOptions.spectreValueMasking) {
+ Register scratch = ToRegister(box->spectreTemp());
+ masm.move32(Imm32(JSVAL_TAG_CLEAR), scratch);
+ masm.cmp32Move32(Assembler::Below, scratch, out.typeReg(), scratch,
+ out.typeReg());
+ }
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ Operand type = ToOperand(unbox->type());
+ Operand payload = ToOperand(unbox->payload());
+ Register output = ToRegister(unbox->output());
+ MUnbox* mir = unbox->mir();
+
+ JSValueTag tag = MIRTypeToTag(mir->type());
+ if (mir->fallible()) {
+ masm.cmp32(type, Imm32(tag));
+ bailoutIf(Assembler::NotEqual, unbox->snapshot());
+ } else {
+#ifdef DEBUG
+ Label ok;
+ masm.branch32(Assembler::Equal, type, Imm32(tag), &ok);
+ masm.assumeUnreachable("Infallible unbox type mismatch");
+ masm.bind(&ok);
+#endif
+ }
+
+ // Note: If spectreValueMasking is disabled, then this instruction will
+  // default to a no-op as long as the lowering allocates the same register for
+ // the output and the payload.
+ masm.unboxNonDouble(type, payload, output, ValueTypeFromMIRType(mir->type()));
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+
+ MOZ_ASSERT(out == ecx);
+ MOZ_ASSERT(temp == ebx);
+ MOZ_ASSERT(temp64 == Register64(edx, eax));
+
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.atomicLoad64(Synchronization::Load(), source, Register64(ecx, ebx),
+ Register64(edx, eax));
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.atomicLoad64(Synchronization::Load(), source, Register64(ecx, ebx),
+ Register64(edx, eax));
+ }
+
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
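
The rigid register assertions in these 64-bit atomics all trace back to LOCK CMPXCHG8B, the only 64-bit atomic primitive on 32-bit x86: it compares EDX:EAX against the memory operand, takes its replacement value from ECX:EBX, and loads the memory value into EDX:EAX on a mismatch. A plain atomic 64-bit load thus reduces to the usual idiom (a sketch, not copied from the macro assembler):

  // movl %edx, %ecx ; movl %eax, %ebx  # replacement == expected
  // lock cmpxchg8b (mem)               # edx:eax ends up holding the value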
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = Register64(value, ToRegister(lir->tempLow()));
+
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+
+ masm.push(value);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.atomicStore64(Synchronization::Store(), dest, temp1, temp2);
+ }
+ masm.pop(value);
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ DebugOnly<Register> newval = ToRegister(lir->newval());
+ DebugOnly<Register> temp = ToRegister(lir->tempLow());
+ Register out = ToRegister(lir->output());
+
+ MOZ_ASSERT(elements == esi);
+ MOZ_ASSERT(oldval == eax);
+ MOZ_ASSERT(newval.inspect() == edx);
+ MOZ_ASSERT(temp.inspect() == ebx);
+ MOZ_ASSERT(out == ecx);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ DebugOnly<uint32_t> framePushed = masm.framePushed();
+
+ // Save eax and edx before they're clobbered below.
+ masm.push(eax);
+ masm.push(edx);
+
+ auto restoreSavedRegisters = [&]() {
+ masm.pop(edx);
+ masm.pop(eax);
+ };
+
+ Register64 expected = Register64(edx, eax);
+ Register64 replacement = Register64(ecx, ebx);
+
+  // Load |oldval| and |newval| into |expected| and |replacement|,
+  // respectively.
+ {
+ // Use `esi` as a temp register.
+ Register bigInt = esi;
+ masm.push(bigInt);
+
+ masm.mov(oldval, bigInt);
+ masm.loadBigInt64(bigInt, expected);
+
+ // |newval| is stored in `edx`, which is already pushed onto the stack.
+ masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), bigInt);
+ masm.loadBigInt64(bigInt, replacement);
+
+ masm.pop(bigInt);
+ }
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, expected, replacement,
+ expected);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, expected, replacement,
+ expected);
+ }
+
+ // Move the result from `edx:eax` to `ecx:ebx`.
+ masm.move64(expected, replacement);
+
+ // OutOfLineCallVM tracks the currently pushed stack entries as reported by
+ // |masm.framePushed()|. We mustn't have any additional entries on the stack
+  // which weren't previously recorded by the safepoint; otherwise the GC
+ // complains when tracing the Ion frames, because the stack frames don't
+ // have their expected layout.
+ MOZ_ASSERT(framePushed == masm.framePushed());
+
+ OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, replacement, out);
+
+ // Use `edx:eax`, which are both already on the stack, as temp registers.
+ Register bigInt = eax;
+ Register temp2 = edx;
+
+ Label fail;
+ masm.newGCBigInt(bigInt, temp2, initialBigIntHeap(), &fail);
+ masm.initializeBigInt64(arrayType, bigInt, replacement);
+ masm.mov(bigInt, out);
+ restoreSavedRegisters();
+ masm.jump(ool->rejoin());
+
+ // Couldn't create the BigInt. Restore `edx:eax` and call into the VM.
+ masm.bind(&fail);
+ restoreSavedRegisters();
+ masm.jump(ool->entry());
+
+ // At this point `edx:eax` must have been restored to their original values.
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register out = ToRegister(lir->output());
+ Register64 temp2 = Register64(value, out);
+
+ MOZ_ASSERT(value == edx);
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+ MOZ_ASSERT(out == eax);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ DebugOnly<uint32_t> framePushed = masm.framePushed();
+
+ // Save edx before it's clobbered below.
+ masm.push(edx);
+
+ auto restoreSavedRegisters = [&]() { masm.pop(edx); };
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ }
+
+ // Move the result from `edx:eax` to `ecx:ebx`.
+ masm.move64(temp2, temp1);
+
+ // OutOfLineCallVM tracks the currently pushed stack entries as reported by
+ // |masm.framePushed()|. We mustn't have any additional entries on the stack
+ // which weren't previously recorded by the safepoint, otherwise the GC
+ // complains when tracing the Ion frames, because the stack frames don't
+ // have their expected layout.
+ MOZ_ASSERT(framePushed == masm.framePushed());
+
+ OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, temp1, out);
+
+ // Use `edx`, which is already on the stack, as a temp register.
+ Register temp = edx;
+
+ Label fail;
+ masm.newGCBigInt(out, temp, initialBigIntHeap(), &fail);
+ masm.initializeBigInt64(arrayType, out, temp1);
+ restoreSavedRegisters();
+ masm.jump(ool->rejoin());
+
+ // Couldn't create the BigInt. Restore `edx` and call into the VM.
+ masm.bind(&fail);
+ restoreSavedRegisters();
+ masm.jump(ool->entry());
+
+ // At this point `edx` must have been restored to its original value.
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register out = ToRegister(lir->output());
+ Register64 temp2 = Register64(value, out);
+
+ MOZ_ASSERT(value == edx);
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+ MOZ_ASSERT(out == eax);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ DebugOnly<uint32_t> framePushed = masm.framePushed();
+
+ // Save edx before it's clobbered below.
+ masm.push(edx);
+
+ auto restoreSavedRegisters = [&]() { masm.pop(edx); };
+
+ masm.loadBigInt64(value, temp1);
+
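+  // Spill the 64-bit operand to the stack: the CAS loop inside
+  // atomicFetchOp64 needs ecx:ebx to hold the computed replacement value, so
+  // the original operand is re-read from this stack slot on each iteration.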
+ masm.Push(temp1);
+
+ Address addr(masm.getStackPointer(), 0);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ }
+
+ masm.freeStack(sizeof(uint64_t));
+
+ // Move the result from `edx:eax` to `ecx:ebx`.
+ masm.move64(temp2, temp1);
+
+ // OutOfLineCallVM tracks the currently pushed stack entries as reported by
+ // |masm.framePushed()|. We mustn't have any additional entries on the stack
+ // which weren't previously recorded by the safepoint, otherwise the GC
+ // complains when tracing the Ion frames, because the stack frames don't
+ // have their expected layout.
+ MOZ_ASSERT(framePushed == masm.framePushed());
+
+ OutOfLineCode* ool = createBigIntOutOfLine(lir, arrayType, temp1, out);
+
+ // Use `edx`, which is already on the stack, as a temp register.
+ Register temp = edx;
+
+ Label fail;
+ masm.newGCBigInt(out, temp, initialBigIntHeap(), &fail);
+ masm.initializeBigInt64(arrayType, out, temp1);
+ restoreSavedRegisters();
+ masm.jump(ool->rejoin());
+
+ // Couldn't create the BigInt. Restore `edx` and call into the VM.
+ masm.bind(&fail);
+ restoreSavedRegisters();
+ masm.jump(ool->entry());
+
+ // At this point `edx` must have been restored to its original value.
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register tempLow = ToRegister(lir->tempLow());
+ Register64 temp2 = Register64(value, tempLow);
+
+ MOZ_ASSERT(value == edx);
+ MOZ_ASSERT(temp1 == Register64(ecx, ebx));
+ MOZ_ASSERT(temp2 == Register64(edx, eax));
+ MOZ_ASSERT(tempLow == eax);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ // Save edx before it's clobbered below.
+ masm.push(edx);
+
+ masm.loadBigInt64(value, temp1);
+
+ masm.Push(temp1);
+
+ Address addr(masm.getStackPointer(), 0);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, addr, dest, temp1,
+ temp2);
+ }
+
+ masm.freeStack(sizeof(uint64_t));
+
+ masm.pop(edx);
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+
+ if (input != temp) {
+ masm.mov(input, temp);
+ }
+
+ // Beware: convertUInt32ToDouble clobbers input.
+ masm.convertUInt32ToDouble(temp, ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ Register input = ToRegister(lir->input());
+ Register temp = ToRegister(lir->temp());
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ if (input != temp) {
+ masm.mov(input, temp);
+ }
+
+ // Beware: convertUInt32ToFloat32 clobbers input.
+ masm.convertUInt32ToFloat32(temp, output);
+}
+
+template <typename T>
+void CodeGeneratorX86::emitWasmLoad(T* ins) {
+ const MWasmLoad* mir = ins->mir();
+
+ mir->access().assertOffsetInGuardPages();
+ uint32_t offset = mir->access().offset();
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* memoryBase = ins->memoryBase();
+
+  // Lowering has set things up so that when the pointer is a known constant
+  // (its allocation is bogus), everything folds into a single displacement:
+  // either the access offset is zero and the constant pointer serves as the
+  // displacement, or the pointer is zero and the access offset serves as the
+  // displacement. Otherwise we use a base+index form.
+
+ Operand srcAddr =
+ ptr->isBogus()
+ ? Operand(ToRegister(memoryBase),
+ offset ? offset : mir->base()->toConstant()->toInt32())
+ : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ if (mir->type() == MIRType::Int64) {
+ MOZ_ASSERT_IF(mir->access().isAtomic(),
+ mir->access().type() != Scalar::Int64);
+ masm.wasmLoadI64(mir->access(), srcAddr, ToOutRegister64(ins));
+ } else {
+ masm.wasmLoad(mir->access(), srcAddr, ToAnyRegister(ins->output()));
+ }
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* ins) { emitWasmLoad(ins); }
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* ins) { emitWasmLoad(ins); }
+
+template <typename T>
+void CodeGeneratorX86::emitWasmStore(T* ins) {
+ const MWasmStore* mir = ins->mir();
+
+ mir->access().assertOffsetInGuardPages();
+ uint32_t offset = mir->access().offset();
+
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* memoryBase = ins->memoryBase();
+
+  // Lowering has set things up so that when the pointer is a known constant
+  // (its allocation is bogus), everything folds into a single displacement:
+  // either the access offset is zero and the constant pointer serves as the
+  // displacement, or the pointer is zero and the access offset serves as the
+  // displacement. Otherwise we use a base+index form.
+
+ Operand dstAddr =
+ ptr->isBogus()
+ ? Operand(ToRegister(memoryBase),
+ offset ? offset : mir->base()->toConstant()->toInt32())
+ : Operand(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ if (mir->access().type() == Scalar::Int64) {
+ Register64 value =
+ ToRegister64(ins->getInt64Operand(LWasmStoreI64::ValueIndex));
+ masm.wasmStoreI64(mir->access(), value, dstAddr);
+ } else {
+ AnyRegister value = ToAnyRegister(ins->getOperand(LWasmStore::ValueIndex));
+ masm.wasmStore(mir->access(), value, dstAddr);
+ }
+}
+
+void CodeGenerator::visitWasmStore(LWasmStore* ins) { emitWasmStore(ins); }
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* ins) {
+ emitWasmStore(ins);
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register output = ToRegister(ins->output());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ masm.wasmCompareExchange(mir->access(), memAddr, oldval, newval, output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register output = ToRegister(ins->output());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ masm.wasmAtomicExchange(mir->access(), memAddr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register temp =
+ ins->temp()->isBogusTemp() ? InvalidReg : ToRegister(ins->temp());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ Register out = ToRegister(ins->output());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ Register memoryBase = ToRegister(ins->memoryBase());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant()) {
+ masm.wasmAtomicFetchOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
+ temp, out);
+ } else {
+ masm.wasmAtomicFetchOp(mir->access(), op, ToRegister(value), memAddr, temp,
+ out);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasUses());
+
+ Register ptrReg = ToRegister(ins->ptr());
+ Register addrTemp = ToRegister(ins->addrTemp());
+ const LAllocation* value = ins->value();
+ AtomicOp op = mir->operation();
+ Register memoryBase = ToRegister(ins->memoryBase());
+
+ masm.leal(Operand(memoryBase, ptrReg, TimesOne, mir->access().offset()),
+ addrTemp);
+
+ Address memAddr(addrTemp, 0);
+ if (value->isConstant()) {
+ masm.wasmAtomicEffectOp(mir->access(), op, Imm32(ToInt32(value)), memAddr,
+ InvalidReg);
+ } else {
+ masm.wasmAtomicEffectOp(mir->access(), op, ToRegister(value), memAddr,
+ InvalidReg);
+ }
+}
+
+void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* ins) {
+ ins->mir()->access().assertOffsetInGuardPages();
+ uint32_t offset = ins->mir()->access().offset();
+
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+ BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ MOZ_ASSERT(ToRegister(ins->t1()) == ecx);
+ MOZ_ASSERT(ToRegister(ins->t2()) == ebx);
+ MOZ_ASSERT(ToOutRegister64(ins).high == edx);
+ MOZ_ASSERT(ToOutRegister64(ins).low == eax);
+
+ masm.wasmAtomicLoad64(ins->mir()->access(), srcAddr, Register64(ecx, ebx),
+ Register64(edx, eax));
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* ins) {
+ ins->mir()->access().assertOffsetInGuardPages();
+ uint32_t offset = ins->mir()->access().offset();
+
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+ Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ MOZ_ASSERT(ToRegister64(ins->expected()).low == eax);
+ MOZ_ASSERT(ToRegister64(ins->expected()).high == edx);
+ MOZ_ASSERT(ToRegister64(ins->replacement()).low == ebx);
+ MOZ_ASSERT(ToRegister64(ins->replacement()).high == ecx);
+ MOZ_ASSERT(ToOutRegister64(ins).low == eax);
+ MOZ_ASSERT(ToOutRegister64(ins).high == edx);
+
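+  // CMPXCHG8B compares edx:eax against the 64-bit memory operand: if they
+  // match, ecx:ebx is stored; otherwise the current memory value is loaded
+  // into edx:eax. Either way edx:eax ends up holding the old value, matching
+  // the register assignments asserted above.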
+ masm.append(ins->mir()->access(), wasm::TrapMachineInsn::Atomic,
+ FaultingCodeOffset(masm.currentOffset()));
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
+}
+
+template <typename T>
+void CodeGeneratorX86::emitWasmStoreOrExchangeAtomicI64(
+ T* ins, const wasm::MemoryAccessDesc& access) {
+ access.assertOffsetInGuardPages();
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+ Operand srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne,
+ access.offset());
+
+ DebugOnly<const LInt64Allocation> value = ins->value();
+ MOZ_ASSERT(ToRegister64(value).low == ebx);
+ MOZ_ASSERT(ToRegister64(value).high == ecx);
+
+ // eax and edx will be overwritten every time through the loop but
+ // memoryBase and ptr must remain live for a possible second iteration.
+
+ MOZ_ASSERT(ToRegister(memoryBase) != edx && ToRegister(memoryBase) != eax);
+ MOZ_ASSERT(ToRegister(ptr) != edx && ToRegister(ptr) != eax);
+
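+  // x86-32 cannot store a GPR pair to memory in a single atomic instruction,
+  // so the store/exchange is a CAS loop: if edx:eax doesn't match memory,
+  // CMPXCHG8B reloads the current value into edx:eax and we retry; once it
+  // matches, ecx:ebx is stored atomically. For the exchange case this leaves
+  // the old value in edx:eax for free.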
+ Label again;
+ masm.bind(&again);
+ masm.append(access, wasm::TrapMachineInsn::Atomic,
+ FaultingCodeOffset(masm.currentOffset()));
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, srcAddr);
+ masm.j(Assembler::Condition::NonZero, &again);
+}
+
+void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* ins) {
+ MOZ_ASSERT(ToRegister(ins->t1()) == edx);
+ MOZ_ASSERT(ToRegister(ins->t2()) == eax);
+
+ emitWasmStoreOrExchangeAtomicI64(ins, ins->mir()->access());
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* ins) {
+ MOZ_ASSERT(ToOutRegister64(ins).high == edx);
+ MOZ_ASSERT(ToOutRegister64(ins).low == eax);
+
+ emitWasmStoreOrExchangeAtomicI64(ins, ins->access());
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* ins) {
+ ins->access().assertOffsetInGuardPages();
+ uint32_t offset = ins->access().offset();
+
+ const LAllocation* memoryBase = ins->memoryBase();
+ const LAllocation* ptr = ins->ptr();
+
+ BaseIndex srcAddr(ToRegister(memoryBase), ToRegister(ptr), TimesOne, offset);
+
+ MOZ_ASSERT(ToRegister(memoryBase) == esi || ToRegister(memoryBase) == edi);
+ MOZ_ASSERT(ToRegister(ptr) == esi || ToRegister(ptr) == edi);
+
+ Register64 value = ToRegister64(ins->value());
+
+ MOZ_ASSERT(value.low == ebx);
+ MOZ_ASSERT(value.high == ecx);
+
+ Register64 output = ToOutRegister64(ins);
+
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(output.high == edx);
+
+ masm.Push(ecx);
+ masm.Push(ebx);
+
+ Address valueAddr(esp, 0);
+
+  // Here the `value` register pair acts as a temp; it is restored below.
+ masm.wasmAtomicFetchOp64(ins->access(), ins->operation(), valueAddr, srcAddr,
+ value, output);
+
+ masm.Pop(ebx);
+ masm.Pop(ecx);
+}
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate : public OutOfLineCodeBase<CodeGeneratorX86> {
+ LInstruction* ins_;
+
+ public:
+ explicit OutOfLineTruncate(LInstruction* ins) : ins_(ins) {
+ MOZ_ASSERT(ins_->isTruncateDToInt32() ||
+ ins_->isWasmBuiltinTruncateDToInt32());
+ }
+
+ void accept(CodeGeneratorX86* codegen) override {
+ codegen->visitOutOfLineTruncate(this);
+ }
+
+ LAllocation* input() { return ins_->getOperand(0); }
+ LDefinition* output() { return ins_->getDef(0); }
+ LDefinition* tempFloat() { return ins_->getTemp(0); }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ if (ins_->isTruncateDToInt32()) {
+ return ins_->toTruncateDToInt32()->mir()->bytecodeOffset();
+ }
+
+ return ins_->toWasmBuiltinTruncateDToInt32()->mir()->bytecodeOffset();
+ }
+};
+
+class OutOfLineTruncateFloat32 : public OutOfLineCodeBase<CodeGeneratorX86> {
+ LInstruction* ins_;
+
+ public:
+ explicit OutOfLineTruncateFloat32(LInstruction* ins) : ins_(ins) {
+ MOZ_ASSERT(ins_->isTruncateFToInt32() ||
+ ins_->isWasmBuiltinTruncateFToInt32());
+ }
+
+ void accept(CodeGeneratorX86* codegen) override {
+ codegen->visitOutOfLineTruncateFloat32(this);
+ }
+
+ LAllocation* input() { return ins_->getOperand(0); }
+ LDefinition* output() { return ins_->getDef(0); }
+ LDefinition* tempFloat() { return ins_->getTemp(0); }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ if (ins_->isTruncateFToInt32()) {
+      return ins_->toTruncateFToInt32()->mir()->bytecodeOffset();
+ }
+
+ return ins_->toWasmBuiltinTruncateFToInt32()->mir()->bytecodeOffset();
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register output = ToRegister(lir->getDef(0));
+
+ OutOfLineTruncate* ool = new (alloc()) OutOfLineTruncate(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ masm.branchTruncateDoubleMaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(ins);
+ addOutOfLineCode(ool, ins->mir());
+
+ masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register output = ToRegister(lir->getDef(0));
+
+ OutOfLineTruncateFloat32* ool = new (alloc()) OutOfLineTruncateFloat32(lir);
+ addOutOfLineCode(ool, lir->mir());
+
+ masm.branchTruncateFloat32MaybeModUint32(input, output, ool->entry());
+ masm.bind(ool->rejoin());
+}
+
+void CodeGeneratorX86::visitOutOfLineTruncate(OutOfLineTruncate* ool) {
+ FloatRegister input = ToFloatRegister(ool->input());
+ Register output = ToRegister(ool->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
+ Label failPopDouble;
+ // Push double.
+ masm.subl(Imm32(sizeof(double)), esp);
+ masm.storeDouble(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchDoubleNotInInt64Range(Address(esp, 0), output, &failPopDouble);
+
+ // Load double, perform 64-bit truncation.
+ masm.truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), output);
+
+ // Load low word, pop double and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopDouble);
+ masm.addl(Imm32(sizeof(double)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ool->tempFloat());
+
+ // Try to convert doubles representing integers within 2^32 of a signed
+ // integer, by adding/subtracting 2^32 and then trying to convert to int32.
+ // This has to be an exact conversion, as otherwise the truncation works
+ // incorrectly on the modified value.
+ {
+ ScratchDoubleScope fpscratch(masm);
+ masm.zeroDouble(fpscratch);
+ masm.vucomisd(fpscratch, input);
+ masm.j(Assembler::Parity, &fail);
+ }
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantDouble(4294967296.0, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantDouble(-4294967296.0, temp);
+ masm.bind(&skip);
+ }
+
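+    // Worked example: for input = 4294967295.0 (UINT32_MAX), the comparison
+    // above takes the Above branch, so temp = -4294967296.0 and temp + input
+    // = -1.0. vcvttsd2si then yields -1 == 0xFFFFFFFF, the correct modular
+    // result, and converting back reproduces -1.0 == temp, so the Equal
+    // branch below rejoins.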
+ masm.addDouble(input, temp);
+ masm.vcvttsd2si(temp, output);
+ ScratchDoubleScope fpscratch(masm);
+ masm.vcvtsi2sd(output, fpscratch, fpscratch);
+
+ masm.vucomisd(fpscratch, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ if (gen->compilingWasm()) {
+ masm.Push(InstanceReg);
+ }
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ saveVolatile(output);
+
+ if (gen->compilingWasm()) {
+ masm.setupWasmABICall();
+ masm.passABIArg(input, ABIType::Float64);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(ool->bytecodeOffset(), wasm::SymbolicAddress::ToInt32,
+ mozilla::Some(instanceOffset));
+ } else {
+ using Fn = int32_t (*)(double);
+ masm.setupUnalignedABICall(output);
+ masm.passABIArg(input, ABIType::Float64);
+ masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+ masm.storeCallInt32Result(output);
+
+ restoreVolatile(output);
+
+ if (gen->compilingWasm()) {
+ masm.Pop(InstanceReg);
+ }
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGeneratorX86::visitOutOfLineTruncateFloat32(
+ OutOfLineTruncateFloat32* ool) {
+ FloatRegister input = ToFloatRegister(ool->input());
+ Register output = ToRegister(ool->output());
+
+ Label fail;
+
+ if (Assembler::HasSSE3()) {
+ Label failPopFloat;
+
+    // Push the float32, but reserve a full 64 bits of stack so that the
+    // 64-bit value stored by fisttp fits.
+ masm.subl(Imm32(sizeof(uint64_t)), esp);
+ masm.storeFloat32(input, Operand(esp, 0));
+
+ // Check exponent to avoid fp exceptions.
+ masm.branchFloat32NotInInt64Range(Address(esp, 0), output, &failPopFloat);
+
+    // Load the float, perform a 64-bit truncation.
+ masm.truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), output);
+
+    // Load the low word, pop the 64 bits and jump back.
+ masm.load32(Address(esp, 0), output);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(ool->rejoin());
+
+ masm.bind(&failPopFloat);
+ masm.addl(Imm32(sizeof(uint64_t)), esp);
+ masm.jump(&fail);
+ } else {
+ FloatRegister temp = ToFloatRegister(ool->tempFloat());
+
+ // Try to convert float32 representing integers within 2^32 of a signed
+ // integer, by adding/subtracting 2^32 and then trying to convert to int32.
+ // This has to be an exact conversion, as otherwise the truncation works
+ // incorrectly on the modified value.
+ {
+ ScratchFloat32Scope fpscratch(masm);
+ masm.zeroFloat32(fpscratch);
+ masm.vucomiss(fpscratch, input);
+ masm.j(Assembler::Parity, &fail);
+ }
+
+ {
+ Label positive;
+ masm.j(Assembler::Above, &positive);
+
+ masm.loadConstantFloat32(4294967296.f, temp);
+ Label skip;
+ masm.jmp(&skip);
+
+ masm.bind(&positive);
+ masm.loadConstantFloat32(-4294967296.f, temp);
+ masm.bind(&skip);
+ }
+
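+    // Same trick as the double path. E.g. for input = 4294967040.f (the
+    // largest float32 below 2^32), temp = -4294967296.f, the sum is exactly
+    // -256.f, and vcvttss2si yields -256 == 0xFFFFFF00.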
+ masm.addFloat32(input, temp);
+ masm.vcvttss2si(temp, output);
+ ScratchFloat32Scope fpscratch(masm);
+ masm.vcvtsi2ss(output, fpscratch, fpscratch);
+
+ masm.vucomiss(fpscratch, temp);
+ masm.j(Assembler::Parity, &fail);
+ masm.j(Assembler::Equal, ool->rejoin());
+ }
+
+ masm.bind(&fail);
+ {
+ if (gen->compilingWasm()) {
+ masm.Push(InstanceReg);
+ }
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ saveVolatile(output);
+
+ masm.Push(input);
+
+ if (gen->compilingWasm()) {
+ masm.setupWasmABICall();
+ } else {
+ masm.setupUnalignedABICall(output);
+ }
+
+ masm.vcvtss2sd(input, input, input);
+ masm.passABIArg(input.asDouble(), ABIType::Float64);
+
+ if (gen->compilingWasm()) {
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ masm.callWithABI(ool->bytecodeOffset(), wasm::SymbolicAddress::ToInt32,
+ mozilla::Some(instanceOffset));
+ } else {
+ using Fn = int32_t (*)(double);
+ masm.callWithABI<Fn, JS::ToInt32>(ABIType::General,
+ CheckUnsafeCallWithABI::DontCheckOther);
+ }
+
+ masm.storeCallInt32Result(output);
+ masm.Pop(input);
+
+ restoreVolatile(output);
+
+ if (gen->compilingWasm()) {
+ masm.Pop(InstanceReg);
+ }
+ }
+
+ masm.jump(ool->rejoin());
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+ Label done;
+
+ masm.move32(Imm32(1), output);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, &done);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, &done);
+ }
+
+ masm.xorl(output, output);
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Instance)) ==
+ InstanceReg);
+
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ // We can use InstanceReg as temp register because we preserved it
+ // before.
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ MDefinition* mir = lir->mir();
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
+ if (mir->isWasmBuiltinModI64()) {
+ masm.xor64(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ masm.setupWasmABICall();
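+  // The i64 builtins take each operand as two explicit 32-bit halves, high
+  // word first.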
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ if (mir->isWasmBuiltinModI64()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64,
+ mozilla::Some(instanceOffset));
+ }
+
+  // The result is returned in edx:eax; eax is already output.low (asserted
+  // below), so only edx needs to be moved into output.high.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+
+ masm.bind(&done);
+ masm.Pop(InstanceReg);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MOZ_ASSERT(ToRegister(lir->getOperand(LDivOrModI64::Instance)) ==
+ InstanceReg);
+
+ masm.Push(InstanceReg);
+ int32_t framePushedAfterInstance = masm.framePushed();
+
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ // We can use InstanceReg as temp register because we preserved it
+ // before.
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InstanceReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MDefinition* mir = lir->mir();
+ int32_t instanceOffset = masm.framePushed() - framePushedAfterInstance;
+ if (mir->isWasmBuiltinModI64()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64,
+ mozilla::Some(instanceOffset));
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64,
+ mozilla::Some(instanceOffset));
+ }
+
+  // The result is returned in edx:eax; eax is already output.low (asserted
+  // below), so only edx needs to be moved into output.high.
+ masm.movl(edx, output.high);
+ MOZ_ASSERT(eax == output.low);
+
+ masm.Pop(InstanceReg);
+}
+
+void CodeGeneratorX86::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ MOZ_ASSERT(dividend == eax);
+ MOZ_ASSERT(output == edx);
+
+  // Sign-extend the lhs (eax) into edx to form the 64-bit dividend edx:eax.
+ masm.cdq();
+
+ masm.idiv(divisor);
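+  // The quotient is now in eax (|dividend|); the remainder in edx is unused,
+  // so edx (|output|) can immediately be reused for the result BigInt.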
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorX86::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+ MOZ_ASSERT(dividend == eax);
+ MOZ_ASSERT(output == edx);
+
+  // Sign-extend the lhs (eax) into edx to form the 64-bit dividend edx:eax.
+ masm.cdq();
+
+ masm.idiv(divisor);
+
+  // Move the remainder from edx into eax, freeing edx for the result BigInt.
+ masm.movl(output, dividend);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ Register64 falseExpr = ToRegister64(lir->falseExpr());
+ Register64 out = ToOutRegister64(lir);
+
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
+ Label done;
+ masm.branchTest32(Assembler::NonZero, cond, cond, &done);
+ masm.movl(falseExpr.low, out.low);
+ masm.movl(falseExpr.high, out.high);
+ masm.bind(&done);
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32. Some values may be stack allocated, and the "true" input is
+// reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit && selIs32bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
+ if (rhs->isRegister()) {
+ if (falseExpr->isRegister()) {
+ masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+ } else {
+ masm.cmp32Load32(cond, lhs, ToRegister(rhs), ToAddress(falseExpr),
+ trueExprAndDest);
+ }
+ } else {
+ if (falseExpr->isRegister()) {
+ masm.cmp32Move32(cond, lhs, ToAddress(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+ } else {
+ masm.cmp32Load32(cond, lhs, ToAddress(rhs), ToAddress(falseExpr),
+ trueExprAndDest);
+ }
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
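+  // x86-32 has no direct move between an XMM register and a GPR pair, so
+  // bounce the 64-bit value through a stack slot.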
+ masm.Push(input.high);
+ masm.Push(input.low);
+ masm.vmovq(Operand(esp, 0), ToFloatRegister(lir->output()));
+ masm.freeStack(sizeof(uint64_t));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ Register64 output = ToOutRegister64(lir);
+
+ masm.reserveStack(sizeof(uint64_t));
+ masm.vmovq(ToFloatRegister(lir->input()), Operand(esp, 0));
+ masm.Pop(output.low);
+ masm.Pop(output.high);
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ Register64 output = ToOutRegister64(lir);
+ Register input = ToRegister(lir->input());
+
+ if (lir->mir()->isUnsigned()) {
+ if (output.low != input) {
+ masm.movl(input, output.low);
+ }
+ masm.xorl(output.high, output.high);
+ } else {
+ MOZ_ASSERT(output.low == input);
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(output.high == edx);
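+    // cdq sign-extends eax into edx, producing the result in edx:eax.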
+ masm.cdq();
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+#ifdef DEBUG
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ MOZ_ASSERT(input.low == eax);
+ MOZ_ASSERT(output.low == eax);
+ MOZ_ASSERT(input.high == edx);
+ MOZ_ASSERT(output.high == edx);
+#endif
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move8SignExtend(eax, eax);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move16SignExtend(eax, eax);
+ break;
+ case MSignExtendInt64::Word:
+ break;
+ }
+ masm.cdq();
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ masm.movl(ToRegister(input.low()), output);
+ } else {
+ masm.movl(ToRegister(input.high()), output);
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index*) {
+ MOZ_CRASH("64-bit only");
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ // Generates no code on this platform because we just return the low part of
+ // the input register pair.
+ MOZ_ASSERT(ToRegister(lir->input()) == ToRegister(lir->output()));
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.clz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.ctz64(input, output.low);
+ masm.xorl(output.high, output.high);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ if (input.high == output) {
+ masm.orl(input.low, output);
+ } else if (input.low == output) {
+ masm.orl(input.high, output);
+ } else {
+ masm.movl(input.high, output);
+ masm.orl(input.low, output);
+ }
+
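+  // |output| is now zero iff the full 64-bit input was zero; materialize
+  // that as a boolean.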
+ masm.cmpl(Imm32(0), output);
+ masm.emitSet(Assembler::Equal, output);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ FloatRegister floatTemp = ToFloatRegister(lir->temp());
+
+ Label fail, convert;
+
+ MOZ_ASSERT(mir->input()->type() == MIRType::Double ||
+ mir->input()->type() == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ bool isSaturating = mir->isSaturating();
+ if (mir->input()->type() == MIRType::Float32) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating,
+ ool->entry(), ool->rejoin(), floatTemp);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, ool->entry(),
+ ool->rejoin(), floatTemp);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, ool->entry(),
+ ool->rejoin(), floatTemp);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, ool->entry(),
+ ool->rejoin(), floatTemp);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+ Register temp =
+ lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, temp);
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, temp);
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LInt64Allocation input = ins->getInt64Operand(0);
+ Register64 inputR = ToRegister64(input);
+ MOZ_ASSERT(inputR == ToOutRegister64(ins));
+ masm.notl(inputR.high);
+ masm.notl(inputR.low);
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
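+  // A 64-bit value is nonzero iff either half is nonzero: branch to the true
+  // block on the high half, then fall through to test the low half.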
+ masm.testl(input.high, input.high);
+ jumpToBlock(lir->ifTrue(), Assembler::NonZero);
+ masm.testl(input.low, input.low);
+ emitBranch(Assembler::NonZero, lir->ifTrue(), lir->ifFalse());
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* baab) {
+ // LBitAndAndBranch only represents single-word ANDs, hence it can't be
+ // 64-bit here.
+ MOZ_ASSERT(!baab->is64());
+ Register regL = ToRegister(baab->left());
+ if (baab->right()->isConstant()) {
+ masm.test32(regL, Imm32(ToInt32(baab->right())));
+ } else {
+ masm.test32(regL, ToRegister(baab->right()));
+ }
+ emitBranch(baab->cond(), baab->ifTrue(), baab->ifFalse());
+}
diff --git a/js/src/jit/x86/CodeGenerator-x86.h b/js/src/jit/x86/CodeGenerator-x86.h
new file mode 100644
index 0000000000..4f92bf615f
--- /dev/null
+++ b/js/src/jit/x86/CodeGenerator-x86.h
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_CodeGenerator_x86_h
+#define jit_x86_CodeGenerator_x86_h
+
+#include "jit/x86-shared/CodeGenerator-x86-shared.h"
+#include "jit/x86/Assembler-x86.h"
+
+namespace js {
+namespace jit {
+
+class OutOfLineTruncate;
+class OutOfLineTruncateFloat32;
+
+class CodeGeneratorX86 : public CodeGeneratorX86Shared {
+ protected:
+ CodeGeneratorX86(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+ template <typename T>
+ void emitWasmStoreOrExchangeAtomicI64(T* ins,
+ const wasm::MemoryAccessDesc& access);
+
+ public:
+ void visitOutOfLineTruncate(OutOfLineTruncate* ool);
+ void visitOutOfLineTruncateFloat32(OutOfLineTruncateFloat32* ool);
+};
+
+typedef CodeGeneratorX86 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_CodeGenerator_x86_h */
diff --git a/js/src/jit/x86/LIR-x86.h b/js/src/jit/x86/LIR-x86.h
new file mode 100644
index 0000000000..c7c9587e20
--- /dev/null
+++ b/js/src/jit/x86/LIR-x86.h
@@ -0,0 +1,308 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_LIR_x86_h
+#define jit_x86_LIR_x86_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 2> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp,
+ const LDefinition& spectreTemp, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ setOperand(0, in);
+ setTemp(0, temp);
+ setTemp(1, spectreTemp);
+ }
+
+ const LDefinition* spectreTemp() { return getTemp(1); }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(Unbox);
+
+ LUnbox() : LInstructionHelper(classOpcode) {}
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const LAllocation* payload() { return getOperand(0); }
+ const LAllocation* type() { return getOperand(1); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ LWasmUint32ToDouble(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 1> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ LWasmUint32ToFloat32(const LAllocation& input, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ setTemp(0, temp);
+ }
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+ static const size_t Instance = 2 * INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+ const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ setOperand(Instance, instance);
+ }
+
+ MDefinition* mir() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ return mir_;
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->bytecodeOffset();
+ }
+ return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2 + 1, 0> {
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+ static const size_t Instance = 2 * INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs,
+ const LAllocation& instance)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ setOperand(Instance, instance);
+ }
+
+ MDefinition* mir() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ return mir_;
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeDivideByZero();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->canBeNegativeDividend();
+ }
+ return mir_->toWasmBuiltinDivI64()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isWasmBuiltinDivI64() || mir_->isWasmBuiltinModI64());
+ if (mir_->isWasmBuiltinModI64()) {
+ return mir_->toWasmBuiltinModI64()->bytecodeOffset();
+ }
+ return mir_->toWasmBuiltinDivI64()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<INT64_PIECES, 1, 1> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ LWasmTruncateToInt64(const LAllocation& in, const LDefinition& temp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+
+ const LDefinition* temp() { return getTemp(0); }
+};
+
+class LWasmAtomicLoadI64 : public LInstructionHelper<INT64_PIECES, 2, 2> {
+ public:
+ LIR_HEADER(WasmAtomicLoadI64);
+
+ LWasmAtomicLoadI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LDefinition& t1, const LDefinition& t2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setTemp(0, t1);
+ setTemp(1, t2);
+ }
+
+ MWasmLoad* mir() const { return mir_->toWasmLoad(); }
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LDefinition* t1() { return getTemp(0); }
+ const LDefinition* t2() { return getTemp(1); }
+};
+
+class LWasmAtomicStoreI64 : public LInstructionHelper<0, 2 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicStoreI64);
+
+ LWasmAtomicStoreI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& value, const LDefinition& t1,
+ const LDefinition& t2)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, value);
+ setTemp(0, t1);
+ setTemp(1, t2);
+ }
+
+ MWasmStore* mir() const { return mir_->toWasmStore(); }
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation value() { return getInt64Operand(2); }
+ const LDefinition* t1() { return getTemp(0); }
+ const LDefinition* t2() { return getTemp(1); }
+};
+
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 2 + 2 * INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
+ LWasmCompareExchangeI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& expected,
+ const LInt64Allocation& replacement)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, expected);
+ setInt64Operand(2 + INT64_PIECES, replacement);
+ }
+
+ MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation expected() { return getInt64Operand(2); }
+ const LInt64Allocation replacement() {
+ return getInt64Operand(2 + INT64_PIECES);
+ }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 0> {
+ const wasm::MemoryAccessDesc& access_;
+
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& value,
+ const wasm::MemoryAccessDesc& access)
+ : LInstructionHelper(classOpcode), access_(access) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, value);
+ }
+
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation value() { return getInt64Operand(2); }
+ const wasm::MemoryAccessDesc& access() { return access_; }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 0> {
+ const wasm::MemoryAccessDesc& access_;
+ AtomicOp op_;
+
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& memoryBase, const LAllocation& ptr,
+ const LInt64Allocation& value,
+ const wasm::MemoryAccessDesc& access, AtomicOp op)
+ : LInstructionHelper(classOpcode), access_(access), op_(op) {
+ setOperand(0, memoryBase);
+ setOperand(1, ptr);
+ setInt64Operand(2, value);
+ }
+
+ const LAllocation* memoryBase() { return getOperand(0); }
+ const LAllocation* ptr() { return getOperand(1); }
+ const LInt64Allocation value() { return getInt64Operand(2); }
+ const wasm::MemoryAccessDesc& access() { return access_; }
+ AtomicOp operation() const { return op_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_LIR_x86_h */
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
new file mode 100644
index 0000000000..0577a0976e
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -0,0 +1,835 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/Lowering-x86.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/x86/Assembler-x86.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+LBoxAllocation LIRGeneratorX86::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+ MOZ_ASSERT(reg1 != reg2);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+ LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+LAllocation LIRGeneratorX86::useByteOpRegister(MDefinition* mir) {
+ return useFixed(mir, eax);
+}
+
+LAllocation LIRGeneratorX86::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useFixedAtStart(mir, eax);
+}
+
+LAllocation LIRGeneratorX86::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useFixed(mir, eax);
+}
+
+LDefinition LIRGeneratorX86::tempByteOpRegister() { return tempFixed(eax); }
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* inner = box->getOperand(0);
+
+ // If the box wrapped a double, it needs a new register.
+  // If the box wrapped a floating-point value, it needs a new register.
+ LDefinition spectreTemp =
+ JitOptions.spectreValueMasking ? temp() : LDefinition::BogusTemp();
+ defineBox(new (alloc()) LBoxFloatingPoint(useRegisterAtStart(inner),
+ tempCopy(inner, 0), spectreTemp,
+ inner->type()),
+ box);
+ return;
+ }
+
+ if (box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (inner->isConstant()) {
+ defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
+ return;
+ }
+
+ LBox* lir = new (alloc()) LBox(use(inner), inner->type());
+
+ // Otherwise, we should not define a new register for the payload portion
+ // of the output, so bypass defineBox().
+ uint32_t vreg = getVirtualRegister();
+
+ // Note that because we're using BogusTemp(), we do not change the type of
+ // the definition. We also do not define the first output as "TYPE",
+ // because it has no corresponding payload at (vreg + 1). Also note that
+ // although we copy the input's original type for the payload half of the
+ // definition, this is only for clarity. BogusTemp() definitions are
+ // ignored.
+ lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+ lir->setDef(1, LDefinition::BogusTemp());
+ box->setVirtualRegister(vreg);
+ add(lir);
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* inner = unbox->getOperand(0);
+
+ // An unbox on x86 reads in a type tag (either in memory or a register) and
+ // a payload. Unlike most instructions consuming a box, we ask for the type
+ // second, so that the result can re-use the first input.
+ MOZ_ASSERT(inner->type() == MIRType::Value);
+
+ ensureDefined(inner);
+
+ if (IsFloatingPointType(unbox->type())) {
+ LUnboxFloatingPoint* lir =
+ new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+ define(lir, unbox);
+ return;
+ }
+
+ // Swap the order we use the box pieces so we can re-use the payload register.
+ LUnbox* lir = new (alloc()) LUnbox;
+ bool reusePayloadReg = !JitOptions.spectreValueMasking ||
+ unbox->type() == MIRType::Int32 ||
+ unbox->type() == MIRType::Boolean;
+ if (reusePayloadReg) {
+ lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+ lir->setOperand(1, useType(inner, LUse::ANY));
+ } else {
+ lir->setOperand(0, usePayload(inner, LUse::REGISTER));
+ lir->setOperand(1, useType(inner, LUse::ANY));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ // Types and payloads form two separate intervals. If the type becomes dead
+ // before the payload, it could be used as a Value without the type being
+ // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+ // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+ // Instead, we create a new virtual register.
+ if (reusePayloadReg) {
+ defineReuseInput(lir, unbox, 0);
+ } else {
+ define(lir, unbox);
+ }
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, LUse(JSReturnReg_Type));
+ ins->setOperand(1, LUse(JSReturnReg_Data));
+ fillBoxUses(ins, 0, opd);
+ add(ins);
+}
+
+void LIRGeneratorX86::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+ LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+ type->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+ payload->setOperand(inputPosition,
+ LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+void LIRGeneratorX86::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+ uint32_t lowVreg = getVirtualRegister();
+
+ phi->setVirtualRegister(lowVreg);
+
+ uint32_t highVreg = getVirtualRegister();
+ MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+ low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+ high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+ annotate(high);
+ annotate(low);
+}
+
+void LIRGeneratorX86::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ MDefinition* operand = phi->getOperand(inputPosition);
+ LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+ LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+ low->setOperand(inputPosition,
+ LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+ high->setOperand(
+ inputPosition,
+ LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+void LIRGeneratorX86::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorX86::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorX86::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ bool needsTemp = true;
+
+ if (rhs->isConstant()) {
+ int64_t constant = rhs->toConstant()->toInt64();
+ int32_t shift = mozilla::FloorLog2(constant);
+ // See special cases in CodeGeneratorX86Shared::visitMulI64.
+ if (constant >= -1 && constant <= 2) {
+ needsTemp = false;
+ }
+ if (constant > 0 && int64_t(1) << shift == constant) {
+ needsTemp = false;
+ }
+ }
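+  // For example, a multiply by 8 (a power of two) is emitted as shifts on
+  // the two halves, and constants in [-1, 2] become a negation, a clear, a
+  // no-op, or an add, so no temporary register is required in those cases.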
+
+  // MulI64 on x86 needs the output in edx:eax.
+ ins->setInt64Operand(
+ 0, useInt64Fixed(lhs, Register64(edx, eax), /*useAtStart = */ true));
+ ins->setInt64Operand(INT64_PIECES, useInt64OrConstant(rhs));
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+
+ defineInt64Fixed(ins, mir,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useFixed(ins->elements(), esi);
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LUse oldval = useFixed(ins->oldval(), eax);
+ LUse newval = useFixed(ins->newval(), edx);
+ LDefinition temp = tempFixed(ebx);
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp);
+ defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerCompareExchangeTypedArrayElement(ins, /* useI386ByteRegisters = */ true);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LAllocation value = useFixed(ins->value(), edx);
+ LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));
+
+ auto* lir = new (alloc())
+ LAtomicExchangeTypedArrayElement64(elements, index, value, temp);
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerAtomicExchangeTypedArrayElement(ins, /*useI386ByteRegisters=*/true);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ LAllocation value = useFixed(ins->value(), edx);
+ LInt64Definition temp = tempInt64Fixed(Register64(ecx, ebx));
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ LDefinition tempLow = tempFixed(eax);
+
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp, tempLow);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp);
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ lowerAtomicTypedArrayElementBinop(ins, /* useI386ByteRegisters = */ true);
+}
+
+void LIRGeneratorX86::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, tempFixed(ebx),
+ tempInt64Fixed(Register64(edx, eax)));
+ defineFixed(lir, ins, LAllocation(AnyRegister(ecx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX86::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useFixed(ins->value(), edx);
+ LInt64Definition temp1 = tempInt64Fixed(Register64(ecx, ebx));
+ LDefinition temp2 = tempFixed(eax);
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, temp1, temp2), ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir = new (alloc())
+ LWasmUint32ToDouble(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir = new (alloc())
+ LWasmUint32ToFloat32(useRegisterAtStart(ins->input()), temp());
+ define(lir, ins);
+}
+
+// If the base is a constant and either it is zero or the access offset is
+// zero, code generation will fold the values into the access. Allocate the
+// pointer to a register only if that can't happen.
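+// For example, a load from constant base 0 with a nonzero offset (or from a
+// nonzero constant base with offset 0) can be encoded with a displacement
+// alone, so no base register is needed.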
+
+static bool OptimizableConstantAccess(MDefinition* base,
+ const wasm::MemoryAccessDesc& access) {
+ MOZ_ASSERT(base->isConstant());
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ if (!(base->toConstant()->isInt32(0) || access.offset() == 0)) {
+ return false;
+ }
+ if (access.type() == Scalar::Int64) {
+    // For int64 accesses on 32-bit systems we will need to add another
+    // offset of 4 to access the high part of the value; make sure the
+    // combined offset does not overflow int32.
+ int32_t v;
+ if (base->toConstant()->isInt32(0)) {
+ v = access.offset();
+ } else {
+ v = base->toConstant()->toInt32();
+ }
+ return v <= int32_t(INT32_MAX - INT64HIGH_OFFSET);
+ }
+ return true;
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
+ auto* lir = new (alloc())
+ LWasmAtomicLoadI64(useRegister(memoryBase), useRegister(base),
+ tempFixed(ecx), tempFixed(ebx));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ LAllocation baseAlloc;
+ if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
+ baseAlloc = ins->type() == MIRType::Int64 ? useRegister(base)
+ : useRegisterAtStart(base);
+ }
+
+ if (ins->type() != MIRType::Int64) {
+ auto* lir =
+ new (alloc()) LWasmLoad(baseAlloc, useRegisterAtStart(memoryBase));
+ define(lir, ins);
+ return;
+ }
+
+ // "AtStart" register usage does not work for the 64-bit case because we
+ // clobber two registers for the result and may need two registers for a
+ // scaled address; we can't guarantee non-interference.
+
+ auto* lir = new (alloc()) LWasmLoadI64(baseAlloc, useRegister(memoryBase));
+
+ Scalar::Type accessType = ins->access().type();
+ if (accessType == Scalar::Int8 || accessType == Scalar::Int16 ||
+ accessType == Scalar::Int32) {
+ // We use cdq to sign-extend the result and cdq demands these registers.
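+    // (For a sign-extending load such as Int8, the value is loaded into eax
+    // and cdq then fills edx with the sign, leaving the result in edx:eax.)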
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ defineInt64(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64 && ins->access().isAtomic()) {
+ auto* lir = new (alloc())
+ LWasmAtomicStoreI64(useRegister(memoryBase), useRegister(base),
+ useInt64Fixed(ins->value(), Register64(ecx, ebx)),
+ tempFixed(edx), tempFixed(eax));
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc;
+ if (!base->isConstant() || !OptimizableConstantAccess(base, ins->access())) {
+ baseAlloc = useRegisterAtStart(base);
+ }
+
+ LAllocation valueAlloc;
+ switch (ins->access().type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ // See comment for LIRGeneratorX86::useByteOpRegister.
+ valueAlloc = useFixed(ins->value(), eax);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Float32:
+ case Scalar::Float64:
+ // For now, don't allow constant values. The immediate operand affects
+      // instruction layout, which affects patching.
+ valueAlloc = useRegisterAtStart(ins->value());
+ break;
+ case Scalar::Simd128:
+#ifdef ENABLE_WASM_SIMD
+ valueAlloc = useRegisterAtStart(ins->value());
+ break;
+#else
+ MOZ_CRASH("unexpected array type");
+#endif
+ case Scalar::Int64: {
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(ins->value());
+ auto* lir = new (alloc())
+ LWasmStoreI64(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
+ add(lir, ins);
+ return;
+ }
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ auto* lir = new (alloc())
+ LWasmStore(baseAlloc, valueAlloc, useRegisterAtStart(memoryBase));
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegisterAtStart(memoryBase), useRegisterAtStart(base),
+ useInt64FixedAtStart(ins->oldValue(), Register64(edx, eax)),
+ useInt64FixedAtStart(ins->newValue(), Register64(ecx, ebx)));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Register allocation:
+ //
+  // The output may not be used, but eax will be clobbered regardless,
+ // so pin the output to eax.
+ //
+ // oldval must be in a register.
+ //
+ // newval must be in a register. If the source is a byte array
+ // then newval must be a register that has a byte size: this must
+ // be ebx, ecx, or edx (eax is taken).
+ //
+ // Bug #1077036 describes some optimization opportunities.
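+  //
+  // Illustrative sketch (not the exact emitted code) for a byte array:
+  //
+  //   movl newval, ebx         ; ebx has a byte form, bl
+  //   movl oldval, eax
+  //   lock cmpxchgb bl, mem    ; compares against al
+  //
+  // which is why newval is pinned to ebx below when byteArray is true.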
+
+ const LAllocation oldval = useRegister(ins->oldValue());
+ const LAllocation newval =
+ byteArray ? useFixed(ins->newValue(), ebx) : useRegister(ins->newValue());
+
+ LWasmCompareExchangeHeap* lir = new (alloc()) LWasmCompareExchangeHeap(
+ useRegister(base), oldval, newval, useRegister(memoryBase));
+
+ lir->setAddrTemp(temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64) {
+ MDefinition* base = ins->base();
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(memoryBase), useRegister(base),
+ useInt64Fixed(ins->value(), Register64(ecx, ebx)), ins->access());
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ const LAllocation base = useRegister(ins->base());
+ const LAllocation value = useRegister(ins->value());
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(base, value, useRegister(memoryBase));
+
+ lir->setAddrTemp(temp());
+ if (byteSize(ins->access().type()) == 1) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ } else {
+ define(lir, ins);
+ }
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* memoryBase = ins->memoryBase();
+ MOZ_ASSERT(memoryBase->type() == MIRType::Pointer);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc())
+ LWasmAtomicBinopI64(useRegister(memoryBase), useRegister(base),
+ useInt64Fixed(ins->value(), Register64(ecx, ebx)),
+ ins->access(), ins->operation());
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ return;
+ }
+
+ MOZ_ASSERT(ins->access().type() < Scalar::Float32);
+
+ bool byteArray = byteSize(ins->access().type()) == 1;
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We'll emit a single instruction: LOCK ADD, LOCK SUB, LOCK AND,
+ // LOCK OR, or LOCK XOR. These can all take an immediate.
+
+ if (!ins->hasUses()) {
+ LAllocation value;
+ if (byteArray && !ins->value()->isConstant()) {
+ value = useFixed(ins->value(), ebx);
+ } else {
+ value = useRegisterOrConstant(ins->value());
+ }
+ LWasmAtomicBinopHeapForEffect* lir =
+ new (alloc()) LWasmAtomicBinopHeapForEffect(useRegister(base), value,
+ LDefinition::BogusTemp(),
+ useRegister(memoryBase));
+ lir->setAddrTemp(temp());
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+ //
+ // For ADD and SUB we'll use XADD:
+ //
+ // movl value, output
+ // lock xaddl output, mem
+ //
+  // For the 8-bit variants, XADD needs a byte register for the output
+  // only; we can still set up with movl, so just pin the output to eax
+  // (or ebx / ecx / edx).
+ //
+ // For AND/OR/XOR we need to use a CMPXCHG loop:
+ //
+ // movl *mem, eax
+ // L: mov eax, temp
+ // andl value, temp
+ // lock cmpxchg temp, mem ; reads eax also
+ // jnz L
+ // ; result in eax
+ //
+  // Note the placement of L: cmpxchg updates eax with *mem if *mem does
+  // not have the expected value, so reloading eax at the top of the loop
+  // would be redundant.
+ //
+ // We want to fix eax as the output. We also need a temp for
+ // the intermediate value.
+ //
+ // For the 8-bit variants the temp must have a byte register.
+ //
+ // There are optimization opportunities:
+ // - better 8-bit register allocation and instruction selection, Bug
+ // #1077036.
+
+ bool bitOp = !(ins->operation() == AtomicFetchAddOp ||
+ ins->operation() == AtomicFetchSubOp);
+ LDefinition tempDef = LDefinition::BogusTemp();
+ LAllocation value;
+
+ if (byteArray) {
+ value = useFixed(ins->value(), ebx);
+ if (bitOp) {
+ tempDef = tempFixed(ecx);
+ }
+ } else if (bitOp || ins->value()->isConstant()) {
+ value = useRegisterOrConstant(ins->value());
+ if (bitOp) {
+ tempDef = temp();
+ }
+ } else {
+ value = useRegisterAtStart(ins->value());
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), value, tempDef,
+ LDefinition::BogusTemp(), useRegister(memoryBase));
+
+ lir->setAddrTemp(temp());
+ if (byteArray || bitOp) {
+ defineFixed(lir, ins, LAllocation(AnyRegister(eax)));
+ } else if (ins->value()->isConstant()) {
+ define(lir, ins);
+ } else {
+ defineReuseInput(lir, ins, LWasmAtomicBinopHeap::valueOp);
+ }
+}
+
+void LIRGeneratorX86::lowerDivI64(MDiv* div) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGeneratorX86::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_ASSERT(div->lhs()->type() == div->rhs()->type());
+ MOZ_ASSERT(IsNumberType(div->type()));
+
+ MOZ_ASSERT(div->type() == MIRType::Int64);
+
+ if (div->isUnsigned()) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
+ useFixedAtStart(div->instance(), InstanceReg));
+ defineReturn(lir, div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useInt64FixedAtStart(div->lhs(), Register64(eax, ebx)),
+ useInt64FixedAtStart(div->rhs(), Register64(ecx, edx)),
+ useFixedAtStart(div->instance(), InstanceReg));
+ defineReturn(lir, div);
+}
+
+void LIRGeneratorX86::lowerModI64(MMod* mod) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGeneratorX86::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MDefinition* lhs = mod->lhs();
+ MDefinition* rhs = mod->rhs();
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(IsNumberType(mod->type()));
+
+  MOZ_ASSERT(mod->type() == MIRType::Int64);
+
+ if (mod->isUnsigned()) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
+ useInt64FixedAtStart(rhs, Register64(ecx, edx)),
+ useFixedAtStart(mod->instance(), InstanceReg));
+ defineReturn(lir, mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useInt64FixedAtStart(lhs, Register64(eax, ebx)),
+ useInt64FixedAtStart(rhs, Register64(ecx, edx)),
+ useFixedAtStart(mod->instance(), InstanceReg));
+ defineReturn(lir, mod);
+}
+
+void LIRGeneratorX86::lowerUDivI64(MDiv* div) {
+ MOZ_CRASH("We use MWasmBuiltinDivI64 instead.");
+}
+
+void LIRGeneratorX86::lowerUModI64(MMod* mod) {
+ MOZ_CRASH("We use MWasmBuiltinModI64 instead.");
+}
+
+void LIRGeneratorX86::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorX86::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), tempFixed(eax), temp());
+ defineFixed(lir, ins, LAllocation(AnyRegister(edx)));
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+  // Due to the lack of registers on x86, we reuse the string register as a
+  // temporary. As a result we only need two temporary registers and pass a
+  // bogus temporary as the fifth argument.
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), LDefinition::BogusTemp(),
+ tempByteOpRegister());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ LDefinition temp = tempDouble();
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd), temp), ins);
+}
+
+void LIRGeneratorX86::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ LDefinition maybeTemp =
+ (ins->isUnsigned() &&
+ ((ins->type() == MIRType::Double && AssemblerX86Shared::HasSSE3()) ||
+ ins->type() == MIRType::Float32))
+ ? temp()
+ : LDefinition::BogusTemp();
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd), maybeTemp),
+ ins);
+}
+
+void LIRGeneratorX86::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ if (ins->isUnsigned()) {
+ defineInt64(new (alloc())
+ LExtendInt32ToInt64(useRegisterAtStart(ins->input())),
+ ins);
+ } else {
+ LExtendInt32ToInt64* lir =
+ new (alloc()) LExtendInt32ToInt64(useFixedAtStart(ins->input(), eax));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+ }
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ // Here we'll end up using cdq which requires input and output in (edx,eax).
+ LSignExtendInt64* lir = new (alloc()) LSignExtendInt64(
+ useInt64FixedAtStart(ins->input(), Register64(edx, eax)));
+ defineInt64Fixed(lir, ins,
+ LInt64Allocation(LAllocation(AnyRegister(edx)),
+ LAllocation(AnyRegister(eax))));
+}
+
+// On x86 we specialize only the cases where the compare is {U,}Int32 and
+// the select is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useAny(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useAny(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
diff --git a/js/src/jit/x86/Lowering-x86.h b/js/src/jit/x86/Lowering-x86.h
new file mode 100644
index 0000000000..b82109981e
--- /dev/null
+++ b/js/src/jit/x86/Lowering-x86.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_Lowering_x86_h
+#define jit_x86_Lowering_x86_h
+
+#include "jit/x86-shared/Lowering-x86-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorX86 : public LIRGeneratorX86Shared {
+ protected:
+ LIRGeneratorX86(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorX86Shared(gen, graph, lirGraph) {}
+
+ // Returns a box allocation with type set to reg1 and payload set to reg2.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ // It's a trap! On x86, the 1-byte store can only use one of
+ // {al,bl,cl,dl,ah,bh,ch,dh}. That means if the register allocator
+ // gives us one of {edi,esi,ebp,esp}, we're out of luck. (The formatter
+ // will assert on us.) Ideally, we'd just ask the register allocator to
+ // give us one of {al,bl,cl,dl}. For now, just useFixed(al).
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ inline LDefinition tempToUnbox() { return LDefinition::BogusTemp(); }
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+
+ void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+
+ void lowerPhi(MPhi* phi);
+
+ public:
+ static bool allowTypedElementHoleCheck() { return true; }
+};
+
+typedef LIRGeneratorX86 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_Lowering_x86_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86-inl.h b/js/src/jit/x86/MacroAssembler-x86-inl.h
new file mode 100644
index 0000000000..66050cc1b5
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86-inl.h
@@ -0,0 +1,1386 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_inl_h
+#define jit_x86_MacroAssembler_x86_inl_h
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "jit/x86-shared/MacroAssembler-x86-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+ move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movl(src.low, dest.low);
+ movl(src.high, dest.high);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ ScratchDoubleScope scratch(*this);
+
+ if (Assembler::HasSSE41()) {
+ vmovd(src, dest.low);
+ vpextrd(1, src, dest.high);
+ } else {
+ vmovd(src, dest.low);
+ moveDouble(src, scratch);
+ vpsrldq(Imm32(4), scratch, scratch);
+ vmovd(scratch, dest.high);
+ }
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ if (Assembler::HasSSE41()) {
+ vmovd(src.low, dest);
+ vpinsrd(1, src.high, dest, dest);
+ } else {
+ ScratchDoubleScope fpscratch(*this);
+ vmovd(src.low, dest);
+ vmovd(src.high, fpscratch);
+ vunpcklps(fpscratch, dest, dest);
+ }
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ if (src.low != dest) {
+ movl(src.low, dest);
+ }
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ movl(src, dest.low);
+ }
+ movl(Imm32(0), dest.high);
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move8SignExtend(src, dest.low);
+ if (dest.low == eax && dest.high == edx) {
+ masm.cdq();
+ } else {
+ movl(dest.low, dest.high);
+ sarl(Imm32(31), dest.high);
+ }
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move16SignExtend(src, dest.low);
+ if (dest.low == eax && dest.high == edx) {
+ masm.cdq();
+ } else {
+ movl(dest.low, dest.high);
+ sarl(Imm32(31), dest.high);
+ }
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ if (src != dest.low) {
+ movl(src, dest.low);
+ }
+ if (dest.low == eax && dest.high == edx) {
+ masm.cdq();
+ } else {
+ movl(dest.low, dest.high);
+ sarl(Imm32(31), dest.high);
+ }
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ movl(src, dest);
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ movl(src, dest);
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+
+// ===============================================================
+// Logical functions
+
+void MacroAssembler::notPtr(Register reg) { notl(reg); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { andl(src, dest); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { andl(imm, dest); }
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != int32_t(0xFFFFFFFF)) {
+ andl(imm.low(), dest.low);
+ }
+ if (imm.hi().value != int32_t(0xFFFFFFFF)) {
+ andl(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != 0) {
+ orl(imm.low(), dest.low);
+ }
+ if (imm.hi().value != 0) {
+ orl(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ if (imm.low().value != 0) {
+ xorl(imm.low(), dest.low);
+ }
+ if (imm.hi().value != 0) {
+ xorl(imm.hi(), dest.high);
+ }
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { orl(src, dest); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { orl(imm, dest); }
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ andl(src.low, dest.low);
+ andl(src.high, dest.high);
+}
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ orl(src.low, dest.low);
+ orl(src.high, dest.high);
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ xorl(src.low, dest.low);
+ xorl(src.high, dest.high);
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { xorl(src, dest); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { xorl(imm, dest); }
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap64(Register64 reg) {
+ bswapl(reg.low);
+ bswapl(reg.high);
+ xchgl(reg.low, reg.high);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) { addl(src, dest); }
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { addl(imm, dest); }
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ addl(Imm32(imm.value), dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ addl(imm, Operand(dest));
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const AbsoluteAddress& dest) {
+ addl(imm, Operand(dest));
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ addl(Operand(src), dest);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addl(src.low, dest.low);
+ adcl(src.high, dest.high);
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ addl(imm, dest.low);
+ adcl(Imm32(0), dest.high);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ if (imm.low().value == 0) {
+ addl(imm.hi(), dest.high);
+ return;
+ }
+ addl(imm.low(), dest.low);
+ adcl(imm.hi(), dest.high);
+}
+
+void MacroAssembler::addConstantDouble(double d, FloatRegister dest) {
+ Double* dbl = getDouble(d);
+ if (!dbl) {
+ return;
+ }
+ masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ moveStackPtrTo(dest);
+ addlWithPatch(Imm32(0), dest);
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ patchAddl(offset, -imm.value);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) { subl(src, dest); }
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ subl(src, Operand(dest));
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) { subl(imm, dest); }
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ subl(Operand(addr), dest);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ subl(src.low, dest.low);
+ sbbl(src.high, dest.high);
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ if (imm.low().value == 0) {
+ subl(imm.hi(), dest.high);
+ return;
+ }
+ subl(imm.low(), dest.low);
+ sbbl(imm.hi(), dest.high);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ // Preserve edx:eax, unless they're the destination register.
+ if (edx != dest) {
+ push(edx);
+ }
+ if (eax != dest) {
+ push(eax);
+ }
+
+ if (src != eax) {
+ // Compute edx:eax := eax ∗ src
+ movl(imm, eax);
+ mull(src);
+ } else {
+ // Compute edx:eax := eax ∗ edx
+ movl(imm, edx);
+ mull(edx);
+ }
+
+ // Move the high word from edx into |dest|.
+ if (edx != dest) {
+ movl(edx, dest);
+ }
+
+ // Restore edx:eax.
+ if (eax != dest) {
+ pop(eax);
+ }
+ if (edx != dest) {
+ pop(edx);
+ }
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ imull(rhs, srcDest);
+}
+
+// Note: this function clobbers eax and edx.
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ // LOW32 = LOW(LOW(dest) * LOW(imm));
+ // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+ // + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+ // + HIGH(LOW(dest) * LOW(imm)) [carry]
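+  //
+  // This is 32x32->64 schoolbook multiplication: only the low partial
+  // product needs its full 64-bit result; the two cross terms are truncated
+  // to 32 bits and the high*high term drops out of the low 64 bits entirely.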
+
+ MOZ_ASSERT(dest.low != eax && dest.low != edx);
+ MOZ_ASSERT(dest.high != eax && dest.high != edx);
+
+ // HIGH(dest) = LOW(HIGH(dest) * LOW(imm));
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ imull(edx, dest.high);
+
+ // edx:eax = LOW(dest) * LOW(imm);
+ movl(Imm32(imm.value & 0xFFFFFFFFL), edx);
+ movl(dest.low, eax);
+ mull(edx);
+
+ // HIGH(dest) += edx;
+ addl(edx, dest.high);
+
+ // HIGH(dest) += LOW(LOW(dest) * HIGH(imm));
+ if (((imm.value >> 32) & 0xFFFFFFFFL) == 5) {
+ leal(Operand(dest.low, dest.low, TimesFour), edx);
+ } else {
+ MOZ_CRASH("Unsupported imm");
+ }
+ addl(edx, dest.high);
+
+ // LOW(dest) = eax;
+ movl(eax, dest.low);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(temp != edx && temp != eax);
+
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(imm.low(), dest.high); // (2)
+ imull(imm.hi(), temp); // (3)
+ addl(dest.high, temp);
+ movl(imm.low(), dest.high);
+ mull(dest.high /*, dest.low*/); // (4) + (1) output in edx:eax
+ // (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ // LOW32 = LOW(LOW(dest) * LOW(src)); (1)
+ // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits] (2)
+ // + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits] (3)
+ // + HIGH(LOW(dest) * LOW(src)) [carry] (4)
+
+ MOZ_ASSERT(dest == Register64(edx, eax));
+ MOZ_ASSERT(src != Register64(edx, eax) && src != Register64(eax, edx));
+
+ // Make sure the rhs.high isn't the dest.high register anymore.
+ // This saves us from doing other register moves.
+ movl(dest.low, temp);
+
+ // Compute mul64
+ imull(src.low, dest.high); // (2)
+ imull(src.high, temp); // (3)
+ addl(dest.high, temp);
+ movl(src.low, dest.high);
+ mull(dest.high /*, dest.low*/); // (4) + (1) output in edx:eax
+ // (dest_hi:dest_lo)
+ addl(temp, dest.high);
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ lea(Operand(src, src, TimesTwo), dest);
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ movl(imm, temp);
+ vmulsd(Operand(temp, 0), dest, dest);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
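+  // Add 1 to the low word; if it wrapped around to zero, carry 1 into the
+  // high word.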
+ addl(Imm32(1), Operand(dest));
+ Label noOverflow;
+ j(NonZero, &noOverflow);
+ addl(Imm32(1), Operand(dest.offset(4)));
+ bind(&noOverflow);
+}
+
+void MacroAssembler::neg64(Register64 reg) {
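+  // Two's complement negation: negl sets the carry flag if the low word is
+  // nonzero; adcl folds that borrow into the high word before negating it.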
+ negl(reg.low);
+ adcl(Imm32(0), reg.high);
+ negl(reg.high);
+}
+
+void MacroAssembler::negPtr(Register reg) { negl(reg); }
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shll(imm, dest);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shlxl(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == ecx);
+ shll_cl(srcDest);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shldl(imm, dest.low, dest.high);
+ shll(imm, dest.low);
+ return;
+ }
+
+ mov(dest.low, dest.high);
+ shll(Imm32(imm.value & 0x1f), dest.high);
+ xorl(dest.low, dest.low);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 srcDest) {
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shldl_cl(srcDest.low, srcDest.high);
+ shll_cl(srcDest.low);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.low, srcDest.high);
+ xorl(srcDest.low, srcDest.low);
+
+ bind(&done);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ shrl(imm, dest);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register srcDest) {
+ if (HasBMI2()) {
+ shrxl(srcDest, shift, srcDest);
+ return;
+ }
+ MOZ_ASSERT(shift == ecx);
+ shrl_cl(srcDest);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ shrl(imm, dest.high);
+ return;
+ }
+
+ movl(dest.high, dest.low);
+ shrl(Imm32(imm.value & 0x1f), dest.low);
+ xorl(dest.high, dest.high);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 srcDest) {
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ shrl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ xorl(srcDest.high, srcDest.high);
+
+ bind(&done);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+ sarl(imm, dest);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ if (imm.value < 32) {
+ shrdl(imm, dest.high, dest.low);
+ sarl(imm, dest.high);
+ return;
+ }
+
+ movl(dest.high, dest.low);
+ sarl(Imm32(imm.value & 0x1f), dest.low);
+ sarl(Imm32(0x1f), dest.high);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 srcDest) {
+ MOZ_ASSERT(shift == ecx);
+ MOZ_ASSERT(srcDest.low != ecx && srcDest.high != ecx);
+
+ Label done;
+
+ shrdl_cl(srcDest.high, srcDest.low);
+ sarl_cl(srcDest.high);
+
+ testl(Imm32(0x20), ecx);
+ j(Condition::Equal, &done);
+
+ // 32 - 63 bit shift
+ movl(srcDest.high, srcDest.low);
+ sarl(Imm32(0x1f), srcDest.high);
+
+ bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
+ movl(dest.high, temp);
+ shldl_cl(dest.low, dest.high);
+ shldl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+ MOZ_ASSERT(count == ecx, "defineFixed(ecx)");
+
+ Label done;
+
+ movl(dest.high, temp);
+ shrdl_cl(dest.low, dest.high);
+ shrdl_cl(temp, dest.low);
+
+ testl(Imm32(0x20), count);
+ j(Condition::Equal, &done);
+ xchgl(dest.high, dest.low);
+
+ bind(&done);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shldl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shldl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20)) {
+ xchgl(dest.high, dest.low);
+ }
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(src == dest, "defineReuseInput");
+
+ int32_t amount = count.value & 0x3f;
+ if ((amount & 0x1f) != 0) {
+ movl(dest.high, temp);
+ shrdl(Imm32(amount & 0x1f), dest.low, dest.high);
+ shrdl(Imm32(amount & 0x1f), temp, dest.low);
+ }
+
+ if (!!(amount & 0x20)) {
+ xchgl(dest.high, dest.low);
+ }
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ if (AssemblerX86Shared::HasLZCNT()) {
+ Label nonzero, zero;
+
+ testl(src.high, src.high);
+ j(Assembler::Zero, &zero);
+
+ lzcntl(src.high, dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ lzcntl(src.low, dest);
+ addl(Imm32(32), dest);
+
+ bind(&nonzero);
+ return;
+ }
+
+ // Because |dest| may be equal to |src.low|, we rely on BSR not modifying its
+ // output when the input is zero. AMD ISA documents BSR not modifying the
+ // output and current Intel CPUs follow AMD.
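+  //
+  // bsrl yields the index of the highest set bit; for a nonzero 64-bit
+  // input, clz = 63 - index, which the final xorl with 0x3F computes.
+  // Loading 0x7F for an all-zero input makes that xor produce 64.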
+
+ Label nonzero, zero;
+
+ bsrl(src.high, dest);
+ j(Assembler::Zero, &zero);
+ orl(Imm32(32), dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ bsrl(src.low, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(0x7F), dest);
+
+ bind(&nonzero);
+ xorl(Imm32(0x3F), dest);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ if (AssemblerX86Shared::HasBMI1()) {
+ Label nonzero, zero;
+
+ testl(src.low, src.low);
+ j(Assembler::Zero, &zero);
+
+ tzcntl(src.low, dest);
+ jump(&nonzero);
+
+ bind(&zero);
+ tzcntl(src.high, dest);
+ addl(Imm32(32), dest);
+
+ bind(&nonzero);
+ return;
+ }
+
+ // Because |dest| may be equal to |src.low|, we rely on BSF not modifying its
+ // output when the input is zero. AMD ISA documents BSF not modifying the
+ // output and current Intel CPUs follow AMD.
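+  //
+  // bsfl yields the index of the lowest set bit. If the low word is zero,
+  // the bit found in the high word sits at index 32 + bsf(high), hence the
+  // final orl; a fully zero input takes the explicit movl of 64.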
+
+ Label done, nonzero;
+
+ bsfl(src.low, dest);
+ j(Assembler::NonZero, &done);
+ bsfl(src.high, dest);
+ j(Assembler::NonZero, &nonzero);
+ movl(Imm32(64), dest);
+ jump(&done);
+
+ bind(&nonzero);
+ orl(Imm32(32), dest);
+
+ bind(&done);
+}
+
+void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp) {
+ // The tmp register is only needed if there is no native POPCNT.
+
+ MOZ_ASSERT(src.low != tmp && src.high != tmp);
+ MOZ_ASSERT(dest.low != tmp && dest.high != tmp);
+
+ if (dest.low != src.high) {
+ popcnt32(src.low, dest.low, tmp);
+ popcnt32(src.high, dest.high, tmp);
+ } else {
+ MOZ_ASSERT(dest.high != src.high);
+ popcnt32(src.low, dest.high, tmp);
+ popcnt32(src.high, dest.low, tmp);
+ }
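+  // Fold the two 32-bit counts into the low word and zero the high word.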
+ addl(dest.high, dest.low);
+ xorl(dest.high, dest.high);
+}
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ Label success, done;
+
+ branch64(cond, lhs, rhs, &success);
+ move32(Imm32(0), dest);
+ jump(&done);
+ bind(&success);
+ move32(Imm32(1), dest);
+ bind(&done);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ cmpPtr(lhs, rhs);
+ emitSet(cond, dest);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ cmp32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress lhs,
+ Imm32 rhs, Label* label) {
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), fail);
+ branch32(Assembler::Equal, lhs.high, val.hi(), success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, val.low(), success);
+ branch32(Assembler::NotEqual, lhs.high, val.hi(), success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ cmp32(lhs.high, val.hi());
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, val.low());
+ j(cond3, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough) {
+ bind(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ bool fallthrough = false;
+ Label fallthroughLabel;
+
+ if (!fail) {
+ fail = &fallthroughLabel;
+ fallthrough = true;
+ }
+
+ switch (cond) {
+ case Assembler::Equal:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, fail);
+ branch32(Assembler::Equal, lhs.high, rhs.high, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::NotEqual:
+ branch32(Assembler::NotEqual, lhs.low, rhs.low, success);
+ branch32(Assembler::NotEqual, lhs.high, rhs.high, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual: {
+ Assembler::Condition cond1 = Assembler::ConditionWithoutEqual(cond);
+ Assembler::Condition cond2 =
+ Assembler::ConditionWithoutEqual(Assembler::InvertCondition(cond));
+ Assembler::Condition cond3 = Assembler::UnsignedCondition(cond);
+
+ cmp32(lhs.high, rhs.high);
+ j(cond1, success);
+ j(cond2, fail);
+ cmp32(lhs.low, rhs.low);
+ j(cond3, success);
+ if (!fallthrough) {
+ jump(fail);
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Condition code not supported");
+ break;
+ }
+
+ if (fallthrough) {
+ bind(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, val.firstHalf(), label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)),
+ val.secondHalf(), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ Label done;
+
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, rhs.low, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, rhs.low, label);
+ }
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), rhs.high,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ Label done;
+
+ load32(rhs, scratch);
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, lhs, scratch, &done);
+ } else {
+ branch32(Assembler::NotEqual, lhs, scratch, label);
+ }
+
+ load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+ branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch,
+ label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ branchPtrImpl(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ cmpl(rhs, lhs);
+ j(cond, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchTruncateFloat32ToPtr(FloatRegister src,
+ Register dest, Label* fail) {
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ branchTruncateFloat32ToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ vcvttss2si(src, dest);
+
+ // vcvttss2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void MacroAssembler::branchTruncateDoubleToPtr(FloatRegister src, Register dest,
+ Label* fail) {
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+  // TODO: X64 supports integers up to 64 bits. Here we only support 32 bits
+  // before failing. Implementing this for x86 might give an x86 Kraken win.
+ branchTruncateDoubleToInt32(src, dest, fail);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ vcvttsd2si(src, dest);
+
+ // vcvttsd2si returns 0x80000000 on failure. Test for it by
+ // subtracting 1 and testing overflow (this permits the use of a
+ // smaller immediate field).
+ cmp32(dest, Imm32(1));
+ j(Assembler::Overflow, fail);
+}
+
+void MacroAssembler::branchAdd64(Condition cond, Imm64 imm, Register64 dest,
+ Label* label) {
+ add64(imm, dest);
+ j(cond, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ test32(Operand(lhs), rhs);
+ j(cond, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ if (cond == Assembler::Zero || cond == Assembler::NonZero) {
+ MOZ_ASSERT(lhs.low == rhs.low);
+ MOZ_ASSERT(lhs.high == rhs.high);
+ movl(lhs.low, temp);
+ orl(lhs.high, temp);
+ branchTestPtr(cond, temp, temp, label);
+ } else if (cond == Assembler::Signed || cond == Assembler::NotSigned) {
+ branchTest32(cond, lhs.high, rhs.high, label);
+ } else {
+ MOZ_CRASH("Unsupported condition");
+ }
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool truthy,
+ const ValueOperand& value,
+ Label* label) {
+ test32(value.payloadReg(), value.payloadReg());
+ j(truthy ? NonZero : Zero, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label notMagic;
+ if (cond == Assembler::Equal) {
+ branchTestMagic(Assembler::NotEqual, valaddr, &notMagic);
+ } else {
+ branchTestMagic(Assembler::NotEqual, valaddr, label);
+ }
+
+ branch32(cond, ToPayload(valaddr), Imm32(why), label);
+ bind(&notMagic);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label notSameValue;
+ if (cond == Assembler::Equal) {
+ branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), &notSameValue);
+ } else {
+ branch32(Assembler::NotEqual, ToType(lhs), rhs.typeReg(), label);
+ }
+
+ branch32(cond, ToPayload(lhs), rhs.payloadReg(), label);
+ bind(&notSameValue);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ jmp(Operand(addr));
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ cmp32(lhs, rhs);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ cmp32Move32(cond, lhs, rhs, src, dest);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_ASSERT(cond == Assembler::Zero || cond == Assembler::NonZero);
+ test32(addr, mask);
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ cmovCCl(cond, Operand(src), dest);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Operand& length,
+ Register maybeScratch,
+ Label* failure) {
+ Label failurePopValue;
+ bool pushedValue = false;
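+  // With Spectre index masking enabled, keep a zero at hand (on the stack
+  // if no scratch register was provided) and conditionally move it over
+  // |index| when the bounds check fails, so that a misspeculated
+  // out-of-bounds index is clamped to zero.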
+ if (JitOptions.spectreIndexMasking) {
+ if (maybeScratch == InvalidReg) {
+ push(Imm32(0));
+ pushedValue = true;
+ } else {
+ move32(Imm32(0), maybeScratch);
+ }
+ }
+
+ cmp32(index, length);
+ j(Assembler::AboveOrEqual, pushedValue ? &failurePopValue : failure);
+
+ if (JitOptions.spectreIndexMasking) {
+ if (maybeScratch == InvalidReg) {
+ Label done;
+ cmovCCl(Assembler::AboveOrEqual, Operand(StackPointer, 0), index);
+ lea(Operand(StackPointer, sizeof(void*)), StackPointer);
+ jump(&done);
+
+ bind(&failurePopValue);
+ lea(Operand(StackPointer, sizeof(void*)), StackPointer);
+ jump(failure);
+
+ bind(&done);
+ } else {
+ cmovCCl(Assembler::AboveOrEqual, maybeScratch, index);
+ }
+ }
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(length != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ spectreBoundsCheck32(index, Operand(length), maybeScratch, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_ASSERT(index != length.base);
+ MOZ_ASSERT(length.base != maybeScratch);
+ MOZ_ASSERT(index != maybeScratch);
+
+ spectreBoundsCheck32(index, Operand(length), maybeScratch, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ spectreBoundsCheck32(index, length, maybeScratch, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ spectreBoundsCheck32(index, length, maybeScratch, failure);
+}
+
+// ========================================================================
+// SIMD
+
+void MacroAssembler::extractLaneInt64x2(uint32_t lane, FloatRegister src,
+ Register64 dest) {
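+  // A 64-bit lane k occupies the two 32-bit lanes 2k and 2k+1; lane 0's
+  // low word can use the cheaper vmovd instead of vpextrd.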
+ if (lane == 0) {
+ vmovd(src, dest.low);
+ } else {
+ vpextrd(2 * lane, src, dest.low);
+ }
+ vpextrd(2 * lane + 1, src, dest.high);
+}
+
+void MacroAssembler::replaceLaneInt64x2(unsigned lane, Register64 rhs,
+ FloatRegister lhsDest) {
+ vpinsrd(2 * lane, rhs.low, lhsDest, lhsDest);
+ vpinsrd(2 * lane + 1, rhs.high, lhsDest, lhsDest);
+}
+
+void MacroAssembler::replaceLaneInt64x2(unsigned lane, FloatRegister lhs,
+ Register64 rhs, FloatRegister dest) {
+ vpinsrd(2 * lane, rhs.low, lhs, dest);
+ vpinsrd(2 * lane + 1, rhs.high, dest, dest);
+}
+
+void MacroAssembler::splatX2(Register64 src, FloatRegister dest) {
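+  // Assemble the 64-bit value in the low quadword, then duplicate it into
+  // the high quadword with vpunpcklqdq.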
+ vmovd(src.low, dest);
+ vpinsrd(1, src.high, dest, dest);
+ vpunpcklqdq(dest, dest, dest);
+}
+
+// ========================================================================
+// Truncate floating point.
+
+void MacroAssembler::truncateFloat32ToUInt64(Address src, Address dest,
+ Register temp,
+ FloatRegister floatTemp) {
+ Label done;
+
+ loadFloat32(src, floatTemp);
+
+ truncateFloat32ToInt64(src, dest, temp);
+
+  // For unsigned conversion, inputs above INT64_MAX (i.e. with the sign bit
+  // of the truncated result set) need to be handled separately.
+ load32(HighWord(dest), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
+ storeFloat32(floatTemp, dest);
+  loadConstantFloat32(float(int64_t(0x8000000000000000)), floatTemp);
+ vaddss(Operand(dest), floatTemp, floatTemp);
+ storeFloat32(floatTemp, dest);
+ truncateFloat32ToInt64(dest, dest, temp);
+
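+  // The biased truncation above produced a value in [0, 2^63); setting the
+  // sign bit of the high word adds the 2^63 back.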
+ load32(HighWord(dest), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, HighWord(dest));
+
+ bind(&done);
+}
+
+void MacroAssembler::truncateDoubleToUInt64(Address src, Address dest,
+ Register temp,
+ FloatRegister floatTemp) {
+ Label done;
+
+ loadDouble(src, floatTemp);
+
+ truncateDoubleToInt64(src, dest, temp);
+
+  // For unsigned conversion, values in the range (INT64_MAX, UINT64_MAX]
+  // need to be handled separately: the signed truncation above overflows
+  // for them.
+ load32(HighWord(dest), temp);
+ branch32(Assembler::Condition::NotSigned, temp, Imm32(0), &done);
+
+ // Move the value inside INT64 range.
+ storeDouble(floatTemp, dest);
+ loadConstantDouble(double(int64_t(0x8000000000000000)), floatTemp);
+ vaddsd(Operand(dest), floatTemp, floatTemp);
+ storeDouble(floatTemp, dest);
+ truncateDoubleToInt64(dest, dest, temp);
+
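+  // Undo the 2^63 bias by setting bit 63 of the truncated result.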
+ load32(HighWord(dest), temp);
+ orl(Imm32(0x80000000), temp);
+ store32(temp, HighWord(dest));
+
+ bind(&done);
+}
+
+template <typename T>
+void MacroAssemblerX86::fallibleUnboxPtrImpl(const T& src, Register dest,
+ JSValueType type, Label* fail) {
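+  // Guard on the type tag; with the nunbox encoding the 32-bit payload word
+  // is the pointer itself, so unboxNonDouble is a plain move or load.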
+ switch (type) {
+ case JSVAL_TYPE_OBJECT:
+ asMasm().branchTestObject(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_STRING:
+ asMasm().branchTestString(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_SYMBOL:
+ asMasm().branchTestSymbol(Assembler::NotEqual, src, fail);
+ break;
+ case JSVAL_TYPE_BIGINT:
+ asMasm().branchTestBigInt(Assembler::NotEqual, src, fail);
+ break;
+ default:
+ MOZ_CRASH("Unexpected type");
+ }
+ unboxNonDouble(src, dest, type);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ fallibleUnboxPtrImpl(src, dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// Note: this function clobbers the source register.
+void MacroAssemblerX86::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ // src is [0, 2^32-1]
+ subl(Imm32(0x80000000), src);
+
+  // Now src is in [-2^31, 2^31-1], the int32 range, but not the same value.
+ convertInt32ToDouble(src, dest);
+
+  // dest is now a double in the int32 range; correct it by adding back
+  // 2^31 (2147483648.0).
+ asMasm().addConstantDouble(2147483648.0, dest);
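+  // Example: src = 0xFFFFFFFF becomes 0x7FFFFFFF (2147483647) after the
+  // subtraction; 2147483647.0 + 2147483648.0 = 4294967295.0, the original
+  // unsigned value.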
+}
+
+// Note: this function clobbers the source register.
+void MacroAssemblerX86::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ convertUInt32ToDouble(src, dest);
+ convertDoubleToFloat32(dest, dest);
+}
+
+void MacroAssemblerX86::unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.payloadReg(), dest.fpu());
+ jump(&end);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ if (src.payloadReg() != dest.gpr()) {
+ movl(src.payloadReg(), dest.gpr());
+ }
+ }
+}
+
+template <typename T>
+void MacroAssemblerX86::loadInt32OrDouble(const T& src, FloatRegister dest) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(ToPayload(src), dest);
+ jump(&end);
+ bind(&notInt32);
+ loadDouble(src, dest);
+ bind(&end);
+}
+
+template <typename T>
+void MacroAssemblerX86::loadUnboxedValue(const T& src, MIRType type,
+ AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(src, dest.fpu());
+ } else {
+ movl(Operand(src), dest.gpr());
+ }
+}
+
+// If source is a double, load it into dest. If source is int32,
+// convert it to double. Else, branch to failure.
+void MacroAssemblerX86::ensureDouble(const ValueOperand& source,
+ FloatRegister dest, Label* failure) {
+ Label isDouble, done;
+ asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);
+
+ convertInt32ToDouble(source.payloadReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_inl_h */
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
new file mode 100644
index 0000000000..a68d7b03b7
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -0,0 +1,1897 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/x86/MacroAssembler-x86.h"
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Casting.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "util/Memory.h"
+#include "vm/BigIntType.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/StringType.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest) {
+ if (maybeInlineDouble(d, dest)) {
+ return;
+ }
+ Double* dbl = getDouble(d);
+ if (!dbl) {
+ return;
+ }
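+  // Emit the load against a placeholder (nullptr) address and record the
+  // use; finish() later emits the constant pool and binds each use to the
+  // pooled value via a code label.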
+ masm.vmovsd_mr(nullptr, dest.encoding());
+ propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest) {
+ if (maybeInlineFloat(f, dest)) {
+ return;
+ }
+ Float* flt = getFloat(f);
+ if (!flt) {
+ return;
+ }
+ masm.vmovss_mr(nullptr, dest.encoding());
+ propagateOOM(flt->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::loadConstantSimd128Int(const SimdConstant& v,
+ FloatRegister dest) {
+ if (maybeInlineSimd128Int(v, dest)) {
+ return;
+ }
+ SimdData* i4 = getSimdData(v);
+ if (!i4) {
+ return;
+ }
+ masm.vmovdqa_mr(nullptr, dest.encoding());
+ propagateOOM(i4->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::loadConstantSimd128Float(const SimdConstant& v,
+ FloatRegister dest) {
+ if (maybeInlineSimd128Float(v, dest)) {
+ return;
+ }
+ SimdData* f4 = getSimdData(v);
+ if (!f4) {
+ return;
+ }
+ masm.vmovaps_mr(nullptr, dest.encoding());
+ propagateOOM(f4->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::vpPatchOpSimd128(
+ const SimdConstant& v, FloatRegister src, FloatRegister dest,
+ void (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address, X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
+ SimdData* val = getSimdData(v);
+ if (!val) {
+ return;
+ }
+ (masm.*op)(nullptr, src.encoding(), dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(masm.size())));
+}
+
+void MacroAssemblerX86::vpPatchOpSimd128(
+ const SimdConstant& v, FloatRegister src, FloatRegister dest,
+ size_t (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address, X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
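+  // This overload is for operations that encode trailing bytes (such as an
+  // immediate) after the address field: the callee returns the distance
+  // from the end of the instruction back to the patchable address.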
+ SimdData* val = getSimdData(v);
+ if (!val) {
+ return;
+ }
+ size_t patchOffsetFromEnd =
+ (masm.*op)(nullptr, src.encoding(), dest.encoding());
+ propagateOOM(val->uses.append(CodeOffset(masm.size() - patchOffsetFromEnd)));
+}
+
+void MacroAssemblerX86::vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddb_mr);
+}
+
+void MacroAssemblerX86::vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddw_mr);
+}
+
+void MacroAssemblerX86::vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddd_mr);
+}
+
+void MacroAssemblerX86::vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddq_mr);
+}
+
+void MacroAssemblerX86::vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubb_mr);
+}
+
+void MacroAssemblerX86::vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubw_mr);
+}
+
+void MacroAssemblerX86::vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubd_mr);
+}
+
+void MacroAssemblerX86::vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubq_mr);
+}
+
+void MacroAssemblerX86::vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmullw_mr);
+}
+
+void MacroAssemblerX86::vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmulld_mr);
+}
+
+void MacroAssemblerX86::vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddsb_mr);
+}
+
+void MacroAssemblerX86::vpaddusbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddusb_mr);
+}
+
+void MacroAssemblerX86::vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddsw_mr);
+}
+
+void MacroAssemblerX86::vpadduswSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpaddusw_mr);
+}
+
+void MacroAssemblerX86::vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubsb_mr);
+}
+
+void MacroAssemblerX86::vpsubusbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubusb_mr);
+}
+
+void MacroAssemblerX86::vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubsw_mr);
+}
+
+void MacroAssemblerX86::vpsubuswSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpsubusw_mr);
+}
+
+void MacroAssemblerX86::vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminsb_mr);
+}
+
+void MacroAssemblerX86::vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminub_mr);
+}
+
+void MacroAssemblerX86::vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminsw_mr);
+}
+
+void MacroAssemblerX86::vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminuw_mr);
+}
+
+void MacroAssemblerX86::vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminsd_mr);
+}
+
+void MacroAssemblerX86::vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpminud_mr);
+}
+
+void MacroAssemblerX86::vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxsb_mr);
+}
+
+void MacroAssemblerX86::vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxub_mr);
+}
+
+void MacroAssemblerX86::vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxsw_mr);
+}
+
+void MacroAssemblerX86::vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxuw_mr);
+}
+
+void MacroAssemblerX86::vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxsd_mr);
+}
+
+void MacroAssemblerX86::vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaxud_mr);
+}
+
+void MacroAssemblerX86::vpandSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpand_mr);
+}
+
+void MacroAssemblerX86::vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpxor_mr);
+}
+
+void MacroAssemblerX86::vporSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpor_mr);
+}
+
+void MacroAssemblerX86::vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vaddps_mr);
+}
+
+void MacroAssemblerX86::vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vaddpd_mr);
+}
+
+void MacroAssemblerX86::vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vsubps_mr);
+}
+
+void MacroAssemblerX86::vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vsubpd_mr);
+}
+
+void MacroAssemblerX86::vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vdivps_mr);
+}
+
+void MacroAssemblerX86::vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vdivpd_mr);
+}
+
+void MacroAssemblerX86::vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vmulps_mr);
+}
+
+void MacroAssemblerX86::vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vmulpd_mr);
+}
+
+void MacroAssemblerX86::vandpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vandpd_mr);
+}
+
+void MacroAssemblerX86::vminpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vminpd_mr);
+}
+
+void MacroAssemblerX86::vpacksswbSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpacksswb_mr);
+}
+
+void MacroAssemblerX86::vpackuswbSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpackuswb_mr);
+}
+
+void MacroAssemblerX86::vpackssdwSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpackssdw_mr);
+}
+
+void MacroAssemblerX86::vpackusdwSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpackusdw_mr);
+}
+
+void MacroAssemblerX86::vpunpckldqSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpunpckldq_mr);
+}
+
+void MacroAssemblerX86::vunpcklpsSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vunpcklps_mr);
+}
+
+void MacroAssemblerX86::vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpshufb_mr);
+}
+
+void MacroAssemblerX86::vptestSimd128(const SimdConstant& v,
+ FloatRegister lhs) {
+ vpPatchOpSimd128(v, lhs, &X86Encoding::BaseAssemblerX86::vptest_mr);
+}
+
+void MacroAssemblerX86::vpmaddwdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaddwd_mr);
+}
+
+void MacroAssemblerX86::vpcmpeqbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpeqb_mr);
+}
+
+void MacroAssemblerX86::vpcmpgtbSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpgtb_mr);
+}
+
+void MacroAssemblerX86::vpcmpeqwSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpeqw_mr);
+}
+
+void MacroAssemblerX86::vpcmpgtwSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpgtw_mr);
+}
+
+void MacroAssemblerX86::vpcmpeqdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpeqd_mr);
+}
+
+void MacroAssemblerX86::vpcmpgtdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpcmpgtd_mr);
+}
+
+void MacroAssemblerX86::vcmpeqpsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpeqps_mr);
+}
+
+void MacroAssemblerX86::vcmpneqpsSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpneqps_mr);
+}
+
+void MacroAssemblerX86::vcmpltpsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpltps_mr);
+}
+
+void MacroAssemblerX86::vcmplepsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpleps_mr);
+}
+
+void MacroAssemblerX86::vcmpgepsSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpgeps_mr);
+}
+
+void MacroAssemblerX86::vcmpeqpdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpeqpd_mr);
+}
+
+void MacroAssemblerX86::vcmpneqpdSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpneqpd_mr);
+}
+
+void MacroAssemblerX86::vcmpltpdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmpltpd_mr);
+}
+
+void MacroAssemblerX86::vcmplepdSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vcmplepd_mr);
+}
+
+void MacroAssemblerX86::vpmaddubswSimd128(const SimdConstant& v,
+ FloatRegister lhs,
+ FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmaddubsw_mr);
+}
+
+void MacroAssemblerX86::vpmuludqSimd128(const SimdConstant& v,
+ FloatRegister lhs, FloatRegister dest) {
+ vpPatchOpSimd128(v, lhs, dest, &X86Encoding::BaseAssemblerX86::vpmuludq_mr);
+}
+
+void MacroAssemblerX86::finish() {
+ // Last instruction may be an indirect jump so eagerly insert an undefined
+ // instruction byte to prevent processors from decoding data values into
+ // their pipelines. See Intel performance guides.
+ masm.ud2();
+
+ if (!doubles_.empty()) {
+ masm.haltingAlign(sizeof(double));
+ }
+ for (const Double& d : doubles_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : d.uses) {
+ addCodeLabel(CodeLabel(use, cst));
+ }
+ masm.doubleConstant(d.value);
+ if (!enoughMemory_) {
+ return;
+ }
+ }
+
+ if (!floats_.empty()) {
+ masm.haltingAlign(sizeof(float));
+ }
+ for (const Float& f : floats_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : f.uses) {
+ addCodeLabel(CodeLabel(use, cst));
+ }
+ masm.floatConstant(f.value);
+ if (!enoughMemory_) {
+ return;
+ }
+ }
+
+ // SIMD memory values must be suitably aligned.
+ if (!simds_.empty()) {
+ masm.haltingAlign(SimdMemoryAlignment);
+ }
+ for (const SimdData& v : simds_) {
+ CodeOffset cst(masm.currentOffset());
+ for (CodeOffset use : v.uses) {
+ addCodeLabel(CodeLabel(use, cst));
+ }
+ masm.simd128Constant(v.value.bytes());
+ if (!enoughMemory_) {
+ return;
+ }
+ }
+}
+
+void MacroAssemblerX86::handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail) {
+ // Reserve space for exception information.
+ subl(Imm32(sizeof(ResumeFromException)), esp);
+ movl(esp, eax);
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException* rfe);
+ asMasm().setupUnalignedABICall(ecx);
+ asMasm().passABIArg(eax);
+ asMasm().callWithABI<Fn, HandleException>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ loadPtr(Address(esp, ResumeFromException::offsetOfKind()), eax);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, eax, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, eax,
+ Imm32(ExceptionResumeKind::WasmCatch), &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ ret();
+
+ // If we found a catch handler, this must be a baseline frame. Restore state
+ // and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(esp, ResumeFromException::offsetOfTarget()), eax);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ jmp(Operand(eax));
+
+ // If we found a finally block, this must be a baseline frame. Push three
+ // values expected by the finally block: the exception, the exception stack,
+ // and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(ecx, edx);
+ loadValue(Address(esp, ResumeFromException::offsetOfException()), exception);
+
+ ValueOperand exceptionStack = ValueOperand(esi, edi);
+ loadValue(Address(esp, ResumeFromException::offsetOfExceptionStack()),
+ exceptionStack);
+
+ loadPtr(Address(esp, ResumeFromException::offsetOfTarget()), eax);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+
+ pushValue(exception);
+ pushValue(exceptionStack);
+ pushValue(BooleanValue(true));
+ jmp(Operand(eax));
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ loadValue(Address(ebp, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(esp, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ movl(ebp, esp);
+ pop(ebp);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to the
+ // bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(esp, ResumeFromException::offsetOfBailoutInfo()), ecx);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ move32(Imm32(1), ReturnReg);
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ movePtr(ImmPtr((const void*)wasm::FailInstanceReg), InstanceReg);
+ masm.ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(esp, ResumeFromException::offsetOfTarget()), eax);
+ loadPtr(Address(esp, ResumeFromException::offsetOfFramePointer()), ebp);
+ loadPtr(Address(esp, ResumeFromException::offsetOfStackPointer()), esp);
+ jmp(Operand(eax));
+}
+
+void MacroAssemblerX86::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerX86::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+Assembler::Condition MacroAssemblerX86::testStringTruthy(
+ bool truthy, const ValueOperand& value) {
+ Register string = value.payloadReg();
+ cmp32(Operand(string, JSString::offsetOfLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+Assembler::Condition MacroAssemblerX86::testBigIntTruthy(
+ bool truthy, const ValueOperand& value) {
+ Register bi = value.payloadReg();
+ cmp32(Operand(bi, JS::BigInt::offsetOfDigitLength()), Imm32(0));
+ return truthy ? Assembler::NotEqual : Assembler::Equal;
+}
+
+MacroAssembler& MacroAssemblerX86::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerX86::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+    // On Windows, we cannot skip very far down the stack without touching the
+    // memory pages in-between. This is corner-case code for situations where
+    // the Ion frame data for a piece of code is very large. To handle this
+    // special case, for frames over 4k in size we allocate memory on the stack
+    // incrementally, touching it as we go.
+    //
+    // When the amount is quite large, which it can be, we emit an actual loop
+    // in order to keep the function prologue compact. Compactness is a
+    // requirement for, e.g., Wasm's CodeRange data structure, which can encode
+    // only 8-bit offsets.
+ uint32_t amountLeft = imm32.value;
+ uint32_t fullPages = amountLeft / 4096;
+ if (fullPages <= 8) {
+ while (amountLeft > 4096) {
+ subl(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ amountLeft -= 4096;
+ }
+ subl(Imm32(amountLeft), StackPointer);
+ } else {
+ // Save scratch register.
+ push(eax);
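+      // The push itself consumed 4 bytes of the requested area, so account
+      // for it here; eax is reloaded from that slot at the end (esp no
+      // longer points at it, so pop cannot be used).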
+ amountLeft -= 4;
+ fullPages = amountLeft / 4096;
+
+ Label top;
+ move32(Imm32(fullPages), eax);
+ bind(&top);
+ subl(Imm32(4096), StackPointer);
+ store32(Imm32(0), Address(StackPointer, 0));
+ subl(Imm32(1), eax);
+ j(Assembler::NonZero, &top);
+ amountLeft -= fullPages * 4096;
+ if (amountLeft) {
+ subl(Imm32(amountLeft), StackPointer);
+ }
+
+ // Restore scratch register.
+ movl(Operand(StackPointer, uint32_t(imm32.value) - 4), eax);
+ }
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
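+  // Align esp down to ABIStackAlignment and push the old value, so that
+  // callWithABIPost can restore it with pop(esp).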
+ movl(esp, scratch);
+ andl(Imm32(~(ABIStackAlignment - 1)), esp);
+ push(scratch);
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ if (dynamicAlignment_) {
+ // sizeof(intptr_t) accounts for the saved stack pointer pushed by
+ // setupUnalignedABICall.
+ stackForCall += ComputeByteAlignment(stackForCall + sizeof(intptr_t),
+ ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, ABIType result,
+ bool callFromWasm) {
+ freeStack(stackAdjust);
+
+ // Calls to native functions in wasm pass through a thunk which already
+ // fixes up the return value for us.
+ if (!callFromWasm) {
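+    // The native x86 ABI returns floating point values in x87 st(0); spill
+    // st(0) to the stack and reload it into the SSE return register.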
+ if (result == ABIType::Float64) {
+ reserveStack(sizeof(double));
+ fstp(Operand(esp, 0));
+ loadDouble(Operand(esp, 0), ReturnDoubleReg);
+ freeStack(sizeof(double));
+ } else if (result == ABIType::Float32) {
+ reserveStack(sizeof(float));
+ fstp32(Operand(esp, 0));
+ loadFloat32(Operand(esp, 0), ReturnFloat32Reg);
+ freeStack(sizeof(float));
+ }
+ }
+
+ if (dynamicAlignment_) {
+ pop(esp);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, ABIType result) {
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun, ABIType result) {
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(fun);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ if (reg.gpr() != dest.payloadReg()) {
+ movl(reg.gpr(), dest.payloadReg());
+ }
+ mov(ImmWord(MIRTypeToTag(type)), dest.typeReg());
+ return;
+ }
+
+ ScratchDoubleScope scratch(*this);
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ Register s0 = src.typeReg();
+ Register s1 = src.payloadReg();
+ Register d0 = dest.typeReg();
+ Register d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ xchgl(d0, d1);
+ return;
+ }
+ // If only one is, copy that source first.
+ std::swap(s0, s1);
+ std::swap(d0, d1);
+ }
+
+ if (s0 != d0) {
+ movl(s0, d0);
+ }
+ if (s1 != d1) {
+ movl(s1, d1);
+ }
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ movl(Imm32(src.toNunboxTag()), dest.typeReg());
+ if (src.isGCThing()) {
+ movl(ImmGCPtr(src.toGCThing()), dest.payloadReg());
+ } else {
+ movl(Imm32(src.toNunboxPayload()), dest.payloadReg());
+ }
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ if (ptr != buffer) {
+ movePtr(ptr, buffer);
+ }
+ andPtr(Imm32(~gc::ChunkMask), buffer);
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ MOZ_ASSERT(ptr != temp);
+ movePtr(ptr, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ MOZ_ASSERT(temp != InvalidReg); // A temp register is required for x86.
+ loadPtr(address, temp);
+ branchPtrInNurseryChunkImpl(cond, temp, label);
+}
+
+void MacroAssembler::branchPtrInNurseryChunkImpl(Condition cond, Register ptr,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ andPtr(Imm32(~gc::ChunkMask), ptr);
+ branchPtr(InvertCondition(cond), Address(ptr, gc::ChunkStoreBufferOffset),
+ ImmWord(0), label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, address,
+ cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, ToPayload(address), temp, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+ Label done;
+
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+ branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
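+  // A nunbox Value occupies two registers, so equality requires both the
+  // payload and the type tag to match; compare the payload first.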
+ if (rhs.isGCThing()) {
+ cmpPtr(lhs.payloadReg(), ImmGCPtr(rhs.toGCThing()));
+ } else {
+ cmpPtr(lhs.payloadReg(), ImmWord(rhs.toNunboxPayload()));
+ }
+
+ if (cond == Equal) {
+ Label done;
+ j(NotEqual, &done);
+ {
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(Equal, label);
+ }
+ bind(&done);
+ } else {
+ j(NotEqual, label);
+
+ cmp32(lhs.typeReg(), Imm32(rhs.toNunboxTag()));
+ j(NotEqual, label);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ storeDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ // Store the type tag.
+ storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Operand(dest));
+
+ // Store the payload.
+ if (value.constant()) {
+ storePayload(value.value(), Operand(dest));
+ } else {
+ storePayload(value.reg().typedReg().gpr(), Operand(dest));
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+// wasm specific methods, used in both the wasm baseline compiler and ion.
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Operand srcAddr, AnyRegister out) {
+ MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP ||
+ srcAddr.kind() == Operand::MEM_SCALE);
+
+ MOZ_ASSERT_IF(
+ access.isZeroExtendSimd128Load(),
+ access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
+ MOZ_ASSERT_IF(
+ access.isSplatSimd128Load(),
+ access.type() == Scalar::Uint8 || access.type() == Scalar::Uint16 ||
+ access.type() == Scalar::Float32 || access.type() == Scalar::Float64);
+ MOZ_ASSERT_IF(access.isWidenSimd128Load(), access.type() == Scalar::Float64);
+
+ // NOTE: the generated code must match the assembly code in gen_load in
+ // GenerateAtomicOperations.py
+ memoryBarrierBefore(access.sync());
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ append(access, wasm::TrapMachineInsn::Load8,
+ FaultingCodeOffset(currentOffset()));
+ movsbl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint8:
+ append(access, wasm::TrapMachineInsn::Load8,
+ FaultingCodeOffset(currentOffset()));
+ if (access.isSplatSimd128Load()) {
+ vbroadcastb(srcAddr, out.fpu());
+ } else {
+ movzbl(srcAddr, out.gpr());
+ }
+ break;
+ case Scalar::Int16:
+ append(access, wasm::TrapMachineInsn::Load16,
+ FaultingCodeOffset(currentOffset()));
+ movswl(srcAddr, out.gpr());
+ break;
+ case Scalar::Uint16:
+ append(access, wasm::TrapMachineInsn::Load16,
+ FaultingCodeOffset(currentOffset()));
+ if (access.isSplatSimd128Load()) {
+ vbroadcastw(srcAddr, out.fpu());
+ } else {
+ movzwl(srcAddr, out.gpr());
+ }
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ append(access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(currentOffset()));
+ movl(srcAddr, out.gpr());
+ break;
+ case Scalar::Float32:
+ append(access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(currentOffset()));
+ if (access.isSplatSimd128Load()) {
+ vbroadcastss(srcAddr, out.fpu());
+ } else {
+ // vmovss does the right thing also for access.isZeroExtendSimd128Load()
+ vmovss(srcAddr, out.fpu());
+ }
+ break;
+ case Scalar::Float64:
+ append(access, wasm::TrapMachineInsn::Load64,
+ FaultingCodeOffset(currentOffset()));
+ if (access.isSplatSimd128Load()) {
+ vmovddup(srcAddr, out.fpu());
+ } else if (access.isWidenSimd128Load()) {
+ switch (access.widenSimdOp()) {
+ case wasm::SimdOp::V128Load8x8S:
+ vpmovsxbw(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load8x8U:
+ vpmovzxbw(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load16x4S:
+ vpmovsxwd(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load16x4U:
+ vpmovzxwd(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load32x2S:
+ vpmovsxdq(srcAddr, out.fpu());
+ break;
+ case wasm::SimdOp::V128Load32x2U:
+ vpmovzxdq(srcAddr, out.fpu());
+ break;
+ default:
+ MOZ_CRASH("Unexpected widening op for wasmLoad");
+ }
+ } else {
+ // vmovsd does the right thing also for access.isZeroExtendSimd128Load()
+ vmovsd(srcAddr, out.fpu());
+ }
+ break;
+ case Scalar::Simd128:
+ append(access, wasm::TrapMachineInsn::Load128,
+ FaultingCodeOffset(currentOffset()));
+ vmovups(srcAddr, out.fpu());
+ break;
+ case Scalar::Int64:
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected type");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Operand srcAddr, Register64 out) {
+ // Atomic i64 load must use lock_cmpxchg8b.
+ MOZ_ASSERT_IF(access.isAtomic(), access.byteSize() <= 4);
+ MOZ_ASSERT(srcAddr.kind() == Operand::MEM_REG_DISP ||
+ srcAddr.kind() == Operand::MEM_SCALE);
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load()); // Use wasmLoad()
+ MOZ_ASSERT(!access.isSplatSimd128Load()); // Use wasmLoad()
+ MOZ_ASSERT(!access.isWidenSimd128Load()); // Use wasmLoad()
+
+ memoryBarrierBefore(access.sync());
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ append(access, wasm::TrapMachineInsn::Load8,
+ FaultingCodeOffset(currentOffset()));
+ movsbl(srcAddr, out.low);
+
+ cdq();
+ break;
+ case Scalar::Uint8:
+ append(access, wasm::TrapMachineInsn::Load8,
+ FaultingCodeOffset(currentOffset()));
+ movzbl(srcAddr, out.low);
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int16:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ append(access, wasm::TrapMachineInsn::Load16,
+ FaultingCodeOffset(currentOffset()));
+ movswl(srcAddr, out.low);
+
+ cdq();
+ break;
+ case Scalar::Uint16:
+ append(access, wasm::TrapMachineInsn::Load16,
+ FaultingCodeOffset(currentOffset()));
+ movzwl(srcAddr, out.low);
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int32:
+ MOZ_ASSERT(out == Register64(edx, eax));
+ append(access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(currentOffset()));
+ movl(srcAddr, out.low);
+
+ cdq();
+ break;
+ case Scalar::Uint32:
+ append(access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(currentOffset()));
+ movl(srcAddr, out.low);
+
+ xorl(out.high, out.high);
+ break;
+ case Scalar::Int64: {
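+      // The address registers must survive the low-word load below, since
+      // they are still needed to form the high-word address.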
+ if (srcAddr.kind() == Operand::MEM_SCALE) {
+ MOZ_RELEASE_ASSERT(srcAddr.toBaseIndex().base != out.low &&
+ srcAddr.toBaseIndex().index != out.low);
+ }
+ if (srcAddr.kind() == Operand::MEM_REG_DISP) {
+ MOZ_RELEASE_ASSERT(srcAddr.toAddress().base != out.low);
+ }
+
+ append(access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(currentOffset()));
+ movl(LowWord(srcAddr), out.low);
+
+ append(access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(currentOffset()));
+ movl(HighWord(srcAddr), out.high);
+
+ break;
+ }
+ case Scalar::Float32:
+ case Scalar::Float64:
+ MOZ_CRASH("non-int64 loads should use load()");
+ case Scalar::Simd128:
+ case Scalar::Uint8Clamped:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Operand dstAddr) {
+ MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP ||
+ dstAddr.kind() == Operand::MEM_SCALE);
+
+ // NOTE: the generated code must match the assembly code in gen_store in
+ // GenerateAtomicOperations.py
+ memoryBarrierBefore(access.sync());
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8Clamped:
+ case Scalar::Uint8:
+ append(access, wasm::TrapMachineInsn::Store8,
+ FaultingCodeOffset(currentOffset()));
+      // FIXME: figure out where this movb goes.
+ movb(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ append(access, wasm::TrapMachineInsn::Store16,
+ FaultingCodeOffset(currentOffset()));
+ movw(value.gpr(), dstAddr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ append(access, wasm::TrapMachineInsn::Store32,
+ FaultingCodeOffset(currentOffset()));
+ movl(value.gpr(), dstAddr);
+ break;
+ case Scalar::Float32:
+ append(access, wasm::TrapMachineInsn::Store32,
+ FaultingCodeOffset(currentOffset()));
+ vmovss(value.fpu(), dstAddr);
+ break;
+ case Scalar::Float64:
+ append(access, wasm::TrapMachineInsn::Store64,
+ FaultingCodeOffset(currentOffset()));
+ vmovsd(value.fpu(), dstAddr);
+ break;
+ case Scalar::Simd128:
+ append(access, wasm::TrapMachineInsn::Store128,
+ FaultingCodeOffset(currentOffset()));
+ vmovups(value.fpu(), dstAddr);
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("Should be handled in storeI64.");
+ case Scalar::MaxTypedArrayViewType:
+ case Scalar::BigInt64:
+ case Scalar::BigUint64:
+ MOZ_CRASH("unexpected type");
+ }
+
+ memoryBarrierAfter(access.sync());
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Operand dstAddr) {
+ // Atomic i64 store must use lock_cmpxchg8b.
+ MOZ_ASSERT(!access.isAtomic());
+ MOZ_ASSERT(dstAddr.kind() == Operand::MEM_REG_DISP ||
+ dstAddr.kind() == Operand::MEM_SCALE);
+
+ // Store the high word first so as to hit guard-page-based OOB checks without
+ // writing partial data.
+ append(access, wasm::TrapMachineInsn::Store32,
+ FaultingCodeOffset(currentOffset()));
+ movl(value.high, HighWord(dstAddr));
+
+ append(access, wasm::TrapMachineInsn::Store32,
+ FaultingCodeOffset(currentOffset()));
+ movl(value.low, LowWord(dstAddr));
+}
+
+template <typename T>
+static void AtomicLoad64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, const T& address,
+ Register64 temp, Register64 output) {
+ MOZ_ASSERT(temp.low == ebx);
+ MOZ_ASSERT(temp.high == ecx);
+ MOZ_ASSERT(output.high == edx);
+ MOZ_ASSERT(output.low == eax);
+
+ // In the event edx:eax matches what's in memory, ecx:ebx will be
+ // stored. The two pairs must therefore have the same values.
+ masm.movl(edx, ecx);
+ masm.movl(eax, ebx);
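+  // Whether or not the compare succeeds, cmpxchg8b leaves the current
+  // memory value in edx:eax, which is exactly the load result; and since
+  // ecx:ebx equals edx:eax, memory is never modified.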
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Atomic,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(address));
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicLoad64(*this, &access, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicLoad64(*this, &access, mem, temp, output);
+}
+
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const T& mem, Register64 expected,
+ Register64 replacement, Register64 output) {
+ MOZ_ASSERT(expected == output);
+ MOZ_ASSERT(expected.high == edx);
+ MOZ_ASSERT(expected.low == eax);
+ MOZ_ASSERT(replacement.high == ecx);
+ MOZ_ASSERT(replacement.low == ebx);
+
+ // NOTE: the generated code must match the assembly code in gen_cmpxchg in
+ // GenerateAtomicOperations.py
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Atomic,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem));
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, &access, mem, expected, replacement, output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, &access, mem, expected, replacement, output);
+}
+
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(value.low == ebx);
+ MOZ_ASSERT(value.high == ecx);
+ MOZ_ASSERT(output.high == edx);
+ MOZ_ASSERT(output.low == eax);
+
+ // edx:eax has garbage initially, and that is the best we can do unless
+ // we can guess with high probability what's in memory.
+
+ MOZ_ASSERT(mem.base != edx && mem.base != eax);
+ if constexpr (std::is_same_v<T, BaseIndex>) {
+ MOZ_ASSERT(mem.index != edx && mem.index != eax);
+ } else {
+ static_assert(std::is_same_v<T, Address>);
+ }
+
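+  // CAS loop: once edx:eax matches the current memory value, cmpxchg8b
+  // stores ecx:ebx (the new value) and leaves the old value in edx:eax.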
+ Label again;
+ masm.bind(&again);
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Atomic,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem));
+ masm.j(MacroAssembler::NonZero, &again);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, &access, mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 value, Register64 output) {
+ AtomicExchange64(*this, &access, mem, value, output);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access, AtomicOp op,
+ const Address& value, const T& mem, Register64 temp,
+ Register64 output) {
+ // We don't have enough registers for all the operands on x86, so the rhs
+ // operand is in memory.
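+  //
+  // Classic cmpxchg8b loop: load the current value into edx:eax, compute
+  // (old OP rhs) into ecx:ebx, and retry until no other thread has changed
+  // the memory in between.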
+
+#define ATOMIC_OP_BODY(OPERATE) \
+ do { \
+ MOZ_ASSERT(output.low == eax); \
+ MOZ_ASSERT(output.high == edx); \
+ MOZ_ASSERT(temp.low == ebx); \
+ MOZ_ASSERT(temp.high == ecx); \
+ FaultingCodeOffsetPair fcop = masm.load64(mem, output); \
+ if (access) { \
+ masm.append(*access, wasm::TrapMachineInsn::Load32, fcop.first); \
+ masm.append(*access, wasm::TrapMachineInsn::Load32, fcop.second); \
+ } \
+ Label again; \
+ masm.bind(&again); \
+ masm.move64(output, temp); \
+ masm.OPERATE(Operand(value), temp); \
+ masm.lock_cmpxchg8b(edx, eax, ecx, ebx, Operand(mem)); \
+ masm.j(MacroAssembler::NonZero, &again); \
+ } while (0)
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ ATOMIC_OP_BODY(add64FromMemory);
+ break;
+ case AtomicFetchSubOp:
+ ATOMIC_OP_BODY(sub64FromMemory);
+ break;
+ case AtomicFetchAndOp:
+ ATOMIC_OP_BODY(and64FromMemory);
+ break;
+ case AtomicFetchOrOp:
+ ATOMIC_OP_BODY(or64FromMemory);
+ break;
+ case AtomicFetchXorOp:
+ ATOMIC_OP_BODY(xor64FromMemory);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+#undef ATOMIC_OP_BODY
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, const Address& value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, const Address& value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
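+  // vcvttsd2si produces 0x80000000 for inputs outside the int32 range, so a
+  // non-negative result means the input already fit in [0, 2^31).
+  // Otherwise subtract 2^31, truncate, and set bit 31 of the result.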
+ Label done;
+ vcvttsd2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
+ ScratchDoubleScope fpscratch(*this);
+ loadConstantDouble(double(int32_t(0x80000000)), fpscratch);
+ addDouble(input, fpscratch);
+ vcvttsd2si(fpscratch, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ Label done;
+ vcvttss2si(input, output);
+ branch32(Assembler::Condition::NotSigned, output, Imm32(0), &done);
+
+ ScratchFloat32Scope fpscratch(*this);
+ loadConstantFloat32(float(int32_t(0x80000000)), fpscratch);
+ addFloat32(input, fpscratch);
+ vcvttss2si(fpscratch, output);
+
+ branch32(Assembler::Condition::Signed, output, Imm32(0), oolEntry);
+ or32(Imm32(0x80000000), output);
+
+ bind(&done);
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label ok;
+ Register temp = output.high;
+
+ reserveStack(2 * sizeof(int32_t));
+ storeDouble(input, Operand(esp, 0));
+
+ truncateDoubleToInt64(Address(esp, 0), Address(esp, 0), temp);
+ load64(Address(esp, 0), output);
+
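+  // x87 truncation writes the "integer indefinite" value 0x8000000000000000
+  // on overflow or NaN. Detect exactly that bit pattern: the low word must
+  // be zero and the high word 0x80000000 (subtracting 1 from it sets OF).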
+ cmpl(Imm32(0), Operand(esp, 0));
+ j(Assembler::NotEqual, &ok);
+
+ cmpl(Imm32(1), Operand(esp, 4));
+ j(Assembler::Overflow, oolEntry);
+
+ bind(&ok);
+ bind(oolRejoin);
+
+ freeStack(2 * sizeof(int32_t));
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label ok;
+ Register temp = output.high;
+
+ reserveStack(2 * sizeof(int32_t));
+ storeFloat32(input, Operand(esp, 0));
+
+ truncateFloat32ToInt64(Address(esp, 0), Address(esp, 0), temp);
+ load64(Address(esp, 0), output);
+
+ cmpl(Imm32(0), Operand(esp, 0));
+ j(Assembler::NotEqual, &ok);
+
+ cmpl(Imm32(1), Operand(esp, 4));
+ j(Assembler::Overflow, oolEntry);
+
+ bind(&ok);
+ bind(oolRejoin);
+
+ freeStack(2 * sizeof(int32_t));
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in uint64.
+ reserveStack(2 * sizeof(int32_t));
+ storeDouble(input, Operand(esp, 0));
+ branchDoubleNotInUInt64Range(Address(esp, 0), temp, &fail);
+ size_t stackBeforeBranch = framePushed();
+ jump(&convert);
+
+ bind(&fail);
+ freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ if (isSaturating) {
+ // The OOL path computes the right values.
+ setFramePushed(stackBeforeBranch);
+ } else {
+ // The OOL path just checks the input values.
+ bind(oolRejoin);
+ reserveStack(2 * sizeof(int32_t));
+ storeDouble(input, Operand(esp, 0));
+ }
+
+ // Convert the double/float to uint64.
+ bind(&convert);
+ truncateDoubleToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ freeStack(2 * sizeof(int32_t));
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempReg) {
+ Label fail, convert;
+ Register temp = output.high;
+
+ // Make sure input fits in uint64.
+ reserveStack(2 * sizeof(int32_t));
+ storeFloat32(input, Operand(esp, 0));
+ branchFloat32NotInUInt64Range(Address(esp, 0), temp, &fail);
+ size_t stackBeforeBranch = framePushed();
+ jump(&convert);
+
+ bind(&fail);
+ freeStack(2 * sizeof(int32_t));
+ jump(oolEntry);
+ if (isSaturating) {
+ // The OOL path computes the right values.
+ setFramePushed(stackBeforeBranch);
+ } else {
+ // The OOL path just checks the input values.
+ bind(oolRejoin);
+ reserveStack(2 * sizeof(int32_t));
+ storeFloat32(input, Operand(esp, 0));
+ }
+
+ // Convert the float to uint64.
+ bind(&convert);
+ truncateFloat32ToUInt64(Address(esp, 0), Address(esp, 0), temp, tempReg);
+
+ // Load value into int64 register.
+ load64(Address(esp, 0), output);
+ freeStack(2 * sizeof(int32_t));
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+void MacroAssembler::atomicLoad64(const Synchronization&, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicLoad64(*this, nullptr, mem, temp, output);
+}
+
+void MacroAssembler::atomicLoad64(const Synchronization&, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicLoad64(*this, nullptr, mem, temp, output);
+}
+
+void MacroAssembler::atomicStore64(const Synchronization&, const Address& mem,
+ Register64 value, Register64 temp) {
+ AtomicExchange64(*this, nullptr, mem, value, temp);
+}
+
+void MacroAssembler::atomicStore64(const Synchronization&, const BaseIndex& mem,
+ Register64 value, Register64 temp) {
+ AtomicExchange64(*this, nullptr, mem, value, temp);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization&,
+ const Address& mem, Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, nullptr, mem, expected, replacement, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization&,
+ const BaseIndex& mem,
+ Register64 expected,
+ Register64 replacement,
+ Register64 output) {
+ CompareExchange64(*this, nullptr, mem, expected, replacement, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization&,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization&,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, mem, value, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+ const Address& value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization&, AtomicOp op,
+ const Address& value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, op, value, mem, temp, output);
+}
+
+// ========================================================================
+// Convert floating point.
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return HasSSE3(); }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ // SUBPD needs SSE2, HADDPD needs SSE3.
+ if (!HasSSE3()) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ // Zero the dest register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(dest);
+
+ Push(src.high);
+ Push(src.low);
+ fild(Operand(esp, 0));
+
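+    // fild interprets the 64-bit integer as signed; when the sign bit is
+    // set, the loaded value equals src - 2**64, so adding 2**64 recovers
+    // the unsigned value.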
+ Label notNegative;
+ branch32(Assembler::NotSigned, src.high, Imm32(0), &notNegative);
+ double add_constant = 18446744073709551616.0; // 2^64
+ store64(Imm64(mozilla::BitwiseCast<uint64_t>(add_constant)),
+ Address(esp, 0));
+ fld(Operand(esp, 0));
+ faddp();
+ bind(&notNegative);
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), dest);
+ freeStack(2 * sizeof(intptr_t));
+ return;
+ }
+
+  // The following operation uses the entire 128 bits of the dest XMM
+  // register; the upper 64 bits are free whenever we have access to the
+  // lower 64 bits.
+ MOZ_ASSERT(dest.size() == 8);
+ FloatRegister dest128 =
+ FloatRegister(dest.encoding(), FloatRegisters::Simd128);
+
+  // Assume that src is represented as follows:
+ // src = 0x HHHHHHHH LLLLLLLL
+
+ {
+ // Move src to dest (=dest128) and ScratchInt32x4Reg (=scratch):
+ // dest = 0x 00000000 00000000 00000000 LLLLLLLL
+ // scratch = 0x 00000000 00000000 00000000 HHHHHHHH
+ ScratchSimd128Scope scratch(*this);
+ vmovd(src.low, dest128);
+ vmovd(src.high, scratch);
+
+ // Unpack and interleave dest and scratch to dest:
+ // dest = 0x 00000000 00000000 HHHHHHHH LLLLLLLL
+ vpunpckldq(scratch, dest128, dest128);
+ }
+
+  // Unpack and interleave dest and a constant C1 to dest:
+  // C1 = 0x 00000000 00000000 45300000 43300000
+  // dest = 0x 45300000 HHHHHHHH 43300000 LLLLLLLL
+  // Here, each 64-bit half of dest represents the following double:
+  // HI(dest) = 0x 1.00000HHHHHHHH * 2**84 == 2**84 + 0x HHHHHHHH 00000000
+  // LO(dest) = 0x 1.00000LLLLLLLL * 2**52 == 2**52 + 0x 00000000 LLLLLLLL
+  // See the worked example below for the details.
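+  //
+  // Worked example (illustrative): for src = 0x 00000001 00000002 we get
+  // HI(dest) = 2**84 + 2**32 and LO(dest) = 2**52 + 2. Subtracting C2 below
+  // leaves HI(dest) = 4294967296.0 and LO(dest) = 2.0, and the final
+  // horizontal add yields 4294967298.0 == double(src).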
+ static const int32_t CST1[4] = {
+ 0x43300000,
+ 0x45300000,
+ 0x0,
+ 0x0,
+ };
+
+ vpunpckldqSimd128(SimdConstant::CreateX4(CST1), dest128, dest128);
+
+ // Subtract a constant C2 from dest, for each 64-bit part:
+ // C2 = 0x 45300000 00000000 43300000 00000000
+  // Here, each 64-bit half of C2 represents the following double:
+  // HI(C2) = 0x 1.0000000000000 * 2**84 == 2**84
+  // LO(C2) = 0x 1.0000000000000 * 2**52 == 2**52
+  // After the operation, each 64-bit half of dest represents the following:
+ // HI(dest) = double(0x HHHHHHHH 00000000)
+ // LO(dest) = double(0x 00000000 LLLLLLLL)
+ static const int32_t CST2[4] = {
+ 0x0,
+ 0x43300000,
+ 0x0,
+ 0x45300000,
+ };
+
+ vsubpdSimd128(SimdConstant::CreateX4(CST2), dest128, dest128);
+
+ // Add HI(dest) and LO(dest) in double and store it into LO(dest),
+ // LO(dest) = double(0x HHHHHHHH 00000000) + double(0x 00000000 LLLLLLLL)
+ // = double(0x HHHHHHHH LLLLLLLL)
+ // = double(src)
+ vhaddpd(dest128, dest128);
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 input,
+ FloatRegister output) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ Push(input.high);
+ Push(input.low);
+ fild(Operand(esp, 0));
+
+ fstp(Operand(esp, 0));
+ vmovsd(Address(esp, 0), output);
+ freeStack(2 * sizeof(intptr_t));
+}
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 input,
+ FloatRegister output,
+ Register temp) {
+  // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ // Set the FPU precision to 80 bits.
+ reserveStack(2 * sizeof(intptr_t));
+ fnstcw(Operand(esp, 0));
+ load32(Operand(esp, 0), temp);
+ orl(Imm32(0x300), temp);
+ store32(temp, Operand(esp, sizeof(intptr_t)));
+ fldcw(Operand(esp, sizeof(intptr_t)));
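+  // (OR-ing in 0x300 sets the x87 precision-control field, bits 8-9 of the
+  // control word, to 11b: a 64-bit significand, i.e. the 80-bit extended
+  // format, so the uint64 loses no bits while on the FPU stack.)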
+
+ Push(input.high);
+ Push(input.low);
+ fild(Operand(esp, 0));
+
+ Label notNegative;
+ branch32(Assembler::NotSigned, input.high, Imm32(0), &notNegative);
+ double add_constant = 18446744073709551616.0; // 2^64
+ uint64_t add_constant_u64 = mozilla::BitwiseCast<uint64_t>(add_constant);
+ store64(Imm64(add_constant_u64), Address(esp, 0));
+
+ fld(Operand(esp, 0));
+ faddp();
+ bind(&notNegative);
+
+ fstp32(Operand(esp, 0));
+ vmovss(Address(esp, 0), output);
+ freeStack(2 * sizeof(intptr_t));
+
+ // Restore FPU precision to the initial value.
+ fldcw(Operand(esp, 0));
+ freeStack(2 * sizeof(intptr_t));
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 input,
+ FloatRegister output) {
+ // Zero the output register to break dependencies, see convertInt32ToDouble.
+ zeroDouble(output);
+
+ Push(input.high);
+ Push(input.low);
+ fild(Operand(esp, 0));
+
+ fstp32(Operand(esp, 0));
+ vmovss(Address(esp, 0), output);
+ freeStack(2 * sizeof(intptr_t));
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt32ToDouble(src, dest);
+}
+
+void MacroAssembler::PushBoxed(FloatRegister reg) { Push(reg); }
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ Label notOk;
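+  // A 64-bit index can be in bounds only if its high word is zero; the
+  // 32-bit check on the low word then decides.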
+ cmp32(index.high, Imm32(0));
+ j(Assembler::NonZero, &notOk);
+ wasmBoundsCheck32(cond, index.low, boundsCheckLimit.low, ok);
+ bind(&notOk);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ Label notOk;
+ cmp32(index.high, Imm32(0));
+ j(Assembler::NonZero, &notOk);
+ wasmBoundsCheck32(cond, index.low, boundsCheckLimit, ok);
+ bind(&notOk);
+}
+
+#ifdef ENABLE_WASM_TAIL_CALLS
+void MacroAssembler::wasmMarkSlowCall() {
+ static_assert(esi == InstanceReg);
+ or32(esi, esi);
+}
+
+const int32_t SlowCallMarker = 0xf60b; // OR esi, esi
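+// ("or esi, esi" assembles to the two bytes 0x0B 0xF6, which read as a
+// little-endian 16-bit value give 0xf60b, matching the cmp16 check below.)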
+
+void MacroAssembler::wasmCheckSlowCallsite(Register ra, Label* notSlow,
+ Register temp1, Register temp2) {
+  // Check whether the code at the return address is the slow-call marker.
+ cmp16(Address(ra, 0), Imm32(SlowCallMarker));
+ j(Assembler::NotEqual, notSlow);
+}
+#endif // ENABLE_WASM_TAIL_CALLS
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/x86/MacroAssembler-x86.h b/js/src/jit/x86/MacroAssembler-x86.h
new file mode 100644
index 0000000000..205a2baaa6
--- /dev/null
+++ b/js/src/jit/x86/MacroAssembler-x86.h
@@ -0,0 +1,1186 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_MacroAssembler_x86_h
+#define jit_x86_MacroAssembler_x86_h
+
+#include "jit/JitOptions.h"
+#include "jit/MoveResolver.h"
+#include "jit/x86-shared/MacroAssembler-x86-shared.h"
+#include "js/HeapAPI.h"
+#include "wasm/WasmBuiltins.h"
+#include "wasm/WasmCodegenTypes.h"
+
+using js::wasm::FaultingCodeOffsetPair;
+
+namespace js {
+namespace jit {
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope {
+ const ValueOperand& v_;
+
+ public:
+ ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
+ operator Register() { return v_.typeReg(); }
+ void release() {}
+ void reacquire() {}
+};
+
+class ScratchTagScopeRelease {
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope*) {}
+};
+
+class MacroAssemblerX86 : public MacroAssemblerX86Shared {
+ private:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ protected:
+ MoveResolver moveResolver_;
+
+ private:
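+  // On x86 (NUNBOX32, little-endian), a 64-bit Value is stored as the
+  // 32-bit payload at offset 0 followed by the 32-bit type tag at offset 4;
+  // hence the +4 adjustments in the tagOf/ToType helpers below.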
+ Operand payloadOfAfterStackPush(const Address& address) {
+ // If we are basing off %esp, the address will be invalid after the
+ // first push.
+ if (address.base == StackPointer) {
+ return Operand(address.base, address.offset + 4);
+ }
+ return payloadOf(address);
+ }
+ Operand payloadOfAfterStackPush(const BaseIndex& address) {
+ // If we are basing off %esp, the address will be invalid after the
+ // first push.
+ if (address.base == StackPointer) {
+ return Operand(address.base, address.index, address.scale,
+ address.offset + 4);
+ }
+ return payloadOf(address);
+ }
+ Operand payloadOf(const Address& address) {
+ return Operand(address.base, address.offset);
+ }
+ Operand payloadOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale, address.offset);
+ }
+ Operand tagOf(const Address& address) {
+ return Operand(address.base, address.offset + 4);
+ }
+ Operand tagOf(const BaseIndex& address) {
+ return Operand(address.base, address.index, address.scale,
+ address.offset + 4);
+ }
+
+ void setupABICall(uint32_t args);
+
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister reg,
+ void (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
+ vpPatchOpSimd128(v, reg, reg, op);
+ }
+
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest,
+ void (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId));
+
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister reg,
+ size_t (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId)) {
+ vpPatchOpSimd128(v, reg, reg, op);
+ }
+
+ void vpPatchOpSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest,
+ size_t (X86Encoding::BaseAssemblerX86::*op)(
+ const void* address,
+ X86Encoding::XMMRegisterID srcId,
+ X86Encoding::XMMRegisterID destId));
+
+ public:
+ using MacroAssemblerX86Shared::call;
+ using MacroAssemblerX86Shared::load32;
+ using MacroAssemblerX86Shared::store16;
+ using MacroAssemblerX86Shared::store32;
+
+ MacroAssemblerX86() {}
+
+  // The buffer is about to be linked. Make sure any constant pools or excess
+  // bookkeeping has been flushed to the instruction stream.
+ void finish();
+
+ /////////////////////////////////////////////////////////////////
+ // X86-specific interface.
+ /////////////////////////////////////////////////////////////////
+
+ Operand ToPayload(Operand base) { return base; }
+ Address ToPayload(Address base) { return base; }
+ BaseIndex ToPayload(BaseIndex base) { return base; }
+ Operand ToType(Operand base) {
+ switch (base.kind()) {
+ case Operand::MEM_REG_DISP:
+ return Operand(Register::FromCode(base.base()),
+ base.disp() + sizeof(void*));
+
+ case Operand::MEM_SCALE:
+ return Operand(Register::FromCode(base.base()),
+ Register::FromCode(base.index()), base.scale(),
+ base.disp() + sizeof(void*));
+
+ default:
+ MOZ_CRASH("unexpected operand kind");
+ }
+ }
+ Address ToType(Address base) { return ToType(Operand(base)).toAddress(); }
+ BaseIndex ToType(BaseIndex base) {
+ return ToType(Operand(base)).toBaseIndex();
+ }
+
+ template <typename T>
+ void add64FromMemory(const T& address, Register64 dest) {
+ addl(Operand(LowWord(address)), dest.low);
+ adcl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void sub64FromMemory(const T& address, Register64 dest) {
+ subl(Operand(LowWord(address)), dest.low);
+ sbbl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void and64FromMemory(const T& address, Register64 dest) {
+ andl(Operand(LowWord(address)), dest.low);
+ andl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void or64FromMemory(const T& address, Register64 dest) {
+ orl(Operand(LowWord(address)), dest.low);
+ orl(Operand(HighWord(address)), dest.high);
+ }
+ template <typename T>
+ void xor64FromMemory(const T& address, Register64 dest) {
+ xorl(Operand(LowWord(address)), dest.low);
+ xorl(Operand(HighWord(address)), dest.high);
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // X86/X64-common interface.
+ /////////////////////////////////////////////////////////////////
+ void storeValue(ValueOperand val, Operand dest) {
+ movl(val.payloadReg(), ToPayload(dest));
+ movl(val.typeReg(), ToType(dest));
+ }
+ void storeValue(ValueOperand val, const Address& dest) {
+ storeValue(val, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(JSValueType type, Register reg, const T& dest) {
+ storeTypeTag(ImmTag(JSVAL_TYPE_TO_TAG(type)), Operand(dest));
+ storePayload(reg, Operand(dest));
+ }
+ template <typename T>
+ void storeValue(const Value& val, const T& dest) {
+ storeTypeTag(ImmTag(val.toNunboxTag()), Operand(dest));
+ storePayload(val, Operand(dest));
+ }
+ void storeValue(ValueOperand val, BaseIndex dest) {
+ storeValue(val, Operand(dest));
+ }
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ MOZ_ASSERT(src.base != temp);
+ MOZ_ASSERT(dest.base != temp);
+
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
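+  // Private values are stored with a zero type word, so they look like
+  // doubles to the GC and are never traced.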
+ void storePrivateValue(Register src, const Address& dest) {
+ store32(Imm32(0), ToType(dest));
+ store32(src, ToPayload(dest));
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ store32(Imm32(0), ToType(dest));
+ movl(imm, Operand(ToPayload(dest)));
+ }
+ void loadValue(Operand src, ValueOperand val) {
+ Operand payload = ToPayload(src);
+ Operand type = ToType(src);
+
+    // Ensure that loading the payload does not clobber the base register
+    // (which points at the Value in memory) or the index register.
+ Register baseReg = Register::FromCode(src.base());
+ Register indexReg = (src.kind() == Operand::MEM_SCALE)
+ ? Register::FromCode(src.index())
+ : InvalidReg;
+
+ // If we have a BaseIndex that uses both result registers, first compute
+ // the address and then load the Value from there.
+ if ((baseReg == val.payloadReg() && indexReg == val.typeReg()) ||
+ (baseReg == val.typeReg() && indexReg == val.payloadReg())) {
+ computeEffectiveAddress(src, val.scratchReg());
+ loadValue(Address(val.scratchReg(), 0), val);
+ return;
+ }
+
+ if (baseReg == val.payloadReg() || indexReg == val.payloadReg()) {
+ MOZ_ASSERT(baseReg != val.typeReg());
+ MOZ_ASSERT(indexReg != val.typeReg());
+
+ movl(type, val.typeReg());
+ movl(payload, val.payloadReg());
+ } else {
+ MOZ_ASSERT(baseReg != val.payloadReg());
+ MOZ_ASSERT(indexReg != val.payloadReg());
+
+ movl(payload, val.payloadReg());
+ movl(type, val.typeReg());
+ }
+ }
+ void loadValue(Address src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadValue(const BaseIndex& src, ValueOperand val) {
+ loadValue(Operand(src), val);
+ }
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+ void tagValue(JSValueType type, Register payload, ValueOperand dest) {
+ MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+ if (payload != dest.payloadReg()) {
+ movl(payload, dest.payloadReg());
+ }
+ movl(ImmType(type), dest.typeReg());
+ }
+ void pushValue(ValueOperand val) {
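+    // Push the tag first so the payload ends up at the lower address,
+    // matching the payload-then-tag memory layout of a Value.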
+ push(val.typeReg());
+ push(val.payloadReg());
+ }
+ void popValue(ValueOperand val) {
+ pop(val.payloadReg());
+ pop(val.typeReg());
+ }
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isGCThing()) {
+ push(ImmGCPtr(val.toGCThing()));
+ } else {
+ push(Imm32(val.toNunboxPayload()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ push(reg);
+ }
+ void pushValue(const Address& addr) {
+ push(tagOf(addr));
+ push(payloadOfAfterStackPush(addr));
+ }
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ push(tagOf(addr));
+ push(payloadOfAfterStackPush(addr));
+ }
+ void push64(Register64 src) {
+ push(src.high);
+ push(src.low);
+ }
+ void pop64(Register64 dest) {
+ pop(dest.low);
+ pop(dest.high);
+ }
+ void storePayload(const Value& val, Operand dest) {
+ if (val.isGCThing()) {
+ movl(ImmGCPtr(val.toGCThing()), ToPayload(dest));
+ } else {
+ movl(Imm32(val.toNunboxPayload()), ToPayload(dest));
+ }
+ }
+ void storePayload(Register src, Operand dest) { movl(src, ToPayload(dest)); }
+ void storeTypeTag(ImmTag tag, Operand dest) { movl(tag, ToType(dest)); }
+
+ void movePtr(Register src, Register dest) { movl(src, dest); }
+ void movePtr(Register src, const Operand& dest) { movl(src, dest); }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+ }
+
+ Condition testUndefined(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testDouble(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tag, ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testNull(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testString(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testObject(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testNumber(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JS::detail::ValueUpperInclNumberTag));
+ return cond == Equal ? BelowOrEqual : Above;
+ }
+ Condition testGCThing(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testGCThing(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+ Condition testMagic(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testMagic(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testPrimitive(Condition cond, Register tag) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag));
+ return cond == Equal ? Below : AboveOrEqual;
+ }
+ Condition testError(Condition cond, Register tag) {
+ return testMagic(cond, tag);
+ }
+ Condition testBoolean(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(Operand(ToType(address)), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testInt32(cond, Operand(address));
+ }
+ Condition testObject(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testObject(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testObject(cond, Operand(address));
+ }
+ Condition testDouble(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testDouble(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ return testDouble(cond, Operand(address));
+ }
+
+ Condition testUndefined(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testUndefined(Condition cond, const Address& addr) {
+ return testUndefined(cond, Operand(addr));
+ }
+ Condition testNull(Condition cond, const Operand& operand) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(ToType(operand), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testNull(Condition cond, const Address& addr) {
+ return testNull(cond, Operand(addr));
+ }
+
+ Condition testUndefined(Condition cond, const ValueOperand& value) {
+ return testUndefined(cond, value.typeReg());
+ }
+ Condition testBoolean(Condition cond, const ValueOperand& value) {
+ return testBoolean(cond, value.typeReg());
+ }
+ Condition testInt32(Condition cond, const ValueOperand& value) {
+ return testInt32(cond, value.typeReg());
+ }
+ Condition testDouble(Condition cond, const ValueOperand& value) {
+ return testDouble(cond, value.typeReg());
+ }
+ Condition testNull(Condition cond, const ValueOperand& value) {
+ return testNull(cond, value.typeReg());
+ }
+ Condition testString(Condition cond, const ValueOperand& value) {
+ return testString(cond, value.typeReg());
+ }
+ Condition testSymbol(Condition cond, const ValueOperand& value) {
+ return testSymbol(cond, value.typeReg());
+ }
+ Condition testBigInt(Condition cond, const ValueOperand& value) {
+ return testBigInt(cond, value.typeReg());
+ }
+ Condition testObject(Condition cond, const ValueOperand& value) {
+ return testObject(cond, value.typeReg());
+ }
+ Condition testMagic(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value.typeReg());
+ }
+ Condition testError(Condition cond, const ValueOperand& value) {
+ return testMagic(cond, value);
+ }
+ Condition testNumber(Condition cond, const ValueOperand& value) {
+ return testNumber(cond, value.typeReg());
+ }
+ Condition testGCThing(Condition cond, const ValueOperand& value) {
+ return testGCThing(cond, value.typeReg());
+ }
+ Condition testPrimitive(Condition cond, const ValueOperand& value) {
+ return testPrimitive(cond, value.typeReg());
+ }
+
+ Condition testUndefined(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_UNDEFINED));
+ return cond;
+ }
+ Condition testNull(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_NULL));
+ return cond;
+ }
+ Condition testBoolean(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BOOLEAN));
+ return cond;
+ }
+ Condition testString(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testString(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_STRING));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testSymbol(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_SYMBOL));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, const Address& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testBigInt(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_BIGINT));
+ return cond;
+ }
+ Condition testInt32(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_INT32));
+ return cond;
+ }
+ Condition testObject(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_OBJECT));
+ return cond;
+ }
+ Condition testDouble(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_CLEAR));
+ return actual;
+ }
+ Condition testMagic(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JSVAL_TAG_MAGIC));
+ return cond;
+ }
+ Condition testGCThing(Condition cond, const BaseIndex& address) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ cmp32(tagOf(address), ImmTag(JS::detail::ValueLowerInclGCThingTag));
+ return cond == Equal ? AboveOrEqual : Below;
+ }
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testNull(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest) {
+ cond = testObject(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest) {
+ cond = testUndefined(cond, value);
+ emitSet(cond, dest);
+ }
+
+ void cmpPtr(Register lhs, const ImmWord rhs) { cmpl(Imm32(rhs.value), lhs); }
+ void cmpPtr(Register lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(Register lhs, const ImmGCPtr rhs) { cmpl(rhs, lhs); }
+ void cmpPtr(const Operand& lhs, Imm32 rhs) { cmp32(lhs, rhs); }
+ void cmpPtr(const Operand& lhs, const ImmWord rhs) {
+ cmp32(lhs, Imm32(rhs.value));
+ }
+ void cmpPtr(const Operand& lhs, const ImmPtr imm) {
+ cmpPtr(lhs, ImmWord(uintptr_t(imm.value)));
+ }
+ void cmpPtr(const Operand& lhs, const ImmGCPtr rhs) { cmpl(rhs, lhs); }
+ void cmpPtr(const Address& lhs, Register rhs) { cmpPtr(Operand(lhs), rhs); }
+ void cmpPtr(const Operand& lhs, Register rhs) { cmp32(lhs, rhs); }
+ void cmpPtr(const Address& lhs, const ImmWord rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(const Address& lhs, const ImmPtr rhs) {
+ cmpPtr(lhs, ImmWord(uintptr_t(rhs.value)));
+ }
+ void cmpPtr(const Address& lhs, const ImmGCPtr rhs) {
+ cmpPtr(Operand(lhs), rhs);
+ }
+ void cmpPtr(Register lhs, Register rhs) { cmp32(lhs, rhs); }
+ void testPtr(Register lhs, Register rhs) { test32(lhs, rhs); }
+ void testPtr(Register lhs, Imm32 rhs) { test32(lhs, rhs); }
+ void testPtr(Register lhs, ImmWord rhs) { test32(lhs, Imm32(rhs.value)); }
+ void testPtr(const Operand& lhs, Imm32 rhs) { test32(lhs, rhs); }
+ void testPtr(const Operand& lhs, ImmWord rhs) {
+ test32(lhs, Imm32(rhs.value));
+ }
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+
+ void movePtr(ImmWord imm, Register dest) { movl(Imm32(imm.value), dest); }
+ void movePtr(ImmPtr imm, Register dest) { movl(imm, dest); }
+ void movePtr(wasm::SymbolicAddress imm, Register dest) { mov(imm, dest); }
+ void movePtr(ImmGCPtr imm, Register dest) { movl(imm, dest); }
+ FaultingCodeOffset loadPtr(const Address& address, Register dest) {
+ FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
+ movl(Operand(address), dest);
+ return fco;
+ }
+ void loadPtr(const Operand& src, Register dest) { movl(src, dest); }
+ FaultingCodeOffset loadPtr(const BaseIndex& src, Register dest) {
+ FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
+ movl(Operand(src), dest);
+ return fco;
+ }
+ void loadPtr(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ void loadPrivate(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+ void load32(AbsoluteAddress address, Register dest) {
+ movl(Operand(address), dest);
+ }
+ FaultingCodeOffsetPair load64(const Address& address, Register64 dest) {
+ FaultingCodeOffset fco1, fco2;
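+    // If the base register aliases dest.low, loading the low word first
+    // would clobber the base before the high word is read, so load the
+    // high word first in that case.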
+ bool highBeforeLow = address.base == dest.low;
+ if (highBeforeLow) {
+ fco1 = FaultingCodeOffset(currentOffset());
+ movl(Operand(HighWord(address)), dest.high);
+ fco2 = FaultingCodeOffset(currentOffset());
+ movl(Operand(LowWord(address)), dest.low);
+ } else {
+ fco1 = FaultingCodeOffset(currentOffset());
+ movl(Operand(LowWord(address)), dest.low);
+ fco2 = FaultingCodeOffset(currentOffset());
+ movl(Operand(HighWord(address)), dest.high);
+ }
+ return FaultingCodeOffsetPair(fco1, fco2);
+ }
+ FaultingCodeOffsetPair load64(const BaseIndex& address, Register64 dest) {
+ // If you run into this, relax your register allocation constraints.
+ MOZ_RELEASE_ASSERT(
+ !((address.base == dest.low || address.base == dest.high) &&
+ (address.index == dest.low || address.index == dest.high)));
+ FaultingCodeOffset fco1, fco2;
+ bool highBeforeLow = address.base == dest.low || address.index == dest.low;
+ if (highBeforeLow) {
+ fco1 = FaultingCodeOffset(currentOffset());
+ movl(Operand(HighWord(address)), dest.high);
+ fco2 = FaultingCodeOffset(currentOffset());
+ movl(Operand(LowWord(address)), dest.low);
+ } else {
+ fco1 = FaultingCodeOffset(currentOffset());
+ movl(Operand(LowWord(address)), dest.low);
+ fco2 = FaultingCodeOffset(currentOffset());
+ movl(Operand(HighWord(address)), dest.high);
+ }
+ return FaultingCodeOffsetPair(fco1, fco2);
+ }
+ template <typename T>
+ void load64Unaligned(const T& address, Register64 dest) {
+ load64(address, dest);
+ }
+ template <typename T>
+ void storePtr(ImmWord imm, T address) {
+ movl(Imm32(imm.value), Operand(address));
+ }
+ template <typename T>
+ void storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+ }
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address) {
+ movl(imm, Operand(address));
+ }
+ FaultingCodeOffset storePtr(Register src, const Address& address) {
+ FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
+ movl(src, Operand(address));
+ return fco;
+ }
+ FaultingCodeOffset storePtr(Register src, const BaseIndex& address) {
+ FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
+ movl(src, Operand(address));
+ return fco;
+ }
+ void storePtr(Register src, const Operand& dest) { movl(src, dest); }
+ void storePtr(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store32(Register src, AbsoluteAddress address) {
+ movl(src, Operand(address));
+ }
+ void store16(Register src, AbsoluteAddress address) {
+ movw(src, Operand(address));
+ }
+ template <typename T>
+ FaultingCodeOffsetPair store64(Register64 src, const T& address) {
+ FaultingCodeOffset fco1 = FaultingCodeOffset(currentOffset());
+ movl(src.low, Operand(LowWord(address)));
+ FaultingCodeOffset fco2 = FaultingCodeOffset(currentOffset());
+ movl(src.high, Operand(HighWord(address)));
+ return FaultingCodeOffsetPair(fco1, fco2);
+ }
+ void store64(Imm64 imm, Address address) {
+ movl(imm.low(), Operand(LowWord(address)));
+ movl(imm.hi(), Operand(HighWord(address)));
+ }
+ template <typename S, typename T>
+ void store64Unaligned(const S& src, const T& dest) {
+ store64(src, dest);
+ }
+
+ void setStackArg(Register reg, uint32_t arg) {
+ movl(reg, Operand(esp, arg * sizeof(intptr_t)));
+ }
+
+ void boxDouble(FloatRegister src, const ValueOperand& dest,
+ FloatRegister temp) {
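+    // A boxed double stores its raw IEEE-754 bits: the low 32 bits become
+    // the payload and the high 32 bits the type word. SSE4.1 can extract
+    // lane 1 directly; otherwise shift it down via temp.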
+ if (Assembler::HasSSE41()) {
+ vmovd(src, dest.payloadReg());
+ vpextrd(1, src, dest.typeReg());
+ } else {
+ vmovd(src, dest.payloadReg());
+ if (src != temp) {
+ moveDouble(src, temp);
+ }
+ vpsrldq(Imm32(4), temp, temp);
+ vmovd(temp, dest.typeReg());
+ }
+ }
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest) {
+ if (src != dest.payloadReg()) {
+ movl(src, dest.payloadReg());
+ }
+ movl(ImmType(type), dest.typeReg());
+ }
+
+ void unboxNonDouble(const ValueOperand& src, Register dest, JSValueType type,
+ Register scratch = InvalidReg) {
+ unboxNonDouble(Operand(src.typeReg()), Operand(src.payloadReg()), dest,
+ type, scratch);
+ }
+ void unboxNonDouble(const Operand& tag, const Operand& payload, Register dest,
+ JSValueType type, Register scratch = InvalidReg) {
+ auto movPayloadToDest = [&]() {
+ if (payload.kind() != Operand::REG || !payload.containsReg(dest)) {
+ movl(payload, dest);
+ }
+ };
+ if (!JitOptions.spectreValueMasking) {
+ movPayloadToDest();
+ return;
+ }
+
+ // Spectre mitigation: We zero the payload if the tag does not match the
+ // expected type and if this is a pointer type.
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ movPayloadToDest();
+ return;
+ }
+
+ if (!tag.containsReg(dest) && !payload.containsReg(dest)) {
+ // We zero the destination register and move the payload into it if
+ // the tag corresponds to the given type.
+ xorl(dest, dest);
+ cmpl(Imm32(JSVAL_TYPE_TO_TAG(type)), tag);
+ cmovCCl(Condition::Equal, payload, dest);
+ return;
+ }
+
+ if (scratch == InvalidReg || scratch == dest || tag.containsReg(scratch) ||
+ payload.containsReg(scratch)) {
+ // UnboxedLayout::makeConstructorCode calls extractObject with a
+ // scratch register which aliases the tag register, thus we cannot
+ // assert the above condition.
+ scratch = InvalidReg;
+ }
+
+ // The destination register aliases one of the operands. We create a
+ // zero value either in a scratch register or on the stack and use it
+ // to reset the destination register after reading both the tag and the
+ // payload.
+ Operand zero(Address(esp, 0));
+ if (scratch == InvalidReg) {
+ push(Imm32(0));
+ } else {
+ xorl(scratch, scratch);
+ zero = Operand(scratch);
+ }
+ cmpl(Imm32(JSVAL_TYPE_TO_TAG(type)), tag);
+ movPayloadToDest();
+ cmovCCl(Condition::NotEqual, zero, dest);
+ if (scratch == InvalidReg) {
+ addl(Imm32(sizeof(void*)), esp);
+ }
+ }
+ void unboxNonDouble(const Address& src, Register dest, JSValueType type) {
+ unboxNonDouble(tagOf(src), payloadOf(src), dest, type);
+ }
+ void unboxNonDouble(const BaseIndex& src, Register dest, JSValueType type) {
+ unboxNonDouble(tagOf(src), payloadOf(src), dest, type);
+ }
+ void unboxInt32(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxInt32(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxInt32(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_INT32);
+ }
+ void unboxBoolean(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxBoolean(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxBoolean(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BOOLEAN);
+ }
+ void unboxString(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxString(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+ }
+ void unboxSymbol(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxSymbol(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+ }
+ void unboxBigInt(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxBigInt(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+ }
+ void unboxObject(const ValueOperand& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const Address& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ // Due to Spectre mitigation logic (see Value.h), if the value is an Object
+ // then this yields the object; otherwise it yields zero (null), as desired.
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ template <typename T>
+ void unboxDouble(const T& src, FloatRegister dest) {
+ loadDouble(Operand(src), dest);
+ }
+ void unboxDouble(const ValueOperand& src, FloatRegister dest) {
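+    // Inverse of boxDouble: reassemble the 64-bit IEEE-754 bits from the
+    // 32-bit payload (low half) and type (high half) registers.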
+ if (Assembler::HasSSE41()) {
+ vmovd(src.payloadReg(), dest);
+ vpinsrd(1, src.typeReg(), dest, dest);
+ } else {
+ ScratchDoubleScope fpscratch(asMasm());
+ vmovd(src.payloadReg(), dest);
+ vmovd(src.typeReg(), fpscratch);
+ vunpcklps(fpscratch, dest, dest);
+ }
+ }
+ void unboxDouble(const Operand& payload, const Operand& type,
+ Register scratch, FloatRegister dest) {
+ if (Assembler::HasSSE41()) {
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vpinsrd(1, scratch, dest, dest);
+ } else {
+ ScratchDoubleScope fpscratch(asMasm());
+ movl(payload, scratch);
+ vmovd(scratch, dest);
+ movl(type, scratch);
+ vmovd(scratch, fpscratch);
+ vunpcklps(fpscratch, dest, dest);
+ }
+ }
+ inline void unboxValue(const ValueOperand& src, AnyRegister dest,
+ JSValueType type);
+
+ // See comment in MacroAssembler-x64.h.
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ movl(payloadOf(src), dest);
+ }
+
+ void unboxWasmAnyRefGCThingForGCBarrier(const Address& src, Register dest) {
+ movl(ImmWord(wasm::AnyRef::GCThingMask), dest);
+ andl(Operand(src), dest);
+ }
+
+ void getWasmAnyRefGCThingChunk(Register src, Register dest) {
+ MOZ_ASSERT(src != dest);
+ movl(ImmWord(wasm::AnyRef::GCThingChunkMask), dest);
+ andl(src, dest);
+ }
+
+ void notBoolean(const ValueOperand& val) { xorl(Imm32(1), val.payloadReg()); }
+
+ template <typename T>
+ void fallibleUnboxPtrImpl(const T& src, Register dest, JSValueType type,
+ Label* fail);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address, Register dest) {
+ unboxObject(address, dest);
+ return dest;
+ }
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_OBJECT, scratch);
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxNonDouble(value, value.payloadReg(), JSVAL_TYPE_SYMBOL, scratch);
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch) {
+ movl(tagOf(address), scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ return value.typeReg();
+ }
+
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true) {
+ convertDoubleToInt32(src, dest, fail, negativeZeroCheck);
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.payloadReg(), dest);
+ }
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.payloadReg(), dest);
+ }
+
+ void loadConstantDouble(double d, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void loadConstantSimd128Int(const SimdConstant& v, FloatRegister dest);
+ void loadConstantSimd128Float(const SimdConstant& v, FloatRegister dest);
+ void vpaddbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpadddSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmullwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmulldSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddusbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpaddswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpadduswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubusbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpsubuswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpminudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxsbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxubSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxuwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxsdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaxudSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpandSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpxorSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vporSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vaddpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vaddpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vsubpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vsubpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vdivpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vdivpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vmulpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vmulpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vandpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vminpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpacksswbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackuswbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackssdwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpackusdwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpunpckldqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vunpcklpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpshufbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vptestSimd128(const SimdConstant& v, FloatRegister lhs);
+ void vpmaddwdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtbSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtwSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpeqdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpcmpgtdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpeqpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpneqpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpltpsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmplepsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpgepsSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpeqpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpneqpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmpltpdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vcmplepdSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmaddubswSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+ void vpmuludqSimd128(const SimdConstant& v, FloatRegister lhs,
+ FloatRegister dest);
+
+ Condition testInt32Truthy(bool truthy, const ValueOperand& operand) {
+ test32(operand.payloadReg(), operand.payloadReg());
+ return truthy ? NonZero : Zero;
+ }
+ Condition testStringTruthy(bool truthy, const ValueOperand& value);
+ Condition testBigIntTruthy(bool truthy, const ValueOperand& value);
+
+ template <typename T>
+ inline void loadInt32OrDouble(const T& src, FloatRegister dest);
+
+ template <typename T>
+ inline void loadUnboxedValue(const T& src, MIRType type, AnyRegister dest);
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
+ JSValueType) {
+ switch (nbytes) {
+ case 4:
+ storePtr(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToDouble(Register src, FloatRegister dest);
+
+ // Note: this function clobbers the source register.
+ inline void convertUInt32ToFloat32(Register src, FloatRegister dest);
+
+ void incrementInt32Value(const Address& addr) {
+ addl(Imm32(1), payloadOf(addr));
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ public:
+ // Used from within an Exit frame to handle a pending exception.
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerX86 MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_MacroAssembler_x86_h */
diff --git a/js/src/jit/x86/SharedICHelpers-x86-inl.h b/js/src/jit/x86/SharedICHelpers-x86-inl.h
new file mode 100644
index 0000000000..2ab41c287a
--- /dev/null
+++ b/js/src/jit/x86/SharedICHelpers-x86-inl.h
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICHelpers_x86_inl_h
+#define jit_x86_SharedICHelpers_x86_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ // We assume during this that R0 and R1 have been pushed.
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.movl(FramePointer, eax);
+ masm.subl(StackPointer, eax);
+ masm.subl(Imm32(argSize), eax);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(eax, frameSizeAddr);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(ICTailCallReg);
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+#ifdef DEBUG
+ // Compute frame size. Because the return address is still on the stack,
+ // this is:
+ //
+ // FramePointer
+ // - StackPointer
+ // - sizeof(return address)
+
+ masm.movl(FramePointer, scratch);
+ masm.subl(StackPointer, scratch);
+ masm.subl(Imm32(sizeof(void*)), scratch); // Return address.
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Push the return address that's currently on top of the stack.
+ masm.Push(Operand(StackPointer, 0));
+
+ // Replace the original return address with the frame descriptor.
+ masm.storePtr(ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, sizeof(uintptr_t)));
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ masm.Push(ICStubReg);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICHelpers_x86_inl_h */
diff --git a/js/src/jit/x86/SharedICHelpers-x86.h b/js/src/jit/x86/SharedICHelpers-x86.h
new file mode 100644
index 0000000000..07d2ae8a6f
--- /dev/null
+++ b/js/src/jit/x86/SharedICHelpers-x86.h
@@ -0,0 +1,70 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICHelpers_x86_h
+#define jit_x86_SharedICHelpers_x86_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from stack top to the top Value inside an IC stub (this is the
+// return address).
+static const size_t ICStackValueOffset = sizeof(void*);
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ masm.Pop(ICTailCallReg);
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ masm.Push(ICTailCallReg);
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Call the stubcode.
+ masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.ret(); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ Address stubAddr(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP);
+ masm.loadPtr(stubAddr, ICStubReg);
+
+ masm.mov(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // The return address is on top of the stack, followed by the frame
+ // descriptor. Use a pop instruction to overwrite the frame descriptor
+ // with the return address. Note that pop increments the stack pointer
+ // before computing the address.
+ masm.Pop(Operand(StackPointer, 0));
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ masm.guardedCallPreBarrier(addr, type);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ masm.jmp(Operand(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICHelpers_x86_h */
diff --git a/js/src/jit/x86/SharedICRegisters-x86.h b/js/src/jit/x86/SharedICRegisters-x86.h
new file mode 100644
index 0000000000..44edb5288f
--- /dev/null
+++ b/js/src/jit/x86/SharedICRegisters-x86.h
@@ -0,0 +1,36 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_x86_SharedICRegisters_x86_h
+#define jit_x86_SharedICRegisters_x86_h
+
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#include "jit/x86/Assembler-x86.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2
+static constexpr ValueOperand R0(ecx, edx);
+static constexpr ValueOperand R1(eax, ebx);
+static constexpr ValueOperand R2(esi, edi);
+
+// ICTailCallReg and ICStubReg reuse
+// registers from R2.
+static constexpr Register ICTailCallReg = esi;
+static constexpr Register ICStubReg = edi;
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = xmm0;
+static constexpr FloatRegister FloatReg1 = xmm1;
+static constexpr FloatRegister FloatReg2 = xmm2;
+static constexpr FloatRegister FloatReg3 = xmm3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_x86_SharedICRegisters_x86_h */
diff --git a/js/src/jit/x86/Trampoline-x86.cpp b/js/src/jit/x86/Trampoline-x86.cpp
new file mode 100644
index 0000000000..c06ae29ac9
--- /dev/null
+++ b/js/src/jit/x86/Trampoline-x86.cpp
@@ -0,0 +1,735 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/BaselineJIT.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "jit/x86/SharedICHelpers-x86.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/JSScript-inl.h"
+
+using mozilla::IsPowerOfTwo;
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+enum EnterJitEbpArgumentOffset {
+ ARG_JITCODE = 2 * sizeof(void*),
+ ARG_ARGC = 3 * sizeof(void*),
+ ARG_ARGV = 4 * sizeof(void*),
+ ARG_STACKFRAME = 5 * sizeof(void*),
+ ARG_CALLEETOKEN = 6 * sizeof(void*),
+ ARG_SCOPECHAIN = 7 * sizeof(void*),
+ ARG_STACKVALUES = 8 * sizeof(void*),
+ ARG_RESULT = 9 * sizeof(void*)
+};
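+
+// A sketch of the cdecl frame these offsets index into, given the prologue
+// in generateEnterJIT below (push ebp; movl esp, ebp) and 4-byte pointers:
+//   [ebp +  0] saved ebp
+//   [ebp +  4] return address
+//   [ebp +  8] ARG_JITCODE (the first C++ argument)
+//   [ebp + 12] ARG_ARGC
+//   ... one word per slot, up to ARG_RESULT at [ebp + 36].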
+
+// Generates a trampoline for calling Jit compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard cdecl
+// calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ masm.assertStackAlignment(ABIStackAlignment,
+ -int32_t(sizeof(uintptr_t)) /* return address */);
+
+ // Save old stack frame pointer, set new stack frame pointer.
+ masm.push(ebp);
+ masm.movl(esp, ebp);
+
+ // Save non-volatile registers. These must be saved by the trampoline,
+ // rather than the JIT'd code, because they are scanned by the conservative
+ // scanner.
+ masm.push(ebx);
+ masm.push(esi);
+ masm.push(edi);
+
+ // Load the number of values to be copied (argc) into eax
+ masm.loadPtr(Address(ebp, ARG_ARGC), eax);
+
+ // If we are constructing, that also needs to include newTarget
+ {
+ Label noNewTarget;
+ masm.loadPtr(Address(ebp, ARG_CALLEETOKEN), edx);
+ masm.branchTest32(Assembler::Zero, edx,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.addl(Imm32(1), eax);
+
+ masm.bind(&noNewTarget);
+ }
+
+  // eax <- 8*numValues, eax is now the offset between argv and the last value.
+ masm.shll(Imm32(3), eax);
+
+ // Guarantee stack alignment of Jit frames.
+ //
+ // This code compensates for the offset created by the copy of the vector of
+ // arguments, such that the jit frame will be aligned once the return
+ // address is pushed on the stack.
+ //
+ // In the computation of the offset, we omit the size of the JitFrameLayout
+ // which is pushed on the stack, as the JitFrameLayout size is a multiple of
+ // the JitStackAlignment.
+ masm.movl(esp, ecx);
+ masm.subl(eax, ecx);
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+
+ // ecx = ecx & 15, holds alignment.
+ masm.andl(Imm32(JitStackAlignment - 1), ecx);
+ masm.subl(ecx, esp);
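+
+  // As a worked example of the fix-up above, assume JitStackAlignment == 16
+  // (as the "& 15" comment below implies) and three 8-byte values to copy
+  // (eax == 24): ecx ends up holding (esp - 24) & 15, exactly the number of
+  // bytes esp must move down so that it is 16-byte aligned again once the
+  // 24 bytes of arguments have been pushed.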
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+ // ebx = argv --argv pointer is in ebp + 16
+ masm.loadPtr(Address(ebp, ARG_ARGV), ebx);
+
+  // eax = argv + 8*argc --eax now points one value past the last argument
+ masm.addl(ebx, eax);
+
+ // while (eax > ebx) --while still looping through arguments
+ {
+ Label header, footer;
+ masm.bind(&header);
+
+ masm.cmp32(eax, ebx);
+ masm.j(Assembler::BelowOrEqual, &footer);
+
+ // eax -= 8 --move to previous argument
+ masm.subl(Imm32(8), eax);
+
+ // Push what eax points to on stack, a Value is 2 words
+ masm.push(Operand(eax, 4));
+ masm.push(Operand(eax, 0));
+
+ masm.jmp(&header);
+ masm.bind(&footer);
+ }
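+
+  // Each 64-bit Value is copied as two 32-bit words, high word (offset 4)
+  // first, so its in-memory layout is preserved on the downward-growing
+  // stack.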
+
+  // Load the number of actual arguments. The |result| slot doubles as an
+  // in-parameter here: it initially holds the boxed actual argc, so we can
+  // read it back without adding an extra argument to EnterJitCode.
+ masm.mov(Operand(ebp, ARG_RESULT), eax);
+ masm.unboxInt32(Address(eax, 0x0), eax);
+
+ // Push the callee token.
+ masm.push(Operand(ebp, ARG_CALLEETOKEN));
+
+ // Load the InterpreterFrame address into the OsrFrameReg.
+ // This address is also used for setting the constructing bit on all paths.
+ masm.loadPtr(Address(ebp, ARG_STACKFRAME), OsrFrameReg);
+
+ // Push the descriptor.
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, eax, eax);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(ebp));
+ regs.take(OsrFrameReg);
+ regs.take(ReturnReg);
+
+ Register scratch = regs.takeAny();
+
+ Label notOsr;
+ masm.branchTestPtr(Assembler::Zero, OsrFrameReg, OsrFrameReg, &notOsr);
+
+ Register numStackValues = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_STACKVALUES), numStackValues);
+
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(ebp, ARG_JITCODE), jitcode);
+
+ // Push return address.
+ masm.mov(&returnLabel, scratch);
+ masm.push(scratch);
+
+ // Frame prologue.
+ masm.push(ebp);
+ masm.mov(esp, ebp);
+
+ // Reserve frame.
+ masm.subPtr(Imm32(BaselineFrame::Size()), esp);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.touchFrameValues(numStackValues, scratch, framePtrScratch);
+ masm.mov(esp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.mov(numStackValues, scratch);
+ masm.shll(Imm32(3), scratch);
+ masm.subPtr(scratch, esp);
+
+ // Enter exit frame.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+ masm.push(Imm32(0)); // Fake return address.
+ masm.push(FramePointer);
+ // No GC things to mark on the stack, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.push(jitcode);
+
+ using Fn = bool (*)(BaselineFrame* frame, InterpreterFrame* interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ masm.pop(jitcode);
+
+ MOZ_ASSERT(jitcode != ReturnReg);
+
+ Label error;
+ masm.addPtr(Imm32(ExitFrameLayout::SizeWithFooter()), esp);
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(ebp, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: frame epilogue, load error value, discard return address and return.
+ masm.bind(&error);
+ masm.mov(ebp, esp);
+ masm.pop(ebp);
+ masm.addPtr(Imm32(sizeof(uintptr_t)), esp); // Return address.
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ masm.loadPtr(Address(ebp, ARG_SCOPECHAIN), R1.scratchReg());
+ }
+
+  // The call instruction will push the return address and the callee prologue
+  // will push the frame pointer, so we assert that the stack will be aligned
+  // once both words have been pushed.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ /***************************************************************
+ Call passed-in code, get return value and fill in the
+ passed in return value pointer
+ ***************************************************************/
+ masm.call(Address(ebp, ARG_JITCODE));
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Restore the stack pointer so the stack looks like this:
+ // +20 ... arguments ...
+ // +16 <return>
+ // +12 ebp <- %ebp pointing here.
+ // +8 ebx
+ // +4 esi
+ // +0 edi <- %esp pointing here.
+ masm.lea(Operand(ebp, -int32_t(3 * sizeof(void*))), esp);
+
+ // Store the return value.
+ masm.loadPtr(Address(ebp, ARG_RESULT), eax);
+ masm.storeValue(JSReturnOperand, Operand(eax, 0));
+
+ /**************************************************************
+ Return stack and registers to correct state
+ **************************************************************/
+
+ // Restore non-volatile registers
+ masm.pop(edi);
+ masm.pop(esi);
+ masm.pop(ebx);
+
+ // Restore old stack frame pointer
+ masm.pop(ebp);
+ masm.ret();
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+// Push AllRegs in a way that is compatible with RegisterDump, regardless of
+// what PushRegsInMask might do to reduce the set size.
+static void DumpAllRegs(MacroAssembler& masm) {
+#ifdef ENABLE_WASM_SIMD
+ masm.PushRegsInMask(AllRegs);
+#else
+ // When SIMD isn't supported, PushRegsInMask reduces the set of float
+ // registers to be double-sized, while the RegisterDump expects each of
+ // the float registers to have the maximal possible size
+ // (Simd128DataSize). To work around this, we just spill the double
+ // registers by hand here, using the register dump offset directly.
+ for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more();
+ ++iter) {
+ masm.Push(*iter);
+ }
+
+ masm.reserveStack(sizeof(RegisterDump::FPUArray));
+ for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more();
+ ++iter) {
+ FloatRegister reg = *iter;
+ Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
+ masm.storeDouble(reg, spillAddress);
+ }
+#endif
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+ // We do the minimum amount of work in assembly and shunt the rest
+ // off to InvalidationBailout. Assembly does:
+ //
+ // - Push the machine state onto the stack.
+ // - Call the InvalidationBailout routine with the stack pointer.
+ // - Now that the frame has been bailed out, convert the invalidated
+ // frame into an exit frame.
+ // - Do the normal check-return-code-and-thunk-to-the-interpreter dance.
+
+ // Push registers such that we can access them from [base + code].
+ DumpAllRegs(masm);
+
+ masm.movl(esp, eax); // Argument to jit::InvalidationBailout.
+
+ // Make space for InvalidationBailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ebx);
+
+ using Fn = bool (*)(InvalidationBailoutStack* sp, BaselineBailoutInfo** info);
+ masm.setupUnalignedABICall(edx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(ecx); // Get bailoutInfo outparam.
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ masm.jmp(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- esp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.movl(esp, FramePointer); // Save %esp.
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, esi);
+
+ // Load the number of |undefined|s to push into %ecx.
+ masm.loadPtr(Address(ebp, RectifierFrameLayout::offsetOfCalleeToken()), eax);
+ masm.mov(eax, ecx);
+ masm.andl(Imm32(CalleeTokenMask), ecx);
+ masm.loadFunctionArgCount(ecx, ecx);
+
+ // The frame pointer and its padding are pushed on the stack.
+ // Including |this|, there are (|nformals| + 1) arguments to push to the
+ // stack. Then we push a JitFrameLayout. We compute the padding expressed
+ // in the number of extra |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+ static_assert(IsPowerOfTwo(JitStackValueAlignment),
+ "must have power of two for masm.andl to do its job");
+
+ masm.addl(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ ecx);
+
+ // Account for newTarget, if necessary.
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count an extra push");
+ masm.mov(eax, edx);
+ masm.andl(Imm32(CalleeToken_FunctionConstructing), edx);
+ masm.addl(edx, ecx);
+
+ masm.andl(Imm32(~(JitStackValueAlignment - 1)), ecx);
+ masm.subl(esi, ecx);
+ masm.subl(Imm32(1), ecx); // For |this|.
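+
+  // A worked example of the count above, assuming JitStackValueAlignment == 2
+  // (a 16-byte JitStackAlignment over 8-byte Values): with nformals == 3,
+  // argc == 1 and a non-constructing call, ecx goes 3 -> 5 -> 4 -> 3 -> 2,
+  // so two |undefined| values fill the missing formals while keeping the
+  // pushed Values (2 undef + 1 arg + |this| == 4) a multiple of the
+  // alignment.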
+
+ // Copy the number of actual arguments into edx.
+ masm.mov(esi, edx);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(ebx, edi));
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+ // '-- #esi ---'
+ //
+ // Rectifier frame:
+ // [ebp'] <- ebp [padding] <- esp [undef] [undef] [arg2] [arg1] [this]
+ // '--- #ecx ----' '-- #esi ---'
+ //
+ // [ [argc] [callee] [descr] [raddr] ]
+
+ // Push undefined.
+ {
+ Label undefLoopTop;
+ masm.bind(&undefLoopTop);
+
+ masm.push(ebx); // type(undefined);
+ masm.push(edi); // payload(undefined);
+ masm.subl(Imm32(1), ecx);
+ masm.j(Assembler::NonZero, &undefLoopTop);
+ }
+
+ // Get the topmost argument.
+ BaseIndex b(FramePointer, esi, TimesEight, sizeof(RectifierFrameLayout));
+ masm.lea(Operand(b), ecx);
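+  // ecx now holds FramePointer + argc * 8 + sizeof(RectifierFrameLayout),
+  // i.e. the address of the last supplied argument Value.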
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addl(Imm32(1), esi);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.push(Operand(ecx, sizeof(Value) / 2));
+ masm.push(Operand(ecx, 0x0));
+ masm.subl(Imm32(sizeof(Value)), ecx);
+ masm.subl(Imm32(1), esi);
+ masm.j(Assembler::NonZero, &copyLoopTop);
+ }
+
+ {
+ Label notConstructing;
+
+ masm.mov(eax, ebx);
+ masm.branchTest32(Assembler::Zero, ebx,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ BaseValueIndex src(FramePointer, edx,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+
+ masm.andl(Imm32(CalleeTokenMask), ebx);
+ masm.loadFunctionArgCount(ebx, ebx);
+
+ BaseValueIndex dst(esp, ebx, sizeof(Value));
+
+ ValueOperand newTarget(ecx, edi);
+
+ masm.loadValue(src, newTarget);
+ masm.storeValue(newTarget, dst);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Construct JitFrameLayout.
+ masm.push(eax); // callee token
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, edx, edx);
+
+ // Call the target function.
+ masm.andl(Imm32(CalleeTokenMask), eax);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(eax, eax);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(eax);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(eax, ebx, &noBaselineScript);
+ masm.callJitNoProfiler(ebx);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(eax, eax);
+ masm.callJitNoProfiler(eax);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push registers such that we can access them from [base + code].
+ DumpAllRegs(masm);
+
+ // The current stack pointer is the first argument to jit::Bailout.
+ masm.movl(esp, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, eax);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movl(esp, ebx);
+
+ // Call the bailout function.
+ using Fn = bool (*)(BailoutStack* sp, BaselineBailoutInfo** info);
+ masm.setupUnalignedABICall(ecx);
+ masm.passABIArg(eax);
+ masm.passABIArg(ebx);
+ masm.callWithABI<Fn, Bailout>(ABIType::General,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(ecx); // Get the bailoutInfo outparam.
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in ecx.
+ masm.jmp(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ VMFunctionId id, const VMFunctionData& f,
+ DynFn nativeFun, uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set must be a superset of Volatile register set.");
+
+ // The context is the first argument.
+ Register cxreg = regs.takeAny();
+
+ // Stack is:
+ // ... frame ...
+ // +8 [args]
+ // +4 descriptor
+ // +0 returnAddress
+ //
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), id);
+
+ // Reserve space for the outparameter.
+ masm.reserveVMFunctionOutParamSpace(f);
+
+ masm.setupUnalignedABICallDontSaveRestoreSP();
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = ExitFrameLayout::Size();
+
+ // Copy arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ masm.passABIArg(MoveOperand(FramePointer, argDisp), ABIType::General);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ // We don't pass doubles in float registers on x86, so no need
+ // to check for argPassedInFloatReg.
+ masm.passABIArg(MoveOperand(FramePointer, argDisp), ABIType::General);
+ argDisp += sizeof(void*);
+ masm.passABIArg(MoveOperand(FramePointer, argDisp), ABIType::General);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(MoveOperand(FramePointer, argDisp,
+ MoveOperand::Kind::EffectiveAddress),
+ ABIType::General);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByRef:
+ masm.passABIArg(MoveOperand(FramePointer, argDisp,
+ MoveOperand::Kind::EffectiveAddress),
+ ABIType::General);
+ argDisp += 2 * sizeof(void*);
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ const int32_t outParamOffset =
+ -int32_t(ExitFooterFrame::Size()) - f.sizeOfOutParamStackSlot();
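+  // Note that outParamOffset is negative: the out-param slot was reserved
+  // below the frame pointer, underneath the exit frame footer.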
+ if (f.outParam != Type_Void) {
+ masm.passABIArg(MoveOperand(FramePointer, outParamOffset,
+ MoveOperand::Kind::EffectiveAddress),
+ ABIType::General);
+ }
+
+ masm.callWithABI(nativeFun, ABIType::General,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, eax, eax, masm.failureLabel());
+ break;
+ case Type_Bool:
+ masm.testb(eax, eax);
+ masm.j(Assembler::Zero, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam.
+ masm.loadVMFunctionOutParam(f, Address(FramePointer, outParamOffset));
+
+ // Until C++ code is instrumented against Spectre, prevent speculative
+ // execution from returning any private data.
+ if (f.returnsData() && JitOptions.spectreJitToCxxCalls) {
+ masm.speculationBarrier();
+ }
+
+ // Pop frame and restore frame pointer.
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
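+
+  // retn pops the return address and then the byte count computed above: the
+  // remainder of the exit frame (its frame pointer slot was popped just
+  // above), plus the explicit argument slots and any extra Values to discard.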
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ static_assert(PreBarrierReg == edx);
+ Register temp1 = eax;
+ Register temp2 = ebx;
+ Register temp3 = ecx;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.PushRegsInMask(save);
+
+ masm.movl(ImmPtr(cx->runtime()), ecx);
+
+ masm.setupUnalignedABICall(eax);
+ masm.passABIArg(ecx);
+ masm.passABIArg(edx);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.ret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(edx, ecx);
+}