Diffstat (limited to 'js/src/jit/mips64')
-rw-r--r--  js/src/jit/mips64/Architecture-mips64.cpp        88
-rw-r--r--  js/src/jit/mips64/Architecture-mips64.h         233
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.cpp          371
-rw-r--r--  js/src/jit/mips64/Assembler-mips64.h            288
-rw-r--r--  js/src/jit/mips64/CodeGenerator-mips64.cpp      586
-rw-r--r--  js/src/jit/mips64/CodeGenerator-mips64.h         65
-rw-r--r--  js/src/jit/mips64/LIR-mips64.h                  147
-rw-r--r--  js/src/jit/mips64/Lowering-mips64.cpp           201
-rw-r--r--  js/src/jit/mips64/Lowering-mips64.h              56
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64-inl.h   845
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.cpp    2852
-rw-r--r--  js/src/jit/mips64/MacroAssembler-mips64.h       841
-rw-r--r--  js/src/jit/mips64/MoveEmitter-mips64.cpp        149
-rw-r--r--  js/src/jit/mips64/MoveEmitter-mips64.h           31
-rw-r--r--  js/src/jit/mips64/SharedICRegisters-mips64.h     45
-rw-r--r--  js/src/jit/mips64/Simulator-mips64.cpp         4402
-rw-r--r--  js/src/jit/mips64/Simulator-mips64.h            536
-rw-r--r--  js/src/jit/mips64/Trampoline-mips64.cpp         870
18 files changed, 12606 insertions, 0 deletions
diff --git a/js/src/jit/mips64/Architecture-mips64.cpp b/js/src/jit/mips64/Architecture-mips64.cpp
new file mode 100644
index 0000000000..54ae127954
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.cpp
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Architecture-mips64.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+const char* const Registers::RegNames[] = {
+ "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+ "a7", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5",
+ "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"};
+
+const uint32_t Allocatable = 22;
+
+const Registers::SetType Registers::ArgRegMask =
+ Registers::SharedArgRegMask | (1 << a4) | (1 << a5) | (1 << a6) | (1 << a7);
+
+const Registers::SetType Registers::JSCallMask = (1 << Registers::v1);
+
+const Registers::SetType Registers::CallMask = (1 << Registers::v0);
+
+FloatRegisters::Encoding FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(Encoding(i)), name) == 0) {
+ return Encoding(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegister FloatRegister::singleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ == Codes::Double) {
+ return FloatRegister(reg_, Codes::Single);
+ }
+ return *this;
+}
+
+FloatRegister FloatRegister::doubleOverlay() const {
+ MOZ_ASSERT(!isInvalid());
+ if (kind_ != Codes::Double) {
+ return FloatRegister(reg_, Codes::Double);
+ }
+ return *this;
+}
+
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet mod;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ if ((*iter).isSingle()) {
+      // Even for single-precision registers, save the complete double
+      // register.
+ mod.addUnchecked((*iter).doubleOverlay());
+ } else {
+ mod.addUnchecked(*iter);
+ }
+ }
+ return mod.set();
+}
+
+uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ FloatRegisterSet ss = s.reduceSetForPush();
+ uint64_t bits = ss.bits();
+ // We are only pushing double registers.
+ MOZ_ASSERT((bits & 0xffffffff) == 0);
+ uint32_t ret = mozilla::CountPopulation32(bits >> 32) * sizeof(double);
+ return ret;
+}
+uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
+ return id() * sizeof(double);
+}
+
+} // namespace jit
+} // namespace js
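
ReduceSetForPush and GetPushSizeInBytes together decide how much stack a float
register set needs: singles are widened to their double overlay, and the byte
count is a popcount of the double bank. A minimal sketch of that arithmetic
(not part of the patch), assuming the bank layout from Architecture-mips64.h
(Single = bits 0..31, Double = bits 32..63) and the gcc/clang popcount builtin:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned kDoubleBank = 32;  // Double registers occupy bits 32..63.

      // A live set holding f0 as a Single and f2 as a Double.
      uint64_t bits = (1ULL << 0) | (1ULL << (kDoubleBank + 2));

      // ReduceSetForPush: promote every Single to its double overlay.
      bits = (bits | (bits << kDoubleBank)) & 0xffffffff00000000ULL;

      // GetPushSizeInBytes: popcount of the double bank times sizeof(double).
      uint32_t pushed =
          __builtin_popcountll(bits >> kDoubleBank) * sizeof(double);
      assert(pushed == 16);  // f0 (widened) and f2 -> two doubles.
      return 0;
    }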
diff --git a/js/src/jit/mips64/Architecture-mips64.h b/js/src/jit/mips64/Architecture-mips64.h
new file mode 100644
index 0000000000..d3db37ea2c
--- /dev/null
+++ b/js/src/jit/mips64/Architecture-mips64.h
@@ -0,0 +1,233 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Architecture_mips64_h
+#define jit_mips64_Architecture_mips64_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// Shadow stack space is not required on MIPS64.
+static const uint32_t ShadowStackSpace = 0;
+
+// MIPS64 has a 64-bit floating-point coprocessor. There are 32 double-
+// precision registers which can also be used as single-precision registers.
+class FloatRegisters : public FloatRegistersMIPSShared {
+ public:
+ enum ContentType { Single, Double, NumTypes };
+
+ static const char* GetName(uint32_t i) {
+ MOZ_ASSERT(i < TotalPhys);
+ return FloatRegistersMIPSShared::GetName(Encoding(i));
+ }
+
+ static Encoding FromName(const char* name);
+
+ static const uint32_t Total = 32 * NumTypes;
+#ifdef MIPSR6
+ static const uint32_t Allocatable = 60;
+#else
+ static const uint32_t Allocatable = 62;
+#endif
+  // When saving all registers, we only need to save the double registers.
+ static const uint32_t TotalPhys = 32;
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+  // Magic values which are used to duplicate a mask of physical registers
+  // for a specific type of register. A multiplication is used to copy and
+  // shift the bits of the physical register mask.
+ static const SetType SpreadSingle = SetType(1)
+ << (uint32_t(Single) * TotalPhys);
+ static const SetType SpreadDouble = SetType(1)
+ << (uint32_t(Double) * TotalPhys);
+ static const SetType SpreadScalar = SpreadSingle | SpreadDouble;
+ static const SetType SpreadVector = 0;
+ static const SetType Spread = SpreadScalar | SpreadVector;
+
+ static const SetType AllPhysMask = ((SetType(1) << TotalPhys) - 1);
+ static const SetType AllMask = AllPhysMask * Spread;
+ static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
+ static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+
+ static const SetType NonVolatileMask =
+ ((1U << FloatRegisters::f24) | (1U << FloatRegisters::f25) |
+ (1U << FloatRegisters::f26) | (1U << FloatRegisters::f27) |
+ (1U << FloatRegisters::f28) | (1U << FloatRegisters::f29) |
+ (1U << FloatRegisters::f30) | (1U << FloatRegisters::f31)) *
+ SpreadScalar |
+ AllPhysMask * SpreadVector;
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+#ifdef MIPSR6
+ static const SetType NonAllocatableMask =
+ ((1U << FloatRegisters::f23) | (1U << FloatRegisters::f24)) * Spread;
+#else
+ static const SetType NonAllocatableMask =
+ (1U << FloatRegisters::f23) * Spread;
+#endif
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
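
The "Spread" multiplication above is easiest to see with concrete numbers: a
hedged sketch (not part of the patch) checking that multiplying a physical
mask by Spread stamps one copy of it into both the Single and Double banks:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t TotalPhys = 32;
      const uint64_t SpreadSingle = 1ULL << (0 * TotalPhys);  // Single == 0
      const uint64_t SpreadDouble = 1ULL << (1 * TotalPhys);  // Double == 1
      const uint64_t Spread = SpreadSingle | SpreadDouble;

      // The f24..f31 physical mask, duplicated into both 32-bit banks.
      uint64_t phys = 0xff000000u;
      assert(phys * Spread == 0xff000000ff000000ULL);
      return 0;
    }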
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegister : public FloatRegisterMIPSShared {
+ public:
+ typedef FloatRegisters Codes;
+ typedef size_t Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::ContentType ContentType;
+
+ Encoding reg_ : 6;
+
+ private:
+ ContentType kind_ : 3;
+
+ public:
+ constexpr FloatRegister(uint32_t r, ContentType kind = Codes::Double)
+ : reg_(Encoding(r)), kind_(kind) {}
+ constexpr FloatRegister()
+ : reg_(Encoding(FloatRegisters::invalid_freg)), kind_(Codes::Double) {}
+
+ static uint32_t SetSize(SetType x) {
+ // Count the number of non-aliased registers.
+ x |= x >> Codes::TotalPhys;
+ x &= Codes::AllPhysMask;
+ static_assert(Codes::AllPhysMask <= 0xffffffff,
+ "We can safely use CountPopulation32");
+ return mozilla::CountPopulation32(x);
+ }
+
+ bool operator==(const FloatRegister& other) const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(!other.isInvalid());
+ return kind_ == other.kind_ && reg_ == other.reg_;
+ }
+ bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+ size_t size() const {
+ return (kind_ == Codes::Double) ? sizeof(double) : sizeof(float);
+ }
+ // Always push doubles to maintain 8-byte stack alignment.
+ size_t pushSize() const { return sizeof(double); }
+ bool isInvalid() const { return reg_ == FloatRegisters::invalid_freg; }
+
+ bool isSingle() const { return kind_ == Codes::Single; }
+ bool isDouble() const { return kind_ == Codes::Double; }
+ bool isSimd128() const { return false; }
+
+ FloatRegister singleOverlay() const;
+ FloatRegister doubleOverlay() const;
+
+ FloatRegister asSingle() const { return singleOverlay(); }
+ FloatRegister asDouble() const { return doubleOverlay(); }
+ FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+ Code code() const {
+ MOZ_ASSERT(!isInvalid());
+ return Code(reg_ | (kind_ << 5));
+ }
+ Encoding encoding() const {
+ MOZ_ASSERT(!isInvalid());
+ MOZ_ASSERT(uint32_t(reg_) < Codes::TotalPhys);
+ return reg_;
+ }
+ uint32_t id() const { return reg_; }
+ static FloatRegister FromCode(uint32_t i) {
+ uint32_t code = i & 0x1f;
+ uint32_t kind = i >> 5;
+ return FloatRegister(Code(code), ContentType(kind));
+ }
+
+ bool volatile_() const {
+ return !!((1 << reg_) & FloatRegisters::VolatileMask);
+ }
+ const char* name() const { return FloatRegisters::GetName(reg_); }
+ bool operator!=(const FloatRegister& other) const {
+ return kind_ != other.kind_ || reg_ != other.reg_;
+ }
+ bool aliases(const FloatRegister& other) { return reg_ == other.reg_; }
+ uint32_t numAliased() const { return 2; }
+ FloatRegister aliased(uint32_t aliasIdx) {
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ if (isDouble()) {
+ return singleOverlay();
+ }
+ return doubleOverlay();
+ }
+ uint32_t numAlignedAliased() const { return 2; }
+ FloatRegister alignedAliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(isDouble());
+ if (aliasIdx == 0) {
+ return *this;
+ }
+ MOZ_ASSERT(aliasIdx == 1);
+ return singleOverlay();
+ }
+
+ SetType alignedOrDominatedAliasedSet() const { return Codes::Spread << reg_; }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+    static_assert(Name != RegTypeName::Any, "Allocatable sets are not iterable");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ static Code FromName(const char* name) {
+ return FloatRegisters::FromName(name);
+ }
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
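
code() packs the physical register number into bits 0..4 and the ContentType
into bit 5, and FromCode() reverses it. A quick round-trip sketch under that
reading (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
      // f7 as a Double (ContentType Double == 1).
      uint32_t code = 7 | (1u << 5);  // what code() produces
      assert((code & 0x1f) == 7);     // FromCode: physical register
      assert((code >> 5) == 1);       // FromCode: content type
      return 0;
    }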
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Architecture_mips64_h */
diff --git a/js/src/jit/mips64/Assembler-mips64.cpp b/js/src/jit/mips64/Assembler-mips64.cpp
new file mode 100644
index 0000000000..bae7c14a69
--- /dev/null
+++ b/js/src/jit/mips64/Assembler-mips64.cpp
@@ -0,0 +1,371 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Assembler-mips64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "jit/AutoWritableJitCode.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+ABIArgGenerator::ABIArgGenerator()
+ : regIndex_(0), stackOffset_(0), current_() {}
+
+ABIArg ABIArgGenerator::next(MIRType type) {
+ static_assert(NumIntArgRegs == NumFloatArgRegs);
+ if (regIndex_ == NumIntArgRegs) {
+ if (type != MIRType::Simd128) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uint64_t);
+ } else {
+      // The MIPS platform does not support SIMD yet.
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+ }
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::RefOrNull:
+ case MIRType::StackResults: {
+ Register destReg;
+ GetIntArgReg(regIndex_++, &destReg);
+ current_ = ABIArg(destReg);
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ FloatRegister::ContentType contentType;
+ contentType = (type == MIRType::Double) ? FloatRegisters::Double
+ : FloatRegisters::Single;
+ FloatRegister destFReg;
+ GetFloatArgReg(regIndex_++, &destFReg);
+ current_ = ABIArg(FloatRegister(destFReg.id(), contentType));
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
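
Under the n64 convention modeled here, integer and floating-point arguments
share a single slot index (the lone regIndex_), pairing a0..a7 with f12..f19.
A hedged emulation of that assignment for an illustrative signature; the
register names are printed as strings rather than taken from the real
Register types:

    #include <cstdio>

    int main() {
      // (int32, double, int32, float): each argument burns one shared slot.
      const char* kinds[] = {"int32", "double", "int32", "float"};
      for (int slot = 0; slot < 4; slot++) {
        bool isFloat = kinds[slot][0] == 'd' || kinds[slot][0] == 'f';
        if (isFloat) {
          printf("arg%d (%s) -> f%d\n", slot, kinds[slot], 12 + slot);  // f13, f15
        } else {
          printf("arg%d (%s) -> a%d\n", slot, kinds[slot], slot);  // a0, a2
        }
      }
      return 0;  // a ninth argument onward would go to 8-byte stack slots
    }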
+
+uint32_t js::jit::RT(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RTShift;
+}
+
+uint32_t js::jit::RD(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RDShift;
+}
+
+uint32_t js::jit::RZ(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << RZShift;
+}
+
+uint32_t js::jit::SA(FloatRegister r) {
+ MOZ_ASSERT(r.id() < FloatRegisters::TotalPhys);
+ return r.id() << SAShift;
+}
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+}
+
+uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+static JitCode* CodeFromJump(Instruction* jump) {
+ uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ while (reader.more()) {
+ JitCode* child =
+ CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void TraceOneDataRelocation(JSTracer* trc,
+ mozilla::Maybe<AutoWritableJitCode>& awjc,
+ JitCode* code, Instruction* inst) {
+ void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
+ void* prior = ptr;
+
+ // Data relocations can be for Values or for raw pointers. If a Value is
+ // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+ // is not zero-tagged, we have to interpret it as a Value to ensure that the
+ // tag bits are masked off to recover the actual pointer.
+ uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+ if (word >> JSVAL_TAG_SHIFT) {
+ // This relocation is a Value with a non-zero tag.
+ Value v = Value::fromRawBits(word);
+ TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+ ptr = (void*)v.bitsAsPunboxPointer();
+ } else {
+ // This relocation is a raw pointer or a Value with a zero tag.
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(
+ trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+ }
+
+ if (ptr != prior) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
+ }
+}
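
The `word >> JSVAL_TAG_SHIFT` test above relies on the punbox64 layout: a
boxed Value carries a nonzero tag above the pointer bits, while raw pointers
leave those bits clear. A sketch of just the discriminating check, assuming
JSVAL_TAG_SHIFT == 47 (its value on 64-bit platforms) and an arbitrary,
purely illustrative nonzero tag:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned kTagShift = 47;            // JSVAL_TAG_SHIFT (assumed)
      uint64_t rawPtr = 0x00007f0012345678ULL;  // canonical pointer, tag clear
      uint64_t boxed = (0x1fff1ULL << kTagShift) | rawPtr;  // illustrative tag

      assert((rawPtr >> kTagShift) == 0);  // traced as a raw pointer
      assert((boxed >> kTagShift) != 0);   // traced as a Value
      return 0;
    }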
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(code->raw() + offset);
+ TraceOneDataRelocation(trc, awjc, code, inst);
+ }
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+ if (label.patchAt().bound()) {
+ auto mode = label.linkMode();
+ intptr_t offset = label.patchAt().offset();
+ intptr_t target = label.target().offset();
+
+ if (mode == CodeLabel::RawPointer) {
+ *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+ } else {
+ MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+ mode == CodeLabel::JumpImmediate);
+ Instruction* inst = (Instruction*)(rawCode + offset);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
+ }
+ }
+}
+
+void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
+ int64_t offset = target - branch;
+ InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+  // If the encoded offset is 4, then the jump must be short.
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+ return;
+ }
+
+  // Generate the long jump for calls because the return address has to be
+  // the address after the reserved block.
+ if (inst[0].encode() == inst_bgezal.encode()) {
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+ // There is 1 nop after this.
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+    // Not skipping the trailing nops improves performance on the Loongson3
+    // platform, so they are only skipped on other CPUs.
+ bool skipNops =
+ !isLoongson() && (inst[0].encode() != inst_bgezal.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ if (skipNops) {
+ inst[2] =
+ InstImm(op_regimm, zero, rt_bgez, BOffImm16(5 * sizeof(uint32_t)))
+ .encode();
+ // There are 4 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+ // Handle long unconditional jump.
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+#ifdef MIPSR6
+ inst[4] =
+ InstReg(op_special, ScratchRegister, zero, zero, ff_jalr).encode();
+#else
+ inst[4] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+#endif
+ // There is 1 nop after this.
+ } else {
+ // Handle long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(7 * sizeof(uint32_t)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(uint32_t)), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+#ifdef MIPSR6
+ inst[5] =
+ InstReg(op_special, ScratchRegister, zero, zero, ff_jalr).encode();
+#else
+ inst[5] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+#endif
+ // There is 1 nop after this.
+ }
+}
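
For the long-conditional case, the patched block ends up as an inverted
branch hopping over a 48-bit address load plus an indirect jump. A rough
sketch of the final layout, written as comments since the exact
reserved-block size is inferred from the BOffImm16(7 * sizeof(uint32_t))
above rather than stated in the patch:

    // inst[0]: b<inverted cond> +7 words   (taken = skip the long jump)
    // inst[1]: lui  scratch, target[47:32]   \
    // inst[2]: ori  scratch, target[31:16]    | WriteLoad64Instructions
    // inst[3]: drotr32 scratch, scratch, 16   | (rs_one/ff_dsrl32 encoding)
    // inst[4]: ori  scratch, target[15:0]    /
    // inst[5]: jr   scratch                 (jalr zero, scratch on MIPSR6)
    // inst[6]: nop                          (branch delay slot)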
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+uint32_t Assembler::PatchWrite_NearCallSize() {
+  // Loading an address needs 4 instructions, plus a jump and its delay slot.
+ return (4 + 2) * sizeof(uint32_t);
+}
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* inst = (Instruction*)start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use long jump for two reasons:
+  // - The jump has to be the same size because of PatchWrite_NearCallSize.
+  // - The return address has to be at the end of the replaced block.
+  // A short jump wouldn't be more efficient anyway.
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
+ inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ inst[5] = InstNOP();
+}
+
+uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstReg* i2 = (InstReg*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+ InstImm* i5 = (InstImm*)i3->next()->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
+ (i2->extractFunctionField() == ff_dsrl32)) {
+ uint64_t value = (uint64_t(i0->extractImm16Value()) << 32) |
+ (uint64_t(i1->extractImm16Value()) << 16) |
+ uint64_t(i3->extractImm16Value());
+ return uint64_t((int64_t(value) << 16) >> 16);
+ }
+
+ MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ uint64_t value = (uint64_t(i0->extractImm16Value()) << 48) |
+ (uint64_t(i1->extractImm16Value()) << 32) |
+ (uint64_t(i3->extractImm16Value()) << 16) |
+ uint64_t(i5->extractImm16Value());
+ return value;
+}
+
+void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstReg* i2 = (InstReg*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+ InstImm* i5 = (InstImm*)i3->next()->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if ((i2->extractOpcode() == ((uint32_t)op_special >> OpcodeShift)) &&
+ (i2->extractFunctionField() == ff_dsrl32)) {
+ i0->setImm16(Imm16::Lower(Imm32(value >> 32)));
+ i1->setImm16(Imm16::Upper(Imm32(value)));
+ i3->setImm16(Imm16::Lower(Imm32(value)));
+ return;
+ }
+
+ MOZ_ASSERT(i5->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ i0->setImm16(Imm16::Upper(Imm32(value >> 32)));
+ i1->setImm16(Imm16::Lower(Imm32(value >> 32)));
+ i3->setImm16(Imm16::Upper(Imm32(value)));
+ i5->setImm16(Imm16::Lower(Imm32(value)));
+}
+
+void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value) {
+ Instruction* inst1 = inst0->next();
+ Instruction* inst2 = inst1->next();
+ Instruction* inst3 = inst2->next();
+
+ *inst0 = InstImm(op_lui, zero, reg, Imm16::Lower(Imm32(value >> 32)));
+ *inst1 = InstImm(op_ori, reg, reg, Imm16::Upper(Imm32(value)));
+ *inst2 = InstReg(op_special, rs_one, reg, reg, 48 - 32, ff_dsrl32);
+ *inst3 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
+}
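
WriteLoad64Instructions emits a 4-instruction sequence for 48-bit values:
lui/ori build the top two 16-bit chunks, a rotate slides them into place, and
a final ori fills the low half. A hedged C++ emulation of that data flow,
assuming the rs_one/ff_dsrl32 encoding denotes drotr32 (rotate right by
sa + 32); it round-trips values in the positive 48-bit range:

    #include <cassert>
    #include <cstdint>

    static uint64_t emulateLoad48(uint64_t value) {
      // lui: bits [47:32] land in [31:16], sign-extended to 64 bits.
      uint64_t reg =
          (uint64_t)(int64_t)(int32_t)(uint32_t)(((value >> 32) & 0xffff) << 16);
      reg |= (value >> 16) & 0xffff;    // ori: bits [31:16] into [15:0]
      reg = (reg >> 48) | (reg << 16);  // drotr32 sa=16: rotate right by 48
      reg |= value & 0xffff;            // ori: bits [15:0]
      return reg;
    }

    int main() {
      assert(emulateLoad48(0x0000123456789abcULL) == 0x0000123456789abcULL);
      return 0;
    }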
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expectedValue) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue) {
+ Instruction* inst = (Instruction*)label.raw();
+
+ // Extract old Value
+ DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
+ MOZ_ASSERT(value == uint64_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
+}
+
+uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*)inst;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i3 = (InstImm*)i1->next()->next();
+ Instruction* i4 = (Instruction*)i3->next();
+
+ MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+ MOZ_ASSERT(i3->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ if (enabled) {
+ MOZ_ASSERT(i4->extractOpcode() != ((uint32_t)op_lui >> OpcodeShift));
+ InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+ *i4 = jalr;
+ } else {
+ InstNOP nop;
+ *i4 = nop;
+ }
+}
diff --git a/js/src/jit/mips64/Assembler-mips64.h b/js/src/jit/mips64/Assembler-mips64.h
new file mode 100644
index 0000000000..7a51f12407
--- /dev/null
+++ b/js/src/jit/mips64/Assembler-mips64.h
@@ -0,0 +1,288 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Assembler_mips64_h
+#define jit_mips64_Assembler_mips64_h
+
+#include <iterator>
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "jit/mips64/Architecture-mips64.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register CallTempReg4 = a4;
+static constexpr Register CallTempReg5 = a5;
+
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+class ABIArgGenerator {
+ unsigned regIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+
+ public:
+ ABIArgGenerator();
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+};
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReg3 = t3;
+
+// This register may be volatile or nonvolatile. Avoid f23 which is the
+// ScratchDoubleReg.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::f21,
+ FloatRegisters::Double};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+static constexpr Register ABINonVolatileReg = s0;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call, which must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = t0;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register InstanceReg = s5;
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = t1;
+
+static constexpr Register InterpreterPCReg = t5;
+
+static constexpr Register JSReturnReg = v1;
+static constexpr Register JSReturnReg_Type = JSReturnReg;
+static constexpr Register JSReturnReg_Data = JSReturnReg;
+static constexpr Register64 ReturnReg64(ReturnReg);
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::f0,
+ FloatRegisters::Single};
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::f0,
+ FloatRegisters::Double};
+static constexpr FloatRegister ScratchFloat32Reg = {FloatRegisters::f23,
+ FloatRegisters::Single};
+static constexpr FloatRegister ScratchDoubleReg = {FloatRegisters::f23,
+ FloatRegisters::Double};
+
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+static constexpr FloatRegister f0 = {FloatRegisters::f0,
+ FloatRegisters::Double};
+static constexpr FloatRegister f1 = {FloatRegisters::f1,
+ FloatRegisters::Double};
+static constexpr FloatRegister f2 = {FloatRegisters::f2,
+ FloatRegisters::Double};
+static constexpr FloatRegister f3 = {FloatRegisters::f3,
+ FloatRegisters::Double};
+static constexpr FloatRegister f4 = {FloatRegisters::f4,
+ FloatRegisters::Double};
+static constexpr FloatRegister f5 = {FloatRegisters::f5,
+ FloatRegisters::Double};
+static constexpr FloatRegister f6 = {FloatRegisters::f6,
+ FloatRegisters::Double};
+static constexpr FloatRegister f7 = {FloatRegisters::f7,
+ FloatRegisters::Double};
+static constexpr FloatRegister f8 = {FloatRegisters::f8,
+ FloatRegisters::Double};
+static constexpr FloatRegister f9 = {FloatRegisters::f9,
+ FloatRegisters::Double};
+static constexpr FloatRegister f10 = {FloatRegisters::f10,
+ FloatRegisters::Double};
+static constexpr FloatRegister f11 = {FloatRegisters::f11,
+ FloatRegisters::Double};
+static constexpr FloatRegister f12 = {FloatRegisters::f12,
+ FloatRegisters::Double};
+static constexpr FloatRegister f13 = {FloatRegisters::f13,
+ FloatRegisters::Double};
+static constexpr FloatRegister f14 = {FloatRegisters::f14,
+ FloatRegisters::Double};
+static constexpr FloatRegister f15 = {FloatRegisters::f15,
+ FloatRegisters::Double};
+static constexpr FloatRegister f16 = {FloatRegisters::f16,
+ FloatRegisters::Double};
+static constexpr FloatRegister f17 = {FloatRegisters::f17,
+ FloatRegisters::Double};
+static constexpr FloatRegister f18 = {FloatRegisters::f18,
+ FloatRegisters::Double};
+static constexpr FloatRegister f19 = {FloatRegisters::f19,
+ FloatRegisters::Double};
+static constexpr FloatRegister f20 = {FloatRegisters::f20,
+ FloatRegisters::Double};
+static constexpr FloatRegister f21 = {FloatRegisters::f21,
+ FloatRegisters::Double};
+static constexpr FloatRegister f22 = {FloatRegisters::f22,
+ FloatRegisters::Double};
+static constexpr FloatRegister f23 = {FloatRegisters::f23,
+ FloatRegisters::Double};
+static constexpr FloatRegister f24 = {FloatRegisters::f24,
+ FloatRegisters::Double};
+static constexpr FloatRegister f25 = {FloatRegisters::f25,
+ FloatRegisters::Double};
+static constexpr FloatRegister f26 = {FloatRegisters::f26,
+ FloatRegisters::Double};
+static constexpr FloatRegister f27 = {FloatRegisters::f27,
+ FloatRegisters::Double};
+static constexpr FloatRegister f28 = {FloatRegisters::f28,
+ FloatRegisters::Double};
+static constexpr FloatRegister f29 = {FloatRegisters::f29,
+ FloatRegisters::Double};
+static constexpr FloatRegister f30 = {FloatRegisters::f30,
+ FloatRegisters::Double};
+static constexpr FloatRegister f31 = {FloatRegisters::f31,
+ FloatRegisters::Double};
+
+// MIPS64 CPUs can only load multibyte data that is "naturally"
+// eight-byte-aligned; the sp register should be sixteen-byte-aligned.
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO this is just a filler to prevent a build failure. The MIPS SIMD
+// alignment requirements still need to be explored.
+// TODO Copy the static_asserts from x64/x86 assembler files.
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+static constexpr Scale ScalePointer = TimesEight;
+
+class Assembler : public AssemblerMIPSShared {
+ public:
+ Assembler() : AssemblerMIPSShared() {}
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ using AssemblerMIPSShared::bind;
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint64_t ExtractLoad64Value(Instruction* inst0);
+ static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+ static void WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static uint64_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = NumIntArgRegs;
+
+static inline bool GetIntArgReg(uint32_t usedArgSlots, Register* out) {
+ if (usedArgSlots < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+static inline bool GetFloatArgReg(uint32_t usedArgSlots, FloatRegister* out) {
+ if (usedArgSlots < NumFloatArgRegs) {
+ *out = FloatRegister::FromCode(f12.code() + usedArgSlots);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
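
A usage sketch for the fallback order (assuming the surrounding declarations,
NumIntArgRegs == 8, and CallTempNonArgRegs == {t0..t3}): slots 0..7 yield
a0..a7, slots 8..11 fall back to t0..t3, and anything past that fails.

    Register reg;
    MOZ_ALWAYS_TRUE(GetTempRegForIntArg(0, 0, &reg));   // reg == a0
    MOZ_ALWAYS_TRUE(GetTempRegForIntArg(7, 0, &reg));   // reg == a7
    MOZ_ALWAYS_TRUE(GetTempRegForIntArg(8, 0, &reg));   // reg == t0
    MOZ_ALWAYS_TRUE(GetTempRegForIntArg(11, 0, &reg));  // reg == t3
    MOZ_ASSERT(!GetTempRegForIntArg(12, 0, &reg));      // out of registers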
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Assembler_mips64_h */
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.cpp b/js/src/jit/mips64/CodeGenerator-mips64.cpp
new file mode 100644
index 0000000000..22e45663db
--- /dev/null
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -0,0 +1,586 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/CodeGenerator-mips64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+ValueOperand CodeGeneratorMIPS64::ToValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorMIPS64::ToTempValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LAllocation* in = box->getOperand(0);
+ ValueOperand result = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ MUnbox* mir = unbox->mir();
+
+ Register result = ToRegister(unbox->output());
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Label bail;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(value, result, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(value, result, &bail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(value, result, &bail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(value, result, &bail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(value, result, &bail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(value, result, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, unbox->snapshot());
+ return;
+ }
+
+ LAllocation* input = unbox->getOperand(LUnbox::Input);
+ if (input->isRegister()) {
+ Register inputReg = ToRegister(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputReg, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputReg, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputReg, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputReg, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputReg, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputReg, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ return;
+ }
+
+ Address inputAddr = ToAddress(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputAddr, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputAddr, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputAddr, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputAddr, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputAddr, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputAddr, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void CodeGeneratorMIPS64::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ masm.splitTag(value.valueReg(), tag);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+ Register rhsReg;
+ ScratchRegisterScope scratch(masm);
+
+ if (IsConstant(rhs)) {
+ rhsReg = scratch;
+ masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
+ } else if (rhs.value().isGeneralReg()) {
+ rhsReg = ToRegister64(rhs).reg;
+ } else {
+ rhsReg = scratch;
+ masm.loadPtr(ToAddress(rhs.value()), rhsReg);
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ masm.cmpPtrSet(JSOpToCondition(lir->jsop(), isSigned), lhsReg, rhsReg,
+ output);
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register rhsReg;
+ ScratchRegisterScope scratch(masm);
+
+ if (IsConstant(rhs)) {
+ rhsReg = scratch;
+ masm.ma_li(rhsReg, ImmWord(ToInt64(rhs)));
+ } else if (rhs.value().isGeneralReg()) {
+ rhsReg = ToRegister64(rhs).reg;
+ } else {
+ rhsReg = scratch;
+ masm.loadPtr(ToAddress(rhs.value()), rhsReg);
+ }
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+ emitBranch(lhsReg, rhsReg, cond, lir->ifTrue(), lir->ifFalse());
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.ma_xor(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+#ifdef MIPSR6
+ if (lir->mir()->isMod()) {
+ masm.as_dmod(output, lhs, rhs);
+ } else {
+ masm.as_ddiv(output, lhs, rhs);
+ }
+#else
+ masm.as_ddiv(lhs, rhs);
+ if (lir->mir()->isMod()) {
+ masm.as_mfhi(output);
+ } else {
+ masm.as_mflo(output);
+ }
+#endif
+ masm.bind(&done);
+}
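
The overflow guard exists because INT64_MIN / -1 is the one overflowing case
of 64-bit signed division: the true quotient 2^63 is unrepresentable, while
the matching remainder is well-defined as 0, which is why the Mod path above
clears the output instead of trapping. A small sketch of the arithmetic (in
C++ the division itself would be undefined behavior, so only the wrap-around
identity is checked):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t lhs = INT64_MIN;
      // |INT64_MIN| == 2^63 wraps back to INT64_MIN in 64 bits, so the
      // quotient of INT64_MIN / -1 cannot be represented; wasm defines the
      // remainder INT64_MIN % -1 as 0 (what the ma_xor above writes).
      assert(-(uint64_t)lhs == (uint64_t)lhs);
      return 0;
    }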
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+#ifdef MIPSR6
+ if (lir->mir()->isMod()) {
+ masm.as_dmodu(output, lhs, rhs);
+ } else {
+ masm.as_ddivu(output, lhs, rhs);
+ }
+#else
+ masm.as_ddivu(lhs, rhs);
+ if (lir->mir()->isMod()) {
+ masm.as_mfhi(output);
+ } else {
+ masm.as_mflo(output);
+ }
+#endif
+ masm.bind(&done);
+}
+
+void CodeGeneratorMIPS64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_ddiv(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_ddiv(dividend, divisor);
+ masm.as_mflo(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorMIPS64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_dmod(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_ddiv(dividend, divisor);
+ masm.as_mfhi(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+template <typename T>
+void CodeGeneratorMIPS64::emitWasmLoadI64(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ptrReg, ptrScratch,
+ ToOutRegister64(lir),
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmLoadI64(mir->access(), HeapReg, ptrReg, ptrScratch,
+ ToOutRegister64(lir));
+ }
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void CodeGeneratorMIPS64::emitWasmStoreI64(T* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
+ HeapReg, ptrReg, ptrScratch,
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+ ptrReg, ptrScratch);
+ }
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
+ if (falseExpr.value().isRegister()) {
+ masm.as_movz(out.reg, ToRegister(falseExpr.value()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
+ masm.bind(&done);
+ }
+}
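
visitWasmSelectI64 leans on the conditional-move semantics of movz,
out = (cond == 0) ? src : out, with the true expression pre-loaded into the
output register. A scalar sketch of that dataflow (not part of the patch):

    #include <cassert>
    #include <cstdint>

    // Mirrors the register-allocated select: trueVal is already in 'out'.
    static uint64_t selectI64(uint64_t trueVal, uint64_t falseVal,
                              uint64_t cond) {
      uint64_t out = trueVal;
      if (cond == 0) {
        out = falseVal;  // as_movz(out.reg, falseExpr, cond)
      }
      return out;
    }

    int main() {
      assert(selectI64(1, 2, /*cond=*/5) == 1);
      assert(selectI64(1, 2, /*cond=*/0) == 2);
      return 0;
    }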
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.as_dmtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.as_dmfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->isUnsigned()) {
+ masm.ma_dext(output, ToRegister(input), Imm32(0), Imm32(32));
+ } else {
+ masm.ma_sll(output, ToRegister(input), Imm32(0));
+ }
+}
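
Both paths above use a classic MIPS64 idiom: every 32-bit ALU result is
sign-extended to 64 bits, so `sll out, in, 0` is the canonical sign
extension, while `dext out, in, 0, 32` clears the upper half for the
unsigned case. A C++ sketch of the two effects:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t in = 0xdeadbeef90000000ULL;  // low word has its sign bit set

      uint64_t sext = (uint64_t)(int64_t)(int32_t)(uint32_t)in;  // sll out, in, 0
      uint64_t zext = in & 0xffffffffULL;  // dext out, in, 0, 32

      assert(sext == 0xffffffff90000000ULL);
      assert(zext == 0x0000000090000000ULL);
      return 0;
    }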
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ if (input->isMemory()) {
+ masm.load32(ToAddress(input), output);
+ } else {
+ masm.ma_sll(output, ToRegister(input), Imm32(0));
+ }
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move8SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move16SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32To64SignExtend(input.reg, output);
+ break;
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move32To64ZeroExtend(input, Register64(output));
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move64To32(Register64(input), output);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.ma_cmp_set(output, input.reg, zero, Assembler::Equal);
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+ Register inputReg = ToRegister(input);
+ MOZ_ASSERT(inputReg == ToRegister(ins->output()));
+ masm.ma_not(inputReg, inputReg);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ bool isSaturating = mir->isSaturating();
+
+ if (fromType == MIRType::Double) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
+ auto sync = Synchronization::Load();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+ masm.memoryBarrierAfter(sync);
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+ auto sync = Synchronization::Store();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.store64(temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.store64(temp1, dest);
+ }
+ masm.memoryBarrierAfter(sync);
+}
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.h b/js/src/jit/mips64/CodeGenerator-mips64.h
new file mode 100644
index 0000000000..81c30c913e
--- /dev/null
+++ b/js/src/jit/mips64/CodeGenerator-mips64.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_CodeGenerator_mips64_h
+#define jit_mips64_CodeGenerator_mips64_h
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPS64 : public CodeGeneratorMIPSShared {
+ protected:
+ CodeGeneratorMIPS64(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorMIPSShared(gen, graph, masm) {}
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue,
+ ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue,
+ ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorMIPS64 CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_CodeGenerator_mips64_h */
diff --git a/js/src/jit/mips64/LIR-mips64.h b/js/src/jit/mips64/LIR-mips64.h
new file mode 100644
index 0000000000..4d8228418c
--- /dev/null
+++ b/js/src/jit/mips64/LIR-mips64.h
@@ -0,0 +1,147 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_LIR_mips64_h
+#define jit_mips64_LIR_mips64_h
+
+namespace js {
+namespace jit {
+
+class LUnbox : public LInstructionHelper<1, 1, 0> {
+ protected:
+ LUnbox(LNode::Opcode opcode, const LAllocation& input)
+ : LInstructionHelper(opcode) {
+ setOperand(0, input);
+ }
+
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnbox {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnbox(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+};
+
+class LDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_LIR_mips64_h */
diff --git a/js/src/jit/mips64/Lowering-mips64.cpp b/js/src/jit/mips64/Lowering-mips64.cpp
new file mode 100644
index 0000000000..e9cda9299c
--- /dev/null
+++ b/js/src/jit/mips64/Lowering-mips64.cpp
@@ -0,0 +1,201 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/Lowering-mips64.h"
+
+#include "jit/Lowering.h"
+#include "jit/mips64/Assembler-mips64.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void LIRGeneratorMIPS64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+}
+
+void LIRGeneratorMIPS64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+LBoxAllocation LIRGeneratorMIPS64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2, bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+void LIRGeneratorMIPS64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorMIPS64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorMIPS64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorMIPS64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorMIPS64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorMIPS64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorMIPS64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPS64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPS64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorMIPS64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnbox* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new (alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void LIRGeneratorMIPS64::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+
+void LIRGeneratorMIPS64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
+}
+
+void LIRGeneratorMIPS64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void LIRGeneratorMIPS64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void LIRGeneratorMIPS64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
diff --git a/js/src/jit/mips64/Lowering-mips64.h b/js/src/jit/mips64/Lowering-mips64.h
new file mode 100644
index 0000000000..b8543de6d2
--- /dev/null
+++ b/js/src/jit/mips64/Lowering-mips64.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_Lowering_mips64_h
+#define jit_mips64_Lowering_mips64_h
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorMIPS64 : public LIRGeneratorMIPSShared {
+ protected:
+ LIRGeneratorMIPS64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorMIPSShared(gen, graph, lirGraph) {}
+
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
+ void defineInt64Phi(MPhi*, size_t);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ inline LDefinition tempToUnbox() { return temp(); }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+
+ void lowerDivI64(MDiv* div);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerModI64(MMod* mod);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+ void lowerUDivI64(MDiv* div);
+ void lowerUModI64(MMod* mod);
+
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+};
+
+typedef LIRGeneratorMIPS64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_Lowering_mips64_h */
diff --git a/js/src/jit/mips64/MacroAssembler-mips64-inl.h b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
new file mode 100644
index 0000000000..ec166851a3
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64-inl.h
@@ -0,0 +1,845 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_inl_h
+#define jit_mips64_MacroAssembler_mips64_inl_h
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "vm/BigIntType.h" // JS::BigInt
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movePtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ movePtr(ImmWord(imm.value), dest.reg);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ moveFromDouble(src, dest.reg);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ moveToDouble(src.reg, dest);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+ ma_sll(dest, src.reg, Imm32(0));
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ ma_dext(dest.reg, src, Imm32(0), Imm32(32));
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move8SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move16SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ ma_sll(dest.reg, src, Imm32(0));
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ ma_sll(dest, src, Imm32(0));
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ ma_dext(dest, src, Imm32(0), Imm32(32));
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::notPtr(Register reg) { ma_not(reg, reg); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { ma_and(dest, src); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { ma_and(dest, imm); }
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_and(dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ ma_and(dest.reg, src.reg);
+}
+
+void MacroAssembler::and64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ and64(scratch, dest);
+ } else {
+ and64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_or(dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ ma_li(ScratchRegister, ImmWord(imm.value));
+ ma_xor(dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { ma_or(dest, src); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, imm); }
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ ma_or(dest.reg, src.reg);
+}
+
+void MacroAssembler::or64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ or64(scratch, dest);
+ } else {
+ or64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ ma_xor(dest.reg, src.reg);
+}
+
+void MacroAssembler::xor64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ xor64(scratch, dest);
+ } else {
+ xor64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { ma_xor(dest, src); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { ma_xor(dest, imm); }
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap64(Register64 reg64) {
+ Register reg = reg64.reg;
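+  // dsbh swaps the bytes within each halfword; dshd then reverses the
+  // halfwords, completing a full 64-bit byte reversal.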
+ ma_dsbh(reg, reg);
+ ma_dshd(reg, reg);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) {
+ ma_daddu(dest, src);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { ma_daddu(dest, imm); }
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ movePtr(imm, ScratchRegister);
+ addPtr(ScratchRegister, dest);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ add64(scratch, dest);
+ } else {
+ add64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ ma_daddu(dest.reg, imm);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+ ma_daddu(dest.reg, ScratchRegister);
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ MacroAssemblerMIPSShared::ma_liPatchable(dest, Imm32(0));
+ as_dsubu(dest, StackPointer, dest);
+ return offset;
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+ Instruction* lui =
+ (Instruction*)m_buffer.getInst(BufferOffset(offset.offset()));
+ MOZ_ASSERT(lui->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+ MOZ_ASSERT(lui->next()->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+ MacroAssemblerMIPSShared::UpdateLuiOriValue(lui, lui->next(), imm.value);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) {
+ as_dsubu(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ma_dsubu(dest, dest, imm);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ as_dsubu(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::sub64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ sub64(scratch, dest);
+ } else {
+ sub64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+ as_dsubu(dest.reg, dest.reg, ScratchRegister);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(src != scratch);
+ move32(imm, scratch);
+#ifdef MIPSR6
+ as_muhu(dest, src, scratch);
+#else
+ as_multu(src, scratch);
+ as_mfhi(dest);
+#endif
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+#ifdef MIPSR6
+ as_dmulu(srcDest, srcDest, rhs);
+#else
+ as_dmultu(srcDest, rhs);
+ as_mflo(srcDest);
+#endif
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ MOZ_ASSERT(dest.reg != ScratchRegister);
+ mov(ImmWord(imm.value), ScratchRegister);
+#ifdef MIPSR6
+ as_dmulu(dest.reg, ScratchRegister, dest.reg);
+#else
+ as_dmultu(dest.reg, ScratchRegister);
+ as_mflo(dest.reg);
+#endif
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ mul64(imm, dest);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+#ifdef MIPSR6
+ as_dmulu(dest.reg, src.reg, dest.reg);
+#else
+ as_dmultu(dest.reg, src.reg);
+ as_mflo(dest.reg);
+#endif
+}
+
+void MacroAssembler::mul64(const Operand& src, const Register64& dest,
+ const Register temp) {
+ if (src.getTag() == Operand::MEM) {
+ Register64 scratch(ScratchRegister);
+
+ load64(src.toAddress(), scratch);
+ mul64(scratch, dest, temp);
+ } else {
+ mul64(Register64(src.toReg()), dest, temp);
+ }
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
+ MOZ_ASSERT(src != ScratchRegister);
+ as_daddu(ScratchRegister, src, src);
+ as_daddu(dest, ScratchRegister, src);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ ma_li(ScratchRegister, ImmWord(uintptr_t(dest.addr)));
+ as_ld(SecondScratchReg, ScratchRegister, 0);
+ as_daddiu(SecondScratchReg, SecondScratchReg, 1);
+ as_sd(SecondScratchReg, ScratchRegister, 0);
+}
+
+void MacroAssembler::neg64(Register64 reg) { as_dsubu(reg.reg, zero, reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) { as_dsubu(reg, zero, reg); }
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsll(dest, dest, imm);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register dest) {
+ ma_dsll(dest, dest, shift);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsll(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 dest) {
+ ma_dsll(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsrl(dest, dest, imm);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register dest) {
+ ma_dsrl(dest, dest, shift);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsrl(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 dest) {
+ ma_dsrl(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsra(dest, dest, imm);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ ma_dsra(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest) {
+ ma_dsra(dest.reg, dest.reg, shift);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+
+ if (count.value) {
+ ma_drol(dest.reg, src.reg, count);
+ } else {
+ ma_move(dest.reg, src.reg);
+ }
+}
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ ma_drol(dest.reg, src.reg, count);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+
+ if (count.value) {
+ ma_dror(dest.reg, src.reg, count);
+ } else {
+ ma_move(dest.reg, src.reg);
+ }
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == InvalidReg);
+ ma_dror(dest.reg, src.reg, count);
+}
+
+// ===============================================================
+// Condition functions
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// Also see below for specializations of cmpPtrSet.
+
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ ma_cmp_set(dest, lhs, ImmWord(uint64_t(rhs.value)), cond);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ as_dclz(dest, src.reg);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ ma_dctz(dest, src.reg);
+}
+
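+// Branch-free population count via the classic SWAR reduction (see Hacker's
+// Delight 5-1): subtract the shifted-out odd bits, sum 2-bit then 4-bit
+// fields, and use a shift-add ladder in place of a multiply to gather the
+// byte sums into the top byte before the final right shift by 56.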
+void MacroAssembler::popcnt64(Register64 input, Register64 output,
+ Register tmp) {
+ ma_move(output.reg, input.reg);
+ ma_dsra(tmp, input.reg, Imm32(1));
+ ma_li(ScratchRegister, ImmWord(0x5555555555555555UL));
+ ma_and(tmp, ScratchRegister);
+ ma_dsubu(output.reg, tmp);
+ ma_dsra(tmp, output.reg, Imm32(2));
+ ma_li(ScratchRegister, ImmWord(0x3333333333333333UL));
+ ma_and(output.reg, ScratchRegister);
+ ma_and(tmp, ScratchRegister);
+ ma_daddu(output.reg, tmp);
+ ma_dsrl(tmp, output.reg, Imm32(4));
+ ma_daddu(output.reg, tmp);
+ ma_li(ScratchRegister, ImmWord(0xF0F0F0F0F0F0F0FUL));
+ ma_and(output.reg, ScratchRegister);
+ ma_dsll(tmp, output.reg, Imm32(8));
+ ma_daddu(output.reg, tmp);
+ ma_dsll(tmp, output.reg, Imm32(16));
+ ma_daddu(output.reg, tmp);
+ ma_dsll(tmp, output.reg, Imm32(32));
+ ma_daddu(output.reg, tmp);
+ ma_dsra(output.reg, output.reg, Imm32(56));
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ ma_dext(scratch, value.valueReg(), Imm32(0), Imm32(32));
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNumber(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBoolean(value, scratch2);
+ ma_b(scratch2, scratch2, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxString(value, scratch2);
+ load32(Address(scratch2, JSString::offsetOfLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ computeEffectiveAddress(address, scratch2);
+ splitTag(scratch2, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBigInt(value, scratch2);
+ load32(Address(scratch2, BigInt::offsetOfDigitLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestPrimitive(cond, scratch2, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ SecondScratchRegisterScope scratch(*this);
+ loadPtr(valaddr, scratch);
+ ma_b(scratch, ImmWord(magic), label, cond);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
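+  // trunc.l.d sets the FCSR CauseV (invalid operation) bit for out-of-range
+  // and NaN inputs; the trailing 'sll dest, dest, 0' keeps the low 32 bits
+  // (the value mod 2^32), sign-extended.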
+ as_truncld(ScratchDoubleReg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+
+ as_sll(dest, dest, 0);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ as_truncls(ScratchDoubleReg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+
+ as_sll(dest, dest, 0);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of -0, the output is zero.
+ // In the case of overflow, the output is:
+ // - MIPS64R2: 2^63-1
+ // - MIPS64R6: saturated
+ // In the case of NaN, the output is:
+ // - MIPS64R2: 2^63-1
+ // - MIPS64R6: 0
+ as_truncld(fpscratch, src);
+ moveFromDouble(fpscratch, dest);
+
+  // Fail on overflow; note that on MIPS64R2 this also fails on NaN inputs.
+ as_sll(scratch, dest, 0);
+ ma_b(dest, scratch, fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+ type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+ // dest := src XOR mask
+ // scratch := dest >> JSVAL_TAG_SHIFT
+ // fail if scratch != 0
+ //
+ // Note: src and dest can be the same register
+ ScratchRegisterScope scratch(asMasm());
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ ma_xor(scratch, src.valueReg());
+ ma_move(dest, scratch);
+ ma_dsrl(scratch, scratch, Imm32(JSVAL_TAG_SHIFT));
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// The specializations for cmpPtrSet are outside the braces because
+// check_macroassembler_style can't yet deal with specializations.
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ ImmPtr rhs, Register dest) {
+ loadPtr(lhs, SecondScratchReg);
+ cmpPtrSet(cond, SecondScratchReg, rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ loadPtr(rhs, ScratchRegister);
+ cmpPtrSet(cond, lhs, ScratchRegister, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ loadPtr(lhs, ScratchRegister);
+ cmpPtrSet(cond, ScratchRegister, rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ load32(rhs, ScratchRegister);
+ cmp32Set(cond, lhs, ScratchRegister, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ load32(lhs, ScratchRegister);
+ cmp32Set(cond, ScratchRegister, rhs, dest);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ Register scratch = ScratchRegister;
+ MOZ_ASSERT(src != scratch && dest != scratch);
+ cmpPtrSet(cond, lhs, rhs, scratch);
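+  // MIPSR6 removed movn/movz, so compose the conditional move from
+  // selnez/seleqz instead.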
+#ifdef MIPSR6
+ as_selnez(src, src, scratch);
+ as_seleqz(dest, dest, scratch);
+ as_or(dest, dest, src);
+#else
+ as_movn(dest, src, scratch);
+#endif
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssemblerMIPS64Compat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), addr);
+}
+
+void MacroAssemblerMIPS64Compat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jr(ra);
+ as_nop();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_inl_h */
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.cpp b/js/src/jit/mips64/MacroAssembler-mips64.cpp
new file mode 100644
index 0000000000..2d466d7efd
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.cpp
@@ -0,0 +1,2852 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/MacroAssembler-mips64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static_assert(sizeof(intptr_t) == 8, "Not 32-bit clean.");
+
+void MacroAssemblerMIPS64Compat::convertBoolToInt32(Register src,
+ Register dest) {
+ // Note that C++ bool is only 1 byte, so zero extend it to clear the
+ // higher-order bits.
+ ma_and(dest, src, Imm32(0xff));
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToDouble(Register src,
+ FloatRegister dest) {
+ as_mtc1(src, dest);
+ as_cvtdw(dest, dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const Address& src,
+ FloatRegister dest) {
+ ma_ls(dest, src);
+ as_cvtdw(dest, dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ computeScaledAddress(src, ScratchRegister);
+ convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
+ asMasm().convertInt64ToDouble(Register64(ScratchRegister), dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertUInt64ToDouble(Register src,
+ FloatRegister dest) {
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != ScratchRegister);
+ MOZ_ASSERT(src != SecondScratchReg);
+
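+  // The top bit is set, so a signed 64-bit conversion would see a negative
+  // value. Halve the input, keeping the low bit (the usual round-to-odd
+  // trick so the final doubling rounds correctly), convert, then double.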
+ ma_and(ScratchRegister, src, Imm32(1));
+ ma_dsrl(SecondScratchReg, src, Imm32(1));
+ ma_or(ScratchRegister, SecondScratchReg);
+ as_dmtc1(ScratchRegister, dest);
+ as_cvtdl(dest, dest);
+ asMasm().addDouble(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_dmtc1(src, dest);
+ as_cvtdl(dest, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPS64Compat::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ ma_dext(ScratchRegister, src, Imm32(0), Imm32(32));
+ asMasm().convertInt64ToFloat32(Register64(ScratchRegister), dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertDoubleToFloat32(FloatRegister src,
+ FloatRegister dest) {
+ as_cvtsd(dest, src);
+}
+
+const int CauseBitPos = int(Assembler::CauseI);
+const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
+const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
+ (1 << int(Assembler::CauseV))) >>
+ int(Assembler::CauseI);
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void MacroAssemblerMIPS64Compat::convertDoubleToInt32(FloatRegister src,
+ Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromDouble(src, dest);
+ ma_drol(dest, dest, Imm32(1));
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+
+  // Truncate double to int; fail if the result is inexact or invalid.
+ as_truncwd(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+ as_andi(ScratchRegister, ScratchRegister,
+          CauseIOrVMask); // Mask for the Inexact and Invalid cause bits.
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64Compat::convertDoubleToPtr(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromDouble(src, dest);
+ ma_drol(dest, dest, Imm32(1));
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+ as_truncld(ScratchDoubleReg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+ as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+void MacroAssemblerMIPS64Compat::convertFloat32ToInt32(FloatRegister src,
+ Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ as_truncws(ScratchFloat32Reg, src);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromFloat32(ScratchFloat32Reg, dest);
+ ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+ as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+ ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64Compat::convertFloat32ToDouble(FloatRegister src,
+ FloatRegister dest) {
+ as_cvtds(dest, src);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ as_mtc1(src, dest);
+ as_cvtsw(dest, dest);
+}
+
+void MacroAssemblerMIPS64Compat::convertInt32ToFloat32(const Address& src,
+ FloatRegister dest) {
+ ma_ls(dest, src);
+ as_cvtsw(dest, dest);
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt64ToDouble(Register64(src), dest);
+}
+
+void MacroAssemblerMIPS64Compat::movq(Register rs, Register rd) {
+ ma_move(rd, rs);
+}
+
+void MacroAssemblerMIPS64::ma_li(Register dest, CodeLabel* label) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->patchAt()->bind(bo.getOffset());
+ label->setLinkMode(CodeLabel::MoveImmediate);
+}
+
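+// Materialize a 64-bit immediate in the fewest instructions, keyed on how
+// many of the value's high bits are mere sign or zero extension of the rest.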
+void MacroAssemblerMIPS64::ma_li(Register dest, ImmWord imm) {
+ int64_t value = imm.value;
+
+ if (-1 == (value >> 15) || 0 == (value >> 15)) {
+ as_addiu(dest, zero, value);
+ return;
+ }
+ if (0 == (value >> 16)) {
+ as_ori(dest, zero, value);
+ return;
+ }
+
+ if (-1 == (value >> 31) || 0 == (value >> 31)) {
+ as_lui(dest, uint16_t(value >> 16));
+ } else if (0 == (value >> 32)) {
+ as_lui(dest, uint16_t(value >> 16));
+ as_dinsu(dest, zero, 32, 32);
+ } else if (-1 == (value >> 47) || 0 == (value >> 47)) {
+ as_lui(dest, uint16_t(value >> 32));
+ if (uint16_t(value >> 16)) {
+ as_ori(dest, dest, uint16_t(value >> 16));
+ }
+ as_dsll(dest, dest, 16);
+ } else if (0 == (value >> 48)) {
+ as_lui(dest, uint16_t(value >> 32));
+ as_dinsu(dest, zero, 32, 32);
+ if (uint16_t(value >> 16)) {
+ as_ori(dest, dest, uint16_t(value >> 16));
+ }
+ as_dsll(dest, dest, 16);
+ } else {
+ as_lui(dest, uint16_t(value >> 48));
+ if (uint16_t(value >> 32)) {
+ as_ori(dest, dest, uint16_t(value >> 32));
+ }
+ if (uint16_t(value >> 16)) {
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, uint16_t(value >> 16));
+ as_dsll(dest, dest, 16);
+ } else {
+ as_dsll32(dest, dest, 32);
+ }
+ }
+ if (uint16_t(value)) {
+ as_ori(dest, dest, uint16_t(value));
+ }
+}
+
+// This method generates a lui, dsll and ori instruction block that can be
+// modified by UpdateLoad64Value, either during compilation (e.g.
+// Assembler::bind) or during execution (e.g. jit::PatchJump).
+void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmPtr imm) {
+ return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
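+// Patchable sequences must have a fixed shape. Li64 always emits the full
+// six-instruction lui/ori/dsll form; the default form materializes a 48-bit
+// value (sufficient for a code address here) in four instructions.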
+void MacroAssemblerMIPS64::ma_liPatchable(Register dest, ImmWord imm,
+ LiFlags flags) {
+ if (Li64 == flags) {
+ m_buffer.ensureSpace(6 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Upper(Imm32(imm.value >> 32)).encode());
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+ as_dsll(dest, dest, 16);
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+ } else {
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ as_lui(dest, Imm16::Lower(Imm32(imm.value >> 32)).encode());
+ as_ori(dest, dest, Imm16::Upper(Imm32(imm.value)).encode());
+ as_drotr32(dest, dest, 48);
+ as_ori(dest, dest, Imm16::Lower(Imm32(imm.value)).encode());
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dnegu(Register rd, Register rs) {
+ as_dsubu(rd, zero, rs);
+}
+
+// Shifts
+void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_dsll32(rd, rt, shift.value);
+ } else {
+ as_dsll(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_dsrl32(rd, rt, shift.value);
+ } else {
+ as_dsrl(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_dsra32(rd, rt, shift.value);
+ } else {
+ as_dsra(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Imm32 shift) {
+ if (31 < shift.value) {
+ as_drotr32(rd, rt, shift.value);
+ } else {
+ as_drotr(rd, rt, shift.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Imm32 shift) {
+ uint32_t s = 64 - shift.value;
+
+ if (31 < s) {
+ as_drotr32(rd, rt, s);
+ } else {
+ as_drotr(rd, rt, s);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsll(Register rd, Register rt, Register shift) {
+ as_dsllv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_dsrl(Register rd, Register rt, Register shift) {
+ as_dsrlv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_dsra(Register rd, Register rt, Register shift) {
+ as_dsrav(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_dror(Register rd, Register rt, Register shift) {
+ as_drotrv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPS64::ma_drol(Register rd, Register rt, Register shift) {
+ as_dsubu(ScratchRegister, zero, shift);
+ as_drotrv(rd, rt, ScratchRegister);
+}
+
+void MacroAssemblerMIPS64::ma_dins(Register rt, Register rs, Imm32 pos,
+ Imm32 size) {
+ if (pos.value >= 0 && pos.value < 32) {
+ if (pos.value + size.value > 32) {
+ as_dinsm(rt, rs, pos.value, size.value);
+ } else {
+ as_dins(rt, rs, pos.value, size.value);
+ }
+ } else {
+ as_dinsu(rt, rs, pos.value, size.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dext(Register rt, Register rs, Imm32 pos,
+ Imm32 size) {
+ if (pos.value >= 0 && pos.value < 32) {
+ if (size.value > 32) {
+ as_dextm(rt, rs, pos.value, size.value);
+ } else {
+ as_dext(rt, rs, pos.value, size.value);
+ }
+ } else {
+ as_dextu(rt, rs, pos.value, size.value);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsbh(Register rd, Register rt) {
+ as_dsbh(rd, rt);
+}
+
+void MacroAssemblerMIPS64::ma_dshd(Register rd, Register rt) {
+ as_dshd(rd, rt);
+}
+
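+// Count trailing zeros as ctz(x) = 63 - clz(x & -x) for x != 0; the final
+// conditional move preserves clz's result of 64 when x == 0.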
+void MacroAssemblerMIPS64::ma_dctz(Register rd, Register rs) {
+ ma_dnegu(ScratchRegister, rs);
+ as_and(rd, ScratchRegister, rs);
+ as_dclz(rd, rd);
+ ma_dnegu(SecondScratchReg, rd);
+ ma_daddu(SecondScratchReg, Imm32(0x3f));
+#ifdef MIPSR6
+ as_selnez(SecondScratchReg, SecondScratchReg, ScratchRegister);
+ as_seleqz(rd, rd, ScratchRegister);
+ as_or(rd, rd, SecondScratchReg);
+#else
+ as_movn(rd, SecondScratchReg, ScratchRegister);
+#endif
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_daddiu(rd, rs, imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_daddu(rd, rs, ScratchRegister);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_daddu(Register rd, Register rs) {
+ as_daddu(rd, rd, rs);
+}
+
+void MacroAssemblerMIPS64::ma_daddu(Register rd, Imm32 imm) {
+ ma_daddu(rd, rd, imm);
+}
+
+void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
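+  // The 32-bit add overflowed iff the sign-extending 32-bit sum (addu)
+  // differs from the full 64-bit sum (daddu).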
+ as_daddu(SecondScratchReg, rs, rt);
+ as_addu(rd, rs, rt);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64::ma_add32TestOverflow(Register rd, Register rs,
+ Imm32 imm, Label* overflow) {
+  // Check the signed 16-bit immediate range required by as_daddiu.
+ if (Imm16::IsInSignedRange(imm.value)) {
+ as_daddiu(SecondScratchReg, rs, imm.value);
+ as_addiu(rd, rs, imm.value);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_add32TestOverflow(rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rd != rt);
+ MOZ_ASSERT(rd != scratch2);
+
+ if (rs == rt) {
+ as_daddu(rd, rs, rs);
+ as_xor(scratch2, rs, rd);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rs != scratch2);
+ MOZ_ASSERT(rt != scratch2);
+
+    // If the signs of rs and rt differ, no overflow can occur.
+ as_xor(scratch2, rs, rt);
+ as_nor(scratch2, scratch2, zero);
+
+ as_daddu(rd, rs, rt);
+ as_xor(scratch, rd, rt);
+ as_and(scratch, scratch, scratch2);
+ }
+
+ ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
+ Imm32 imm, Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_addPtrTestOverflow(rd, rs, ScratchRegister, overflow);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestOverflow(Register rd, Register rs,
+ ImmWord imm, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_addPtrTestOverflow(rd, rs, scratch, overflow);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rs, Register rt,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_daddu(rd, rs, rt);
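+  // An unsigned carry occurred iff the result is below the (unmodified)
+  // second operand.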
+ as_sltu(scratch2, rd, rt);
+ ma_b(scratch2, scratch2, overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rs, Imm32 imm,
+ Label* overflow) {
+  // Check the signed 16-bit immediate range required by as_daddiu.
+ if (Imm16::IsInSignedRange(imm.value)) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_daddiu(rd, rs, imm.value);
+ as_sltiu(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_addPtrTestCarry(cond, rd, rs, ScratchRegister, overflow);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rs, ImmWord imm,
+ Label* overflow) {
+  // Check the signed 16-bit immediate range required by as_daddiu.
+ if (Imm16::IsInSignedRange(imm.value)) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_daddiu(rd, rs, imm.value);
+ as_sltiu(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_addPtrTestCarry(cond, rd, rs, scratch, overflow);
+ }
+}
+
+// Subtract.
+void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs, Imm32 imm) {
+ if (Imm16::IsInSignedRange(-imm.value)) {
+ as_daddiu(rd, rs, -imm.value);
+ } else {
+ ma_li(ScratchRegister, imm);
+ as_dsubu(rd, rs, ScratchRegister);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_dsubu(Register rd, Register rs) {
+ as_dsubu(rd, rd, rs);
+}
+
+void MacroAssemblerMIPS64::ma_dsubu(Register rd, Imm32 imm) {
+ ma_dsubu(rd, rd, imm);
+}
+
+void MacroAssemblerMIPS64::ma_sub32TestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ as_dsubu(SecondScratchReg, rs, rt);
+ as_subu(rd, rs, rt);
+ ma_b(rd, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT_IF(rs == rd, rs != rt);
+ MOZ_ASSERT(rd != rt);
+ MOZ_ASSERT(rs != scratch2);
+ MOZ_ASSERT(rt != scratch2);
+ MOZ_ASSERT(rd != scratch2);
+
+ Register rs_copy = rs;
+
+ if (rs == rd) {
+ ma_move(scratch2, rs);
+ rs_copy = scratch2;
+ }
+
+ {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ as_dsubu(rd, rs, rt);
+    // If the signs of rs and rt are the same, no overflow can occur.
+    as_xor(scratch, rs_copy, rt);
+    // Check whether the signs of rd and rs differ.
+ as_xor(scratch2, rd, rs_copy);
+ as_and(scratch2, scratch2, scratch);
+ }
+
+ ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
+
+void MacroAssemblerMIPS64::ma_subPtrTestOverflow(Register rd, Register rs,
+ Imm32 imm, Label* overflow) {
+ ma_li(ScratchRegister, imm);
+ ma_subPtrTestOverflow(rd, rs, ScratchRegister, overflow);
+}
+
+void MacroAssemblerMIPS64::ma_dmult(Register rs, Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+#ifdef MIPSR6
+ as_dmul(rs, ScratchRegister, SecondScratchReg);
+ as_dmuh(rs, ScratchRegister, rs);
+ ma_move(rs, SecondScratchReg);
+#else
+ as_dmult(rs, ScratchRegister);
+#endif
+}
+
+void MacroAssemblerMIPS64::ma_mulPtrTestOverflow(Register rd, Register rs,
+ Register rt, Label* overflow) {
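+  // The signed multiply overflowed iff the high 64 bits of the 128-bit
+  // product are not simply the sign extension of the low 64 bits.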
+#ifdef MIPSR6
+ if (rd == rs) {
+ ma_move(SecondScratchReg, rs);
+ rs = SecondScratchReg;
+ }
+ as_dmul(rd, rs, rt);
+ as_dmuh(SecondScratchReg, rs, rt);
+#else
+ as_dmult(rs, rt);
+ as_mflo(rd);
+ as_mfhi(SecondScratchReg);
+#endif
+ as_dsra32(ScratchRegister, rd, 63);
+ ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+// Memory.
+void MacroAssemblerMIPS64::ma_load(Register dest, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && ZeroExtend != extension &&
+ !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gslbx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gslhx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gslwx(dest, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gsldx(dest, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ if (ZeroExtend == extension) {
+ as_lbu(dest, base, encodedOffset);
+ } else {
+ as_lb(dest, base, encodedOffset);
+ }
+ break;
+ case SizeHalfWord:
+ if (ZeroExtend == extension) {
+ as_lhu(dest, base, encodedOffset);
+ } else {
+ as_lh(dest, base, encodedOffset);
+ }
+ break;
+ case SizeWord:
+ if (ZeroExtend == extension) {
+ as_lwu(dest, base, encodedOffset);
+ } else {
+ as_lw(dest, base, encodedOffset);
+ }
+ break;
+ case SizeDouble:
+ as_ld(dest, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+}
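+
+// A hypothetical usage sketch of ma_load (masm, base and dest are
+// illustrative, not from this file). When the offset fits in a signed 16-bit
+// immediate this is a single load; otherwise the offset is materialized and
+// added to the base first:
+//
+//   masm.ma_load(dest, Address(base, 12), SizeWord, SignExtend);  // one lw
+//   // Large offset: li + daddu to form the address, then lbu.
+//   masm.ma_load(dest, Address(base, 0x12345), SizeByte, ZeroExtend);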
+
+void MacroAssemblerMIPS64::ma_store(Register data, Address address,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int16_t encodedOffset;
+ Register base;
+
+ if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ base = address.base;
+
+ switch (size) {
+ case SizeByte:
+ as_gssbx(data, base, ScratchRegister, 0);
+ break;
+ case SizeHalfWord:
+ as_gsshx(data, base, ScratchRegister, 0);
+ break;
+ case SizeWord:
+ as_gsswx(data, base, ScratchRegister, 0);
+ break;
+ case SizeDouble:
+ as_gssdx(data, base, ScratchRegister, 0);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return;
+ }
+
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = Imm16(0).encode();
+ } else {
+ encodedOffset = Imm16(address.offset).encode();
+ base = address.base;
+ }
+
+ switch (size) {
+ case SizeByte:
+ as_sb(data, base, encodedOffset);
+ break;
+ case SizeHalfWord:
+ as_sh(data, base, encodedOffset);
+ break;
+ case SizeWord:
+ as_sw(data, base, encodedOffset);
+ break;
+ case SizeDouble:
+ as_sd(data, base, encodedOffset);
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+}
+
+void MacroAssemblerMIPS64Compat::computeScaledAddress(const BaseIndex& address,
+ Register dest) {
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ if (shift) {
+ ma_dsll(ScratchRegister, address.index, Imm32(shift));
+ as_daddu(dest, address.base, ScratchRegister);
+ } else {
+ as_daddu(dest, address.base, address.index);
+ }
+}
+
+void MacroAssemblerMIPS64Compat::computeEffectiveAddress(
+ const BaseIndex& address, Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ asMasm().addPtr(Imm32(address.offset), dest);
+ }
+}
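+
+// In C terms, the two helpers above compute (a sketch with illustrative
+// names, where shift is 0..3 for TimesOne..TimesEight):
+//
+//   uintptr_t scaled = base + ((uintptr_t)index << shift);  // scaled address
+//   uintptr_t ea = scaled + offset;                         // effective addr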
+
+// Shortcut for when we know we're transferring pointer-sized (64-bit) data.
+void MacroAssemblerMIPS64::ma_pop(Register r) {
+ as_ld(r, StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void MacroAssemblerMIPS64::ma_push(Register r) {
+ if (r == sp) {
+ // Pushing sp requires one more instruction.
+ ma_move(ScratchRegister, sp);
+ r = ScratchRegister;
+ }
+
+ as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+ as_sd(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+void MacroAssemblerMIPS64::ma_b(Register lhs, ImmWord imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ if (imm.value <= INT32_MAX) {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
+ } else {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_b(Register lhs, Address addr, Label* label,
+ Condition c, JumpKind jumpKind) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeDouble);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS64::ma_b(Address addr, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ma_load(SecondScratchReg, addr, SizeDouble);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS64::ma_b(Address addr, ImmGCPtr imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ma_load(SecondScratchReg, addr, SizeDouble);
+ ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS64::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
+ spew("branch .Llabel %p\n", label);
+ if (label->bound()) {
+    // Generate the long jump for calls because the return address has to be
+    // the address after the reserved block.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jalr(ScratchRegister);
+ if (delaySlotFill == FillDelaySlot) {
+ as_nop();
+ }
+ return;
+ }
+
+  // The second word holds a pointer to the next branch in the label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+  // Make the whole branch sequence contiguous in the buffer. The six
+  // instructions written below include the delay slot.
+ m_buffer.ensureSpace(6 * sizeof(uint32_t));
+
+ spew("bal .Llabel %p\n", label);
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+ if (delaySlotFill == FillDelaySlot) {
+ as_nop();
+ }
+}
+
+void MacroAssemblerMIPS64::branchWithCode(InstImm code, Label* label,
+ JumpKind jumpKind) {
+  // Simply output the label's pointer as its id; note that once a label is
+  // destroyed, its pointer may be reused for another label.
+ spew("branch .Llabel %p", label);
+ MOZ_ASSERT(code.encode() !=
+ InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+ InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset)) {
+ jumpKind = ShortJump;
+ }
+
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ code.setBOffImm16(BOffImm16(offset));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ writeInst(code.encode());
+ as_nop();
+ return;
+ }
+
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+    // Handle a long conditional branch. The inverted branch's target offset
+    // is relative to the branch itself and points just past the nop below.
+ spew("invert branch .Llabel %p", label);
+ InstImm code_r = invertBranch(code, BOffImm16(7 * sizeof(uint32_t)));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code_r);
+#endif
+ writeInst(code_r.encode());
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ma_liPatchable(ScratchRegister, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jr(ScratchRegister);
+ as_nop();
+ return;
+ }
+
+ // Generate open jump and link it to a label.
+
+  // The second word holds a pointer to the next branch in the label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+    // Make the whole branch sequence contiguous in the buffer.
+    m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+    // Indicate that this is a short jump, with offset 4.
+    code.setBOffImm16(BOffImm16(4));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+  // Make the whole branch sequence contiguous in the buffer. Up to seven
+  // instructions are written below (including the conditional nop).
+ m_buffer.ensureSpace(7 * sizeof(uint32_t));
+
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+ as_nop();
+ if (conditional) {
+ as_nop();
+ }
+}
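+
+// Layout emitted above for a bound conditional branch whose target is out of
+// short range (a sketch; the BOffImm16(7 * sizeof(uint32_t)) offset skips
+// exactly this sequence):
+//
+//   b<!cond> next      ; inverted condition, first li word in the delay slot
+//   <4-instruction patchable li of the target into ScratchRegister>
+//   jr       ScratchRegister
+//   nop                ; delay slot
+// next: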
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmWord imm,
+ Condition c) {
+ if (imm.value <= INT32_MAX) {
+ ma_cmp_set(rd, rs, Imm32(uint32_t(imm.value)), c);
+ } else {
+ ma_li(ScratchRegister, imm);
+ ma_cmp_set(rd, rs, ScratchRegister, c);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, ImmWord imm,
+ Condition c) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_load(scratch2, address, SizeDouble);
+ ma_cmp_set(rd, scratch2, imm, c);
+}
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Register rs, ImmPtr imm,
+ Condition c) {
+ ma_cmp_set(rd, rs, ImmWord(uintptr_t(imm.value)), c);
+}
+
+void MacroAssemblerMIPS64::ma_cmp_set(Register rd, Address address, Imm32 imm,
+ Condition c) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_load(scratch2, address, SizeWord, SignExtend);
+ ma_cmp_set(rd, scratch2, imm, c);
+}
+
+// fp instructions
+void MacroAssemblerMIPS64::ma_lid(FloatRegister dest, double value) {
+ ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+ if (imm.value != 0) {
+ ma_li(ScratchRegister, imm);
+ moveToDouble(ScratchRegister, dest);
+ } else {
+ moveToDouble(zero, dest);
+ }
+}
+
+void MacroAssemblerMIPS64::ma_mv(FloatRegister src, ValueOperand dest) {
+ as_dmfc1(dest.valueReg(), src);
+}
+
+void MacroAssemblerMIPS64::ma_mv(ValueOperand src, FloatRegister dest) {
+ as_dmtc1(src.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64::ma_ls(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_lwc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gslsx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_lwc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_ld(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_ldc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsldx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_ldc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_sd(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_sdc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gssdx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_sdc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_ss(FloatRegister ft, Address address) {
+ if (Imm16::IsInSignedRange(address.offset)) {
+ as_swc1(ft, address.base, address.offset);
+ } else {
+ MOZ_ASSERT(address.base != ScratchRegister);
+ ma_li(ScratchRegister, Imm32(address.offset));
+ if (isLoongson()) {
+ as_gsssx(ft, address.base, ScratchRegister, 0);
+ } else {
+ as_daddu(ScratchRegister, address.base, ScratchRegister);
+ as_swc1(ft, ScratchRegister, 0);
+ }
+ }
+}
+
+void MacroAssemblerMIPS64::ma_pop(FloatRegister f) {
+ as_ldc1(f, StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(double));
+}
+
+void MacroAssemblerMIPS64::ma_push(FloatRegister f) {
+ as_daddiu(StackPointer, StackPointer, (int32_t) - sizeof(double));
+ as_sdc1(f, StackPointer, 0);
+}
+
+bool MacroAssemblerMIPS64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+void MacroAssemblerMIPS64Compat::move32(Imm32 imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPS64Compat::move32(Register src, Register dest) {
+ ma_move(dest, src);
+}
+
+void MacroAssemblerMIPS64Compat::movePtr(Register src, Register dest) {
+ ma_move(dest, src);
+}
+void MacroAssemblerMIPS64Compat::movePtr(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPS64Compat::movePtr(ImmGCPtr imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPS64Compat::movePtr(ImmPtr imm, Register dest) {
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+void MacroAssemblerMIPS64Compat::movePtr(wasm::SymbolicAddress imm,
+ Register dest) {
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+void MacroAssemblerMIPS64Compat::load8ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load8ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load8SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load8SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16ZeroExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16ZeroExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16SignExtend(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load16SignExtend(const BaseIndex& src,
+ Register dest) {
+ ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPS64Compat::load32(const Address& address, Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::load32(const BaseIndex& address,
+ Register dest) {
+ ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::load32(AbsoluteAddress address,
+ Register dest) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::load32(wasm::SymbolicAddress address,
+ Register dest) {
+ movePtr(address, ScratchRegister);
+ load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(const Address& address,
+ Register dest) {
+ ma_load(dest, address, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(const BaseIndex& src, Register dest) {
+ ma_load(dest, src, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(AbsoluteAddress address,
+ Register dest) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadPtr(wasm::SymbolicAddress address,
+ Register dest) {
+ movePtr(address, ScratchRegister);
+ loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadPrivate(const Address& address,
+ Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadUnalignedDouble(
+ const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+ FloatRegister dest) {
+ computeScaledAddress(src, SecondScratchReg);
+ BufferOffset load;
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + 7)) {
+ load = as_ldl(temp, SecondScratchReg, src.offset + 7);
+ as_ldr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ load = as_ldl(temp, ScratchRegister, 7);
+ as_ldr(temp, ScratchRegister, 0);
+ }
+ append(access, load.getOffset());
+ moveToDouble(temp, dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadUnalignedFloat32(
+ const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+ FloatRegister dest) {
+ computeScaledAddress(src, SecondScratchReg);
+ BufferOffset load;
+ if (Imm16::IsInSignedRange(src.offset) &&
+ Imm16::IsInSignedRange(src.offset + 3)) {
+ load = as_lwl(temp, SecondScratchReg, src.offset + 3);
+ as_lwr(temp, SecondScratchReg, src.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(src.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ load = as_lwl(temp, ScratchRegister, 3);
+ as_lwr(temp, ScratchRegister, 0);
+ }
+ append(access, load.getOffset());
+ moveToFloat32(temp, dest);
+}
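+
+// ldl/ldr and lwl/lwr above are the classic pre-R6 unaligned-access pairs:
+// one instruction fills the most-significant bytes of the destination and
+// the other the least-significant bytes, so together they assemble an
+// unaligned 64- or 32-bit load in two instructions without taking an
+// alignment fault. The offset + 7 / offset + 3 versus offset split follows
+// the little-endian addressing convention.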
+
+void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const Address& address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store8(Register src, const Address& address) {
+ ma_store(src, address, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store8(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store8(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeByte);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const Address& address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Register src, const Address& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store16(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Register src,
+ AbsoluteAddress address) {
+ movePtr(ImmPtr(address.addr), ScratchRegister);
+ store32(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerMIPS64Compat::store32(Register src, const Address& address) {
+ ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Imm32 src, const Address& address) {
+ move32(src, SecondScratchReg);
+ ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeWord);
+}
+
+void MacroAssemblerMIPS64Compat::store32(Register src, const BaseIndex& dest) {
+ ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void MacroAssemblerMIPS64Compat::storePtr(ImmWord imm, T address) {
+ ma_li(SecondScratchReg, imm);
+ ma_store(SecondScratchReg, address, SizeDouble);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmWord imm,
+ Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
+ ImmWord imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPS64Compat::storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmPtr imm,
+ Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
+ ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPS64Compat::storePtr(ImmGCPtr imm, T address) {
+ movePtr(imm, SecondScratchReg);
+ storePtr(SecondScratchReg, address);
+}
+
+template void MacroAssemblerMIPS64Compat::storePtr<Address>(ImmGCPtr imm,
+ Address address);
+template void MacroAssemblerMIPS64Compat::storePtr<BaseIndex>(
+ ImmGCPtr imm, BaseIndex address);
+
+void MacroAssemblerMIPS64Compat::storePtr(Register src,
+ const Address& address) {
+ ma_store(src, address, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::storePtr(Register src,
+ const BaseIndex& address) {
+ ma_store(src, address, SizeDouble);
+}
+
+void MacroAssemblerMIPS64Compat::storePtr(Register src, AbsoluteAddress dest) {
+ movePtr(ImmPtr(dest.addr), ScratchRegister);
+ storePtr(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerMIPS64Compat::storeUnalignedFloat32(
+ const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+ const BaseIndex& dest) {
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromFloat32(src, temp);
+ BufferOffset store;
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + 3)) {
+ store = as_swl(temp, SecondScratchReg, dest.offset + 3);
+ as_swr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ store = as_swl(temp, ScratchRegister, 3);
+ as_swr(temp, ScratchRegister, 0);
+ }
+ append(access, store.getOffset());
+}
+
+void MacroAssemblerMIPS64Compat::storeUnalignedDouble(
+ const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+ const BaseIndex& dest) {
+ computeScaledAddress(dest, SecondScratchReg);
+ moveFromDouble(src, temp);
+
+ BufferOffset store;
+ if (Imm16::IsInSignedRange(dest.offset) &&
+ Imm16::IsInSignedRange(dest.offset + 7)) {
+ store = as_sdl(temp, SecondScratchReg, dest.offset + 7);
+ as_sdr(temp, SecondScratchReg, dest.offset);
+ } else {
+ ma_li(ScratchRegister, Imm32(dest.offset));
+ as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+ store = as_sdl(temp, ScratchRegister, 7);
+ as_sdr(temp, ScratchRegister, 0);
+ }
+ append(access, store.getOffset());
+}
+
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ as_roundwd(ScratchDoubleReg, input);
+ ma_li(ScratchRegister, Imm32(255));
+ as_mfc1(output, ScratchDoubleReg);
+#ifdef MIPSR6
+ as_slti(SecondScratchReg, output, 0);
+ as_seleqz(output, output, SecondScratchReg);
+ as_sltiu(SecondScratchReg, output, 255);
+ as_selnez(output, output, SecondScratchReg);
+ as_seleqz(ScratchRegister, ScratchRegister, SecondScratchReg);
+ as_or(output, output, ScratchRegister);
+#else
+ zeroDouble(ScratchDoubleReg);
+ as_sltiu(SecondScratchReg, output, 255);
+ as_colt(DoubleFloat, ScratchDoubleReg, input);
+  // if (res > 255) res = 255;
+  as_movz(output, ScratchRegister, SecondScratchReg);
+  // if (!(input > 0)) res = 0;
+  as_movf(output, zero);
+#endif
+}
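+
+// The clamp above, as a hedged C sketch (round to nearest, then saturate to
+// [0, 255]; names illustrative):
+//
+//   #include <math.h>
+//   int32_t r = (int32_t)nearbyint(input);  // as_roundwd, nearest rounding
+//   output = r < 0 ? 0 : (r > 255 ? 255 : r);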
+
+void MacroAssemblerMIPS64Compat::testNullSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+void MacroAssemblerMIPS64Compat::testObjectSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+void MacroAssemblerMIPS64Compat::testUndefinedSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ splitTag(value, SecondScratchReg);
+ ma_cmp_set(dest, SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
+void MacroAssemblerMIPS64Compat::unboxInt32(const ValueOperand& operand,
+ Register dest) {
+ ma_sll(dest, operand.valueReg(), Imm32(0));
+}
+
+void MacroAssemblerMIPS64Compat::unboxInt32(Register src, Register dest) {
+ ma_sll(dest, src, Imm32(0));
+}
+
+void MacroAssemblerMIPS64Compat::unboxInt32(const Address& src, Register dest) {
+ load32(Address(src.base, src.offset), dest);
+}
+
+void MacroAssemblerMIPS64Compat::unboxInt32(const BaseIndex& src,
+ Register dest) {
+ computeScaledAddress(src, SecondScratchReg);
+ load32(Address(SecondScratchReg, src.offset), dest);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBoolean(const ValueOperand& operand,
+ Register dest) {
+ ma_dext(dest, operand.valueReg(), Imm32(0), Imm32(32));
+}
+
+void MacroAssemblerMIPS64Compat::unboxBoolean(Register src, Register dest) {
+ ma_dext(dest, src, Imm32(0), Imm32(32));
+}
+
+void MacroAssemblerMIPS64Compat::unboxBoolean(const Address& src,
+ Register dest) {
+ ma_load(dest, Address(src.base, src.offset), SizeWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBoolean(const BaseIndex& src,
+ Register dest) {
+ computeScaledAddress(src, SecondScratchReg);
+ ma_load(dest, Address(SecondScratchReg, src.offset), SizeWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPS64Compat::unboxDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ as_dmtc1(operand.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64Compat::unboxDouble(const Address& src,
+ FloatRegister dest) {
+ ma_ld(dest, Address(src.base, src.offset));
+}
+void MacroAssemblerMIPS64Compat::unboxDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch(asMasm());
+ loadPtr(src, scratch);
+ unboxDouble(ValueOperand(scratch), dest);
+}
+
+void MacroAssemblerMIPS64Compat::unboxString(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerMIPS64Compat::unboxString(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerMIPS64Compat::unboxString(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerMIPS64Compat::unboxSymbol(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerMIPS64Compat::unboxSymbol(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerMIPS64Compat::unboxSymbol(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBigInt(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBigInt(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxBigInt(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxObject(const ValueOperand& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxObject(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxObject(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerMIPS64Compat::unboxValue(const ValueOperand& src,
+ AnyRegister dest,
+ JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+void MacroAssemblerMIPS64Compat::boxDouble(FloatRegister src,
+ const ValueOperand& dest,
+ FloatRegister) {
+ as_dmfc1(dest.valueReg(), src);
+}
+
+void MacroAssemblerMIPS64Compat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest) {
+ MOZ_ASSERT(src != dest.valueReg());
+ boxValue(type, src, dest.valueReg());
+}
+
+void MacroAssemblerMIPS64Compat::boolValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPS64Compat::int32ValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64Compat::boolValueToFloat32(const ValueOperand& operand,
+ FloatRegister dest) {
+ convertBoolToInt32(operand.valueReg(), ScratchRegister);
+ convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPS64Compat::int32ValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+void MacroAssemblerMIPS64Compat::loadConstantFloat32(float f,
+ FloatRegister dest) {
+ ma_lis(dest, f);
+}
+
+void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const Address& src,
+ FloatRegister dest) {
+ Label notInt32, end;
+ // If it's an int, convert it to double.
+ loadPtr(Address(src.base, src.offset), ScratchRegister);
+ ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+ loadPtr(Address(src.base, src.offset), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+ unboxDouble(src, dest);
+ bind(&end);
+}
+
+void MacroAssemblerMIPS64Compat::loadInt32OrDouble(const BaseIndex& addr,
+ FloatRegister dest) {
+ Label notInt32, end;
+
+ // If it's an int, convert it to double.
+ computeScaledAddress(addr, SecondScratchReg);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ loadPtr(Address(SecondScratchReg, 0), ScratchRegister);
+ ma_dsrl(SecondScratchReg, ScratchRegister, Imm32(JSVAL_TAG_SHIFT));
+ asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+ computeScaledAddress(addr, SecondScratchReg);
+ loadPtr(Address(SecondScratchReg, 0), SecondScratchReg);
+ convertInt32ToDouble(SecondScratchReg, dest);
+ ma_b(&end, ShortJump);
+
+ // Not an int, just load as double.
+ bind(&notInt32);
+  // First, recompute the scaled address, since the scratch register was
+  // clobbered when the tag was loaded.
+ computeScaledAddress(addr, SecondScratchReg);
+ unboxDouble(Address(SecondScratchReg, 0), dest);
+ bind(&end);
+}
+
+void MacroAssemblerMIPS64Compat::loadConstantDouble(double dp,
+ FloatRegister dest) {
+ ma_lid(dest, dp);
+}
+
+Register MacroAssemblerMIPS64Compat::extractObject(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ ma_dext(scratch, scratch, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ return scratch;
+}
+
+Register MacroAssemblerMIPS64Compat::extractTag(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ ma_dext(scratch, scratch, Imm32(JSVAL_TAG_SHIFT),
+ Imm32(64 - JSVAL_TAG_SHIFT));
+ return scratch;
+}
+
+Register MacroAssemblerMIPS64Compat::extractTag(const BaseIndex& address,
+ Register scratch) {
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
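+
+// Assuming the usual 64-bit NaN-boxing layout (payload in the low
+// JSVAL_TAG_SHIFT bits, tag in the remaining high bits), the extractors
+// above amount to, in C:
+//
+//   uint64_t payload = bits & ((UINT64_C(1) << JSVAL_TAG_SHIFT) - 1);
+//   uint32_t tag = (uint32_t)(bits >> JSVAL_TAG_SHIFT);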
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val, Operand dst) {
+ storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
+ const BaseIndex& dest) {
+ computeScaledAddress(dest, SecondScratchReg);
+ storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
+ BaseIndex dest) {
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+
+ storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(ValueOperand val,
+ const Address& dest) {
+ storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(JSValueType type, Register reg,
+ Address dest) {
+ MOZ_ASSERT(dest.base != SecondScratchReg);
+
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ store32(reg, dest);
+ JSValueShiftedTag tag = (JSValueShiftedTag)JSVAL_TYPE_TO_SHIFTED_TAG(type);
+ store32(((Imm64(tag)).secondHalf()), Address(dest.base, dest.offset + 4));
+ } else {
+ ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_dsll(SecondScratchReg, SecondScratchReg, Imm32(JSVAL_TAG_SHIFT));
+ ma_dins(SecondScratchReg, reg, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+ }
+}
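+
+// On this little-endian target, the Int32/Boolean fast path above writes the
+// boxed value as two 32-bit stores; a hedged sketch:
+//
+//   *(uint32_t*)(base + off) = payload;                           // low half
+//   *(uint32_t*)(base + off + 4) = (uint32_t)(shiftedTag >> 32);  // tag half
+//
+// The general path instead assembles the full 64-bit value in a scratch
+// register and performs a single 8-byte store.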
+
+void MacroAssemblerMIPS64Compat::storeValue(const Value& val, Address dest) {
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), SecondScratchReg);
+ } else {
+ ma_li(SecondScratchReg, ImmWord(val.asRawBits()));
+ }
+ storePtr(SecondScratchReg, Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerMIPS64Compat::storeValue(const Value& val, BaseIndex dest) {
+ computeScaledAddress(dest, ScratchRegister);
+
+ int32_t offset = dest.offset;
+ if (!Imm16::IsInSignedRange(offset)) {
+ ma_li(SecondScratchReg, Imm32(offset));
+ as_daddu(ScratchRegister, ScratchRegister, SecondScratchReg);
+ offset = 0;
+ }
+ storeValue(val, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerMIPS64Compat::loadValue(const BaseIndex& addr,
+ ValueOperand val) {
+ computeScaledAddress(addr, SecondScratchReg);
+ loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+void MacroAssemblerMIPS64Compat::loadValue(Address src, ValueOperand val) {
+ loadPtr(Address(src.base, src.offset), val.valueReg());
+}
+
+void MacroAssemblerMIPS64Compat::tagValue(JSValueType type, Register payload,
+ ValueOperand dest) {
+ MOZ_ASSERT(dest.valueReg() != ScratchRegister);
+ if (payload != dest.valueReg()) {
+ ma_move(dest.valueReg(), payload);
+ }
+ ma_li(ScratchRegister, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_dins(dest.valueReg(), ScratchRegister, Imm32(JSVAL_TAG_SHIFT),
+ Imm32(64 - JSVAL_TAG_SHIFT));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_dins(dest.valueReg(), zero, Imm32(32), Imm32(JSVAL_TAG_SHIFT - 32));
+ }
+}
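+
+// Boxing, as performed above, is roughly (a sketch; the payload occupies the
+// low JSVAL_TAG_SHIFT bits, the tag the rest):
+//
+//   uint64_t boxed = (payload & ((UINT64_C(1) << JSVAL_TAG_SHIFT) - 1)) |
+//                    ((uint64_t)tag << JSVAL_TAG_SHIFT);
+//
+// For Int32/Boolean the extra ma_dins clears payload bits 32 and up, so only
+// the low 32 bits of the payload survive.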
+
+void MacroAssemblerMIPS64Compat::pushValue(ValueOperand val) {
+  // Allocate a stack slot for the Value.
+  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+  // Store the Value.
+ storeValue(val, Address(StackPointer, 0));
+}
+
+void MacroAssemblerMIPS64Compat::pushValue(const Address& addr) {
+  // Load the value before allocating stack space; addr.base may be sp.
+ loadPtr(Address(addr.base, addr.offset), ScratchRegister);
+ ma_dsubu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+ storePtr(ScratchRegister, Address(StackPointer, 0));
+}
+
+void MacroAssemblerMIPS64Compat::popValue(ValueOperand val) {
+ as_ld(val.valueReg(), StackPointer, 0);
+ as_daddiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+void MacroAssemblerMIPS64Compat::breakpoint() { as_break(0); }
+
+void MacroAssemblerMIPS64Compat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest,
+ Label* failure) {
+ Label isDouble, done;
+ {
+ ScratchTagScope tag(asMasm(), source);
+ splitTagForTest(source, tag);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+
+ unboxInt32(source, ScratchRegister);
+ convertInt32ToDouble(ScratchRegister, dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+void MacroAssemblerMIPS64Compat::checkStackAlignment() {
+#ifdef DEBUG
+ Label aligned;
+ as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
+ ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
+ as_break(BREAK_STACK_UNALIGNED);
+ bind(&aligned);
+#endif
+}
+
+void MacroAssemblerMIPS64Compat::handleFailureWithHandlerTail(
+ Label* profilerExitTail, Label* bailoutTail) {
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
+ ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+  ma_move(a0, StackPointer);  // Use a0 since it is the first function argument
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException * rfe);
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI<Fn, HandleException>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+  // We're going to return via the Ion calling convention.
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push two
+ // values expected by the finally block: the exception and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1);
+ loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
+
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
+
+ pushValue(exception);
+ pushValue(BooleanValue(true));
+ jump(a0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ ma_move(StackPointer, FramePointer);
+ pop(FramePointer);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(ReturnReg, Imm32(1));
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
+ ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a1);
+}
+
+CodeOffset MacroAssemblerMIPS64Compat::toggledJump(Label* label) {
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset MacroAssemblerMIPS64Compat::toggledCall(JitCode* target,
+ bool enabled) {
+ BufferOffset bo = nextOffset();
+ CodeOffset offset(bo.getOffset());
+ addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
+ if (enabled) {
+ as_jalr(ScratchRegister);
+ as_nop();
+ } else {
+ as_nop();
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
+ ToggledCallSize(nullptr));
+ return offset;
+}
+
+void MacroAssemblerMIPS64Compat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerMIPS64Compat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+ asMasm().subPtr(imm32, StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ reserveStack(reserved);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diff));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ storeDouble(*iter, Address(StackPointer, diff));
+ }
+ MOZ_ASSERT(diff == 0);
+}
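+
+// Illustrative (hypothetical) use: spill the volatile registers around some
+// clobbering code.
+//
+//   LiveRegisterSet save(GeneralRegisterSet::Volatile(),
+//                        FloatRegisterSet::Volatile());
+//   masm.PushRegsInMask(save);
+//   ...
+//   masm.PopRegsInMask(save);
+//
+// GPRs occupy the higher addresses of the reserved block, doubles sit below
+// them, and single-precision registers are widened to full doubles by
+// reduceSetForPush().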
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diff), *iter);
+ }
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ if (!ignore.has(*iter)) {
+ loadDouble(Address(StackPointer, diff), *iter);
+ }
+ }
+ MOZ_ASSERT(diff == 0);
+ freeStack(reserved);
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register) {
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ unsigned numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ int32_t diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffG + diffF);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ dest.offset -= reg.size();
+ if (reg.isDouble()) {
+ storeDouble(reg, dest);
+ } else if (reg.isSingle()) {
+ storeFloat32(reg, dest);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ MOZ_ASSERT(numFpu == 0);
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+}
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ ma_move(scratch, StackPointer);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+ storePtr(scratch, Address(StackPointer, 0));
+}
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+  // Reserve space for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+  // Save $ra because the call is going to clobber it. Restore it in
+  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+  // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
+ bool callFromWasm) {
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
+  // Load the callee into t9; no instruction between this move and the call
+  // should clobber it. Note that we can't call through fun directly because
+  // it may be one of the IntArg registers clobbered before the call.
+ ma_move(t9, fun);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ // Load the callee in t9, as above.
+ loadPtr(Address(fun.base, fun.offset), t9);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
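+
+// Typical (hypothetical) call sequence through this path, mirroring the
+// pattern used by handleFailureWithHandlerTail() above; SomeHelper is an
+// illustrative stand-in for a real ABI function:
+//
+//   using Fn = int (*)(int, int);
+//   masm.setupUnalignedABICall(scratch);  // scratch saves the old sp
+//   masm.passABIArg(a0);
+//   masm.passABIArg(a1);
+//   masm.callWithABI<Fn, SomeHelper>();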
+
+// ===============================================================
+// Move
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
+ return;
+ }
+
+ FloatRegister scratch = ScratchDoubleReg;
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movePtr(src.valueReg(), dest.valueReg());
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ if (!src.isGCThing()) {
+ ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
+ return;
+ }
+
+ writeDataRelocation(src);
+ movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+ const T& value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ Label done;
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+
+ // temp may be InvalidReg, use scratch2 instead.
+ SecondScratchRegisterScope scratch2(*this);
+
+ getGCThingValueChunk(value, scratch2);
+ loadPtr(Address(scratch2, gc::ChunkStoreBufferOffset), scratch2);
+ branchPtr(InvertCondition(cond), scratch2, ImmWord(0), label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, ValueOperand(scratch));
+ ma_b(lhs.valueReg(), scratch, label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ boxDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ if (value.constant()) {
+ storeValue(value.value(), dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+ dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ subFromStackPtr(Imm32(sizeof(double)));
+ boxDouble(reg, Address(getStackPointer(), 0));
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ ma_b(index, boundsCheckLimit, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(*this);
+ load32(boundsCheckLimit, scratch2);
+ ma_b(index, scratch2, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ ma_b(index.reg, boundsCheckLimit.reg, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(*this);
+ loadPtr(boundsCheckLimit, scratch2);
+ ma_b(index.reg, scratch2, ok, cond);
+}
+
+void MacroAssembler::widenInt32(Register r) {
+  // Sign-extend the low 32 bits; this may be redundant if r is already
+  // sign-extended.
+ move32To64SignExtend(r, Register64(r));
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ as_truncld(ScratchDoubleReg, input);
+ moveFromDouble(ScratchDoubleReg, output);
+ ma_dsrl(ScratchRegister, output, Imm32(32));
+ as_sll(output, output, 0);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ as_truncls(ScratchDoubleReg, input);
+ moveFromDouble(ScratchDoubleReg, output);
+ ma_dsrl(ScratchRegister, output, Imm32(32));
+ as_sll(output, output, 0);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch,
+ Register64 output, Register tmp) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+
+ as_truncld(ScratchDoubleReg, input);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output.reg);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+ Register output = output_.reg;
+
+ Label done;
+
+ as_truncld(ScratchDoubleReg, input);
+ // ma_li INT64_MAX
+ ma_li(SecondScratchReg, Imm32(-1));
+ ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
+ moveFromDouble(ScratchDoubleReg, output);
+  // If the truncated result is in [0, INT64_MAX), nothing more to do.
+ ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);
+
+ loadConstantDouble(double(INT64_MAX + 1ULL), ScratchDoubleReg);
+ // ma_li INT64_MIN
+ ma_daddu(SecondScratchReg, Imm32(1));
+ as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
+ as_truncld(ScratchDoubleReg, ScratchDoubleReg);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_daddu(output, SecondScratchReg);
+
+  // Guard against negative values that truncate to 0 due to precision loss.
+ as_sltiu(SecondScratchReg, output, 1);
+ ma_or(ScratchRegister, SecondScratchReg);
+
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
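+
+// The unsigned truncation above, as a hedged C sketch: values that truncate
+// below 2^63 convert directly; larger values are biased down by 2^63,
+// truncated, and the bias is added back. Negative inputs and NaN either set
+// FCSR's CauseV flag or produce 0, and branch to oolEntry.
+//
+//   uint64_t r;
+//   if (input < 0x1p63) {
+//     r = (uint64_t)(int64_t)input;  // trunc.l.d
+//   } else {
+//     r = (uint64_t)(int64_t)(input - 0x1p63) + (UINT64_C(1) << 63);
+//   }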
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+
+ as_truncls(ScratchDoubleReg, input);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output.reg);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+ Register output = output_.reg;
+
+ Label done;
+
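+  // Same rebasing strategy as wasmTruncateDoubleToUInt64 above, in single
+  // precision.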
+ as_truncls(ScratchDoubleReg, input);
+  // Materialize INT64_MAX in SecondScratchReg.
+ ma_li(SecondScratchReg, Imm32(-1));
+ ma_dext(SecondScratchReg, SecondScratchReg, Imm32(0), Imm32(63));
+ moveFromDouble(ScratchDoubleReg, output);
+  // For inputs in the (-1, INT64_MAX] range nothing more needs to be done.
+ ma_b(output, SecondScratchReg, &done, Assembler::Below, ShortJump);
+
+ loadConstantFloat32(float(INT64_MAX + 1ULL), ScratchFloat32Reg);
+  // Turn SecondScratchReg (INT64_MAX) into INT64_MIN, i.e. 2^63.
+ ma_daddu(SecondScratchReg, Imm32(1));
+ as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
+ as_truncls(ScratchDoubleReg, ScratchFloat32Reg);
+ as_cfc1(ScratchRegister, Assembler::FCSR);
+ moveFromDouble(ScratchDoubleReg, output);
+ ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+ ma_daddu(output, SecondScratchReg);
+
+  // Guard against negative values that result in 0 due to precision loss.
+ as_sltiu(SecondScratchReg, output, 1);
+ ma_or(ScratchRegister, SecondScratchReg);
+
+ ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssemblerMIPS64Compat::wasmLoadI64Impl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load());
+ MOZ_ASSERT(!access.isSplatSimd128Load());
+ MOZ_ASSERT(!access.isWidenSimd128Load());
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ asMasm().ma_load_unaligned(access, output.reg, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
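+  // The append() below records the offset of the load instruction so the
+  // wasm trap machinery can attribute a hardware fault there to this access.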
+ asMasm().ma_load(output.reg, address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerMIPS64Compat::wasmStoreI64Impl(
+ const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ asMasm().ma_store_unaligned(access, value.reg, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ asMasm().ma_store(value.reg, address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
+ MOZ_ASSERT(expect != output && replace != output);
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ Label tryAgain;
+ Label exit;
+
+ masm.memoryBarrierBefore(sync);
+
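+  // lld/scd loop: scd succeeds only if nothing wrote the location since the
+  // lld, leaving 1 in its source register on success and 0 on failure, in
+  // which case we retry.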
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+ masm.as_lld(output.reg, SecondScratchReg, 0);
+
+ masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
+ masm.movePtr(replace.reg, ScratchRegister);
+ masm.as_scd(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&exit);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(value != output);
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_lld(output.reg, SecondScratchReg, 0);
+ masm.movePtr(value.reg, ScratchRegister);
+ masm.as_scd(ScratchRegister, SecondScratchReg, 0);
+ masm.ma_b(ScratchRegister, ScratchRegister, &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(masm, &access, access.sync(), mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, AtomicOp op,
+ Register64 value, const T& mem, Register64 temp,
+ Register64 output) {
+ MOZ_ASSERT(value != output);
+ MOZ_ASSERT(value != temp);
+ masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
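+  // Same lld/scd retry loop as CompareExchange64 above: output receives the
+  // old value and temp holds the updated value to store back.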
+ masm.bind(&tryAgain);
+ if (access) {
+ masm.append(*access, masm.size());
+ }
+
+ masm.as_lld(output.reg, SecondScratchReg, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_daddu(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_dsubu(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(temp.reg, output.reg, value.reg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_scd(temp.reg, SecondScratchReg, 0);
+ masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+// ========================================================================
+// Convert floating point.
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ as_dmtc1(src.reg, dest);
+ as_cvtdl(dest, dest);
+}
+
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ as_dmtc1(src.reg, dest);
+ as_cvtsl(dest, dest);
+}
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
+}
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+
+ Register src = src_.reg;
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != ScratchRegister);
+ MOZ_ASSERT(src != SecondScratchReg);
+
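+  // The input has its top bit set, so it cannot be converted as a signed
+  // 64-bit integer. Halve it, or-ing the low bit back in so the final
+  // rounding still comes out right, convert, then double the result.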
+ ma_and(ScratchRegister, src, Imm32(1));
+ ma_dsrl(SecondScratchReg, src, Imm32(1));
+ ma_or(ScratchRegister, SecondScratchReg);
+ as_dmtc1(ScratchRegister, dest);
+ as_cvtsl(dest, dest);
+ addFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_dmtc1(src, dest);
+ as_cvtsl(dest, dest);
+
+ bind(&done);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
new file mode 100644
index 0000000000..5add3bf1ee
--- /dev/null
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -0,0 +1,841 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MacroAssembler_mips64_h
+#define jit_mips64_MacroAssembler_mips64_h
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+#include "vm/BytecodeUtil.h"
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
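+// Controls how ma_liPatchable materializes an immediate: Li48 emits the
+// shorter patchable sequence covering 48-bit values (enough for addresses),
+// Li64 a full 64-bit load.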
+enum LiFlags {
+ Li64 = 0,
+ Li48 = 1,
+};
+
+struct ImmShiftedTag : public ImmWord {
+ explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
+ }
+};
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+static constexpr ValueOperand JSReturnOperand{JSReturnReg};
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value),
+ "The defaultShift is wrong");
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope : public SecondScratchRegisterScope {
+ public:
+ ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
+ : SecondScratchRegisterScope(masm) {}
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
+
+class MacroAssemblerMIPS64 : public MacroAssemblerMIPSShared {
+ public:
+ using MacroAssemblerMIPSShared::ma_b;
+ using MacroAssemblerMIPSShared::ma_cmp_set;
+ using MacroAssemblerMIPSShared::ma_ld;
+ using MacroAssemblerMIPSShared::ma_li;
+ using MacroAssemblerMIPSShared::ma_load;
+ using MacroAssemblerMIPSShared::ma_ls;
+ using MacroAssemblerMIPSShared::ma_sd;
+ using MacroAssemblerMIPSShared::ma_ss;
+ using MacroAssemblerMIPSShared::ma_store;
+ using MacroAssemblerMIPSShared::ma_sub32TestOverflow;
+
+ void ma_li(Register dest, CodeLabel* label);
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+
+ // Negate
+ void ma_dnegu(Register rd, Register rs);
+
+ // Shift operations
+ void ma_dsll(Register rd, Register rt, Imm32 shift);
+ void ma_dsrl(Register rd, Register rt, Imm32 shift);
+ void ma_dsra(Register rd, Register rt, Imm32 shift);
+ void ma_dror(Register rd, Register rt, Imm32 shift);
+ void ma_drol(Register rd, Register rt, Imm32 shift);
+
+ void ma_dsll(Register rd, Register rt, Register shift);
+ void ma_dsrl(Register rd, Register rt, Register shift);
+ void ma_dsra(Register rd, Register rt, Register shift);
+ void ma_dror(Register rd, Register rt, Register shift);
+ void ma_drol(Register rd, Register rt, Register shift);
+
+ void ma_dins(Register rt, Register rs, Imm32 pos, Imm32 size);
+ void ma_dext(Register rt, Register rs, Imm32 pos, Imm32 size);
+
+ // doubleword swap bytes
+ void ma_dsbh(Register rd, Register rt);
+ void ma_dshd(Register rd, Register rt);
+
+ void ma_dctz(Register rd, Register rs);
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_daddu(Register rd, Register rs, Imm32 imm);
+ void ma_daddu(Register rd, Register rs);
+ void ma_daddu(Register rd, Imm32 imm);
+ void ma_add32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_add32TestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rs, ImmWord imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rs, ImmWord imm,
+ Label* overflow);
+ // subtract
+ void ma_dsubu(Register rd, Register rs, Imm32 imm);
+ void ma_dsubu(Register rd, Register rs);
+ void ma_dsubu(Register rd, Imm32 imm);
+ void ma_sub32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+  // Multiplies. For now, there are only a few that we care about.
+ void ma_dmult(Register rs, Imm32 imm);
+ void ma_mulPtrTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+
+ // stack
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+  // Branches used from within MIPS-specific code.
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_load(ScratchRegister, addr, SizeDouble);
+ ma_b(ScratchRegister, rhs, l, c, jumpKind);
+ }
+
+ void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister ft, Address address);
+ void ma_ld(FloatRegister ft, Address address);
+ void ma_sd(FloatRegister ft, Address address);
+ void ma_ss(FloatRegister ft, Address address);
+
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+
+  // These functions abstract access to the high part of the double
+  // precision float register. They are intended to work with both 32-bit
+  // and 64-bit floating point coprocessors.
+ void moveToDoubleHi(Register src, FloatRegister dest) { as_mthc1(src, dest); }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfhc1(dest, src);
+ }
+
+ void moveToDouble(Register src, FloatRegister dest) { as_dmtc1(src, dest); }
+ void moveFromDouble(FloatRegister src, Register dest) { as_dmfc1(dest, src); }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64 {
+ public:
+ using MacroAssemblerMIPS64::call;
+
+ MacroAssemblerMIPS64Compat() {}
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void movq(Register rs, Register rd);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_daddu(dest, address.base, Imm32(address.offset));
+ }
+
+ void computeEffectiveAddress(const BaseIndex& address, Register dest);
+
+ void j(Label* dest) { ma_b(dest); }
+
+ void mov(Register src, Register dest) { as_ori(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void writeDataRelocation(const Value& val) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(currentOffset());
+ }
+ }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() { as_nop(); }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ ma_push(ScratchRegister);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+ // Emit a branch that can be toggled to a non-operation. On MIPS64 we use
+ // "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Six instructions used in: MacroAssemblerMIPS64Compat::toggledCall
+ return 6 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ CodeOffset offset = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return offset;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm, Li64);
+ return offset;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return offset;
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ label->patchAt()->bind(currentOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ m_buffer.ensureSpace(sizeof(void*));
+ writeInst(-1);
+ writeInst(-1);
+ }
+
+ void jump(Label* label) { ma_b(label); }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void splitTag(Register src, Register dest) {
+ ma_dsrl(dest, src, Imm32(JSVAL_TAG_SHIFT));
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ splitTag(value, tag);
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest,
+ JSValueType type) {
+ unboxNonDouble(operand.valueReg(), dest, type);
+ }
+
+ template <typename T>
+ void unboxNonDouble(T src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ load32(src, dest);
+ return;
+ }
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, type);
+ }
+
+ void unboxNonDouble(Register src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_sll(dest, src, Imm32(0));
+ return;
+ }
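+    // Pointer-like payloads are boxed by placing the shifted tag in the
+    // upper bits, so xor-ing with that tag recovers the payload.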
+ MOZ_ASSERT(ScratchRegister != src);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), ScratchRegister);
+ as_xor(dest, src, ScratchRegister);
+ }
+
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ static_assert(JS::detail::ValueObjectOrNullBit ==
+ (uint64_t(0x8) << JSVAL_TAG_SHIFT));
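+    // Clear that bit so a Null value unboxes to the null pointer.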
+ ma_dins(dest, zero, Imm32(JSVAL_TAG_SHIFT + 3), Imm32(1));
+ }
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ ma_dext(dest, dest, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ ma_dext(dest, src.valueReg(), Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != dest);
+ loadPtr(src, dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch);
+ as_and(dest, dest, scratch);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ as_and(dest, dest, src.valueReg());
+ }
+
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(Register src, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxInt32(const BaseIndex& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(Register src, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxBoolean(const BaseIndex& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(Register src, Register dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(Register src, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxSymbol(const ValueOperand& src, Register dest);
+ void unboxSymbol(Register src, Register dest);
+ void unboxSymbol(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(Register src, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(Register src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.valueReg(), val.valueReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ unboxString(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ MOZ_ASSERT(scratch != ScratchRegister);
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ template <typename T>
+ void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, BaseIndex address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8:
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, SecondScratchReg);
+ } else {
+ unboxNonDouble(value, SecondScratchReg, type);
+ }
+ computeEffectiveAddress(address, ScratchRegister);
+ as_sd(SecondScratchReg, ScratchRegister, 0);
+ return;
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, Address address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8:
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, SecondScratchReg);
+ } else {
+ unboxNonDouble(value, SecondScratchReg, type);
+ }
+ storePtr(SecondScratchReg, address);
+ return;
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void boxValue(JSValueType type, Register src, Register dest) {
+ MOZ_ASSERT(src != dest);
+
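+    // Place the shifted tag in the upper bits, then insert the payload
+    // below it: 32 bits for Int32/Boolean payloads, JSVAL_TAG_SHIFT bits
+    // for pointer-like payloads.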
+ JSValueTag tag = (JSValueTag)JSVAL_TYPE_TO_TAG(type);
+ ma_li(dest, Imm32(tag));
+ ma_dsll(dest, dest, Imm32(JSVAL_TAG_SHIFT));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ ma_dins(dest, src, Imm32(0), Imm32(32));
+ } else {
+ ma_dins(dest, src, Imm32(0), Imm32(JSVAL_TAG_SHIFT));
+ }
+ }
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), ScratchRegister);
+ push(ScratchRegister);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ boxValue(type, reg, ScratchRegister);
+ push(ScratchRegister);
+ }
+ void pushValue(const Address& addr);
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ loadValue(addr, ValueOperand(scratch));
+ pushValue(ValueOperand(scratch));
+ }
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, SignExtend);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, ZeroExtend);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeWord, SignExtend);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ loadPtr(address, dest.reg);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ ma_load_unaligned(dest.reg, src, SizeDouble, ZeroExtend);
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+ void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest, SizeHalfWord);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest, SizeWord);
+ }
+
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ void store64(Register64 src, Address address) { storePtr(src.reg, address); }
+ void store64(Register64 src, const BaseIndex& address) {
+ storePtr(src.reg, address);
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ ma_store_unaligned(src.reg, dest, SizeDouble);
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+ void storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) { as_movd(dest, src); }
+
+ void zeroDouble(FloatRegister reg) { moveToDouble(zero, reg); }
+
+ void convertUInt64ToDouble(Register src, FloatRegister dest);
+
+ void breakpoint();
+
+ void checkStackAlignment();
+
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs,
+ Register dest);
+
+ void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+
+ public:
+ void lea(Operand addr, Register dest) {
+ ma_daddu(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() {
+ as_jr(ra);
+ as_nop();
+ }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPS64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MacroAssembler_mips64_h */
diff --git a/js/src/jit/mips64/MoveEmitter-mips64.cpp b/js/src/jit/mips64/MoveEmitter-mips64.cpp
new file mode 100644
index 0000000000..70217a37f8
--- /dev/null
+++ b/js/src/jit/mips64/MoveEmitter-mips64.cpp
@@ -0,0 +1,149 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips64/MoveEmitter-mips64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterMIPS64::breakCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // A cyclic move has the pattern:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+ masm.storeFloat32(temp, cycleSlot(slotId));
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot(slotId));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.load32(getAdjustedAddress(to), temp);
+ masm.store32(temp, cycleSlot(0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.store32(to.reg(), cycleSlot(0));
+ }
+ break;
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot(0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot(0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPS64::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // A cyclic move has the pattern:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+  // saved value of B to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(cycleSlot(slotId), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(cycleSlot(slotId), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.load32(cycleSlot(0), temp);
+ masm.store32(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.load32(cycleSlot(0), to.reg());
+ }
+ break;
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(0), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPS64::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.moveFromDouble(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ if (from.isMemory()) {
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else {
+ masm.moveToDouble(from.reg(), to.floatReg());
+ }
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
+ }
+}
diff --git a/js/src/jit/mips64/MoveEmitter-mips64.h b/js/src/jit/mips64/MoveEmitter-mips64.h
new file mode 100644
index 0000000000..e6dbcd0693
--- /dev/null
+++ b/js/src/jit/mips64/MoveEmitter-mips64.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_MoveEmitter_mips64_h
+#define jit_mips64_MoveEmitter_mips64_h
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPS64 : public MoveEmitterMIPSShared {
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ public:
+ MoveEmitterMIPS64(MacroAssembler& masm) : MoveEmitterMIPSShared(masm) {}
+};
+
+typedef MoveEmitterMIPS64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_MoveEmitter_mips64_h */
diff --git a/js/src/jit/mips64/SharedICRegisters-mips64.h b/js/src/jit/mips64/SharedICRegisters-mips64.h
new file mode 100644
index 0000000000..99b263ca1e
--- /dev/null
+++ b/js/src/jit/mips64/SharedICRegisters-mips64.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips64_SharedICRegisters_mips64_h
+#define jit_mips64_SharedICRegisters_mips64_h
+
+#include "jit/mips64/Assembler-mips64.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses registers not preserved across calls. The
+// value in R1 must be preserved across calls.
+static constexpr ValueOperand R0(v1);
+static constexpr ValueOperand R1(s4);
+static constexpr ValueOperand R2(a6);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = a5;
+
+// Register used internally by MacroAssemblerMIPS.
+static constexpr Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that ICTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f2;
+static constexpr FloatRegister FloatReg2 = f4;
+static constexpr FloatRegister FloatReg3 = f6;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips64_SharedICRegisters_mips64_h */
diff --git a/js/src/jit/mips64/Simulator-mips64.cpp b/js/src/jit/mips64/Simulator-mips64.cpp
new file mode 100644
index 0000000000..0cdac18365
--- /dev/null
+++ b/js/src/jit/mips64/Simulator-mips64.cpp
@@ -0,0 +1,4402 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/mips64/Simulator-mips64.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <float.h>
+#include <limits>
+
+#include "jit/AtomicOperations.h"
+#include "jit/mips64/Assembler-mips64.h"
+#include "js/Conversions.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "threading/LockGuard.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#define I8(v) static_cast<int8_t>(v)
+#define I16(v) static_cast<int16_t>(v)
+#define U16(v) static_cast<uint16_t>(v)
+#define I32(v) static_cast<int32_t>(v)
+#define U32(v) static_cast<uint32_t>(v)
+#define I64(v) static_cast<int64_t>(v)
+#define U64(v) static_cast<uint64_t>(v)
+#define I128(v) static_cast<__int128_t>(v)
+#define U128(v) static_cast<__uint128_t>(v)
+
+#define I32_CHECK(v) \
+ ({ \
+ MOZ_ASSERT(I64(I32(v)) == I64(v)); \
+ I32((v)); \
+ })
+
+namespace js {
+namespace jit {
+
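+// A break instruction carrying MAX_BREAK_CODE marks a call the simulator
+// redirects to native C++ code rather than treating as a trap.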
+static const Instr kCallRedirInstr =
+ op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
+
+// Utility functions.
+static uint32_t GetFCSRConditionBit(uint32_t cc) {
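+  // In the FCSR, condition bit FCC0 is bit 23 and FCC1..FCC7 occupy bits
+  // 25..31.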
+ if (cc == 0) {
+ return 23;
+ }
+ return 24 + cc;
+}
+
+// -----------------------------------------------------------------------------
+// Various MIPS assembly constants.
+
+class SimInstruction {
+ public:
+ enum {
+ kInstrSize = 4,
+    // On MIPS the PC cannot actually be directly accessed. We behave as if
+    // it were always the address of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const { return (instructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline OpcodeField opcodeValue() const {
+ return static_cast<OpcodeField>(
+ bits(OpcodeShift + OpcodeBits - 1, OpcodeShift));
+ }
+
+ inline int rsValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RSShift + RSBits - 1, RSShift);
+ }
+
+ inline int rtValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RTShift + RTBits - 1, RTShift);
+ }
+
+ inline int rdValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(RDShift + RDBits - 1, RDShift);
+ }
+
+ inline int saValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(SAShift + SABits - 1, SAShift);
+ }
+
+ inline int functionValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+
+ inline int fdValue() const { return bits(FDShift + FDBits - 1, FDShift); }
+
+ inline int fsValue() const { return bits(FSShift + FSBits - 1, FSShift); }
+
+ inline int ftValue() const { return bits(FTShift + FTBits - 1, FTShift); }
+
+ inline int frValue() const { return bits(FRShift + FRBits - 1, FRShift); }
+
+ // Float Compare condition code instruction bits.
+ inline int fcccValue() const {
+ return bits(FCccShift + FCccBits - 1, FCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int fbccValue() const {
+ return bits(FBccShift + FBccBits - 1, FBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int fbtrueValue() const {
+ return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline OpcodeField opcodeFieldRaw() const {
+ return static_cast<OpcodeField>(instructionBits() & OpcodeMask);
+ }
+
+ inline int rsFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RSMask;
+ }
+
+ // Same as above function, but safe to call within instructionType().
+ inline int rsFieldRawNoAssert() const { return instructionBits() & RSMask; }
+
+ inline int rtFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RTMask;
+ }
+
+ inline int rdFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & RDMask;
+ }
+
+ inline int saFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & SAMask;
+ }
+
+ inline int functionFieldRaw() const {
+ return instructionBits() & FunctionMask;
+ }
+
+ // Get the secondary field according to the opcode.
+ inline int secondaryValue() const {
+ OpcodeField op = opcodeFieldRaw();
+ switch (op) {
+ case op_special:
+ case op_special2:
+ return functionValue();
+ case op_cop1:
+ return rsValue();
+ case op_regimm:
+ return rtValue();
+ default:
+ return ff_null;
+ }
+ }
+
+ inline int32_t imm16Value() const {
+ MOZ_ASSERT(instructionType() == kImmediateType);
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ MOZ_ASSERT(instructionType() == kJumpType);
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+ // Say if the instruction should not be used in a branch delay slot.
+ bool isForbiddenInBranchDelay() const;
+  // Say if the instruction 'links', e.g. jal, bal.
+ bool isLinkingInstruction() const;
+ // Say if the instruction is a debugger break/trap.
+ bool isTrap() const;
+
+ private:
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+bool SimInstruction::isForbiddenInBranchDelay() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_j:
+ case op_jal:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bgez:
+ case rt_bltzal:
+ case rt_bgezal:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ };
+}
+
+bool SimInstruction::isLinkingInstruction() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_jal:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bgezal:
+ case rt_bltzal:
+ return true;
+ default:
+ return false;
+ };
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+bool SimInstruction::isTrap() const {
+ if (opcodeFieldRaw() != op_special) {
+ return false;
+ } else {
+ switch (functionFieldRaw()) {
+ case ff_break:
+ return instructionBits() != kCallRedirInstr;
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ return bits(15, 6) != kWasmTrapCode;
+ default:
+ return false;
+ };
+ }
+}
+
+SimInstruction::Type SimInstruction::instructionType() const {
+ switch (opcodeFieldRaw()) {
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ case ff_sync:
+ case ff_break:
+ case ff_sll:
+ case ff_dsll:
+ case ff_dsll32:
+ case ff_srl:
+ case ff_dsrl:
+ case ff_dsrl32:
+ case ff_sra:
+ case ff_dsra:
+ case ff_dsra32:
+ case ff_sllv:
+ case ff_dsllv:
+ case ff_srlv:
+ case ff_dsrlv:
+ case ff_srav:
+ case ff_dsrav:
+ case ff_mfhi:
+ case ff_mflo:
+ case ff_mult:
+ case ff_dmult:
+ case ff_multu:
+ case ff_dmultu:
+ case ff_div:
+ case ff_ddiv:
+ case ff_divu:
+ case ff_ddivu:
+ case ff_add:
+ case ff_dadd:
+ case ff_addu:
+ case ff_daddu:
+ case ff_sub:
+ case ff_dsub:
+ case ff_subu:
+ case ff_dsubu:
+ case ff_and:
+ case ff_or:
+ case ff_xor:
+ case ff_nor:
+ case ff_slt:
+ case ff_sltu:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ case ff_movz:
+ case ff_movn:
+ case ff_movci:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special2:
+ switch (functionFieldRaw()) {
+ case ff_mul:
+ case ff_clz:
+ case ff_dclz:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special3:
+ switch (functionFieldRaw()) {
+ case ff_ins:
+ case ff_dins:
+ case ff_dinsm:
+ case ff_dinsu:
+ case ff_ext:
+ case ff_dext:
+ case ff_dextm:
+ case ff_dextu:
+ case ff_bshfl:
+ case ff_dbshfl:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_cop1: // Coprocessor instructions.
+ switch (rsFieldRawNoAssert()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+ case op_cop1x:
+ return kRegisterType;
+    // 16-bit immediate-type instructions, e.g. addi dest, src, imm16.
+ case op_regimm:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_addi:
+ case op_daddi:
+ case op_addiu:
+ case op_daddiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ case op_lb:
+ case op_lbu:
+ case op_lh:
+ case op_lhu:
+ case op_lw:
+ case op_lwu:
+ case op_lwl:
+ case op_lwr:
+ case op_ll:
+ case op_lld:
+ case op_ld:
+ case op_ldl:
+ case op_ldr:
+ case op_sb:
+ case op_sh:
+ case op_sw:
+ case op_swl:
+ case op_swr:
+ case op_sc:
+ case op_scd:
+ case op_sd:
+ case op_sdl:
+ case op_sdr:
+ case op_lwc1:
+ case op_ldc1:
+ case op_swc1:
+ case op_sdc1:
+ return kImmediateType;
+    // 26-bit immediate-type instructions, e.g. j imm26.
+ case op_j:
+ case op_jal:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ };
+ return kUnsupported;
+}
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 0;
+const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t);
+const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize;
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
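+// Worked example (illustrative, not part of the original sources): for a page
+// offset of 0x125, validityByte(0x125) returns &validity_map_[0x125 >> 2] ==
+// &validity_map_[73], i.e. one validity byte guards each 4-byte cache line of
+// the 4 KB page, and cachedData(0x125) points at the corresponding copy of
+// the instruction bytes.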
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int64_t Simulator::StopSimAt = -1;
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ int64_t stopAt;
+ char* stopAtStr = getenv("MIPS_SIM_STOP_AT");
+ if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim.release();
+}
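+// Usage note (illustrative): assuming the usual check of StopSimAt against
+// icount_ in the execution loop, launching the shell with e.g.
+// `MIPS_SIM_STOP_AT=100000 ./js script.js` drops into the simulator debugger
+// once 100000 instructions have been simulated.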
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger {
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) {}
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with a nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6;
+ static const Instr kNopInstr = op_special | ff_sll;
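+  // Encoding sketch (illustrative): a MIPS BREAK word is laid out as
+  // opcode[31:26] | code[25:6] | function[5:0], so kBreakpointInstr carries
+  // the 20-bit code 0xfffff, which is what instr->bits(25, 6) reads back in
+  // stop() and softwareInterrupt().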
+
+ Simulator* sim_;
+
+ int64_t getRegisterValue(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void UNSUPPORTED() {
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+void MipsDebugger::stop(SimInstruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int64_t MipsDebugger::getRegisterValue(int regnum) {
+ if (regnum == kPCRegister) {
+ return sim_->get_pc();
+ }
+ return sim_->getRegister(regnum);
+}
+
+int64_t MipsDebugger::getFPURegisterValueLong(int regnum) {
+ return sim_->getFpuRegister(regnum);
+}
+
+float MipsDebugger::getFPURegisterValueFloat(int regnum) {
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double MipsDebugger::getFPURegisterValueDouble(int regnum) {
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool MipsDebugger::getValue(const char* desc, int64_t* value) {
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc, "%" PRIu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%" PRIi64, value) == 1;
+}
+
+bool MipsDebugger::setBreakpoint(SimInstruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+  // The breakpoint instruction is not written into the code here; it will be
+  // set when the debugger shell continues.
+ return true;
+}
+
+bool MipsDebugger::deleteBreakpoint(SimInstruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void MipsDebugger::undoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+}
+
+void MipsDebugger::redoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+ }
+}
+
+void MipsDebugger::printAllRegs() {
+ int64_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%016" PRIx64 " %20" PRIi64 " ", Registers::GetName(i),
+ value, value);
+
+ if (i % 2) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::LO);
+ printf(" LO: 0x%016" PRIx64 " %20" PRIi64 " ", value, value);
+ value = getRegisterValue(Simulator::HI);
+ printf(" HI: 0x%016" PRIx64 " %20" PRIi64 "\n", value, value);
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%016" PRIx64 "\n", value);
+}
+
+void MipsDebugger::printAllRegsIncludingFPU() {
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i), getFPURegisterValueLong(i),
+ getFPURegisterValueFloat(i), getFPURegisterValueDouble(i));
+ }
+}
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+static void DisassembleInstruction(uint64_t pc) {
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(pc);
+ char hexbytes[256];
+ sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2],
+ bytes[3]);
+ char llvmcmd[1024];
+ sprintf(llvmcmd,
+ "bash -c \"echo -n '%p'; echo '%s' | "
+ "llvm-mc -disassemble -arch=mips64el -mcpu=mips64r2 | "
+ "grep -v pure_instructions | grep -v .text\"",
+ static_cast<void*>(bytes), hexbytes);
+ if (system(llvmcmd)) {
+ printf("Cannot disassemble instruction.\n");
+ }
+}
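+// Note (illustrative): disassembly shells out to llvm-mc, so it only works
+// when an llvm-mc build that understands -arch=mips64el is on PATH;
+// otherwise every instruction falls back to the "Cannot disassemble"
+// message above.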
+
+void MipsDebugger::debug() {
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+        // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!instr->isTrap()) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Encoding fReg = FloatRegisters::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%016" PRIi64 " %20" PRIi64 " \n", arg1, value,
+ value);
+ } else if (fReg != FloatRegisters::Invalid) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fReg),
+ getFPURegisterValueLong(fReg),
+ getFPURegisterValueFloat(fReg),
+ getFPURegisterValueDouble(fReg));
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%016" PRIx64 " %20" PRIi64, cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint64_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) {
+ printf("setting breakpoint failed\n");
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on MIPS !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address = reinterpret_cast<SimInstruction*>(
+ stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content, default dump 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content, default dump 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ intptr_t start, int size) {
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* start_addr, size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
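+// Worked example (illustrative): for start_addr == 0x1003 and size == 6, the
+// code above rounds down to the line start (start = 0x1000, size = 9) and
+// then rounds size up to a line multiple (size = 12); a range crossing a
+// page boundary would additionally be flushed in page-sized chunks by the
+// loop.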
+
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ int cmpret =
+ memcmp(reinterpret_cast<void*>(instr), cache_page->cachedData(offset),
+ SimInstruction::kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return U32(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator() {
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+
+  // Note: allocation, and anything that depends on allocated memory, is
+  // deferred until init() in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
+ LLBit_ = false;
+ LLAddr_ = 0;
+ lastLLValue_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
+
+ lastDebuggerInput_ = nullptr;
+}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64;
+
+ return true;
+}
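+// Stack layout sketch (illustrative): with the 2 MB allocation above,
+// stackLimit_ sits 1 MB above the base and the initial sp 64 bytes below the
+// top, so roughly 1 MB of headroom separates sp from the over-recursion
+// limit checked by overRecursed().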
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
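+//
+// In other words (illustrative): RedirectNativeFunction() hands the JIT the
+// address of swiInstruction_ instead of the real C++ entry point; when the
+// simulator fetches kCallRedirInstr there, FromSwiInstruction() recovers the
+// owning Redirection via offsetof() and softwareInterrupt() dispatches on
+// its ABIFunctionType.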
+class Redirection {
+ friend class SimulatorProcess;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr) {
+ next_ = SimulatorProcess::redirection();
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
+ SimInstruction::kInstrSize);
+ }
+ SimulatorProcess::setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ AutoLockSimulatorCache als;
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir) {
+ oomUnsafe.crash("Simulator redirection");
+ }
+ new (redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection =
+ addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator() { js_free(stack_); }
+
+SimulatorProcess::SimulatorProcess()
+ : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
+ if (getenv("MIPS_SIM_ICACHE_CHECKS")) {
+ ICacheCheckingDisableCount = 0;
+ }
+}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::Current() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return cx->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int64_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::setFpuRegister(int fpureg, int64_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::setFpuRegisterLo(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterHi(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1) = value;
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, float value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, double value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::getRegister(int reg) const {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0) {
+ return 0;
+ }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+int64_t Simulator::getFpuRegister(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::getFpuRegisterLo(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t Simulator::getFpuRegisterHi(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1);
+}
+
+float Simulator::getFpuRegisterFloat(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]);
+}
+
+double Simulator::getFpuRegisterDouble(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]);
+}
+
+void Simulator::setCallResultDouble(double result) {
+ setFpuRegisterDouble(f0, result);
+}
+
+void Simulator::setCallResultFloat(float result) {
+ setFpuRegisterFloat(f0, result);
+}
+
+void Simulator::setCallResult(int64_t res) { setRegister(v0, res); }
+
+void Simulator::setCallResult(__int128_t res) {
+ setRegister(v0, I64(res));
+ setRegister(v1, I64(res >> 64));
+}
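+// Example (illustrative): under the N64 ABI modeled here, a 128-bit return
+// value comes back in the v0/v1 pair, so for
+// res == (__int128_t(1) << 64) | 2 the overload above stores 2 in v0 and 1
+// in v1.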
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::setFCSRBit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+template <typename T>
+bool Simulator::setFCSRRoundError(double original, double rounded) {
+ bool ret = false;
+
+ setFCSRBit(kFCSRInexactCauseBit, false);
+ setFCSRBit(kFCSRUnderflowCauseBit, false);
+ setFCSRBit(kFCSROverflowCauseBit, false);
+ setFCSRBit(kFCSRInvalidOpCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ setFCSRBit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ setFCSRBit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if ((long double)rounded > (long double)std::numeric_limits<T>::max() ||
+ (long double)rounded < (long double)std::numeric_limits<T>::min()) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ setFCSRBit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
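+// Example (illustrative): truncating 2147483648.5 for an int32 destination
+// gives a rounded value above std::numeric_limits<int32_t>::max(), so
+// setFCSRRoundError<int32_t> sets the inexact, overflow, and invalid-op bits
+// and returns true, which callers can use to substitute the invalid-operation
+// result.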
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
+ wasm::RegisterState state;
+ state.pc = (void*)get_pc();
+ state.fp = (void*)getRegister(fp);
+ state.sp = (void*)getRegister(sp);
+ state.lr = (void*)getRegister(ra);
+ return state;
+}
+
+static bool AllowUnaligned() {
+ static bool hasReadFlag = false;
+ static bool unalignedAllowedFlag = false;
+ if (!hasReadFlag) {
+ unalignedAllowedFlag = !!getenv("MIPS_UNALIGNED");
+ hasReadFlag = true;
+ }
+ return unalignedAllowedFlag;
+}
+
+// MIPS memory instructions (except lw(d)l/r, sw(d)l/r) trap on unaligned
+// memory access, enabling the OS to handle them via trap-and-emulate. Note
+// that simulator runs have the runtime system running directly on the host
+// system and only generated code is executed in the simulator. Since the
+// host is typically IA32, it will not trap on unaligned memory access. We
+// assume that executing correct generated code will not produce unaligned
+// memory access, so we explicitly check for address alignment and trap.
+// Note that trapping does not occur when executing wasm code, which requires
+// that unaligned memory access provides correct results.
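+// Usage note (illustrative): starting the shell with the MIPS_UNALIGNED
+// environment variable set, e.g. `MIPS_UNALIGNED=1 ./js script.js`, makes
+// AllowUnaligned() return true so the accessors below perform the plain host
+// access instead of crashing on a misaligned address.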
+
+uint8_t Simulator::readBU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return 0xff;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int8_t Simulator::readB(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeB(uint64_t addr, uint8_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::writeB(uint64_t addr, int8_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+uint16_t Simulator::readHU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return 0xffff;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned unsigned halfword read at 0x%016" PRIx64
+ ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t Simulator::readH(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned signed halfword read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned unsigned halfword write at 0x%016" PRIx64
+ ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned halfword write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint32_t Simulator::readWU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int32_t Simulator::readW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & 3) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+int64_t Simulator::readDW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ if (AllowUnaligned() || (addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+double Simulator::readD(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return NAN;
+ }
+
+ if (AllowUnaligned() || (addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned (double) read at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeD(uint64_t addr, double value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if (AllowUnaligned() || (addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned (double) write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+int Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr) {
+ if ((addr & 3) == 0) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalW(uint64_t addr, int value,
+ SimInstruction* instr) {
+  // Correct behavior in this case, as defined by the architecture, is to
+  // just return 0, but there is no point in allowing that. It is certainly
+  // an indicator of a bug.
+  if (addr != LLAddr_) {
+    printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+           ", expected: 0x%016" PRIx64 "\n",
+           addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & 3) == 0) {
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int32_t expected = int32_t(lastLLValue_);
+ int32_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
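+// Guest-code sketch (illustrative): an atomic 32-bit increment compiled to
+// an ll/sc loop,
+//   retry: ll    t0, 0(a0)
+//          addiu t1, t0, 1
+//          sc    t1, 0(a0)
+//          beqz  t1, retry
+// is modeled by recording (LLAddr_, lastLLValue_) in loadLinkedW and letting
+// storeConditionalW succeed only via the host compare-exchange above.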
+
+int64_t Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
+ int64_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned write at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr) {
+  // Correct behavior in this case, as defined by the architecture, is to
+  // just return 0, but there is no point in allowing that. It is certainly
+  // an indicator of a bug.
+  if (addr != LLAddr_) {
+    printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+           ", expected: 0x%016" PRIx64 "\n",
+           addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0) {
+ SharedMem<int64_t*> ptr =
+ SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int64_t expected = lastLLValue_;
+ int64_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = getRegister(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void Simulator::format(SimInstruction* instr, const char* format) {
+ printf("Simulator found unsupported instruction:\n 0x%016lx: %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+// Note: with the code below we assume that all runtime calls return a 64-bit
+// result. If they don't, the v1 result register contains a bogus value, which
+// is fine because it is caller-saved.
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3, int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0,
+ int64_t arg1,
+ int64_t arg2,
+ int64_t arg3);
+
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
+typedef int64_t (*Prototype_Int_DoubleInt)(double arg0, int64_t arg1);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, int64_t arg1,
+ int64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(int64_t arg0, double arg1,
+ int64_t arg2, int64_t arg3);
+
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef int64_t (*Prototype_Int_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int64_t arg0);
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
+ double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0,
+ double arg1,
+ double arg2,
+ double arg3);
+
+typedef int32_t (*Prototype_Int32_General)(int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32)(int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32)(int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32)(int64_t, int32_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32General)(
+ int64_t, int32_t, int32_t, int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int64)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32General)(int64_t, int32_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int64Int64)(int64_t, int32_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32)(int64_t, int32_t,
+ int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32Int32)(
+ int64_t, int32_t, int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneral)(int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralGeneral)(int64_t, int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32Int32)(int64_t, int64_t,
+ int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int32Int32)(int64_t, int64_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32)(int64_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64General)(
+ int64_t, int64_t, int32_t, int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64General)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64General)(
+ int64_t, int64_t, int64_t, int64_t, int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32)(int64_t, int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32)(int64_t, int32_t,
+ int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32General)(int64_t, int32_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32GeneralInt32)(
+ int64_t, int32_t, int32_t, int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32)(
+ int64_t, int64_t, int32_t, int64_t, int32_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32General)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int64_t (*Prototype_Int64_General)(int64_t);
+typedef int64_t (*Prototype_Int64_GeneralInt64)(int64_t, int64_t);
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void Simulator::softwareInterrupt(SimInstruction* instr) {
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
+
+  // First check whether we hit a redirected native call (kCallRedirInstr).
+ if (instr->instructionBits() == kCallRedirInstr) {
+#if !defined(USES_N64_ABI)
+ MOZ_CRASH("Only N64 ABI supported.");
+#else
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ uintptr_t nativeFn =
+ reinterpret_cast<uintptr_t>(redirection->nativeFunction());
+
+ int64_t arg0 = getRegister(a0);
+ int64_t arg1 = getRegister(a1);
+ int64_t arg2 = getRegister(a2);
+ int64_t arg3 = getRegister(a3);
+ int64_t arg4 = getRegister(a4);
+ int64_t arg5 = getRegister(a5);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = getRegister(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ if (external == intptr_t(&js::wasm::Instance::wake_m32)) {
+ result = int32_t(result);
+ }
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int64_t arg6 = getRegister(a6);
+ int64_t arg7 = getRegister(a7);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int64_t result = target(dval0);
+ if (external == intptr_t((int32_t(*)(double))JS::ToInt32)) {
+ result = int32_t(result);
+ }
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (external == intptr_t(&js::wasm::Instance::wait_i32_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ if (external == intptr_t(&js::wasm::Instance::wait_i64_m32)) {
+ result = int32_t(result);
+ }
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_DoubleInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleInt target =
+ reinterpret_cast<Prototype_Int_DoubleInt>(external);
+ int64_t result = target(dval, arg1);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int64_t result = target(dval, arg1, arg2);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getFpuRegisterDouble(13);
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int64_t result = target(arg0, dval, arg2, arg3);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Int_Float32 target =
+ reinterpret_cast<Prototype_Int_Float32>(external);
+ int64_t result = target(fval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0;
+ float fval1;
+ fval0 = getFpuRegisterFloat(12);
+ fval1 = getFpuRegisterFloat(13);
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0 = getFpuRegisterDouble(12);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, arg1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(arg0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ double dval1 = getFpuRegisterDouble(13);
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int64_t result = target(arg0, dval1);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ double dval2 = getFpuRegisterDouble(14);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0 = getFpuRegisterDouble(12);
+ double dval1 = getFpuRegisterDouble(13);
+ double dval2 = getFpuRegisterDouble(14);
+ double dval3 = getFpuRegisterDouble(15);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int32_General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_General>(nativeFn)(arg0);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32>(nativeFn)(
+ arg0, I32(arg1));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4),
+ I32(arg5));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32Int32General: {
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralInt32Int32Int32Int32General>(nativeFn)(
+ arg0, I32(arg1), I32(arg2), I32(arg3), I32(arg4), arg5);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), I32(arg3), arg4);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ nativeFn)(arg0, I32(arg1), arg2, arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralInt32GeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), arg2, I32(arg3), I32(arg4));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneral>(
+ nativeFn)(arg0, arg1);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(
+ nativeFn)(arg0, arg1, arg2);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_Int32_GeneralGeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), I32(arg3), I32(arg4));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(
+ nativeFn)(arg0, arg1, I32(arg2));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, arg4);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ nativeFn)(arg0, arg1, arg2, arg3, arg4);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case Args_General_GeneralInt32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32>(
+ nativeFn)(arg0, I32(arg1));
+ setRegister(v0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32Int32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32Int32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2));
+ setRegister(v0, ret);
+ break;
+ }
+ case Args_General_GeneralInt32General: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32General>(
+ nativeFn)(arg0, I32(arg1), arg2);
+ setRegister(v0, ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ nativeFn)(arg0, I32(arg1), I32(arg2), arg3, I32(arg4));
+ setRegister(v0, ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ int64_t arg6 = getRegister(a6);
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3, I32(arg4), I32(arg5),
+ I32(arg6));
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneralInt32General>(
+ nativeFn)(arg0, arg1, I32(arg2), arg3);
+ setRegister(v0, I64(ret));
+ break;
+ }
+ case js::jit::Args_Int64_General: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_General>(nativeFn)(arg0);
+ setRegister(v0, ret);
+ break;
+ }
+ case js::jit::Args_Int64_GeneralInt64: {
+ int64_t ret = reinterpret_cast<Prototype_Int64_GeneralInt64>(nativeFn)(
+ arg0, arg1);
+ setRegister(v0, ret);
+ break;
+ }
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+#endif
+ } else if (func == ff_break && code <= kMaxStopCode) {
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ switch (func) {
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (instr->bits(15, 6) == kWasmTrapCode) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc(int64_t(newPC));
+ return;
+ }
+ }
+ };
+    // All remaining break codes, and all traps, are handled here.
+ MipsDebugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
+bool Simulator::isWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::printWatchpoint(uint32_t code) {
+ MipsDebugger dbg(this);
+ ++break_count_;
+ printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64
+ ") ----\n",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::handleStop(uint32_t code, SimInstruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ MipsDebugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+ }
+}
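+// A stop is encoded as a break instruction followed in the instruction
+// stream by a word holding the message address, which is why a disabled
+// stop is skipped by advancing the pc two instruction sizes.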
+
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = U32(instr->bits(25, 6));
+ return (func == ff_break) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void Simulator::enableStop(uint32_t code) {
+ if (!isEnabledStop(code)) {
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::disableStop(uint32_t code) {
+ if (isEnabledStop(code)) {
+ watchedStops_[code].count_ |= kStopDisabledBit;
+ }
+}
+
+void Simulator::increaseStopCounter(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+ printf(
+ "Stop counter for code %i has overflowed.\n"
+ "Enabling this code and reseting the counter to 0.\n",
+ code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
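+// The top bit of count_ is the disable flag (kStopDisabledBit), so only the
+// low 31 bits hold the hit count; hence the overflow test above masks with
+// ~(1 << 31) and compares against 0x7fffffff.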
+
+// Print a stop status.
+void Simulator::printStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+ count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+}
+
+void Simulator::signalExceptions() {
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0) {
+ MOZ_CRASH("Error: Exception raised.");
+ }
+ }
+}
+
+// Helper function for decodeTypeRegister.
+void Simulator::configureTypeRegister(SimInstruction* instr, int64_t& alu_out,
+ __int128& i128hilo,
+ unsigned __int128& u128hilo,
+ int64_t& next_pc,
+ int32_t& return_addr_reg,
+ bool& do_interrupt) {
+  // Every local variable declared here should be const; results must flow
+  // back to decodeTypeRegister through the reference out-parameters.
+
+ // Instruction fields.
+ const OpcodeField op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int64_t rs = getRegister(rs_reg);
+ const int32_t rt_reg = instr->rtValue();
+ const int64_t rt = getRegister(rt_reg);
+ const int32_t rd_reg = instr->rdValue();
+ const uint32_t sa = instr->saValue();
+
+ const int32_t fs_reg = instr->fsValue();
+ __int128 temp;
+
+ // ---------- Configuration.
+ switch (op) {
+ case op_cop1: // Coprocessor instructions.
+ switch (instr->rsFieldRaw()) {
+        case rs_bc1: // Handled in decodeTypeImmediate, should never come
+                     // here.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
+ case rs_mfc1:
+ alu_out = getFpuRegisterLo(fs_reg);
+ break;
+ case rs_dmfc1:
+ alu_out = getFpuRegister(fs_reg);
+ break;
+ case rs_mfhc1:
+ alu_out = getFpuRegisterHi(fs_reg);
+ break;
+ case rs_ctc1:
+ case rs_mtc1:
+ case rs_dmtc1:
+ case rs_mthc1:
+ // Do the store in the execution step.
+ break;
+ case rs_s:
+ case rs_d:
+ case rs_w:
+ case rs_l:
+ case rs_ps:
+ // Do everything in the execution step.
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_cop1x:
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ next_pc = getRegister(instr->rsValue());
+ return_addr_reg = instr->rdValue();
+ break;
+ case ff_sll:
+ alu_out = I64(I32(rt) << sa);
+ break;
+ case ff_dsll:
+ alu_out = rt << sa;
+ break;
+ case ff_dsll32:
+ alu_out = rt << (sa + 32);
+ break;
+ case ff_srl:
+ if (rs_reg == 0) {
+            // Regular logical right shift of a word by a fixed number of
+            // bits. The RS field is always equal to 0.
+ alu_out = I64(I32(U32(I32_CHECK(rt)) >> sa));
+ } else {
+            // Logical right-rotate of a word by a fixed number of bits.
+            // This is a special case of the SRL instruction, added in
+            // MIPS32 Release 2. The RS field is equal to 00001.
+ alu_out = I64(I32((U32(I32_CHECK(rt)) >> sa) |
+ (U32(I32_CHECK(rt)) << (32 - sa))));
+ }
+ break;
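+          // Example of the rotate path: for rt == 0x11223344 and sa == 8,
+          // (0x11223344 >> 8) | (0x11223344 << 24) == 0x44112233.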
+ case ff_dsrl:
+ if (rs_reg == 0) {
+            // Regular logical right shift of a double word by a fixed
+            // number of bits. The RS field is always equal to 0.
+ alu_out = U64(rt) >> sa;
+ } else {
+            // Logical right-rotate of a double word by a fixed number of
+            // bits. This is a special case of the DSRL instruction, added
+            // in MIPS64 Release 2. The RS field is equal to 00001.
+ alu_out = (U64(rt) >> sa) | (U64(rt) << (64 - sa));
+ }
+ break;
+ case ff_dsrl32:
+ if (rs_reg == 0) {
+            // Regular logical right shift of a double word by a fixed
+            // number of bits. The RS field is always equal to 0.
+ alu_out = U64(rt) >> (sa + 32);
+ } else {
+ // Logical right-rotate of a double word by a fixed number of bits.
+            // This is a special case of the DSRL32 instruction, added in
+            // MIPS64 Release 2. The RS field is equal to 00001.
+ alu_out = (U64(rt) >> (sa + 32)) | (U64(rt) << (64 - (sa + 32)));
+ }
+ break;
+ case ff_sra:
+ alu_out = I64(I32_CHECK(rt)) >> sa;
+ break;
+ case ff_dsra:
+ alu_out = rt >> sa;
+ break;
+ case ff_dsra32:
+ alu_out = rt >> (sa + 32);
+ break;
+ case ff_sllv:
+ alu_out = I64(I32(rt) << rs);
+ break;
+ case ff_dsllv:
+ alu_out = rt << rs;
+ break;
+ case ff_srlv:
+ if (sa == 0) {
+            // Regular logical right-shift of a word by a variable number
+            // of bits. The SA field is always equal to 0.
+ alu_out = I64(I32(U32(I32_CHECK(rt)) >> rs));
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in
+            // MIPS32 Release 2. The SA field is equal to 00001.
+ alu_out = I64(I32((U32(I32_CHECK(rt)) >> rs) |
+ (U32(I32_CHECK(rt)) << (32 - rs))));
+ }
+ break;
+ case ff_dsrlv:
+ if (sa == 0) {
+            // Regular logical right-shift of a double word by a variable
+            // number of bits. The SA field is always equal to 0.
+ alu_out = U64(rt) >> rs;
+ } else {
+ // Logical right-rotate of a double word by a variable number of
+            // bits. This is a special case of the DSRLV instruction, added
+            // in MIPS64 Release 2. The SA field is equal to 00001.
+ alu_out = (U64(rt) >> rs) | (U64(rt) << (64 - rs));
+ }
+ break;
+ case ff_srav:
+ alu_out = I64(I32_CHECK(rt) >> rs);
+ break;
+ case ff_dsrav:
+ alu_out = rt >> rs;
+ break;
+ case ff_mfhi:
+ alu_out = getRegister(HI);
+ break;
+ case ff_mflo:
+ alu_out = getRegister(LO);
+ break;
+ case ff_mult:
+ i128hilo = I64(U32(I32_CHECK(rs))) * I64(U32(I32_CHECK(rt)));
+ break;
+ case ff_dmult:
+ i128hilo = I128(rs) * I128(rt);
+ break;
+ case ff_multu:
+ u128hilo = U64(U32(I32_CHECK(rs))) * U64(U32(I32_CHECK(rt)));
+ break;
+ case ff_dmultu:
+ u128hilo = U128(rs) * U128(rt);
+ break;
+ case ff_add:
+ alu_out = I32_CHECK(rs) + I32_CHECK(rt);
+ if ((alu_out << 32) != (alu_out << 31)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I32(alu_out);
+ break;
+ case ff_dadd:
+ temp = I128(rs) + I128(rt);
+ if ((temp << 64) != (temp << 63)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I64(temp);
+ break;
+ case ff_addu:
+ alu_out = I32(I32_CHECK(rs) + I32_CHECK(rt));
+ break;
+ case ff_daddu:
+ alu_out = rs + rt;
+ break;
+ case ff_sub:
+ alu_out = I32_CHECK(rs) - I32_CHECK(rt);
+ if ((alu_out << 32) != (alu_out << 31)) {
+ exceptions[kIntegerUnderflow] = 1;
+ }
+ alu_out = I32(alu_out);
+ break;
+ case ff_dsub:
+ temp = I128(rs) - I128(rt);
+ if ((temp << 64) != (temp << 63)) {
+ exceptions[kIntegerUnderflow] = 1;
+ }
+ alu_out = I64(temp);
+ break;
+ case ff_subu:
+ alu_out = I32(I32_CHECK(rs) - I32_CHECK(rt));
+ break;
+ case ff_dsubu:
+ alu_out = rs - rt;
+ break;
+ case ff_and:
+ alu_out = rs & rt;
+ break;
+ case ff_or:
+ alu_out = rs | rt;
+ break;
+ case ff_xor:
+ alu_out = rs ^ rt;
+ break;
+ case ff_nor:
+ alu_out = ~(rs | rt);
+ break;
+ case ff_slt:
+ alu_out = I64(rs) < I64(rt) ? 1 : 0;
+ break;
+ case ff_sltu:
+ alu_out = U64(rs) < U64(rt) ? 1 : 0;
+ break;
+ case ff_sync:
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ do_interrupt = true;
+ break;
+ case ff_tge:
+ do_interrupt = rs >= rt;
+ break;
+ case ff_tgeu:
+ do_interrupt = U64(rs) >= U64(rt);
+ break;
+ case ff_tlt:
+ do_interrupt = rs < rt;
+ break;
+ case ff_tltu:
+ do_interrupt = U64(rs) < U64(rt);
+ break;
+ case ff_teq:
+ do_interrupt = rs == rt;
+ break;
+ case ff_tne:
+ do_interrupt = rs != rt;
+ break;
+ case ff_movn:
+ case ff_movz:
+ case ff_movci:
+ // No action taken on decode.
+ break;
+ case ff_div:
+ if (I32_CHECK(rs) == INT_MIN && I32_CHECK(rt) == -1) {
+ i128hilo = U32(INT_MIN);
+ } else {
+ uint32_t div = I32_CHECK(rs) / I32_CHECK(rt);
+ uint32_t mod = I32_CHECK(rs) % I32_CHECK(rt);
+ i128hilo = (I64(mod) << 32) | div;
+ }
+ break;
+ case ff_ddiv:
+ if (I64(rs) == INT64_MIN && I64(rt) == -1) {
+ i128hilo = U64(INT64_MIN);
+ } else {
+ uint64_t div = rs / rt;
+ uint64_t mod = rs % rt;
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ case ff_divu: {
+ uint32_t div = U32(I32_CHECK(rs)) / U32(I32_CHECK(rt));
+ uint32_t mod = U32(I32_CHECK(rs)) % U32(I32_CHECK(rt));
+ i128hilo = (U64(mod) << 32) | div;
+ } break;
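+        // The div family packs the remainder above the quotient: word
+        // forms use (mod << 32) | div, doubleword forms (mod << 64) | div.
+        // E.g. 7 / 2 leaves div == 3 and mod == 1, so i128hilo ==
+        // (1ull << 32) | 3; the execution step later splits this into HI
+        // and LO.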
+ case ff_ddivu:
+ if (0 == rt) {
+ i128hilo = (I128(Unpredictable) << 64) | I64(Unpredictable);
+ } else {
+ uint64_t div = U64(rs) / U64(rt);
+ uint64_t mod = U64(rs) % U64(rt);
+ i128hilo = (I128(mod) << 64) | div;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ alu_out = I32(I32_CHECK(rs) *
+ I32_CHECK(rt)); // Only the lower 32 bits are kept.
+ break;
+ case ff_clz:
+ alu_out = U32(I32_CHECK(rs)) ? __builtin_clz(U32(I32_CHECK(rs))) : 32;
+ break;
+ case ff_dclz:
+ alu_out = U64(rs) ? __builtin_clzl(U64(rs)) : 64;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ if (lsb > msb) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = I32((U32(I32_CHECK(rt)) & ~(mask << lsb)) |
+ ((U32(I32_CHECK(rs)) & mask) << lsb));
+ }
+ break;
+ }
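+        // Example: ins with msb == 11 and lsb == 4 inserts an 8-bit field
+        // (mask == 0xff); for rt == 0xffffffff and rs == 0xab the result
+        // is (0xffffffff & ~(0xff << 4)) | (0xab << 4) == 0xfffffabf.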
+ case ff_dins: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ if (lsb > msb) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ }
+ break;
+ }
+ case ff_dinsm: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ break;
+ }
+ case ff_dinsu: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa + 32;
+ uint16_t size = msb - lsb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ if (sa > msb) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rt) & ~(mask << lsb)) | ((U64(rs) & mask) << lsb);
+ }
+ break;
+ }
+ case ff_ext: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ if ((lsb + msb) > 31) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U32(I32_CHECK(rs)) & (mask << lsb)) >> lsb;
+ }
+ break;
+ }
+ case ff_dext: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ break;
+ }
+ case ff_dextm: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 33;
+ uint64_t mask = (1ul << size) - 1;
+ if ((lsb + msb + 32 + 1) > 64) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ }
+ break;
+ }
+ case ff_dextu: { // Mips64r2 instruction.
+ // Interpret rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa + 32;
+ uint16_t size = msb + 1;
+ uint64_t mask = (1ul << size) - 1;
+ if ((lsb + msb + 1) > 64) {
+ alu_out = Unpredictable;
+ } else {
+ alu_out = (U64(rs) & (mask << lsb)) >> lsb;
+ }
+ break;
+ }
+ case ff_bshfl: { // Mips32r2 instruction.
+ if (16 == sa) { // seb
+ alu_out = I64(I8(I32_CHECK(rt)));
+ } else if (24 == sa) { // seh
+ alu_out = I64(I16(I32_CHECK(rt)));
+ } else if (2 == sa) { // wsbh
+ uint32_t input = U32(I32_CHECK(rt));
+ uint64_t output = 0;
+
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+ alu_out = I64(I32(output));
+ } else {
+ MOZ_CRASH();
+ }
+ break;
+ }
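+        // seb/seh sign-extend the low byte/halfword; wsbh swaps the bytes
+        // within each halfword, e.g. 0xaabbccdd becomes 0xbbaaddcc.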
+ case ff_dbshfl: { // Mips64r2 instruction.
+ uint64_t input = U64(rt);
+ uint64_t output = 0;
+
+ if (2 == sa) { // dsbh
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0)
+ tmp = tmp >> 8;
+ else
+ tmp = tmp << 8;
+
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+ } else if (5 == sa) { // dshd
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i == 0)
+ tmp = tmp >> 48;
+ else if (i == 1)
+ tmp = tmp >> 16;
+ else if (i == 2)
+ tmp = tmp << 16;
+ else
+ tmp = tmp << 48;
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+ } else {
+ MOZ_CRASH();
+ }
+
+ alu_out = I64(output);
+ break;
+ }
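+        // dsbh swaps the bytes within each of the four halfwords, while
+        // dshd reverses the halfword order: 0x1122334455667788 becomes
+        // 0x7788556633441122 under dshd.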
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ default:
+ MOZ_CRASH();
+ };
+}
+
+// Handle execution based on instruction types.
+void Simulator::decodeTypeRegister(SimInstruction* instr) {
+ // Instruction fields.
+ const OpcodeField op = instr->opcodeFieldRaw();
+ const int32_t rs_reg = instr->rsValue();
+ const int64_t rs = getRegister(rs_reg);
+ const int32_t rt_reg = instr->rtValue();
+ const int64_t rt = getRegister(rt_reg);
+ const int32_t rd_reg = instr->rdValue();
+
+ const int32_t fr_reg = instr->frValue();
+ const int32_t fs_reg = instr->fsValue();
+ const int32_t ft_reg = instr->ftValue();
+ const int32_t fd_reg = instr->fdValue();
+ __int128 i128hilo = 0;
+ unsigned __int128 u128hilo = 0;
+
+ // ALU output.
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int64_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr.
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc
+ int64_t next_pc = 0;
+ int32_t return_addr_reg = 31;
+
+ // Set up the variables if needed before executing the instruction.
+ configureTypeRegister(instr, alu_out, i128hilo, u128hilo, next_pc,
+ return_addr_reg, do_interrupt);
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ MOZ_CRASH();
+ break;
+ case rs_cfc1:
+ setRegister(rt_reg, alu_out);
+          break;
+ case rs_mfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_dmfc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_mfhc1:
+ setRegister(rt_reg, alu_out);
+ break;
+ case rs_ctc1:
+ // At the moment only FCSR is supported.
+ MOZ_ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
+ case rs_mtc1:
+ setFpuRegisterLo(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_dmtc1:
+ setFpuRegister(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_mthc1:
+ setFpuRegisterHi(fs_reg, registers_[rt_reg]);
+ break;
+ case rs_s:
+ float f, ft_value, fs_value;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs_value = getFpuRegisterFloat(fs_reg);
+ ft_value = getFpuRegisterFloat(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value + ft_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value - ft_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value * ft_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value / ft_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterFloat(fd_reg, fabsf(fs_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterFloat(fd_reg, fs_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterFloat(fd_reg, -fs_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterFloat(fd_reg, sqrtf(fs_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, std::isnan(fs_value) || std::isnan(ft_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (fs_value == ft_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value == ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (fs_value < ft_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value < ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (fs_value <= ft_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (fs_value <= ft_value) ||
+ (std::isnan(fs_value) || std::isnan(ft_value)));
+ break;
+ case ff_cvt_d_fmt:
+ f = getFpuRegisterFloat(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+ break;
+ case ff_cvt_w_fmt: // Convert float to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+          case ff_round_w_fmt: { // Round float to word (round half to
+                                 // even).
+ float rounded = std::floor(fs_value + 0.5);
+ int32_t result = I32(rounded);
+ if ((result & 1) != 0 && result - fs_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
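+          // Round-half-to-even in action: 2.5 first rounds to 3, then the
+          // odd result with an exact 0.5 remainder is decremented to 2,
+          // whereas 3.5 rounds directly to the even 4.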
+ case ff_trunc_w_fmt: { // Truncate float to word (round towards 0).
+ float rounded = truncf(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round float to word towards negative
+ // infinity.
+ float rounded = std::floor(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+          case ff_ceil_w_fmt: { // Round float to word towards positive
+                                // infinity.
+ float rounded = std::ceil(fs_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(fs_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_l_fmt: // Mips64r2: Truncate float to 64-bit long-word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+ case ff_round_l_fmt: { // Mips64r2 instruction.
+ float rounded = fs_value > 0 ? std::floor(fs_value + 0.5)
+ : std::ceil(fs_value - 0.5);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
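+          // Unlike the word variant above, round_l rounds halfway cases
+          // away from zero (floor(x + 0.5) for positive x, ceil(x - 0.5)
+          // for negative x) with no adjustment to the even neighbour.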
+ case ff_trunc_l_fmt: { // Mips64r2 instruction.
+ float rounded = truncf(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_floor_l_fmt: { // Mips64r2 instruction.
+ float rounded = std::floor(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_ceil_l_fmt: { // Mips64r2 instruction.
+ float rounded = std::ceil(fs_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(fs_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_cvt_ps_s:
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ case ff_movf_fmt:
+ if (testFCSRBit(fcsr_cc)) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ case ff_movz_fmt:
+ if (rt == 0) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ case ff_movn_fmt:
+ if (rt != 0) {
+ setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_d:
+ double dt_value, ds_value;
+ ds_value = getFpuRegisterDouble(fs_reg);
+ dt_value = getFpuRegisterDouble(ft_reg);
+ cc = instr->fcccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ switch (instr->functionFieldRaw()) {
+ case ff_add_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value + dt_value);
+ break;
+ case ff_sub_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value - dt_value);
+ break;
+ case ff_mul_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value * dt_value);
+ break;
+ case ff_div_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value / dt_value);
+ break;
+ case ff_abs_fmt:
+ setFpuRegisterDouble(fd_reg, fabs(ds_value));
+ break;
+ case ff_mov_fmt:
+ setFpuRegisterDouble(fd_reg, ds_value);
+ break;
+ case ff_neg_fmt:
+ setFpuRegisterDouble(fd_reg, -ds_value);
+ break;
+ case ff_sqrt_fmt:
+ setFpuRegisterDouble(fd_reg, sqrt(ds_value));
+ break;
+ case ff_c_un_fmt:
+ setFCSRBit(fcsr_cc, std::isnan(ds_value) || std::isnan(dt_value));
+ break;
+ case ff_c_eq_fmt:
+ setFCSRBit(fcsr_cc, (ds_value == dt_value));
+ break;
+ case ff_c_ueq_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value == dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_c_olt_fmt:
+ setFCSRBit(fcsr_cc, (ds_value < dt_value));
+ break;
+ case ff_c_ult_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value < dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_c_ole_fmt:
+ setFCSRBit(fcsr_cc, (ds_value <= dt_value));
+ break;
+ case ff_c_ule_fmt:
+ setFCSRBit(fcsr_cc,
+ (ds_value <= dt_value) ||
+ (std::isnan(ds_value) || std::isnan(dt_value)));
+ break;
+ case ff_cvt_w_fmt: // Convert double to word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+ case ff_round_w_fmt: { // Round double to word (round half to
+ // even).
+ double rounded = std::floor(ds_value + 0.5);
+ int32_t result = I32(rounded);
+ if ((result & 1) != 0 && result - ds_value == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_trunc_w_fmt: { // Truncate double to word (round towards
+ // 0).
+ double rounded = trunc(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_floor_w_fmt: { // Round double to word towards negative
+ // infinity.
+ double rounded = std::floor(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_ceil_w_fmt: { // Round double to word towards positive
+ // infinity.
+ double rounded = std::ceil(ds_value);
+ int32_t result = I32(rounded);
+ setFpuRegisterLo(fd_reg, result);
+ if (setFCSRRoundError<int32_t>(ds_value, rounded)) {
+ setFpuRegisterLo(fd_reg, kFPUInvalidResult);
+ }
+ break;
+ }
+ case ff_cvt_s_fmt: // Convert double to float (single).
+ setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
+ break;
+ case ff_cvt_l_fmt: // Mips64r2: Truncate double to 64-bit
+ // long-word.
+ // Rounding modes are not yet supported.
+ MOZ_ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ [[fallthrough]];
+ case ff_round_l_fmt: { // Mips64r2 instruction.
+ double rounded = ds_value > 0 ? std::floor(ds_value + 0.5)
+ : std::ceil(ds_value - 0.5);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_trunc_l_fmt: { // Mips64r2 instruction.
+ double rounded = trunc(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_floor_l_fmt: { // Mips64r2 instruction.
+ double rounded = std::floor(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_ceil_l_fmt: { // Mips64r2 instruction.
+ double rounded = std::ceil(ds_value);
+ i64 = I64(rounded);
+ setFpuRegister(fd_reg, i64);
+ if (setFCSRRoundError<int64_t>(ds_value, rounded)) {
+ setFpuRegister(fd_reg, kFPUInvalidResult64);
+ }
+ break;
+ }
+ case ff_c_f_fmt:
+ MOZ_CRASH();
+ break;
+ case ff_movz_fmt:
+ if (rt == 0) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ case ff_movn_fmt:
+ if (rt != 0) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ case ff_movf_fmt:
+            // The cc field of MOVF occupies the same bits as in the float
+            // branch instructions.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ if (testFCSRBit(fcsr_cc)) {
+ setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_w:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_s_fmt: // Convert word to float (single).
+ i64 = getFpuRegisterLo(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(i64));
+ break;
+ case ff_cvt_d_fmt: // Convert word to double.
+ i64 = getFpuRegisterLo(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case rs_l:
+ switch (instr->functionFieldRaw()) {
+ case ff_cvt_d_fmt: // Mips64r2 instruction.
+ i64 = getFpuRegister(fs_reg);
+ setFpuRegisterDouble(fd_reg, static_cast<double>(i64));
+ break;
+ case ff_cvt_s_fmt:
+ i64 = getFpuRegister(fs_reg);
+ setFpuRegisterFloat(fd_reg, static_cast<float>(i64));
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ case rs_ps:
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_cop1x:
+ switch (instr->functionFieldRaw()) {
+ case ff_madd_s:
+ float fr, ft, fs;
+ fr = getFpuRegisterFloat(fr_reg);
+ fs = getFpuRegisterFloat(fs_reg);
+ ft = getFpuRegisterFloat(ft_reg);
+ setFpuRegisterFloat(fd_reg, fs * ft + fr);
+ break;
+ case ff_madd_d:
+ double dr, dt, ds;
+ dr = getFpuRegisterDouble(fr_reg);
+ ds = getFpuRegisterDouble(fs_reg);
+ dt = getFpuRegisterDouble(ft_reg);
+ setFpuRegisterDouble(fd_reg, ds * dt + dr);
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ case op_special:
+ switch (instr->functionFieldRaw()) {
+ case ff_jr: {
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc +
+ SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ case ff_jalr: {
+ SimInstruction* branch_delay_instr =
+ reinterpret_cast<SimInstruction*>(current_pc +
+ SimInstruction::kInstrSize);
+ setRegister(return_addr_reg,
+ current_pc + 2 * SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ set_pc(next_pc);
+ pc_modified_ = true;
+ break;
+ }
+ // Instructions using HI and LO registers.
+ case ff_mult:
+ setRegister(LO, I32(i128hilo & 0xffffffff));
+ setRegister(HI, I32(i128hilo >> 32));
+ break;
+ case ff_dmult:
+ setRegister(LO, I64(i128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(i128hilo >> 64));
+ break;
+ case ff_multu:
+ setRegister(LO, I32(u128hilo & 0xffffffff));
+ setRegister(HI, I32(u128hilo >> 32));
+ break;
+ case ff_dmultu:
+ setRegister(LO, I64(u128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(u128hilo >> 64));
+ break;
+ case ff_div:
+ case ff_divu:
+          // Divide by zero and overflow were not checked in the
+          // configuration step - div and divu do not raise exceptions. On
+          // division by 0 the result is UNPREDICTABLE. On overflow
+          // (INT_MIN/-1), INT_MIN is returned, which is what the hardware
+          // does.
+ setRegister(LO, I32(i128hilo & 0xffffffff));
+ setRegister(HI, I32(i128hilo >> 32));
+ break;
+ case ff_ddiv:
+ case ff_ddivu:
+          // Divide by zero and overflow were not checked in the
+          // configuration step - ddiv and ddivu do not raise exceptions. On
+          // division by 0 the result is UNPREDICTABLE. On overflow
+          // (INT64_MIN/-1), INT64_MIN is returned, which is what the
+          // hardware does.
+ setRegister(LO, I64(i128hilo & 0xfffffffffffffffful));
+ setRegister(HI, I64(i128hilo >> 64));
+ break;
+ case ff_sync:
+ break;
+ // Break and trap instructions.
+ case ff_break:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (do_interrupt) {
+ softwareInterrupt(instr);
+ }
+ break;
+ // Conditional moves.
+ case ff_movn:
+ if (rt) {
+ setRegister(rd_reg, rs);
+ }
+ break;
+ case ff_movci: {
+ uint32_t cc = instr->fbccValue();
+ uint32_t fcsr_cc = GetFCSRConditionBit(cc);
+ if (instr->bit(16)) { // Read Tf bit.
+ if (testFCSRBit(fcsr_cc)) {
+ setRegister(rd_reg, rs);
+ }
+ } else {
+ if (!testFCSRBit(fcsr_cc)) {
+ setRegister(rd_reg, rs);
+ }
+ }
+ break;
+ }
+ case ff_movz:
+ if (!rt) {
+ setRegister(rd_reg, rs);
+ }
+ break;
+ default: // For other special opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ };
+ break;
+ case op_special2:
+ switch (instr->functionFieldRaw()) {
+ case ff_mul:
+ setRegister(rd_reg, alu_out);
+ // HI and LO are UNPREDICTABLE after the operation.
+ setRegister(LO, Unpredictable);
+ setRegister(HI, Unpredictable);
+ break;
+ default: // For other special2 opcodes we do the default operation.
+ setRegister(rd_reg, alu_out);
+ }
+ break;
+ case op_special3:
+ switch (instr->functionFieldRaw()) {
+ case ff_ins:
+ case ff_dins:
+ case ff_dinsm:
+ case ff_dinsu:
+ // Ins instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_ext:
+ case ff_dext:
+ case ff_dextm:
+ case ff_dextu:
+ // Ext instr leaves result in Rt, rather than Rd.
+ setRegister(rt_reg, alu_out);
+ break;
+ case ff_bshfl:
+ setRegister(rd_reg, alu_out);
+ break;
+ case ff_dbshfl:
+ setRegister(rd_reg, alu_out);
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ // Unimplemented opcodes raised an error in the configuration step before,
+ // so we can use the default here to set the destination register in
+ // common cases.
+ default:
+ setRegister(rd_reg, alu_out);
+ };
+}
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::decodeTypeImmediate(SimInstruction* instr) {
+ // Instruction fields.
+ OpcodeField op = instr->opcodeFieldRaw();
+ int64_t rs = getRegister(instr->rsValue());
+ int32_t rt_reg = instr->rtValue(); // Destination register.
+ int64_t rt = getRegister(rt_reg);
+ int16_t imm16 = instr->imm16Value();
+
+ int32_t ft_reg = instr->ftValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int64_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ uint64_t addr = 0x0;
+ // Value to be written in memory.
+ uint64_t mem_value = 0x0;
+ __int128 temp;
+
+ // ---------- Configuration (and execution for op_regimm).
+ switch (op) {
+ // ------------- op_cop1. Coprocessor instructions.
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ cc_value = testFCSRBit(fcsr_cc);
+ do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ break;
+ // ------------- op_regimm class.
+ case op_regimm:
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ do_branch = (rs < 0);
+ break;
+ case rt_bltzal:
+ do_branch = rs < 0;
+ break;
+ case rt_bgez:
+ do_branch = rs >= 0;
+ break;
+ case rt_bgezal:
+ do_branch = rs >= 0;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bltzal:
+ case rt_bgez:
+ case rt_bgezal:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ break;
+ };
+ break; // case op_regimm.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we
+ // don't need to replace rt with zero.
+ case op_beq:
+ do_branch = (rs == rt);
+ break;
+ case op_bne:
+ do_branch = rs != rt;
+ break;
+ case op_blez:
+ do_branch = rs <= 0;
+ break;
+ case op_bgtz:
+ do_branch = rs > 0;
+ break;
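+    // Branch targets are relative to the delay-slot pc: target ==
+    // current_pc + 4 + (imm16 << 2), so imm16 == -1 branches back to the
+    // branch instruction itself.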
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ alu_out = I32_CHECK(rs) + se_imm16;
+ if ((alu_out << 32) != (alu_out << 31)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I32_CHECK(alu_out);
+ break;
+ case op_daddi:
+ temp = alu_out = rs + se_imm16;
+ if ((temp << 64) != (temp << 63)) {
+ exceptions[kIntegerOverflow] = 1;
+ }
+ alu_out = I64(temp);
+ break;
+ case op_addiu:
+ alu_out = I32(I32_CHECK(rs) + se_imm16);
+ break;
+ case op_daddiu:
+ alu_out = rs + se_imm16;
+ break;
+ case op_slti:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case op_sltiu:
+ alu_out = (U64(rs) < U64(se_imm16)) ? 1 : 0;
+ break;
+ case op_andi:
+ alu_out = rs & oe_imm16;
+ break;
+ case op_ori:
+ alu_out = rs | oe_imm16;
+ break;
+ case op_xori:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case op_lui:
+ alu_out = (se_imm16 << 16);
+ break;
+ // ------------- Memory instructions.
+ case op_lbu:
+ addr = rs + se_imm16;
+ alu_out = readBU(addr, instr);
+ break;
+ case op_lb:
+ addr = rs + se_imm16;
+ alu_out = readB(addr, instr);
+ break;
+ case op_lhu:
+ addr = rs + se_imm16;
+ alu_out = readHU(addr, instr);
+ break;
+ case op_lh:
+ addr = rs + se_imm16;
+ alu_out = readH(addr, instr);
+ break;
+ case op_lwu:
+ addr = rs + se_imm16;
+ alu_out = readWU(addr, instr);
+ break;
+ case op_lw:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_lwl: {
+      // al_offset is the offset of the effective address within an aligned
+      // word.
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
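+    // Example: for lwl with al_offset == 1, byte_shift == 2 and
+    // mask == 0xffff, the aligned word is shifted left 16 bits and merged
+    // with the low 16 bits of rt that the mask preserves.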
+ case op_lwr: {
+      // al_offset is the offset of the effective address within an aligned
+      // word.
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = U32(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ alu_out = I32(alu_out);
+ break;
+ }
+ case op_ll:
+ addr = rs + se_imm16;
+ alu_out = loadLinkedW(addr, instr);
+ break;
+ case op_lld:
+ addr = rs + se_imm16;
+ alu_out = loadLinkedD(addr, instr);
+ break;
+ case op_ld:
+ addr = rs + se_imm16;
+ alu_out = readDW(addr, instr);
+ break;
+ case op_ldl: {
+      // al_offset is the offset of the effective address within an aligned
+      // doubleword.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = (1ul << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_ldr: {
+      // al_offset is the offset of the effective address within an aligned
+      // doubleword.
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = al_offset ? (~0ul << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readDW(addr, instr);
+ alu_out = U64(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint8_t byte_shift = 3 - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= U32(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & 3;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_sc:
+ addr = rs + se_imm16;
+ break;
+ case op_scd:
+ addr = rs + se_imm16;
+ break;
+ case op_sd:
+ addr = rs + se_imm16;
+ break;
+ case op_sdl: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint8_t byte_shift = 7 - al_offset;
+ uint64_t mask = byte_shift ? (~0ul << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= U64(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sdr: {
+ uint8_t al_offset = (rs + se_imm16) & 7;
+ uint64_t mask = (1ul << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+      // As in op_sdl, read the full doubleword being merged into.
+      mem_value = readDW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ default:
+ MOZ_CRASH();
+ };
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_daddi:
+ case op_addiu:
+ case op_daddiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
+ case op_lbu:
+ case op_lb:
+ case op_lhu:
+ case op_lh:
+ case op_lwu:
+ case op_lw:
+ case op_lwl:
+ case op_lwr:
+ case op_ll:
+ case op_lld:
+ case op_ld:
+ case op_ldl:
+ case op_ldr:
+ setRegister(rt_reg, alu_out);
+ break;
+ case op_sb:
+ writeB(addr, I8(rt), instr);
+ break;
+ case op_sh:
+ writeH(addr, U16(rt), instr);
+ break;
+ case op_sw:
+ writeW(addr, I32(rt), instr);
+ break;
+ case op_swl:
+ writeW(addr, I32(mem_value), instr);
+ break;
+ case op_swr:
+ writeW(addr, I32(mem_value), instr);
+ break;
+ case op_sc:
+ setRegister(rt_reg, storeConditionalW(addr, I32(rt), instr));
+ break;
+ case op_scd:
+ setRegister(rt_reg, storeConditionalD(addr, rt, instr));
+ break;
+ case op_sd:
+ writeDW(addr, rt, instr);
+ break;
+ case op_sdl:
+ writeDW(addr, mem_value, instr);
+ break;
+ case op_sdr:
+ writeDW(addr, mem_value, instr);
+ break;
+ case op_lwc1:
+ setFpuRegisterLo(ft_reg, alu_out);
+ break;
+ case op_ldc1:
+ setFpuRegisterDouble(ft_reg, fp_out);
+ break;
+ case op_swc1:
+ writeW(addr, getFpuRegisterLo(ft_reg), instr);
+ break;
+ case op_sdc1:
+ writeD(addr, getFpuRegisterDouble(ft_reg), instr);
+ break;
+ default:
+ break;
+ };
+
+ if (execute_branch_delay_instruction) {
+    // Execute the branch delay slot.
+    // We don't check for end_sim_pc. First, it should not be met, as the
+    // current pc is valid. Second, a jump should always execute its branch
+    // delay slot.
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra) {
+ set_pc(next_pc);
+ }
+}
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::decodeTypeJump(SimInstruction* instr) {
+ // Get current pc.
+ int64_t current_pc = get_pc();
+ // Get unchanged bits of pc.
+ int64_t pc_high_bits = current_pc & 0xfffffffff0000000ul;
+ // Next pc.
+ int64_t next_pc = pc_high_bits | (instr->imm26Value() << 2);
+
+ // Execute branch delay slot.
+  // We don't check for end_sim_pc. First, it should not be met, as the
+  // current pc is valid. Second, a jump should always execute its branch
+  // delay slot.
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+
+ // Update pc and ra if necessary.
+ // Do this after the branch delay execution.
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ set_pc(next_pc);
+ pc_modified_ = true;
+}
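+// J-type targets replace only the low 28 bits of the pc: the upper 36 bits
+// (the 0xfffffffff0000000 mask above) are kept from the current pc, and
+// imm26 << 2 supplies the rest, so a jump stays within its 256 MB region.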
+
+// Executes the current instruction.
+void Simulator::instructionDecode(SimInstruction* instr) {
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ SimulatorProcess::checkICacheLocked(instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kRegisterType:
+ decodeTypeRegister(instr);
+ break;
+ case SimInstruction::kImmediateType:
+ decodeTypeImmediate(instr);
+ break;
+ case SimInstruction::kJumpType:
+ decodeTypeJump(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_) {
+ setRegister(pc,
+ reinterpret_cast<int64_t>(instr) + SimInstruction::kInstrSize);
+ }
+}
+
+void Simulator::branchDelayInstructionDecode(SimInstruction* instr) {
+ if (instr->instructionBits() == NopInst) {
+ // Short-cut generic nop instructions. They are always valid and they
+ // never change the simulator state.
+ return;
+ }
+
+ if (instr->isForbiddenInBranchDelay()) {
+ MOZ_CRASH("Eror:Unexpected opcode in a branch delay slot.");
+ }
+ instructionDecode(instr);
+}
+
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void Simulator::disable_single_stepping() {
+ if (!single_stepping_) {
+ return;
+ }
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template <bool enableStopSimAt>
+void Simulator::execute() {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ MipsDebugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this,
+ (void*)program_counter);
+ }
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+}
+
+void Simulator::callInternal(uint8_t* entry) {
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int64_t>(entry));
+  // Put down a marker for the end of simulation. The simulator will stop
+  // simulating when the PC reaches this value. By saving the "end simulation"
+  // value into ra, the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+  // Remember the values of the callee-saved registers, so they can be
+  // checked and restored after the simulated call returns.
+ int64_t s0_val = getRegister(s0);
+ int64_t s1_val = getRegister(s1);
+ int64_t s2_val = getRegister(s2);
+ int64_t s3_val = getRegister(s3);
+ int64_t s4_val = getRegister(s4);
+ int64_t s5_val = getRegister(s5);
+ int64_t s6_val = getRegister(s6);
+ int64_t s7_val = getRegister(s7);
+ int64_t gp_val = getRegister(gp);
+ int64_t sp_val = getRegister(sp);
+ int64_t fp_val = getRegister(fp);
+
+  // Set up the callee-saved registers with a known value, so that we can
+  // check that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1) {
+ execute<true>();
+ } else {
+ execute<false>();
+ }
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(fp, fp_val);
+}
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int64_t original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int64_t entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount) {
+ entry_stack = entry_stack - argument_count * sizeof(int64_t);
+ } else {
+ entry_stack = entry_stack - kCArgsSlotsSize;
+ }
+
+ entry_stack &= ~U64(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg)) {
+ setRegister(argReg.code(), va_arg(parameters, int64_t));
+ } else {
+ stack_argument[i] = va_arg(parameters, int64_t);
+ }
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int64_t result = getRegister(v0);
+ return result;
+}
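+
+// Illustrative use of call() (hypothetical code pointer and argument values):
+//
+//   int64_t rv = Simulator::Current()->call(code, 2, int64_t(a), int64_t(b));
+//
+// The first integer arguments travel in the a0..a7 argument registers (see
+// GetIntArgReg); any remaining arguments spill into the stack area reserved
+// above.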
+
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+  int64_t new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::popAddress() {
+  int64_t current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
diff --git a/js/src/jit/mips64/Simulator-mips64.h b/js/src/jit/mips64/Simulator-mips64.h
new file mode 100644
index 0000000000..02b2774f24
--- /dev/null
+++ b/js/src/jit/mips64/Simulator-mips64.h
@@ -0,0 +1,536 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_mips64_Simulator_mips64_h
+#define jit_mips64_Simulator_mips64_h
+
+#ifdef JS_SIMULATOR_MIPS64
+
+# include "mozilla/Atomics.h"
+
+# include "jit/IonTypes.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
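+//
+// Illustrative example (hypothetical callback) tracing every simulated pc:
+//
+//   static void TracePC(void* arg, Simulator* sim, void* pc) {
+//     fprintf(stderr, "sim pc = %p\n", pc);
+//   }
+//   sim->enable_single_stepping(TracePC, nullptr);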
+
+const intptr_t kPointerAlignment = 8;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPUInvalidResult64 = static_cast<uint64_t>(1ULL << 63) - 1;
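+// (These are INT32_MAX and INT64_MAX, the saturated results reported for
+// invalid FP-to-integer conversions.)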
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactCauseBit = 12;
+const uint32_t kFCSRUnderflowCauseBit = 13;
+const uint32_t kFCSROverflowCauseBit = 14;
+const uint32_t kFCSRDivideByZeroCauseBit = 15;
+const uint32_t kFCSRInvalidOpCauseBit = 16;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
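+// With the flag bit positions above, kFCSRFlagMask works out to 0x7c and
+// kFCSRExceptionFlagMask to 0x78 (all IEEE exception flags except inexact).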
+
+// On the MIPS64 simulator, breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+// Per thread simulator state.
+class Simulator {
+ friend class MipsDebugger;
+
+ public:
+ // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ at,
+ v0,
+ v1,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ t8,
+ t9,
+ k0,
+ k1,
+ gp,
+ sp,
+ s8,
+ ra,
+ // LO, HI, and pc.
+ LO,
+ HI,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ fp = s8
+ };
+
+ // Coprocessor registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create();
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods
+ // above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+  // Accessors for register state. Reading the pc value adheres to the MIPS
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+ void setRegister(int reg, int64_t value);
+ int64_t getRegister(int reg) const;
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int64_t value);
+ void setFpuRegisterLo(int fpureg, int32_t value);
+ void setFpuRegisterHi(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterDouble(int fpureg, double value);
+ int64_t getFpuRegister(int fpureg) const;
+ int32_t getFpuRegisterLo(int fpureg) const;
+ int32_t getFpuRegisterHi(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ template <typename T>
+ bool setFCSRRoundError(double original, double rounded);
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const {
+ return reinterpret_cast<T>(get_pc());
+ }
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes MIPS instructions until the PC reaches end_sim_pc.
+ template <bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int64_t call(uint8_t* entry, int argument_count, ...);
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+ // Returns true if pc register contains one of the 'SpecialValues' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ bool init();
+
+  // Unsupported instructions use format() to print an error and stop
+  // execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint8_t readBU(uint64_t addr, SimInstruction* instr);
+ inline int8_t readB(uint64_t addr, SimInstruction* instr);
+ inline void writeB(uint64_t addr, uint8_t value, SimInstruction* instr);
+ inline void writeB(uint64_t addr, int8_t value, SimInstruction* instr);
+
+ inline uint16_t readHU(uint64_t addr, SimInstruction* instr);
+ inline int16_t readH(uint64_t addr, SimInstruction* instr);
+ inline void writeH(uint64_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint64_t addr, int16_t value, SimInstruction* instr);
+
+ inline uint32_t readWU(uint64_t addr, SimInstruction* instr);
+ inline int32_t readW(uint64_t addr, SimInstruction* instr);
+ inline void writeW(uint64_t addr, uint32_t value, SimInstruction* instr);
+ inline void writeW(uint64_t addr, int32_t value, SimInstruction* instr);
+
+ inline int64_t readDW(uint64_t addr, SimInstruction* instr);
+ inline int64_t readDWL(uint64_t addr, SimInstruction* instr);
+ inline int64_t readDWR(uint64_t addr, SimInstruction* instr);
+ inline void writeDW(uint64_t addr, int64_t value, SimInstruction* instr);
+
+ inline double readD(uint64_t addr, SimInstruction* instr);
+ inline void writeD(uint64_t addr, double value, SimInstruction* instr);
+
+ inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalW(uint64_t addr, int32_t value,
+ SimInstruction* instr);
+
+ inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr);
+
+ // Helper function for decodeTypeRegister.
+ void configureTypeRegister(SimInstruction* instr, int64_t& alu_out,
+ __int128& i128hilo, unsigned __int128& u128hilo,
+ int64_t& next_pc, int32_t& return_addr_reg,
+ bool& do_interrupt);
+
+ // Executing is handled based on the instruction type.
+ void decodeTypeRegister(SimInstruction* instr);
+ void decodeTypeImmediate(SimInstruction* instr);
+ void decodeTypeJump(SimInstruction* instr);
+
+ // Used for breakpoints and traps.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handleWasmSegFault(uint64_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+ &newPC)) {
+ return false;
+ }
+
+ LLBit_ = false;
+ set_pc(int64_t(newPC));
+ return true;
+ }
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+ // Execute one instruction placed in a branch delay slot.
+ void branchDelayInstructionDecode(SimInstruction* instr);
+
+ public:
+ static int64_t StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle return value for runtime FP functions.
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void setCallResult(__int128 res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Coprocessor Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ bool LLBit_;
+ uintptr_t LLAddr_;
+ int64_t lastLLValue_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+ int64_t break_count_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+ // A stop is enabled, meaning the simulator will stop when meeting the
+ // instruction, if bit 31 of watchedStops_[code].count is unset.
+ // The value watchedStops_[code].count & ~(1 << 31) indicates how many times
+ // the breakpoint was hit or gone through.
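+  // For example, a count of (kStopDisabledBit | 5) means the stop is
+  // currently disabled and has been hit five times.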
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+};
+
+// Process wide simulator state.
+class SimulatorProcess {
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ ICacheCheckingDisableCount;
+ static void FlushICache(void* start, size_t size);
+
+ static void checkICacheLocked(SimInstruction* instr);
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+
+ SimulatorProcess();
+ ~SimulatorProcess();
+
+ private:
+ static SimulatorProcess* singleton_;
+
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_ MOZ_UNANNOTATED;
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ static ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->icache_;
+ }
+
+ static Redirection* redirection() {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static void setRedirection(js::jit::Redirection* redirection) {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_MIPS64 */
+
+#endif /* jit_mips64_Simulator_mips64_h */
diff --git a/js/src/jit/mips64/Trampoline-mips64.cpp b/js/src/jit/mips64/Trampoline-mips64.cpp
new file mode 100644
index 0000000000..a85e7b3702
--- /dev/null
+++ b/js/src/jit/mips64/Trampoline-mips64.cpp
@@ -0,0 +1,870 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+static_assert(sizeof(uintptr_t) == sizeof(uint64_t), "Not 32-bit clean.");
+
+struct EnterJITRegs {
+ double f31;
+ double f30;
+ double f29;
+ double f28;
+ double f27;
+ double f26;
+ double f25;
+ double f24;
+
+ uintptr_t align;
+
+ // non-volatile registers.
+ uint64_t ra;
+ uint64_t fp;
+ uint64_t s7;
+ uint64_t s6;
+ uint64_t s5;
+ uint64_t s4;
+ uint64_t s3;
+ uint64_t s2;
+ uint64_t s1;
+ uint64_t s0;
+  // Save reg_vp (a7) on the stack; it is used after the call into JIT code.
+ uint64_t a7;
+};
+
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ if (isLoongson()) {
+ // Restore non-volatile registers
+ masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_gslq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_gslq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_gslq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_gslq(s7, fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_gslq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_gslq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_gslq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_gslq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31));
+ } else {
+ // Restore non-volatile registers
+ masm.as_ld(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_ld(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_ld(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_ld(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_ld(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_ld(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_ld(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_ld(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_ld(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_ld(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_ldc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_ldc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_ldc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_ldc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
+ }
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void GeneratePrologue(MacroAssembler& masm) {
+ masm.reserveStack(sizeof(EnterJITRegs));
+
+ if (isLoongson()) {
+ masm.as_gssq(a7, s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_gssq(s1, s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_gssq(s3, s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_gssq(s5, s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_gssq(s7, fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ masm.as_gssq(f24, f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_gssq(f26, f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_gssq(f28, f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_gssq(f30, f31, StackPointer, offsetof(EnterJITRegs, f31));
+ return;
+ }
+
+ masm.as_sd(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_sd(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_sd(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_sd(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_sd(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_sd(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_sd(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_sd(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_sd(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_sd(ra, StackPointer, offsetof(EnterJITRegs, ra));
+ masm.as_sd(a7, StackPointer, offsetof(EnterJITRegs, a7));
+
+ masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_sdc1(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_sdc1(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_sdc1(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_sdc1(f31, StackPointer, offsetof(EnterJITRegs, f31));
+}
+
+// Generates a trampoline for calling JIT-compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with arguments passed by
+// the standard calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ const mozilla::DebugOnly<Register> reg_frame = IntArgReg3;
+ const Register reg_token = IntArgReg4;
+ const Register reg_chain = IntArgReg5;
+ const Register reg_values = IntArgReg6;
+ const Register reg_vp = IntArgReg7;
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ GeneratePrologue(masm);
+
+ // Save stack pointer as baseline frame.
+ masm.movePtr(StackPointer, FramePointer);
+
+ // Load the number of actual arguments into s3.
+ masm.unboxInt32(Address(reg_vp, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+  // If we are constructing, argc also needs to include newTarget.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, reg_token,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+  // Make the stack aligned.
+ masm.ma_and(s0, reg_argc, Imm32(1));
+ masm.ma_dsubu(s1, StackPointer, Imm32(sizeof(Value)));
+#ifdef MIPSR6
+ masm.as_selnez(s1, s1, s0);
+ masm.as_seleqz(StackPointer, StackPointer, s0);
+ masm.as_or(StackPointer, StackPointer, s1);
+#else
+ masm.as_movn(StackPointer, s1, s0);
+#endif
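+  // This branchless sequence leaves the stack pointer unchanged when argc is
+  // even and reserves one extra Value slot when it is odd, so that pushing
+  // the arguments below keeps sp aligned to two Values (16 bytes).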
+
+ masm.as_dsll(s0, reg_argc, 3); // Value* argv
+ masm.addPtr(reg_argv, s0); // s0 = &argv[argc]
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+ // If there aren't any arguments, don't do anything
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(sizeof(Value)), s0);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.push(reg_token);
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, s3, s3);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register numStackValues = reg_values;
+ regs.take(numStackValues);
+ Register scratch = regs.takeAny();
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, &returnLabel);
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = FramePointer;
+ masm.movePtr(StackPointer, framePtr);
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.movePtr(sp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.ma_dsll(scratch, numStackValues, Imm32(3));
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.reserveStack(3 * sizeof(uintptr_t));
+ masm.storePtr(
+ ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, 2 * sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(
+ zero, Address(StackPointer, sizeof(uintptr_t))); // fake return address
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // No GC things to mark, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr,
+ Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ regs.add(OsrFrameReg);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(framePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.ma_move(R1.scratchReg(), reg_chain);
+ }
+
+ // The call will push the return address on the stack, thus we check that
+ // the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+  // Call the function, pushing the return address onto the stack.
+ masm.callJitNoProfiler(reg_code);
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Discard arguments and padding. Set sp to the address of the EnterJITRegs
+ // on the stack.
+ masm.mov(FramePointer, StackPointer);
+
+  // Store the return value into vp.
+ masm.as_ld(reg_vp, StackPointer, offsetof(EnterJITRegs, a7));
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+  // The stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+  // Reserve space for the BailoutInfo pointer. Two words, to preserve the
+  // alignment required by setupAlignedABICall.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a1);
+
+ using Fn =
+ bool (*)(InvalidationBailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(a2);
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ // Do not erase the frame pointer in this function.
+
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+ masm.pushReturnAddress();
+
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, s3);
+
+ Register numActArgsReg = a6;
+ Register calleeTokenReg = a7;
+ Register numArgsReg = a5;
+
+ // Load |nformals| into numArgsReg.
+ masm.loadPtr(
+ Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg);
+ masm.loadFunctionArgCount(numArgsReg, numArgsReg);
+
+  // Stash another copy in t3, since we are going to do destructive operations
+  // on numArgsReg.
+ masm.mov(numArgsReg, t3);
+
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(calleeTokenReg, t2);
+ masm.ma_and(t2, Imm32(uint32_t(CalleeToken_FunctionConstructing)));
+
+ // Including |this|, and |new.target|, there are (|nformals| + 1 +
+ // isConstructing) arguments to push to the stack. Then we push a
+ // JitFrameLayout. We compute the padding expressed in the number of extra
+ // |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment));
+ masm.add32(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ numArgsReg);
+ masm.add32(t2, numArgsReg);
+ masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg);
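+  // Worked example (assuming JitStackValueAlignment == 2): for a callee with
+  // nformals == 3 and not constructing, this computes (3 + 1 + 1) & ~1 == 4
+  // Value slots, i.e. |this| plus the three formals, already an even count.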
+
+ // Load the number of |undefined|s to push into t1. Subtract 1 for |this|.
+ masm.as_dsubu(t1, numArgsReg, s3);
+ masm.sub32(Imm32(1), t1);
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- sp
+ // '--- s3 ----'
+ //
+ // Rectifier frame:
+ // [fp'][undef] [undef] [undef] [arg2] [arg1] [this] [ [argc] [callee]
+ // [descr] [raddr] ]
+ // '-------- t1 ---------' '--- s3 ----'
+
+ // Copy number of actual arguments into numActArgsReg
+  masm.mov(s3, numActArgsReg);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(t0));
+
+ // Push undefined. (including the padding)
+ {
+ Label undefLoopTop;
+
+ masm.bind(&undefLoopTop);
+ masm.sub32(Imm32(1), t1);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+ // Get the topmost argument.
+ masm.ma_dsll(t0, s3, Imm32(3)); // t0 <- nargs * 8
+ masm.as_daddu(t1, FramePointer, t0); // t1 <- fp(saved sp) + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t1);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+
+ masm.addPtr(Imm32(1), s3);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.sub32(Imm32(1), s3);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.loadValue(Address(t1, 0), ValueOperand(t0));
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+ masm.subPtr(Imm32(sizeof(Value)), t1);
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // if constructing, copy newTarget
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(t0);
+
+ // Load vp[argc]. Add sizeof(Value) for |this|.
+ BaseIndex newTargetSrc(FramePointer, numActArgsReg, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+ //
+ //
+ // Rectifier frame:
+ // [fp'] <- fp [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [ [argc]
+ // [callee] [descr] [raddr] ]
+
+ // Construct JitFrameLayout.
+ masm.push(calleeTokenReg);
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, numActArgsReg,
+ numActArgsReg);
+
+ // Call the target function.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+ masm.callJitNoProfiler(t1);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ masm.callJitNoProfiler(t1);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+/* When a bailout is done via out-of-line code (lazy bailout), the frame size
+ * is stored in $ra (see CodeGeneratorMIPS64::generateOutOfLineCode()) and the
+ * thunk code must save it on the stack. Another difference is that the
+ * members snapshotOffset_ and padding_ are pushed to the stack by
+ * CodeGeneratorMIPS64::visitOutOfLineBailout().
+ */
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push the frameSize_ stored in ra
+ // See: CodeGeneratorMIPS64::generateOutOfLineCode()
+ masm.push(ra);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, a0);
+
+ // Put pointer to BailoutInfo
+ static const uint32_t sizeOfBailoutInfo = sizeof(uintptr_t) * 2;
+ masm.subPtr(Imm32(sizeOfBailoutInfo), StackPointer);
+ masm.movePtr(StackPointer, a1);
+
+ using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ // Get BailoutInfo pointer
+ masm.loadPtr(Address(StackPointer, 0), a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ const VMFunctionData& f, DynFn nativeFun,
+ uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // If it isn't a tail call, then the return address needs to be saved
+ if (f.expectTailCall == NonTailCall) {
+ masm.pushReturnAddress();
+ }
+
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+ // Save the base of the argument set stored on the stack.
+ Register argsBase = InvalidReg;
+ if (f.explicitArgs) {
+ argsBase = t1; // Use temporary register.
+ regs.take(argsBase);
+ masm.ma_daddu(argsBase, StackPointer,
+ Imm32(ExitFrameLayout::SizeWithFooter()));
+ }
+
+ // Reserve space for the outparameter.
+ Register outReg = InvalidReg;
+ switch (f.outParam) {
+ case Type_Value:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(Value));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Handle:
+ outReg = regs.takeAny();
+ masm.PushEmptyRooted(f.outParamRootType);
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Bool:
+ case Type_Int32:
+ outReg = regs.takeAny();
+      // Reserve two int32 slots so the stack stays 8-byte aligned.
+ masm.reserveStack(2 * sizeof(int32_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Pointer:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(uintptr_t));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ case Type_Double:
+ outReg = regs.takeAny();
+ masm.reserveStack(sizeof(double));
+ masm.movePtr(StackPointer, outReg);
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ masm.setupUnalignedABICall(regs.getAny());
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = 0;
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg)) {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+ } else {
+ masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+ }
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(
+ MoveOperand(argsBase, argDisp, MoveOperand::Kind::EffectiveAddress),
+ MoveOp::GENERAL);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+ MOZ_CRASH("NYI: MIPS64 callVM should not be used with 128bits values.");
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ if (InvalidReg != outReg) {
+ masm.passABIArg(outReg);
+ }
+
+ masm.callWithABI(nativeFun, MoveOp::GENERAL,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(v0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam and free any allocated stack.
+ switch (f.outParam) {
+ case Type_Handle:
+ masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+ break;
+
+ case Type_Value:
+ masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+ masm.freeStack(sizeof(Value));
+ break;
+
+ case Type_Int32:
+ masm.load32(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Pointer:
+ masm.loadPtr(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(sizeof(uintptr_t));
+ break;
+
+ case Type_Bool:
+ masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+ masm.freeStack(2 * sizeof(int32_t));
+ break;
+
+ case Type_Double:
+ masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
+ masm.freeStack(sizeof(double));
+ break;
+
+ default:
+ MOZ_ASSERT(f.outParam == Type_Void);
+ break;
+ }
+
+ // Pop ExitFooterFrame and the frame pointer.
+ masm.leaveExitFrame(sizeof(void*));
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ Register temp1 = a0;
+ Register temp2 = a2;
+ Register temp3 = a3;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ save.add(ra);
+ masm.PushRegsInMask(save);
+
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ save.take(AnyRegister(ra));
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.abiret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(a1, a2);
+}