summaryrefslogtreecommitdiffstats
path: root/js/src/jit/mips32
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
commit2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
treeb80bf8bf13c3766139fbacc530efd0dd9d54394c /js/src/jit/mips32
parentInitial commit. (diff)
downloadfirefox-upstream.tar.xz
firefox-upstream.zip
Adding upstream version 86.0.1.upstream/86.0.1upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--js/src/jit/mips32/Architecture-mips32.cpp94
-rw-r--r--js/src/jit/mips32/Architecture-mips32.h286
-rw-r--r--js/src/jit/mips32/Assembler-mips32.cpp369
-rw-r--r--js/src/jit/mips32/Assembler-mips32.h261
-rw-r--r--js/src/jit/mips32/Bailouts-mips32.cpp57
-rw-r--r--js/src/jit/mips32/Bailouts-mips32.h71
-rw-r--r--js/src/jit/mips32/CodeGenerator-mips32.cpp508
-rw-r--r--js/src/jit/mips32/CodeGenerator-mips32.h60
-rw-r--r--js/src/jit/mips32/LIR-mips32.h197
-rw-r--r--js/src/jit/mips32/Lowering-mips32.cpp257
-rw-r--r--js/src/jit/mips32/Lowering-mips32.h54
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32-inl.h960
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.cpp2787
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.h802
-rw-r--r--js/src/jit/mips32/MoveEmitter-mips32.cpp152
-rw-r--r--js/src/jit/mips32/MoveEmitter-mips32.h31
-rw-r--r--js/src/jit/mips32/SharedICRegisters-mips32.h48
-rw-r--r--js/src/jit/mips32/Simulator-mips32.cpp3631
-rw-r--r--js/src/jit/mips32/Simulator-mips32.h526
-rw-r--r--js/src/jit/mips32/Trampoline-mips32.cpp1345
20 files changed, 12496 insertions, 0 deletions
diff --git a/js/src/jit/mips32/Architecture-mips32.cpp b/js/src/jit/mips32/Architecture-mips32.cpp
new file mode 100644
index 0000000000..598551eafe
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.cpp
@@ -0,0 +1,94 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Architecture-mips32.h"
+
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// Printable names of the 32 MIPS32 general-purpose registers, indexed by
+// register encoding.
+const char* const Registers::RegNames[] = {
+    "zero", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", "t1", "t2",
+    "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5",
+    "s6", "s7", "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"};
+
+const uint32_t Allocatable = 14;
+
+// Integer argument registers: same set as the shared MIPS definition.
+const Registers::SetType Registers::ArgRegMask = Registers::SharedArgRegMask;
+
+// a2/a3 carry the two halves of a nunboxed JS Value
+// (see JSReturnReg_Data / JSReturnReg_Type in Assembler-mips32.h).
+const Registers::SetType Registers::JSCallMask =
+    (1 << Registers::a2) | (1 << Registers::a3);
+
+const Registers::SetType Registers::CallMask =
+    (1 << Registers::v0) |
+    (1 << Registers::v1);  // used for double-size returns
+
+// Map a register name string to its Encoding by linear search over all
+// register ids; returns Invalid when no name matches.
+FloatRegisters::Encoding FloatRegisters::FromName(const char* name) {
+  for (size_t i = 0; i < RegisterIdLimit; i++) {
+    if (strcmp(GetName(i), name) == 0) {
+      return Encoding(i);
+    }
+  }
+
+  return Invalid;
+}
+
+// Return the double-precision register aliasing this register (this register
+// itself if it is already a double). Only valid for even-numbered registers.
+FloatRegister FloatRegister::doubleOverlay() const {
+  MOZ_ASSERT(isNotOdd());
+  if (isSingle()) {
+    return FloatRegister(code_, Double);
+  }
+  return *this;
+}
+
+// Return the single-precision register aliasing this register (this register
+// itself if it is already a single). Only valid for even-numbered registers.
+FloatRegister FloatRegister::singleOverlay() const {
+  MOZ_ASSERT(isNotOdd());
+  if (isDouble()) {
+    return FloatRegister(code_, Single);
+  }
+  return *this;
+}
+
+// Compute the set of registers that must actually be pushed for |s|: each
+// register is widened to its double overlay, so singles are saved as part of
+// the full double register that contains them.
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+#  error "Needs more careful logic if SIMD is enabled"
+#endif
+
+  LiveFloatRegisterSet mod;
+  for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+    // Even for single size registers save complete double register.
+    mod.addUnchecked((*iter).doubleOverlay());
+  }
+  return mod.set();
+}
+
+// Number of bytes MacroAssembler::PushRegsInMask uses to push |s|, including
+// the extra double-sized slot it may need for alignment.
+uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+#  error "Needs more careful logic if SIMD is enabled"
+#endif
+
+  FloatRegisterSet ss = s.reduceSetForPush();
+  uint64_t bits = ss.bits();
+  // We are only pushing double registers.
+  MOZ_ASSERT((bits & 0xFFFF) == 0);
+  uint32_t ret = mozilla::CountPopulation32(bits) * sizeof(double);
+
+  // Additional space needed by MacroAssembler::PushRegsInMask to ensure
+  // correct alignment of double values.
+  if (ret) {
+    ret += sizeof(double);
+  }
+
+  return ret;
+}
+// Byte offset of this register within a register dump; registers are laid
+// out at float-size granularity by id().
+uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
+  MOZ_ASSERT(isNotOdd());
+  return id() * sizeof(float);
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips32/Architecture-mips32.h b/js/src/jit/mips32/Architecture-mips32.h
new file mode 100644
index 0000000000..b8b3869adc
--- /dev/null
+++ b/js/src/jit/mips32/Architecture-mips32.h
@@ -0,0 +1,286 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Architecture_mips32_h
+#define jit_mips32_Architecture_mips32_h
+
+#include "mozilla/EndianUtils.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+static const uint32_t ShadowStackSpace = 4 * sizeof(uintptr_t);
+
+// These offsets are specific to nunboxing, and capture offsets into the
+// components of a js::Value.
+// Size of MIPS32 general purpose registers is 32 bits.
+#if MOZ_LITTLE_ENDIAN()
+static const int32_t NUNBOX32_TYPE_OFFSET = 4;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 0;
+#else
+static const int32_t NUNBOX32_TYPE_OFFSET = 0;
+static const int32_t NUNBOX32_PAYLOAD_OFFSET = 4;
+#endif
+
+// Size of each bailout table entry.
+// For MIPS this is a 2-instruction relative call.
+static const uint32_t BAILOUT_TABLE_ENTRY_SIZE = 2 * sizeof(void*);
+
+// MIPS32 can have two types of floating-point coprocessor modes:
+// - FR=0 mode/ 32-bit FPRs - Historical default, there are 32 single
+//   precision registers and pairs of even and odd float registers are used as
+//   double precision registers. Example: f0 (double) is composed of
+//   f0 and f1 (single). Loongson3A FPU running in this mode doesn't allow
+//   use of odd registers for single precision arithmetic.
+// - FR=1 mode/ 64-bit FPRs - In this case, there are 32 double precision
+//   registers which can also be used as single precision registers. More info
+//   https://dmz-portal.imgtec.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking
+
+// Currently we enable 16 even single precision registers which can also be
+// used as double precision registers. This enables jit code to run even on
+// Loongson3A. FR=1 mode is not supported because the MacroAssembler treats
+// odd single precision registers as the high parts of even double precision
+// registers.
+#ifdef __mips_fpr
+static_assert(__mips_fpr == 32, "MIPS32 jit only supports FR=0 fpu mode.");
+#endif
+
+// Float register counts and register-set masks for the FR=0 layout: the low
+// 16 bits of a SetType describe single registers, the high 16 bits describe
+// the overlapping double registers.
+class FloatRegisters : public FloatRegistersMIPSShared {
+ public:
+  static const char* GetName(uint32_t i) {
+    MOZ_ASSERT(i < RegisterIdLimit);
+    return FloatRegistersMIPSShared::GetName(Encoding(i % 32));
+  }
+
+  static Encoding FromName(const char* name);
+
+  static const uint32_t Total = 32;
+  static const uint32_t TotalDouble = 16;
+  static const uint32_t TotalSingle = 16;
+
+  static const uint32_t Allocatable = 30;
+  static const SetType AllSingleMask = (1ULL << TotalSingle) - 1;
+
+  static const SetType AllDoubleMask = ((1ULL << TotalDouble) - 1)
+                                       << TotalSingle;
+  static const SetType AllMask = AllDoubleMask | AllSingleMask;
+
+  // When saving all registers, all we need to do is save the double registers.
+  static const uint32_t TotalPhys = 16;
+  static const uint32_t RegisterIdLimit = 32;
+
+  static_assert(sizeof(SetType) * 8 >= Total,
+                "SetType should be large enough to enumerate all registers.");
+
+  // Multiplying by ((1 << TotalSingle) + 1) duplicates each physical-register
+  // bit into both the single (low) and double (high) halves of the set.
+  static const SetType NonVolatileMask =
+      ((SetType(1) << (FloatRegisters::f20 >> 1)) |
+       (SetType(1) << (FloatRegisters::f22 >> 1)) |
+       (SetType(1) << (FloatRegisters::f24 >> 1)) |
+       (SetType(1) << (FloatRegisters::f26 >> 1)) |
+       (SetType(1) << (FloatRegisters::f28 >> 1)) |
+       (SetType(1) << (FloatRegisters::f30 >> 1))) *
+      ((1 << TotalSingle) + 1);
+
+  static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+  static const SetType WrapperMask = VolatileMask;
+
+  // f18 is reserved as the scratch register (see ScratchDoubleReg).
+  static const SetType NonAllocatableMask =
+      (SetType(1) << (FloatRegisters::f18 >> 1)) * ((1 << TotalSingle) + 1);
+
+  static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
+
+// A MIPS32 float register: an even hardware encoding (code_) plus a kind
+// (Single or Double) selecting which overlay of the physical register is
+// meant. Odd encodings are only ever the high halves of doubles.
+class FloatRegister : public FloatRegisterMIPSShared {
+ public:
+  enum RegType {
+    Single = 0x0,
+    Double = 0x1,
+  };
+
+  typedef FloatRegisters Codes;
+  typedef Codes::Code Code;
+  typedef Codes::Encoding Encoding;
+
+  Encoding code_ : 6;
+
+ protected:
+  RegType kind_ : 1;
+
+ public:
+  constexpr FloatRegister(uint32_t code, RegType kind = Double)
+      : code_(Encoding(code)), kind_(kind) {}
+  constexpr FloatRegister()
+      : code_(FloatRegisters::invalid_freg), kind_(Double) {}
+
+  bool operator==(const FloatRegister& other) const {
+    MOZ_ASSERT(!isInvalid());
+    MOZ_ASSERT(!other.isInvalid());
+    return kind_ == other.kind_ && code_ == other.code_;
+  }
+  bool equiv(const FloatRegister& other) const { return other.kind_ == kind_; }
+  size_t size() const { return (kind_ == Double) ? 8 : 4; }
+  size_t pushSize() const { return size(); }
+
+  bool isNotOdd() const { return !isInvalid() && ((code_ & 1) == 0); }
+
+  bool isSingle() const { return kind_ == Single; }
+  bool isDouble() const { return kind_ == Double; }
+  bool isInvalid() const { return code_ == FloatRegisters::invalid_freg; }
+  bool isSimd128() const { return false; }
+
+  FloatRegister doubleOverlay() const;
+  FloatRegister singleOverlay() const;
+
+  FloatRegister asSingle() const { return singleOverlay(); }
+  FloatRegister asDouble() const { return doubleOverlay(); }
+  FloatRegister asSimd128() const { MOZ_CRASH("NYI"); }
+
+  // Code layout: bit 4 is the kind (Single/Double); bits 0-3 are the
+  // physical double-register index (code_ >> 1).
+  Code code() const {
+    MOZ_ASSERT(isNotOdd());
+    return Code((code_ >> 1) | (kind_ << 4));
+  }
+  Encoding encoding() const {
+    MOZ_ASSERT(!isInvalid());
+    return code_;
+  }
+  uint32_t id() const {
+    MOZ_ASSERT(!isInvalid());
+    return code_;
+  }
+  // Inverse of code(): unpack kind and physical index.
+  static FloatRegister FromCode(uint32_t i) {
+    uint32_t code = i & 15;
+    uint32_t kind = i >> 4;
+    return FloatRegister(Encoding(code << 1), RegType(kind));
+  }
+
+  static FloatRegister FromIndex(uint32_t index, RegType kind) {
+    MOZ_ASSERT(index < 16);
+    return FloatRegister(Encoding(index << 1), kind);
+  }
+
+  bool volatile_() const {
+    return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+  }
+  const char* name() const { return FloatRegisters::GetName(code_); }
+  bool operator!=(const FloatRegister& other) const {
+    return other.kind_ != kind_ || code_ != other.code_;
+  }
+  // Two registers alias iff they share the same physical encoding; kind is
+  // deliberately ignored here.
+  bool aliases(const FloatRegister& other) {
+    MOZ_ASSERT(isNotOdd());
+    return code_ == other.code_;
+  }
+  uint32_t numAliased() const {
+    MOZ_ASSERT(isNotOdd());
+    return 2;
+  }
+  // aliasIdx 0 is the register itself; 1 is its other-kind overlay.
+  FloatRegister aliased(uint32_t aliasIdx) {
+    MOZ_ASSERT(isNotOdd());
+
+    if (aliasIdx == 0) {
+      return *this;
+    }
+    MOZ_ASSERT(aliasIdx == 1);
+    if (isDouble()) {
+      return singleOverlay();
+    }
+    return doubleOverlay();
+  }
+  uint32_t numAlignedAliased() const {
+    MOZ_ASSERT(isNotOdd());
+    return 2;
+  }
+  FloatRegister alignedAliased(uint32_t aliasIdx) {
+    MOZ_ASSERT(isNotOdd());
+
+    if (aliasIdx == 0) {
+      return *this;
+    }
+    MOZ_ASSERT(aliasIdx == 1);
+    if (isDouble()) {
+      return singleOverlay();
+    }
+    return doubleOverlay();
+  }
+
+  // Set both the single and double bits for this physical register.
+  SetType alignedOrDominatedAliasedSet() const {
+    MOZ_ASSERT(isNotOdd());
+    return (SetType(1) << (code_ >> 1)) *
+           ((1 << FloatRegisters::TotalSingle) + 1);
+  }
+
+  static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+  template <RegTypeName = DefaultType>
+  static SetType LiveAsIndexableSet(SetType s) {
+    return SetType(0);
+  }
+
+  template <RegTypeName Name = DefaultType>
+  static SetType AllocatableAsIndexableSet(SetType s) {
+    static_assert(Name != RegTypeName::Any, "Allocatable set are not iterable");
+    return LiveAsIndexableSet<Name>(s);
+  }
+
+  static Code FromName(const char* name) {
+    return FloatRegisters::FromName(name);
+  }
+  static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+      const TypedRegisterSet<FloatRegister>& s);
+  static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+  uint32_t getRegisterDumpOffsetInBytes();
+};
+
+// Specializations selecting the single/double half of a register set.
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+  return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+  return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+  return set;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::AllocatableAsIndexableSet<RegTypeName::Float32>(SetType set) {
+  // Single registers do not dominate any smaller registers, so masking is
+  // enough to convert an allocatable set into the set of available single
+  // registers.
+  return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::AllocatableAsIndexableSet<RegTypeName::Float64>(SetType set) {
+  return set & FloatRegisters::AllDoubleMask;
+}
+
+// In order to handle functions such as int(*)(int, double) where the first
+// argument is a general purpose register, and the second argument is a floating
+// point register, we have to store the double content into 2 general purpose
+// registers, namely a2 and a3.
+#define JS_CODEGEN_REGISTER_PAIR 1
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Architecture_mips32_h */
diff --git a/js/src/jit/mips32/Assembler-mips32.cpp b/js/src/jit/mips32/Assembler-mips32.cpp
new file mode 100644
index 0000000000..8073b8e4ec
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.cpp
@@ -0,0 +1,369 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Assembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "jit/AutoWritableJitCode.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+// Start a fresh argument sequence: no slots used, no float argument seen yet,
+// and floats assigned to FPU registers (not GPRs) by default.
+ABIArgGenerator::ABIArgGenerator()
+    : usedArgSlots_(0),
+      firstArgFloatSize_(0),
+      useGPRForFloats_(false),
+      current_() {}
+
+// Assign the next ABI location for an argument of |type|. Integer-sized args
+// use a0-a3 then 4-byte stack slots; the first two float args may use
+// f12/f14; Int64 and Double consume 8-byte-aligned pairs of slots. When
+// enforceO32ABI() was called, floats after the leading ones fall back to
+// GPRs/GPR pairs.
+ABIArg ABIArgGenerator::next(MIRType type) {
+  Register destReg;
+  switch (type) {
+    case MIRType::Int32:
+    case MIRType::Pointer:
+    case MIRType::RefOrNull:
+    case MIRType::StackResults:
+      if (GetIntArgReg(usedArgSlots_, &destReg)) {
+        current_ = ABIArg(destReg);
+      } else {
+        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+      }
+      usedArgSlots_++;
+      break;
+    case MIRType::Int64:
+      if (!usedArgSlots_) {
+        current_ = ABIArg(a0, a1);
+        usedArgSlots_ = 2;
+      } else if (usedArgSlots_ <= 2) {
+        current_ = ABIArg(a2, a3);
+        usedArgSlots_ = 4;
+      } else {
+        if (usedArgSlots_ < NumIntArgRegs) {
+          usedArgSlots_ = NumIntArgRegs;
+        }
+        // Round up to an even slot so the pair is 8-byte aligned.
+        usedArgSlots_ += usedArgSlots_ % 2;
+        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+        usedArgSlots_ += 2;
+      }
+      break;
+    case MIRType::Float32:
+      if (!usedArgSlots_) {
+        current_ = ABIArg(f12.asSingle());
+        firstArgFloatSize_ = 1;
+      } else if (usedArgSlots_ == firstArgFloatSize_) {
+        current_ = ABIArg(f14.asSingle());
+      } else if (useGPRForFloats_ && GetIntArgReg(usedArgSlots_, &destReg)) {
+        current_ = ABIArg(destReg);
+      } else {
+        if (usedArgSlots_ < NumIntArgRegs) {
+          usedArgSlots_ = NumIntArgRegs;
+        }
+        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+      }
+      usedArgSlots_++;
+      break;
+    case MIRType::Double:
+      if (!usedArgSlots_) {
+        current_ = ABIArg(f12);
+        usedArgSlots_ = 2;
+        firstArgFloatSize_ = 2;
+      } else if (usedArgSlots_ == firstArgFloatSize_) {
+        current_ = ABIArg(f14);
+        usedArgSlots_ = 4;
+      } else if (useGPRForFloats_ && usedArgSlots_ <= 2) {
+        current_ = ABIArg(a2, a3);
+        usedArgSlots_ = 4;
+      } else {
+        if (usedArgSlots_ < NumIntArgRegs) {
+          usedArgSlots_ = NumIntArgRegs;
+        }
+        // Round up to an even slot so the pair is 8-byte aligned.
+        usedArgSlots_ += usedArgSlots_ % 2;
+        current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
+        usedArgSlots_ += 2;
+      }
+      break;
+    default:
+      MOZ_CRASH("Unexpected argument type");
+  }
+  return current_;
+}
+
+// Encode a float register id into the rt/rd/rz/sa instruction fields by
+// shifting it into position.
+uint32_t js::jit::RT(FloatRegister r) {
+  MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+  return r.id() << RTShift;
+}
+
+uint32_t js::jit::RD(FloatRegister r) {
+  MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+  return r.id() << RDShift;
+}
+
+uint32_t js::jit::RZ(FloatRegister r) {
+  MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+  return r.id() << RZShift;
+}
+
+uint32_t js::jit::SA(FloatRegister r) {
+  MOZ_ASSERT(r.id() < FloatRegisters::RegisterIdLimit);
+  return r.id() << SAShift;
+}
+
+// Copy the finished assembly buffer into |buffer|.
+void Assembler::executableCopy(uint8_t* buffer) {
+  MOZ_ASSERT(isFinished);
+  m_buffer.executableCopy(buffer);
+}
+
+// Read the 32-bit pointer materialized by a lui/ori pair at |instPtr|.
+uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
+  Instruction* inst = (Instruction*)instPtr;
+  return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+// Recover the JitCode referenced by a patched jump (lui/ori target).
+static JitCode* CodeFromJump(Instruction* jump) {
+  uint8_t* target = (uint8_t*)Assembler::ExtractLuiOriValue(jump, jump->next());
+  return JitCode::FromExecutable(target);
+}
+
+// Trace every JitCode referenced by jump relocations recorded in |reader|.
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+                                     CompactBufferReader& reader) {
+  while (reader.more()) {
+    JitCode* child =
+        CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+    TraceManuallyBarrieredEdge(trc, &child, "rel32");
+  }
+}
+
+// Trace the GC pointer baked into one lui/ori pair; if the GC moved the
+// referent, rewrite the pair (making the code writable on first write).
+static void TraceOneDataRelocation(JSTracer* trc,
+                                   mozilla::Maybe<AutoWritableJitCode>& awjc,
+                                   JitCode* code, Instruction* inst) {
+  void* ptr = (void*)Assembler::ExtractLuiOriValue(inst, inst->next());
+  void* prior = ptr;
+
+  // No barrier needed since these are constants.
+  TraceManuallyBarrieredGenericPointerEdge(
+      trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+  if (ptr != prior) {
+    if (awjc.isNothing()) {
+      awjc.emplace(code);
+    }
+    AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(), uint32_t(ptr));
+  }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+                                     CompactBufferReader& reader) {
+  mozilla::Maybe<AutoWritableJitCode> awjc;
+  while (reader.more()) {
+    size_t offset = reader.readUnsigned();
+    Instruction* inst = (Instruction*)(code->raw() + offset);
+    TraceOneDataRelocation(trc, awjc, code, inst);
+  }
+}
+
+// Map a (possibly signed) comparison condition to its unsigned counterpart.
+Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
+  switch (cond) {
+    case Zero:
+    case NonZero:
+      return cond;
+    case LessThan:
+    case Below:
+      return Below;
+    case LessThanOrEqual:
+    case BelowOrEqual:
+      return BelowOrEqual;
+    case GreaterThan:
+    case Above:
+      return Above;
+    case AboveOrEqual:
+    case GreaterThanOrEqual:
+      return AboveOrEqual;
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// Map an ordered comparison condition to its strict (no-equality) form.
+Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
+  switch (cond) {
+    case LessThan:
+    case LessThanOrEqual:
+      return LessThan;
+    case Below:
+    case BelowOrEqual:
+      return Below;
+    case GreaterThan:
+    case GreaterThanOrEqual:
+      return GreaterThan;
+    case Above:
+    case AboveOrEqual:
+      return Above;
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// Resolve a CodeLabel in already-copied code: either store a raw pointer at
+// the patch site, or rewrite a lui/ori pair with the target address.
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+  if (label.patchAt().bound()) {
+    auto mode = label.linkMode();
+    intptr_t offset = label.patchAt().offset();
+    intptr_t target = label.target().offset();
+
+    if (mode == CodeLabel::RawPointer) {
+      *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+    } else {
+      MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+                 mode == CodeLabel::JumpImmediate);
+      Instruction* inst = (Instruction*)(rawCode + offset);
+      AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
+                                             (uint32_t)(rawCode + target));
+    }
+  }
+}
+
+// Patch the reserved branch sequence at |branch| so it reaches |target|,
+// choosing between a short PC-relative branch and a long lui/ori + jr/jalr
+// sequence depending on whether the offset fits in a BOffImm16 and on the
+// kind of branch that was emitted.
+void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
+  int32_t offset = target - branch;
+  InstImm inst_bgezal = InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
+  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+  // If encoded offset is 4, then the jump must be short
+  if (BOffImm16(inst[0]).decode() == 4) {
+    MOZ_ASSERT(BOffImm16::IsInRange(offset));
+    inst[0].setBOffImm16(BOffImm16(offset));
+    inst[1].makeNop();
+    return;
+  }
+
+  // Generate the long jump for calls because return address has to be the
+  // address after the reserved block.
+  if (inst[0].encode() == inst_bgezal.encode()) {
+    addLongJump(BufferOffset(branch), BufferOffset(target));
+    Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
+                                       LabelBase::INVALID_OFFSET);
+    inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
+    // There is 1 nop after this.
+    return;
+  }
+
+  if (BOffImm16::IsInRange(offset)) {
+    // The offset fits a short branch; conditional branches additionally get
+    // a bgez to hop over the now-unused long-jump slots.
+    bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
+                        inst[0].encode() != inst_beq.encode());
+
+    inst[0].setBOffImm16(BOffImm16(offset));
+    inst[1].makeNop();
+
+    // Skip the trailing nops in conditional branches.
+    if (conditional) {
+      inst[2] = InstImm(op_regimm, zero, rt_bgez, BOffImm16(3 * sizeof(void*)))
+                    .encode();
+      // There are 2 nops after this
+    }
+    return;
+  }
+
+  if (inst[0].encode() == inst_beq.encode()) {
+    // Handle long unconditional jump.
+    addLongJump(BufferOffset(branch), BufferOffset(target));
+    Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
+                                       LabelBase::INVALID_OFFSET);
+    inst[2] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+    // There is 1 nop after this.
+  } else {
+    // Handle long conditional jump: invert the condition to branch around
+    // the 4-instruction long-jump sequence.
+    inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(void*)));
+    // No need for a "nop" here because we can clobber scratch.
+    addLongJump(BufferOffset(branch + sizeof(void*)), BufferOffset(target));
+    Assembler::WriteLuiOriInstructions(&inst[1], &inst[2], ScratchRegister,
+                                       LabelBase::INVALID_OFFSET);
+    inst[3] = InstReg(op_special, ScratchRegister, zero, zero, ff_jr).encode();
+    // There is 1 nop after this.
+  }
+}
+
+// Resolve all recorded CodeLabels against the copied-out code.
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+  for (const CodeLabel& label : codeLabels_) {
+    Bind(rawCode, label);
+  }
+}
+
+// A near call is always 4 instructions: lui, ori, jalr, delay-slot nop.
+uint32_t Assembler::PatchWrite_NearCallSize() { return 4 * sizeof(uint32_t); }
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+                                    CodeLocationLabel toCall) {
+  Instruction* inst = (Instruction*)start.raw();
+  uint8_t* dest = toCall.raw();
+
+  // Overwrite whatever instruction used to be here with a call.
+  // Always use long jump for two reasons:
+  // - Jump has to be the same size because of PatchWrite_NearCallSize.
+  // - Return address has to be at the end of replaced block.
+  // Short jump wouldn't be more efficient.
+  Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
+                                     (uint32_t)dest);
+  inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+  inst[3] = InstNOP();
+}
+
+// Reassemble the 32-bit constant held by a lui (high 16) / ori (low 16) pair.
+uint32_t Assembler::ExtractLuiOriValue(Instruction* inst0, Instruction* inst1) {
+  InstImm* i0 = (InstImm*)inst0;
+  InstImm* i1 = (InstImm*)inst1;
+  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+  uint32_t value = i0->extractImm16Value() << 16;
+  value = value | i1->extractImm16Value();
+  return value;
+}
+
+// Emit the lui/ori pair that materializes |value| into |reg|.
+void Assembler::WriteLuiOriInstructions(Instruction* inst0, Instruction* inst1,
+                                        Register reg, uint32_t value) {
+  *inst0 = InstImm(op_lui, zero, reg, Imm16::Upper(Imm32(value)));
+  *inst1 = InstImm(op_ori, reg, reg, Imm16::Lower(Imm32(value)));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+                                        ImmPtr newValue, ImmPtr expectedValue) {
+  PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+                          PatchedImmPtr(expectedValue.value));
+}
+
+// Replace the immediate at |label| with |newValue|, asserting (debug-only)
+// that the lui/ori pair currently holds |expectedValue|.
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+                                        PatchedImmPtr newValue,
+                                        PatchedImmPtr expectedValue) {
+  Instruction* inst = (Instruction*)label.raw();
+
+  // Extract old Value
+  DebugOnly<uint32_t> value = Assembler::ExtractLuiOriValue(&inst[0], &inst[1]);
+  MOZ_ASSERT(value == uint32_t(expectedValue.value));
+
+  // Replace with new value
+  AssemblerMIPSShared::UpdateLuiOriValue(inst, inst->next(),
+                                         uint32_t(newValue.value));
+}
+
+uint32_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
+  InstImm* inst = (InstImm*)code;
+  return Assembler::ExtractLuiOriValue(inst, inst->next());
+}
+
+// Toggle the third instruction of a lui/ori/{jalr|nop} sequence between a
+// call (jalr) and a nop.
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+  Instruction* inst = (Instruction*)inst_.raw();
+  InstImm* i0 = (InstImm*)inst;
+  InstImm* i1 = (InstImm*)i0->next();
+  Instruction* i2 = (Instruction*)i1->next();
+
+  MOZ_ASSERT(i0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+  MOZ_ASSERT(i1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+  if (enabled) {
+    InstReg jalr = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
+    *i2 = jalr;
+  } else {
+    InstNOP nop;
+    *i2 = nop;
+  }
+}
diff --git a/js/src/jit/mips32/Assembler-mips32.h b/js/src/jit/mips32/Assembler-mips32.h
new file mode 100644
index 0000000000..e8523547e2
--- /dev/null
+++ b/js/src/jit/mips32/Assembler-mips32.h
@@ -0,0 +1,261 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Assembler_mips32_h
+#define jit_mips32_Assembler_mips32_h
+
+#include <iterator>
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "jit/mips32/Architecture-mips32.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register CallTempReg4 = t4;
+static constexpr Register CallTempReg5 = t5;
+
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3, t4};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+// Assigns ABI locations (registers or stack offsets) to successive call
+// arguments; see the definition of next() in Assembler-mips32.cpp.
+class ABIArgGenerator {
+  unsigned usedArgSlots_;
+  unsigned firstArgFloatSize_;
+  // Note: This is not compliant with the system ABI. The Lowering phase
+  // expects to lower an MWasmParameter to only one register.
+  bool useGPRForFloats_;
+  ABIArg current_;
+
+ public:
+  ABIArgGenerator();
+  ABIArg next(MIRType argType);
+  ABIArg& current() { return current_; }
+
+  void enforceO32ABI() { useGPRForFloats_ = true; }
+
+  uint32_t stackBytesConsumedSoFar() const {
+    // The first four 4-byte slots are covered by the shadow stack space.
+    if (usedArgSlots_ <= 4) {
+      return ShadowStackSpace;
+    }
+
+    return usedArgSlots_ * sizeof(intptr_t);
+  }
+
+  void increaseStackOffset(uint32_t bytes) { MOZ_CRASH("NYI"); }
+};
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReg3 = t3;
+
+// This register may be volatile or nonvolatile. Avoid f18, which is the
+// ScratchDoubleReg.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::f16,
+                                                  FloatRegister::Double};
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+static constexpr Register ABINonVolatileReg = s0;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call which must preserve both ABI argument, return
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = t0;
+
+// TLS pointer argument register for WebAssembly functions. This must not alias
+// any other register used for passing function arguments or return values.
+// Preserved by WebAssembly functions.
+static constexpr Register WasmTlsReg = s5;
+
+// Registers used for asm.js/wasm table calls. These registers must be disjoint
+// from the ABI argument registers, WasmTlsReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or WasmTlsReg. It
+// must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = t1;
+
+static constexpr Register InterpreterPCReg = t5;
+
+// Nunbox JS return Value: type tag in a3, payload in a2.
+static constexpr Register JSReturnReg_Type = a3;
+static constexpr Register JSReturnReg_Data = a2;
+static constexpr Register64 ReturnReg64(v1, v0);
+static constexpr FloatRegister ReturnFloat32Reg = {FloatRegisters::f0,
+                                                   FloatRegister::Single};
+static constexpr FloatRegister ReturnDoubleReg = {FloatRegisters::f0,
+                                                  FloatRegister::Double};
+static constexpr FloatRegister ScratchFloat32Reg = {FloatRegisters::f18,
+                                                    FloatRegister::Single};
+static constexpr FloatRegister ScratchDoubleReg = {FloatRegisters::f18,
+                                                   FloatRegister::Double};
+
+// RAII scopes claiming exclusive use of the float scratch register (f18)
+// in its single- or double-precision overlay.
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+  explicit ScratchFloat32Scope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+  explicit ScratchDoubleScope(MacroAssembler& masm)
+      : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+// Double-precision aliases for the even-numbered FPU registers f0..f30
+// (odd encodings are the high halves of these doubles under FR=0).
+static constexpr FloatRegister f0 = {FloatRegisters::f0, FloatRegister::Double};
+static constexpr FloatRegister f2 = {FloatRegisters::f2, FloatRegister::Double};
+static constexpr FloatRegister f4 = {FloatRegisters::f4, FloatRegister::Double};
+static constexpr FloatRegister f6 = {FloatRegisters::f6, FloatRegister::Double};
+static constexpr FloatRegister f8 = {FloatRegisters::f8, FloatRegister::Double};
+static constexpr FloatRegister f10 = {FloatRegisters::f10,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f12 = {FloatRegisters::f12,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f14 = {FloatRegisters::f14,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f16 = {FloatRegisters::f16,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f18 = {FloatRegisters::f18,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f20 = {FloatRegisters::f20,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f22 = {FloatRegisters::f22,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f24 = {FloatRegisters::f24,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f26 = {FloatRegisters::f26,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f28 = {FloatRegisters::f28,
+                                      FloatRegister::Double};
+static constexpr FloatRegister f30 = {FloatRegisters::f30,
+                                      FloatRegister::Double};
+
+// MIPS CPUs can only load multibyte data that is "naturally"
+// four-byte-aligned; the sp register should be eight-byte-aligned.
+static constexpr uint32_t ABIStackAlignment = 8;
+static constexpr uint32_t JitStackAlignment = 8;
+
+static constexpr uint32_t JitStackValueAlignment =
+    JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+                  JitStackValueAlignment >= 1,
+              "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO this is just a filler to prevent a build failure. The MIPS SIMD
+// alignment requirements still need to be explored.
+// TODO Copy the static_asserts from x64/x86 assembler files.
+static constexpr uint32_t SimdMemoryAlignment = 8;
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// The offsets are dynamically asserted during
+// code generation in the prologue/epilogue.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+static constexpr uint32_t WasmCheckedTailEntryOffset = 16u;
+
+static constexpr Scale ScalePointer = TimesFour;
+
+// MIPS32-specific assembler: adds lui/ori immediate patching, CodeLabel
+// binding and relocation tracing on top of the shared MIPS assembler.
+class Assembler : public AssemblerMIPSShared {
+ public:
+  Assembler() : AssemblerMIPSShared() {}
+
+  static Condition UnsignedCondition(Condition cond);
+  static Condition ConditionWithoutEqual(Condition cond);
+
+  static uintptr_t GetPointer(uint8_t*);
+
+ protected:
+  // This is used to access the odd register from the pair of single
+  // precision registers that make up one double register.
+  FloatRegister getOddPair(FloatRegister reg) {
+    MOZ_ASSERT(reg.isDouble());
+    MOZ_ASSERT(reg.id() % 2 == 0);
+    FloatRegister odd(reg.id() | 1, FloatRegister::Single);
+    return odd;
+  }
+
+ public:
+  using AssemblerMIPSShared::bind;
+
+  static void Bind(uint8_t* rawCode, const CodeLabel& label);
+
+  void processCodeLabels(uint8_t* rawCode);
+
+  static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+                                   CompactBufferReader& reader);
+  static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+                                   CompactBufferReader& reader);
+
+  void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+  // Copy the assembly code to the given buffer, and perform any pending
+  // relocations relying on the target address.
+  void executableCopy(uint8_t* buffer);
+
+  static uint32_t PatchWrite_NearCallSize();
+
+  static uint32_t ExtractLuiOriValue(Instruction* inst0, Instruction* inst1);
+  static void WriteLuiOriInstructions(Instruction* inst, Instruction* inst1,
+                                      Register reg, uint32_t value);
+
+  static void PatchWrite_NearCall(CodeLocationLabel start,
+                                  CodeLocationLabel toCall);
+  static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+                                      ImmPtr expectedValue);
+  static void PatchDataWithValueCheck(CodeLocationLabel label,
+                                      PatchedImmPtr newValue,
+                                      PatchedImmPtr expectedValue);
+
+  static uint32_t ExtractInstructionImmediate(uint8_t* code);
+
+  static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+};  // Assembler
+
+static const uint32_t NumIntArgRegs = 4;
+
+// Return the integer argument register (a0..a3) for argument slot
+// |usedArgSlots|, or false when all four are already taken.
+static inline bool GetIntArgReg(uint32_t usedArgSlots, Register* out) {
+  if (usedArgSlots < NumIntArgRegs) {
+    *out = Register::FromCode(a0.code() + usedArgSlots);
+    return true;
+  }
+  return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+                                       uint32_t usedFloatArgs, Register* out) {
+  // NOTE: We can't properly determine which regs are used if there are
+  // float arguments. If this is needed, we will have to guess.
+  MOZ_ASSERT(usedFloatArgs == 0);
+
+  if (GetIntArgReg(usedIntArgs, out)) {
+    return true;
+  }
+  // Unfortunately, we have to assume things about the point at which
+  // GetIntArgReg returns false, because we need to know how many registers it
+  // can allocate.
+  usedIntArgs -= NumIntArgRegs;
+  if (usedIntArgs >= NumCallTempNonArgRegs) {
+    return false;
+  }
+  *out = CallTempNonArgRegs[usedIntArgs];
+  return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Assembler_mips32_h */
diff --git a/js/src/jit/mips32/Bailouts-mips32.cpp b/js/src/jit/mips32/Bailouts-mips32.cpp
new file mode 100644
index 0000000000..ac0fd14bd1
--- /dev/null
+++ b/js/src/jit/mips32/Bailouts-mips32.cpp
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Bailouts-mips32.h"
+
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/ScriptFromCalleeToken.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
+ BailoutStack* bailout)
+ : machine_(bailout->machine()) {
+ uint8_t* sp = bailout->parentStackPointer();
+ framePointer_ = sp + bailout->frameSize();
+ topFrameSize_ = framePointer_ - sp;
+
+ JSScript* script =
+ ScriptFromCalleeToken(((JitFrameLayout*)framePointer_)->calleeToken());
+ JitActivation* activation = activations.activation()->asJit();
+ topIonScript_ = script->ionScript();
+
+ attachOnJitActivation(activations);
+
+ if (bailout->frameClass() == FrameSizeClass::None()) {
+ snapshotOffset_ = bailout->snapshotOffset();
+ return;
+ }
+
+ // Compute the snapshot offset from the bailout ID.
+ JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
+ TrampolinePtr code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
+#ifdef DEBUG
+ uint32_t tableSize =
+ rt->jitRuntime()->getBailoutTableSize(bailout->frameClass());
+#endif
+ uintptr_t tableOffset = bailout->tableOffset();
+ uintptr_t tableStart = reinterpret_cast<uintptr_t>(code.value);
+
+ MOZ_ASSERT(tableOffset >= tableStart && tableOffset < tableStart + tableSize);
+ MOZ_ASSERT((tableOffset - tableStart) % BAILOUT_TABLE_ENTRY_SIZE == 0);
+
+ uint32_t bailoutId =
+ ((tableOffset - tableStart) / BAILOUT_TABLE_ENTRY_SIZE) - 1;
+ MOZ_ASSERT(bailoutId < BAILOUT_TABLE_SIZE);
+
+ snapshotOffset_ = topIonScript_->bailoutToSnapshot(bailoutId);
+}
diff --git a/js/src/jit/mips32/Bailouts-mips32.h b/js/src/jit/mips32/Bailouts-mips32.h
new file mode 100644
index 0000000000..5d9d60a482
--- /dev/null
+++ b/js/src/jit/mips32/Bailouts-mips32.h
@@ -0,0 +1,71 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Bailouts_mips32_h
+#define jit_mips32_Bailouts_mips32_h
+
+#include "jit/Bailouts.h"
+
+namespace js {
+namespace jit {
+
+class BailoutStack {
+ uintptr_t frameClassId_;
+ // This is pushed in the bailout handler. Both entry points into the
+ // handler inserts their own value int lr, which is then placed onto the
+ // stack along with frameClassId_ above. This should be migrated to ip.
+ public:
+ union {
+ uintptr_t frameSize_;
+ uintptr_t tableOffset_;
+ };
+
+ protected:
+ RegisterDump::FPUArray fpregs_;
+ RegisterDump::GPRArray regs_;
+
+ uintptr_t snapshotOffset_;
+ uintptr_t padding_;
+
+ public:
+ FrameSizeClass frameClass() const {
+ return FrameSizeClass::FromClass(frameClassId_);
+ }
+ uintptr_t tableOffset() const {
+ MOZ_ASSERT(frameClass() != FrameSizeClass::None());
+ return tableOffset_;
+ }
+ uint32_t frameSize() const {
+ if (frameClass() == FrameSizeClass::None()) {
+ return frameSize_;
+ }
+ return frameClass().frameSize();
+ }
+ MachineState machine() { return MachineState::FromBailout(regs_, fpregs_); }
+ SnapshotOffset snapshotOffset() const {
+ MOZ_ASSERT(frameClass() == FrameSizeClass::None());
+ return snapshotOffset_;
+ }
+ uint8_t* parentStackPointer() const {
+ if (frameClass() == FrameSizeClass::None()) {
+ return (uint8_t*)this + sizeof(BailoutStack);
+ }
+ return (uint8_t*)this + offsetof(BailoutStack, snapshotOffset_);
+ }
+ static size_t offsetOfFrameClass() {
+ return offsetof(BailoutStack, frameClassId_);
+ }
+ static size_t offsetOfFrameSize() {
+ return offsetof(BailoutStack, frameSize_);
+ }
+ static size_t offsetOfFpRegs() { return offsetof(BailoutStack, fpregs_); }
+ static size_t offsetOfRegs() { return offsetof(BailoutStack, regs_); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Bailouts_mips32_h */
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.cpp b/js/src/jit/mips32/CodeGenerator-mips32.cpp
new file mode 100644
index 0000000000..2cee25a7a5
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -0,0 +1,508 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/CodeGenerator-mips32.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/Shape.h"
+#include "vm/TraceLogging.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+ValueOperand CodeGeneratorMIPS::ToValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getOperand(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getOperand(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+ValueOperand CodeGeneratorMIPS::ToTempValue(LInstruction* ins, size_t pos) {
+ Register typeReg = ToRegister(ins->getTemp(pos + TYPE_INDEX));
+ Register payloadReg = ToRegister(ins->getTemp(pos + PAYLOAD_INDEX));
+ return ValueOperand(typeReg, payloadReg);
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LDefinition* type = box->getDef(TYPE_INDEX);
+
+ MOZ_ASSERT(!box->getOperand(0)->isConstant());
+
+ // For NUNBOX32, the input operand and the output payload have the same
+ // virtual register. All that needs to be written is the type tag for
+ // the type definition.
+ masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(type));
+}
+
+void CodeGenerator::visitBoxFloatingPoint(LBoxFloatingPoint* box) {
+ const AnyRegister in = ToAnyRegister(box->getOperand(0));
+ const ValueOperand out = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), in), out);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ // Note that for unbox, the type and payload indexes are switched on the
+ // inputs.
+ MUnbox* mir = unbox->mir();
+ Register type = ToRegister(unbox->type());
+
+ if (mir->fallible()) {
+ bailoutCmp32(Assembler::NotEqual, type, Imm32(MIRTypeToTag(mir->type())),
+ unbox->snapshot());
+ }
+}
+
+void CodeGeneratorMIPS::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+ Register output = ToRegister(lir->output());
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.cmp64Set(condition, lhsRegs, imm, output);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.cmp64Set(condition, lhsRegs, rhsRegs, output);
+ }
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_Int64 ||
+ mir->compareType() == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register64 lhsRegs = ToRegister64(lhs);
+
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition condition = JSOpToCondition(lir->jsop(), isSigned);
+
+ Label* trueLabel = getJumpLabelForBranch(lir->ifTrue());
+ Label* falseLabel = getJumpLabelForBranch(lir->ifFalse());
+
+ if (isNextBlock(lir->ifFalse()->lir())) {
+ falseLabel = nullptr;
+ } else if (isNextBlock(lir->ifTrue()->lir())) {
+ condition = Assembler::InvertCondition(condition);
+ trueLabel = falseLabel;
+ falseLabel = nullptr;
+ }
+
+ if (IsConstant(rhs)) {
+ Imm64 imm = Imm64(ToInt64(rhs));
+ masm.branch64(condition, lhsRegs, imm, trueLabel, falseLabel);
+ } else {
+ Register64 rhsRegs = ToRegister64(rhs);
+ masm.branch64(condition, lhsRegs, rhsRegs, trueLabel, falseLabel);
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+ Register64 output = ToOutRegister64(lir);
+
+ MOZ_ASSERT(output == ReturnReg64);
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InvalidReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branch64(Assembler::NotEqual, lhs, Imm64(INT64_MIN), &notOverflow);
+ masm.branch64(Assembler::NotEqual, rhs, Imm64(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.xor64(output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::ModI64);
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::DivI64);
+ }
+ MOZ_ASSERT(ReturnReg64 == output);
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register64 lhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Lhs));
+ Register64 rhs = ToRegister64(lir->getInt64Operand(LDivOrModI64::Rhs));
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ReturnReg64);
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.branchTest64(Assembler::NonZero, rhs, rhs, InvalidReg, &nonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ masm.setupWasmABICall();
+ masm.passABIArg(lhs.high);
+ masm.passABIArg(lhs.low);
+ masm.passABIArg(rhs.high);
+ masm.passABIArg(rhs.low);
+
+ MOZ_ASSERT(gen->compilingWasm());
+ if (lir->mir()->isMod()) {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UModI64);
+ } else {
+ masm.callWithABI(lir->bytecodeOffset(), wasm::SymbolicAddress::UDivI64);
+ }
+}
+
+void CodeGeneratorMIPS::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_div(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_div(dividend, divisor);
+ masm.as_mflo(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorMIPS::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+
+#ifdef MIPSR6
+ masm.as_mod(/* result= */ dividend, dividend, divisor);
+#else
+ masm.as_div(dividend, divisor);
+ masm.as_mfhi(dividend);
+#endif
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, fail, bigIntsCanBeInNursery());
+ masm.initializeBigInt(output, dividend);
+}
+
+template <typename T>
+void CodeGeneratorMIPS::emitWasmLoadI64(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()),
+ ptrScratch, ToOutRegister64(lir),
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmLoadI64(mir->access(), HeapReg, ToRegister(lir->ptr()), ptrScratch,
+ ToOutRegister64(lir));
+ }
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedLoadI64(LWasmUnalignedLoadI64* lir) {
+ emitWasmLoadI64(lir);
+}
+
+template <typename T>
+void CodeGeneratorMIPS::emitWasmStoreI64(T* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (IsUnaligned(mir->access())) {
+ masm.wasmUnalignedStoreI64(mir->access(), ToRegister64(lir->value()),
+ HeapReg, ToRegister(lir->ptr()), ptrScratch,
+ ToRegister(lir->getTemp(1)));
+ } else {
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), HeapReg,
+ ToRegister(lir->ptr()), ptrScratch);
+ }
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmUnalignedStoreI64(LWasmUnalignedStoreI64* lir) {
+ emitWasmStoreI64(lir);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation trueExpr = lir->trueExpr();
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 output = ToOutRegister64(lir);
+
+ masm.move64(ToRegister64(trueExpr), output);
+
+ if (falseExpr.low().isRegister()) {
+ masm.as_movz(output.low, ToRegister(falseExpr.low()), cond);
+ masm.as_movz(output.high, ToRegister(falseExpr.high()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.low()), output.low);
+ masm.loadPtr(ToAddress(falseExpr.high()), output.high);
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ masm.moveToDoubleLo(input.low, output);
+ masm.moveToDoubleHi(input.high, output);
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ FloatRegister input = ToFloatRegister(lir->getOperand(0));
+ Register64 output = ToOutRegister64(lir);
+
+ masm.moveFromDoubleLo(input, output.low);
+ masm.moveFromDoubleHi(input, output.high);
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ Register input = ToRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ if (input != output.low) {
+ masm.move32(input, output.low);
+ }
+ if (lir->mir()->isUnsigned()) {
+ masm.move32(Imm32(0), output.high);
+ } else {
+ masm.ma_sra(output.high, output.low, Imm32(31));
+ }
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LInt64Allocation& input = lir->getInt64Operand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ masm.move32(ToRegister(input.low()), output);
+ } else {
+ masm.move32(ToRegister(input.high()), output);
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move8SignExtend(input.low, output.low);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move16SignExtend(input.low, output.low);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32(input.low, output.low);
+ break;
+ }
+ masm.ma_sra(output.high, output.low, Imm32(31));
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.low);
+ masm.move32(Imm32(0), output.high);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.as_or(output, input.low, input.high);
+ masm.cmp32Set(Assembler::Equal, output, Imm32(0), output);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ FloatRegister arg = input;
+ Register64 output = ToOutRegister64(lir);
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ auto* ool = new (alloc())
+ OutOfLineWasmTruncateCheck(mir, input, Register64::Invalid());
+ addOutOfLineCode(ool, mir);
+
+ if (fromType == MIRType::Float32) {
+ arg = ScratchDoubleReg;
+ masm.convertFloat32ToDouble(input, arg);
+ }
+
+ if (!lir->mir()->isSaturating()) {
+ masm.Push(input);
+
+ masm.setupWasmABICall();
+ masm.passABIArg(arg, MoveOp::DOUBLE);
+
+ if (lir->mir()->isUnsigned()) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::TruncateDoubleToUint64);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::TruncateDoubleToInt64);
+ }
+
+ masm.Pop(input);
+
+ masm.ma_xor(ScratchRegister, output.high, Imm32(0x80000000));
+ masm.ma_or(ScratchRegister, output.low);
+ masm.ma_b(ScratchRegister, Imm32(0), ool->entry(), Assembler::Equal);
+
+ masm.bind(ool->rejoin());
+ } else {
+ masm.setupWasmABICall();
+ masm.passABIArg(arg, MoveOp::DOUBLE);
+ if (lir->mir()->isUnsigned()) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::SaturatingTruncateDoubleToUint64);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::SaturatingTruncateDoubleToInt64);
+ }
+ }
+
+ MOZ_ASSERT(ReturnReg64 == output);
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ mozilla::DebugOnly<FloatRegister> output = ToFloatRegister(lir->output());
+
+ MInt64ToFloatingPoint* mir = lir->mir();
+ MIRType toType = mir->type();
+
+ masm.setupWasmABICall();
+ masm.passABIArg(input.high);
+ masm.passABIArg(input.low);
+
+ if (lir->mir()->isUnsigned()) {
+ if (toType == MIRType::Double) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Uint64ToDouble, MoveOp::DOUBLE);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Uint64ToFloat32, MoveOp::FLOAT32);
+ }
+ } else {
+ if (toType == MIRType::Double) {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Int64ToDouble, MoveOp::DOUBLE);
+ } else {
+ masm.callWithABI(mir->bytecodeOffset(),
+ wasm::SymbolicAddress::Int64ToFloat32, MoveOp::FLOAT32);
+ }
+ }
+
+ MOZ_ASSERT_IF(toType == MIRType::Double, *(&output) == ReturnDoubleReg);
+ MOZ_ASSERT_IF(toType == MIRType::Float32, *(&output) == ReturnFloat32Reg);
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+
+ branchToBlock(input.high, Imm32(0), lir->ifTrue(), Assembler::NonZero);
+ emitBranch(input.low, Imm32(0), Assembler::NonZero, lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmAtomicLoadI64(LWasmAtomicLoadI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+ masm.wasmAtomicLoad64(lir->mir()->access(), addr, Register64::Invalid(),
+ output);
+}
+
+void CodeGenerator::visitWasmAtomicStoreI64(LWasmAtomicStoreI64* lir) {
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register tmp = ToRegister(lir->tmp());
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(HeapReg, ptr, TimesOne, offset);
+
+ masm.wasmAtomicStore64(lir->mir()->access(), addr, tmp, value);
+}
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.h b/js/src/jit/mips32/CodeGenerator-mips32.h
new file mode 100644
index 0000000000..161b94326f
--- /dev/null
+++ b/js/src/jit/mips32/CodeGenerator-mips32.h
@@ -0,0 +1,60 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_CodeGenerator_mips32_h
+#define jit_mips32_CodeGenerator_mips32_h
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPS : public CodeGeneratorMIPSShared {
+ protected:
+ CodeGeneratorMIPS(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
+ : CodeGeneratorMIPSShared(gen, graph, masm) {}
+
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_NULL), cond, ifTrue,
+ ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_UNDEFINED), cond,
+ ifTrue, ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ emitBranch(value.typeReg(), (Imm32)ImmType(JSVAL_TYPE_OBJECT), cond, ifTrue,
+ ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorMIPS CodeGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_CodeGenerator_mips32_h */
diff --git a/js/src/jit/mips32/LIR-mips32.h b/js/src/jit/mips32/LIR-mips32.h
new file mode 100644
index 0000000000..da68ad7464
--- /dev/null
+++ b/js/src/jit/mips32/LIR-mips32.h
@@ -0,0 +1,197 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_LIR_mips32_h
+#define jit_mips32_LIR_mips32_h
+
+namespace js {
+namespace jit {
+
+class LBoxFloatingPoint : public LInstructionHelper<2, 1, 1> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(BoxFloatingPoint);
+
+ LBoxFloatingPoint(const LAllocation& in, const LDefinition& temp,
+ MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setOperand(0, in);
+ setTemp(0, temp);
+ }
+
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+class LUnbox : public LInstructionHelper<1, 2, 0> {
+ public:
+ LIR_HEADER(Unbox);
+
+ LUnbox() : LInstructionHelper(classOpcode) {}
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const LAllocation* payload() { return getOperand(0); }
+ const LAllocation* type() { return getOperand(1); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LInstructionHelper<1, 2, 0> {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ static const size_t Input = 0;
+
+ LUnboxFloatingPoint(const LBoxAllocation& input, MIRType type)
+ : LInstructionHelper(classOpcode), type_(type) {
+ setBoxOperand(Input, input);
+ }
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ MIRType type() const { return type_; }
+ const char* extraName() const { return StringFromMIRType(type_); }
+};
+
+class LDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2, 0> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64
+ : public LCallInstructionHelper<INT64_PIECES, INT64_PIECES * 2, 0> {
+ public:
+ LIR_HEADER(UDivOrModI64)
+
+ static const size_t Lhs = 0;
+ static const size_t Rhs = INT64_PIECES;
+
+ LUDivOrModI64(const LInt64Allocation& lhs, const LInt64Allocation& rhs)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(Lhs, lhs);
+ setInt64Operand(Rhs, rhs);
+ }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LCallInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LCallInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint
+ : public LCallInstructionHelper<1, INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LCallInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+class LWasmAtomicLoadI64 : public LInstructionHelper<INT64_PIECES, 1, 0> {
+ public:
+ LIR_HEADER(WasmAtomicLoadI64);
+
+ LWasmAtomicLoadI64(const LAllocation& ptr) : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const MWasmLoad* mir() const { return mir_->toWasmLoad(); }
+};
+
+class LWasmAtomicStoreI64 : public LInstructionHelper<0, 1 + INT64_PIECES, 1> {
+ public:
+ LIR_HEADER(WasmAtomicStoreI64);
+
+ LWasmAtomicStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LDefinition& tmp)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ setTemp(0, tmp);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const LDefinition* tmp() { return getTemp(0); }
+ const MWasmStore* mir() const { return mir_->toWasmStore(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_LIR_mips32_h */
diff --git a/js/src/jit/mips32/Lowering-mips32.cpp b/js/src/jit/mips32/Lowering-mips32.cpp
new file mode 100644
index 0000000000..721491c46b
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.cpp
@@ -0,0 +1,257 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/Lowering-mips32.h"
+
+#include "jit/Lowering.h"
+#include "jit/mips32/Assembler-mips32.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// Returns a box allocation pinned to two fixed registers: reg1 receives the
+// type tag and reg2 the payload (see the declaration in Lowering-mips32.h).
+// `useAtStart` lets the register allocator reuse the registers for the
+// output.
+LBoxAllocation LIRGeneratorMIPS::useBoxFixed(MDefinition* mir, Register reg1,
+                                             Register reg2, bool useAtStart) {
+  MOZ_ASSERT(mir->type() == MIRType::Value);
+  MOZ_ASSERT(reg1 != reg2);
+
+  ensureDefined(mir);
+  return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart),
+                        LUse(reg2, VirtualRegisterOfPayload(mir), useAtStart));
+}
+
+// Lowers an MBox to LIR.  On this 32-bit target a boxed Value is a
+// type-tag/payload register pair, so boxing a non-float value mostly
+// consists of aliasing the input as the payload half of the output.
+void LIRGenerator::visitBox(MBox* box) {
+  MDefinition* inner = box->getOperand(0);
+
+  // If the box wrapped a double, it needs a new register.
+  if (IsFloatingPointType(inner->type())) {
+    defineBox(new (alloc()) LBoxFloatingPoint(
+                  useRegisterAtStart(inner), tempCopy(inner, 0), inner->type()),
+              box);
+    return;
+  }
+
+  // Defer emission to the use sites when possible.
+  if (box->canEmitAtUses()) {
+    emitAtUses(box);
+    return;
+  }
+
+  // Boxing a constant just materializes the full JS value.
+  if (inner->isConstant()) {
+    defineBox(new (alloc()) LValue(inner->toConstant()->toJSValue()), box);
+    return;
+  }
+
+  LBox* lir = new (alloc()) LBox(use(inner), inner->type());
+
+  // Otherwise, we should not define a new register for the payload portion
+  // of the output, so bypass defineBox().
+  uint32_t vreg = getVirtualRegister();
+
+  // Note that because we're using BogusTemp(), we do not change the type of
+  // the definition. We also do not define the first output as "TYPE",
+  // because it has no corresponding payload at (vreg + 1). Also note that
+  // although we copy the input's original type for the payload half of the
+  // definition, this is only for clarity. BogusTemp() definitions are
+  // ignored.
+  lir->setDef(0, LDefinition(vreg, LDefinition::GENERAL));
+  lir->setDef(1, LDefinition::BogusTemp());
+  box->setVirtualRegister(vreg);
+  add(lir);
+}
+
+// Lowers an MUnbox to LIR: extracts the payload (and, when fallible,
+// checks the type tag) of a boxed Value.
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+  MDefinition* inner = unbox->getOperand(0);
+
+  // An unbox on mips reads in a type tag (either in memory or a register) and
+  // a payload. Unlike most instructions consuming a box, we ask for the type
+  // second, so that the result can re-use the first input.
+  MOZ_ASSERT(inner->type() == MIRType::Value);
+
+  ensureDefined(inner);
+
+  // Unboxing to a double/float needs both halves and a float output.
+  if (IsFloatingPointType(unbox->type())) {
+    LUnboxFloatingPoint* lir =
+        new (alloc()) LUnboxFloatingPoint(useBox(inner), unbox->type());
+    if (unbox->fallible()) {
+      assignSnapshot(lir, unbox->bailoutKind());
+    }
+    define(lir, unbox);
+    return;
+  }
+
+  // Swap the order we use the box pieces so we can re-use the payload
+  // register.
+  LUnbox* lir = new (alloc()) LUnbox;
+  lir->setOperand(0, usePayloadInRegisterAtStart(inner));
+  lir->setOperand(1, useType(inner, LUse::REGISTER));
+
+  // A fallible unbox bails out when the tag does not match.
+  if (unbox->fallible()) {
+    assignSnapshot(lir, unbox->bailoutKind());
+  }
+
+  // Types and payloads form two separate intervals. If the type becomes dead
+  // before the payload, it could be used as a Value without the type being
+  // recoverable. Unbox's purpose is to eagerly kill the definition of a type
+  // tag, so keeping both alive (for the purpose of gcmaps) is unappealing.
+  // Instead, we create a new virtual register.
+  defineReuseInput(lir, unbox, 0);
+}
+
+// Lowers a return of a boxed Value: the tag and payload are pinned to the
+// JS return register pair (JSReturnReg_Type / JSReturnReg_Data).
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+  MOZ_ASSERT(opd->type() == MIRType::Value);
+
+  LReturn* ins = new (alloc()) LReturn(isGenerator);
+  ins->setOperand(0, LUse(JSReturnReg_Type));
+  ins->setOperand(1, LUse(JSReturnReg_Data));
+  fillBoxUses(ins, 0, opd);
+  add(ins);
+}
+
+// Wires one incoming edge of a Value-typed phi.  A boxed phi is represented
+// as two LPhi nodes (type at lirIndex + VREG_TYPE_OFFSET, payload at
+// lirIndex + VREG_DATA_OFFSET); each gets the matching half of the operand.
+void LIRGeneratorMIPS::lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition,
+                                            LBlock* block, size_t lirIndex) {
+  MDefinition* operand = phi->getOperand(inputPosition);
+  LPhi* type = block->getPhi(lirIndex + VREG_TYPE_OFFSET);
+  LPhi* payload = block->getPhi(lirIndex + VREG_DATA_OFFSET);
+  type->setOperand(
+      inputPosition,
+      LUse(operand->virtualRegister() + VREG_TYPE_OFFSET, LUse::ANY));
+  payload->setOperand(inputPosition,
+                      LUse(VirtualRegisterOfPayload(operand), LUse::ANY));
+}
+
+// Defines the outputs of an int64 phi.  An int64 lives in two 32-bit
+// virtual registers, so two consecutive vregs are allocated (low then
+// high) and assigned to the pair of LPhi nodes.
+void LIRGeneratorMIPS::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+  LPhi* low = current->getPhi(lirIndex + INT64LOW_INDEX);
+  LPhi* high = current->getPhi(lirIndex + INT64HIGH_INDEX);
+
+  uint32_t lowVreg = getVirtualRegister();
+
+  // The phi's canonical vreg is the low half.
+  phi->setVirtualRegister(lowVreg);
+
+  uint32_t highVreg = getVirtualRegister();
+  // The two vregs must be consecutive so half-indexed lookups work.
+  MOZ_ASSERT(lowVreg + INT64HIGH_INDEX == highVreg + INT64LOW_INDEX);
+
+  low->setDef(0, LDefinition(lowVreg, LDefinition::INT32));
+  high->setDef(0, LDefinition(highVreg, LDefinition::INT32));
+  annotate(high);
+  annotate(low);
+}
+
+// Wires one incoming edge of an int64 phi: each of the low/high LPhi
+// halves consumes the matching half of the operand's vreg pair.
+void LIRGeneratorMIPS::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+                                          LBlock* block, size_t lirIndex) {
+  MDefinition* operand = phi->getOperand(inputPosition);
+  LPhi* low = block->getPhi(lirIndex + INT64LOW_INDEX);
+  LPhi* high = block->getPhi(lirIndex + INT64HIGH_INDEX);
+  low->setOperand(inputPosition,
+                  LUse(operand->virtualRegister() + INT64LOW_INDEX, LUse::ANY));
+  high->setOperand(
+      inputPosition,
+      LUse(operand->virtualRegister() + INT64HIGH_INDEX, LUse::ANY));
+}
+
+// Lowers truncation of a double to int32.  No temp register is needed on
+// this target, hence BogusTemp().
+void LIRGeneratorMIPS::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+  MDefinition* opd = ins->input();
+  MOZ_ASSERT(opd->type() == MIRType::Double);
+
+  define(new (alloc())
+             LTruncateDToInt32(useRegister(opd), LDefinition::BogusTemp()),
+         ins);
+}
+
+// Lowers truncation of a float32 to int32; mirrors the double case above.
+void LIRGeneratorMIPS::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+  MDefinition* opd = ins->input();
+  MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+  define(new (alloc())
+             LTruncateFToInt32(useRegister(opd), LDefinition::BogusTemp()),
+         ins);
+}
+
+// Lowers a signed int64 division, dispatching unsigned divisions to
+// lowerUDivI64.  The operation is emitted as a call (defineReturn), with
+// both int64 operands used at start.
+void LIRGeneratorMIPS::lowerDivI64(MDiv* div) {
+  if (div->isUnsigned()) {
+    lowerUDivI64(div);
+    return;
+  }
+
+  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
+
+  defineReturn(lir, div);
+}
+
+// Runtime-builtin int64 division is not used on this architecture;
+// lowerDivI64 above handles it instead.
+void LIRGeneratorMIPS::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+  MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+// Lowers a signed int64 modulo, dispatching unsigned cases to lowerUModI64.
+void LIRGeneratorMIPS::lowerModI64(MMod* mod) {
+  if (mod->isUnsigned()) {
+    lowerUModI64(mod);
+    return;
+  }
+
+  LDivOrModI64* lir = new (alloc()) LDivOrModI64(
+      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
+
+  defineReturn(lir, mod);
+}
+
+// Runtime-builtin int64 modulo is not used on this architecture.
+void LIRGeneratorMIPS::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+  MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+// Lowers an unsigned int64 division as a call.
+void LIRGeneratorMIPS::lowerUDivI64(MDiv* div) {
+  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
+      useInt64RegisterAtStart(div->lhs()), useInt64RegisterAtStart(div->rhs()));
+  defineReturn(lir, div);
+}
+
+// Lowers an unsigned int64 modulo as a call.
+void LIRGeneratorMIPS::lowerUModI64(MMod* mod) {
+  LUDivOrModI64* lir = new (alloc()) LUDivOrModI64(
+      useInt64RegisterAtStart(mod->lhs()), useInt64RegisterAtStart(mod->rhs()));
+  defineReturn(lir, mod);
+}
+
+// Lowers BigInt division; uses two temp registers and a safepoint since
+// BigInt operations can allocate (and thus GC).
+void LIRGeneratorMIPS::lowerBigIntDiv(MBigIntDiv* ins) {
+  auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+                                       useRegister(ins->rhs()), temp(), temp());
+  define(lir, ins);
+  assignSafepoint(lir, ins);
+}
+
+// Lowers BigInt modulo; mirrors lowerBigIntDiv above.
+void LIRGeneratorMIPS::lowerBigIntMod(MBigIntMod* ins) {
+  auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+                                       useRegister(ins->rhs()), temp(), temp());
+  define(lir, ins);
+  assignSafepoint(lir, ins);
+}
+
+// Lowers wasm float→int64 truncation as a call (see LWasmTruncateToInt64).
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+  MDefinition* opd = ins->input();
+  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+  defineReturn(new (alloc()) LWasmTruncateToInt64(useRegisterAtStart(opd)),
+               ins);
+}
+
+// Builtin-thunk variant is not used on this architecture; the node above
+// covers truncation to int64.
+void LIRGeneratorMIPS::lowerWasmBuiltinTruncateToInt64(
+    MWasmBuiltinTruncateToInt64* ins) {
+  MOZ_CRASH("We don't use it for this architecture");
+}
+
+// Lowers int64→float/double conversion as a call (see LInt64ToFloatingPoint).
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+  MDefinition* opd = ins->input();
+  MOZ_ASSERT(opd->type() == MIRType::Int64);
+  MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+  defineReturn(
+      new (alloc()) LInt64ToFloatingPoint(useInt64RegisterAtStart(opd)), ins);
+}
+
+// Builtin-thunk variant is not used on this architecture.
+void LIRGeneratorMIPS::lowerBuiltinInt64ToFloatingPoint(
+    MBuiltinInt64ToFloatingPoint* ins) {
+  MOZ_CRASH("We don't use it for this architecture");
+}
diff --git a/js/src/jit/mips32/Lowering-mips32.h b/js/src/jit/mips32/Lowering-mips32.h
new file mode 100644
index 0000000000..1565c84656
--- /dev/null
+++ b/js/src/jit/mips32/Lowering-mips32.h
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_Lowering_mips32_h
+#define jit_mips32_Lowering_mips32_h
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+// MIPS32-specific LIR generator.  Extends the shared MIPS lowering with the
+// pieces that differ on the 32-bit variant: boxed-Value handling (tag +
+// payload register pairs) and int64 operations split across two 32-bit
+// registers.  Implementations live in Lowering-mips32.cpp.
+class LIRGeneratorMIPS : public LIRGeneratorMIPSShared {
+ protected:
+  LIRGeneratorMIPS(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+      : LIRGeneratorMIPSShared(gen, graph, lirGraph) {}
+
+  // Returns a box allocation with type set to reg1 and payload set to reg2.
+  LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+                             bool useAtStart = false);
+
+  // No temp is needed to unbox on mips32.
+  inline LDefinition tempToUnbox() { return LDefinition::BogusTemp(); }
+
+  // Phi wiring for boxed Values (type/payload halves).
+  void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+                            size_t lirIndex);
+
+  // Phi wiring and definition for int64 values (low/high halves).
+  void lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+                          size_t lirIndex);
+  void defineInt64Phi(MPhi* phi, size_t lirIndex);
+
+  // Float<->int64 conversions and truncations.
+  void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+  void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+  void lowerTruncateDToInt32(MTruncateToInt32* ins);
+  void lowerTruncateFToInt32(MTruncateToInt32* ins);
+
+  // 64-bit division/modulo (signed and unsigned variants).
+  void lowerDivI64(MDiv* div);
+  void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+  void lowerModI64(MMod* mod);
+  void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+  void lowerUDivI64(MDiv* div);
+  void lowerUModI64(MMod* mod);
+
+  // BigInt division/modulo.
+  void lowerBigIntDiv(MBigIntDiv* ins);
+  void lowerBigIntMod(MBigIntMod* ins);
+};
+
+typedef LIRGeneratorMIPS LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_Lowering_mips32_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32-inl.h b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
new file mode 100644
index 0000000000..7899d4c335
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32-inl.h
@@ -0,0 +1,960 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_inl_h
+#define jit_mips32_MacroAssembler_mips32_inl_h
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "vm/BigIntType.h" // JS::BigInt
+
+#include "jit/mips-shared/MacroAssembler-mips-shared-inl.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+// Copies a 64-bit value held as a low/high 32-bit register pair.
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+  move32(src.low, dest.low);
+  move32(src.high, dest.high);
+}
+
+// Materializes a 64-bit immediate into a register pair, one half at a time.
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+  move32(Imm32(imm.value & 0xFFFFFFFFL), dest.low);
+  move32(Imm32((imm.value >> 32) & 0xFFFFFFFFL), dest.high);
+}
+
+// Moves the raw bits of a double into a GPR pair (high word first).
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+  moveFromDoubleHi(src, dest.high);
+  moveFromDoubleLo(src, dest.low);
+}
+
+// Moves a GPR pair's raw bits into a double register.
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+  moveToDoubleHi(src.high, dest);
+  moveToDoubleLo(src.low, dest);
+}
+
+// Truncates an int64 to int32 by taking the low word; no-op when aliased.
+void MacroAssembler::move64To32(Register64 src, Register dest) {
+  if (src.low != dest) {
+    move32(src.low, dest);
+  }
+}
+
+// Zero-extends a 32-bit value to 64 bits (high word cleared).
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+  if (src != dest.low) {
+    move32(src, dest.low);
+  }
+  move32(Imm32(0), dest.high);
+}
+
+// Sign-extends the low 8 bits of src to a full int64.
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+  move8SignExtend(src, dest.low);
+  move32To64SignExtend(dest.low, dest);
+}
+
+// Sign-extends the low 16 bits of src to a full int64.
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+  move16SignExtend(src, dest.low);
+  move32To64SignExtend(dest.low, dest);
+}
+
+// Sign-extends a 32-bit value to 64 bits: the high word is the low word's
+// sign bit replicated (arithmetic shift right by 31).
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+  if (src != dest.low) {
+    move32(src, dest.low);
+  }
+  ma_sra(dest.high, dest.low, Imm32(31));
+}
+
+// Pointers are 32-bit on this target, so this is a plain 32-bit move.
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+  move32(src, dest);
+}
+
+// ===============================================================
+// Load instructions
+
+// Pointers are 32-bit wide here, so "sign-extend to pointer" is a plain
+// 32-bit load.
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+  load32(src, dest);
+}
+
+// ===============================================================
+// Logical instructions
+
+// Bitwise NOT of a pointer-sized register.
+void MacroAssembler::notPtr(Register reg) { ma_not(reg, reg); }
+
+void MacroAssembler::andPtr(Register src, Register dest) { ma_and(dest, src); }
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) { ma_and(dest, imm); }
+
+// 64-bit AND with an immediate; each half is skipped when its mask is all
+// ones (AND identity).
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+  if (imm.low().value != int32_t(0xFFFFFFFF)) {
+    and32(imm.low(), dest.low);
+  }
+  if (imm.hi().value != int32_t(0xFFFFFFFF)) {
+    and32(imm.hi(), dest.high);
+  }
+}
+
+// 64-bit AND of two register pairs, performed half by half.
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+  and32(src.low, dest.low);
+  and32(src.high, dest.high);
+}
+
+// 64-bit OR with an immediate; zero halves are skipped (OR identity).
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+  if (imm.low().value) {
+    or32(imm.low(), dest.low);
+  }
+  if (imm.hi().value) {
+    or32(imm.hi(), dest.high);
+  }
+}
+
+// 64-bit XOR with an immediate; zero halves are skipped (XOR identity).
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+  if (imm.low().value) {
+    xor32(imm.low(), dest.low);
+  }
+  if (imm.hi().value) {
+    xor32(imm.hi(), dest.high);
+  }
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) { ma_or(dest, src); }
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, imm); }
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+  or32(src.low, dest.low);
+  or32(src.high, dest.high);
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+  ma_xor(dest.low, src.low);
+  ma_xor(dest.high, src.high);
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) { ma_xor(dest, src); }
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) { ma_xor(dest, imm); }
+
+// ===============================================================
+// Swap instructions
+
+// Reverses the byte order of a 64-bit value: byte-swap each 32-bit half,
+// then exchange the halves (using the three-XOR register swap to avoid a
+// temporary).
+void MacroAssembler::byteSwap64(Register64 reg) {
+  byteSwap32(reg.high);
+  byteSwap32(reg.low);
+
+  // swap reg.high and reg.low.
+  ma_xor(reg.high, reg.low);
+  ma_xor(reg.low, reg.high);
+  ma_xor(reg.high, reg.low);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) { ma_addu(dest, src); }
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) { ma_addu(dest, imm); }
+
+// Pointer-sized immediates are 32 bits on this target.
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+  addPtr(Imm32(imm.value), dest);
+}
+
+// 64-bit add of register pairs.  MIPS has no carry flag, so the carry out of
+// the low-word add is recovered with an unsigned compare (sltu: the sum
+// wrapped iff it is below an addend) and then added into the high word.
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+  if (dest.low == src.low) {
+    // NOTE(review): with dest.low aliasing src.low the operands can't be
+    // compared after the add, so the carry is computed first — but
+    // (src.low <u 0) is always 0, whereas the carry out of doubling is
+    // bit 31 of src.low.  Verify this aliased path against callers.
+    as_sltu(ScratchRegister, src.low, zero);
+    as_addu(dest.low, dest.low, src.low);
+  } else {
+    as_addu(dest.low, dest.low, src.low);
+    as_sltu(ScratchRegister, dest.low, src.low);
+  }
+  as_addu(dest.high, dest.high, src.high);
+  as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+// 64-bit add of a 32-bit immediate (sign-extended).  The carry into the
+// high word is (sum <u addend), computed with sltiu/sltu.
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+  if (Imm16::IsInSignedRange(imm.value)) {
+    as_addiu(dest.low, dest.low, imm.value);
+    as_sltiu(ScratchRegister, dest.low, imm.value);
+  } else {
+    ma_li(ScratchRegister, imm);
+    as_addu(dest.low, dest.low, ScratchRegister);
+    as_sltu(ScratchRegister, dest.low, ScratchRegister);
+  }
+  as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+// 64-bit add of a 64-bit immediate: low half (with carry) via add64(Imm32),
+// then the immediate's high half added on top.
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+  add64(imm.low(), dest);
+  ma_addu(dest.high, dest.high, imm.hi());
+}
+
+// Emits `dest = StackPointer - <patched imm>` with a patchable lui/ori
+// immediate; returns the offset of the load for later patching.
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+  CodeOffset offset = CodeOffset(currentOffset());
+  ma_liPatchable(dest, Imm32(0));
+  as_subu(dest, StackPointer, dest);
+  return offset;
+}
+
+// Rewrites the lui/ori pair emitted by sub32FromStackPtrWithPatch with the
+// final immediate value.
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+  Instruction* lui =
+      (Instruction*)m_buffer.getInst(BufferOffset(offset.offset()));
+  MOZ_ASSERT(lui->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+  MOZ_ASSERT(lui->next()->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+  UpdateLuiOriValue(lui, lui->next(), imm.value);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) {
+  as_subu(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+  ma_subu(dest, dest, imm);
+}
+
+// 64-bit subtract of register pairs.  The borrow out of the low word is
+// (dest.low <u src.low), computed before the low subtraction and taken off
+// the high word.
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+  MOZ_ASSERT(dest.low != src.high);
+  MOZ_ASSERT(dest.high != src.low);
+  MOZ_ASSERT(dest.high != src.high);
+
+  as_sltu(ScratchRegister, dest.low, src.low);
+  as_subu(dest.high, dest.high, ScratchRegister);
+  as_subu(dest.low, dest.low, src.low);
+  as_subu(dest.high, dest.high, src.high);
+}
+
+// 64-bit subtract of an immediate: borrow from the low word first, then
+// subtract both halves.  The fast path folds the low subtraction into a
+// single addiu of the negated value when it fits in 16 bits.
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+  if (Imm16::IsInSignedRange(imm.low().value) &&
+      Imm16::IsInSignedRange(-imm.value)) {
+    as_sltiu(ScratchRegister, dest.low, imm.low().value);
+    as_subu(dest.high, dest.high, ScratchRegister);
+    as_addiu(dest.low, dest.low, -imm.value);
+  } else {
+    ma_li(SecondScratchReg, imm.low());
+    as_sltu(ScratchRegister, dest.low, SecondScratchReg);
+    as_subu(dest.high, dest.high, ScratchRegister);
+    as_subu(dest.low, dest.low, SecondScratchReg);
+  }
+  ma_subu(dest.high, dest.high, imm.hi());
+}
+
+// 64-bit multiply by an immediate using 32x32 multiplies (mult/madd) and a
+// manual carry.  Result formula is spelled out below.
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+  // LOW32  = LOW(LOW(dest) * LOW(imm));
+  // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+  //        + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+  //        + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+  if (imm.low().value == 5) {
+    // Optimized case for Math.random(): x*5 computed as (x<<2)+x with the
+    // cross-word carry propagated by hand.
+    as_sll(ScratchRegister, dest.low, 2);
+    as_srl(SecondScratchReg, dest.low, 32 - 2);
+    as_addu(dest.low, ScratchRegister, dest.low);
+    as_sltu(ScratchRegister, dest.low, ScratchRegister);
+    as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+    as_sll(SecondScratchReg, dest.high, 2);
+    as_addu(SecondScratchReg, SecondScratchReg, dest.high);
+    as_addu(dest.high, ScratchRegister, SecondScratchReg);
+  } else {
+    // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+    //        + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+    ma_li(ScratchRegister, imm.low());
+    as_mult(dest.high, ScratchRegister);
+    ma_li(ScratchRegister, imm.hi());
+    as_madd(dest.low, ScratchRegister);
+    as_mflo(dest.high);
+    //        + HIGH(LOW(dest) * LOW(imm)) [carry]
+    // LOW32  = LOW(LOW(dest) * LOW(imm));
+    ma_li(ScratchRegister, imm.low());
+    as_multu(dest.low, ScratchRegister);
+    as_mfhi(ScratchRegister);
+    as_mflo(dest.low);
+    as_addu(dest.high, dest.high, ScratchRegister);
+  }
+}
+
+// As above, but with a caller-provided temp so imm.low() only has to be
+// materialized once (it stays in ScratchRegister across the madd).
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+                           const Register temp) {
+  // LOW32  = LOW(LOW(dest) * LOW(imm));
+  // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+  //        + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+  //        + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+  MOZ_ASSERT(temp != dest.high && temp != dest.low);
+
+  // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+  //        + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+  ma_li(ScratchRegister, imm.low());
+  as_mult(dest.high, ScratchRegister);
+  ma_li(temp, imm.hi());
+  as_madd(dest.low, temp);
+  as_mflo(dest.high);
+  //        + HIGH(LOW(dest) * LOW(imm)) [carry]
+  // LOW32  = LOW(LOW(dest) * LOW(imm));
+  as_multu(dest.low, ScratchRegister);
+  as_mfhi(ScratchRegister);
+  as_mflo(dest.low);
+  as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+// 64-bit multiply of two register pairs; same mult/madd decomposition.
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+                           const Register temp) {
+  // LOW32  = LOW(LOW(dest) * LOW(imm));
+  // HIGH32 = LOW(HIGH(dest) * LOW(imm)) [multiply imm into upper bits]
+  //        + LOW(LOW(dest) * HIGH(imm)) [multiply dest into upper bits]
+  //        + HIGH(LOW(dest) * LOW(imm)) [carry]
+
+  MOZ_ASSERT(dest != src);
+  MOZ_ASSERT(dest.low != src.high && dest.high != src.low);
+
+  // HIGH32 = LOW(HIGH(dest) * LOW(src)) [multiply src into upper bits]
+  //        + LOW(LOW(dest) * HIGH(src)) [multiply dest into upper bits]
+  as_mult(dest.high, src.low);
+  as_madd(dest.low, src.high);
+  as_mflo(dest.high);
+  //        + HIGH(LOW(dest) * LOW(src)) [carry]
+  // LOW32  = LOW(LOW(dest) * LOW(src));
+  as_multu(dest.low, src.low);
+  as_mfhi(ScratchRegister);
+  as_mflo(dest.low);
+  as_addu(dest.high, dest.high, ScratchRegister);
+}
+
+// 64-bit negate: negate the low word, recover the borrow with an unsigned
+// compare, then negate the high word and subtract the borrow.
+void MacroAssembler::neg64(Register64 reg) {
+  as_subu(ScratchRegister, zero, reg.low);
+  as_sltu(ScratchRegister, reg.low, ScratchRegister);
+  as_subu(reg.high, zero, reg.high);
+  as_subu(reg.high, reg.high, ScratchRegister);
+}
+
+void MacroAssembler::negPtr(Register reg) { as_subu(reg, zero, reg); }
+
+// dest = src * 3, computed as (src + src) + src.
+void MacroAssembler::mulBy3(Register src, Register dest) {
+  MOZ_ASSERT(src != ScratchRegister);
+  as_addu(ScratchRegister, src, src);
+  as_addu(dest, ScratchRegister, src);
+}
+
+// Increments a 64-bit counter in memory: add 1 to the low word (offset 0),
+// detect wrap-around with sltiu, and propagate the carry into the high
+// word (offset 4).  Assumes little-endian word order in memory.
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+  ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+  as_lw(SecondScratchReg, ScratchRegister, 0);
+
+  as_addiu(SecondScratchReg, SecondScratchReg, 1);
+  as_sw(SecondScratchReg, ScratchRegister, 0);
+
+  // Carry = 1 iff the incremented low word wrapped to 0.
+  as_sltiu(SecondScratchReg, SecondScratchReg, 1);
+  as_lw(ScratchRegister, ScratchRegister, 4);
+
+  as_addu(SecondScratchReg, ScratchRegister, SecondScratchReg);
+
+  ma_li(ScratchRegister, Imm32((int32_t)dest.addr));
+  as_sw(SecondScratchReg, ScratchRegister, 4);
+}
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+  ma_sll(dest, dest, imm);
+}
+
+void MacroAssembler::lshiftPtr(Register src, Register dest) {
+  ma_sll(dest, dest, src);
+}
+
+// 64-bit left shift by a constant.  For shifts < 32 the bits leaving the
+// low word are ORed into the high word; for shifts >= 32 the low word
+// moves (shifted) into the high word and the low word becomes zero.
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  ScratchRegisterScope scratch(*this);
+
+  if (imm.value == 0) {
+    return;
+  } else if (imm.value < 32) {
+    as_sll(dest.high, dest.high, imm.value);
+    as_srl(scratch, dest.low, (32 - imm.value) % 32);
+    as_or(dest.high, dest.high, scratch);
+    as_sll(dest.low, dest.low, imm.value);
+  } else {
+    as_sll(dest.high, dest.low, imm.value - 32);
+    move32(Imm32(0), dest.low);
+  }
+}
+
+// 64-bit left shift by a register.  The shift amount is masked to 6 bits;
+// a zero shift is a no-op.  The high word receives the low word's spill
+// bits (low >> (32 - shift), built via nor/srl to avoid shift-by-32 UB on
+// MIPS), and an amount >= 32 is fixed up at the end with conditional moves
+// (movn) keyed on bit 5 of the shift.
+void MacroAssembler::lshift64(Register unmaskedShift, Register64 dest) {
+  Label done;
+  ScratchRegisterScope shift(*this);
+
+  ma_and(shift, unmaskedShift, Imm32(0x3f));
+  ma_b(shift, Imm32(0), &done, Equal);
+
+  mov(dest.low, SecondScratchReg);
+  ma_sll(dest.low, dest.low, shift);
+  // shift = ~shift; then srl by 1 and by (~shift & 31) gives
+  // low >> (32 - shift) without ever shifting by 32.
+  as_nor(shift, zero, shift);
+  as_srl(SecondScratchReg, SecondScratchReg, 1);
+  ma_srl(SecondScratchReg, SecondScratchReg, shift);
+  ma_and(shift, unmaskedShift, Imm32(0x3f));
+  ma_sll(dest.high, dest.high, shift);
+  as_or(dest.high, dest.high, SecondScratchReg);
+
+  // If shift >= 32 (bit 5 set): high = shifted low, low = 0.
+  ma_and(SecondScratchReg, shift, Imm32(0x20));
+  as_movn(dest.high, dest.low, SecondScratchReg);
+  as_movn(dest.low, zero, SecondScratchReg);
+
+  bind(&done);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+  ma_srl(dest, dest, imm);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 32);
+  ma_sra(dest, dest, imm);
+}
+
+void MacroAssembler::rshiftPtr(Register src, Register dest) {
+  ma_srl(dest, dest, src);
+}
+
+// 64-bit logical right shift by a constant.  Mirror image of lshift64:
+// bits leaving the high word are ORed into the low word; for shifts >= 32
+// the high word moves (shifted) into the low word and the high word is
+// zeroed.
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  ScratchRegisterScope scratch(*this);
+
+  if (imm.value == 0) {
+    return;
+  } else if (imm.value < 32) {
+    as_srl(dest.low, dest.low, imm.value);
+    as_sll(scratch, dest.high, (32 - imm.value) % 32);
+    as_or(dest.low, dest.low, scratch);
+    as_srl(dest.high, dest.high, imm.value);
+  } else if (imm.value == 32) {
+    ma_move(dest.low, dest.high);
+    move32(Imm32(0), dest.high);
+  } else {
+    ma_srl(dest.low, dest.high, Imm32(imm.value - 32));
+    move32(Imm32(0), dest.high);
+  }
+}
+
+// 64-bit logical right shift by a register (amount masked to 6 bits; zero
+// is a no-op).  Uses the nor/sll-by-1 trick to compute high << (32 - shift)
+// without a shift of 32, then fixes up shift >= 32 with conditional moves.
+void MacroAssembler::rshift64(Register unmaskedShift, Register64 dest) {
+  Label done;
+  ScratchRegisterScope shift(*this);
+
+  ma_and(shift, unmaskedShift, Imm32(0x3f));
+  ma_b(shift, Imm32(0), &done, Equal);
+
+  mov(dest.high, SecondScratchReg);
+  ma_srl(dest.high, dest.high, shift);
+  as_nor(shift, zero, shift);
+  as_sll(SecondScratchReg, SecondScratchReg, 1);
+  ma_sll(SecondScratchReg, SecondScratchReg, shift);
+  ma_and(shift, unmaskedShift, Imm32(0x3f));
+  ma_srl(dest.low, dest.low, shift);
+  as_or(dest.low, dest.low, SecondScratchReg);
+  // If shift >= 32 (bit 5 set): low = shifted high, high = 0.
+  ma_and(SecondScratchReg, shift, Imm32(0x20));
+  as_movn(dest.low, dest.high, SecondScratchReg);
+  as_movn(dest.high, zero, SecondScratchReg);
+
+  bind(&done);
+}
+
+// 64-bit arithmetic right shift by a constant: like rshift64 but the high
+// word is filled with copies of the sign bit (sra) instead of zeros.
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+  MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+  ScratchRegisterScope scratch(*this);
+
+  if (imm.value == 0) {
+    return;
+  } else if (imm.value < 32) {
+    as_srl(dest.low, dest.low, imm.value);
+    as_sll(scratch, dest.high, (32 - imm.value) % 32);
+    as_or(dest.low, dest.low, scratch);
+    as_sra(dest.high, dest.high, imm.value);
+  } else if (imm.value == 32) {
+    ma_move(dest.low, dest.high);
+    as_sra(dest.high, dest.high, 31);
+  } else {
+    as_sra(dest.low, dest.high, imm.value - 32);
+    as_sra(dest.high, dest.high, 31);
+  }
+}
+
+// 64-bit arithmetic right shift by a register; same structure as the
+// logical variant, but the shift >= 32 fixup fills the high word with the
+// sign (sra by 31) rather than zero.
+void MacroAssembler::rshift64Arithmetic(Register unmaskedShift,
+                                        Register64 dest) {
+  Label done;
+
+  ScratchRegisterScope shift(*this);
+  ma_and(shift, unmaskedShift, Imm32(0x3f));
+  ma_b(shift, Imm32(0), &done, Equal);
+
+  mov(dest.high, SecondScratchReg);
+  ma_sra(dest.high, dest.high, shift);
+  as_nor(shift, zero, shift);
+  as_sll(SecondScratchReg, SecondScratchReg, 1);
+  ma_sll(SecondScratchReg, SecondScratchReg, shift);
+  ma_and(shift, unmaskedShift, Imm32(0x3f));
+  ma_srl(dest.low, dest.low, shift);
+  as_or(dest.low, dest.low, SecondScratchReg);
+  // If shift >= 32 (bit 5 set): low = shifted high, high = sign fill.
+  ma_and(SecondScratchReg, shift, Imm32(0x20));
+  as_sra(shift, dest.high, 31);
+  as_movn(dest.low, dest.high, SecondScratchReg);
+  as_movn(dest.high, shift, SecondScratchReg);
+
+  bind(&done);
+}
+
+// ===============================================================
+// Rotation functions
+
+// 64-bit rotate left by a constant.  Amounts > 32 are delegated to a
+// right-rotate by the complement; 0 and 32 are plain moves / half swaps;
+// otherwise each destination word is assembled from a shifted pair of
+// source words.  `temp` must be InvalidReg for the immediate form.
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 input,
+                                  Register64 dest, Register temp) {
+  MOZ_ASSERT(temp == InvalidReg);
+  MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+  int32_t amount = count.value & 0x3f;
+  if (amount > 32) {
+    rotateRight64(Imm32(64 - amount), input, dest, temp);
+  } else {
+    ScratchRegisterScope scratch(*this);
+    if (amount == 0) {
+      ma_move(dest.low, input.low);
+      ma_move(dest.high, input.high);
+    } else if (amount == 32) {
+      // Exchange the halves (scratch protects against dest aliasing input).
+      ma_move(scratch, input.low);
+      ma_move(dest.low, input.high);
+      ma_move(dest.high, scratch);
+    } else {
+      MOZ_ASSERT(0 < amount && amount < 32);
+      // Save input.high in case dest.high aliases it.
+      ma_move(scratch, input.high);
+      ma_sll(dest.high, input.high, Imm32(amount));
+      ma_srl(SecondScratchReg, input.low, Imm32(32 - amount));
+      as_or(dest.high, dest.high, SecondScratchReg);
+      ma_sll(dest.low, input.low, Imm32(amount));
+      ma_srl(SecondScratchReg, scratch, Imm32(32 - amount));
+      as_or(dest.low, dest.low, SecondScratchReg);
+    }
+  }
+}
+
+// 64-bit rotate left by a register.  The amount is masked to 6 bits; each
+// destination word is built as (word << n) | (other word >> (32 - n)),
+// with the complement shift done via nor + a pre-shift by 1 to avoid a
+// shift of 32.  If the amount has bit 5 set the halves are exchanged with
+// conditional moves (movn).  Requires a real `temp` register.
+void MacroAssembler::rotateLeft64(Register shift, Register64 src,
+                                  Register64 dest, Register temp) {
+  MOZ_ASSERT(temp != src.low && temp != src.high);
+  MOZ_ASSERT(shift != src.low && shift != src.high);
+  MOZ_ASSERT(temp != InvalidReg);
+
+  ScratchRegisterScope scratch(*this);
+
+  ma_and(scratch, shift, Imm32(0x3f));
+  as_nor(SecondScratchReg, zero, scratch);
+  ma_sll(temp, src.low, scratch);
+  ma_move(scratch, src.low);
+  as_srl(dest.low, src.high, 1);
+  ma_srl(dest.low, dest.low, SecondScratchReg);
+  as_or(dest.low, dest.low, temp);
+  ma_move(SecondScratchReg, src.high);
+  as_srl(dest.high, scratch, 1);
+  ma_and(scratch, shift, Imm32(0x3f));
+  ma_sll(temp, SecondScratchReg, scratch);
+  as_nor(SecondScratchReg, zero, scratch);
+  ma_srl(dest.high, dest.high, SecondScratchReg);
+  as_or(dest.high, dest.high, temp);
+  // Bit 5 of the amount set means the halves must be swapped.
+  ma_and(temp, scratch, Imm32(32));
+  as_movn(SecondScratchReg, dest.high, temp);
+  as_movn(dest.high, dest.low, temp);
+  as_movn(dest.low, SecondScratchReg, temp);
+}
+
+// 64-bit rotate right by a constant; mirror image of rotateLeft64(Imm32):
+// amounts > 32 delegate to a left rotate by the complement, 0/32 are
+// moves / half swaps, and intermediate amounts merge shifted word pairs.
+void MacroAssembler::rotateRight64(Imm32 count, Register64 input,
+                                   Register64 dest, Register temp) {
+  MOZ_ASSERT(temp == InvalidReg);
+  MOZ_ASSERT(input.low != dest.high && input.high != dest.low);
+
+  int32_t amount = count.value & 0x3f;
+  if (amount > 32) {
+    rotateLeft64(Imm32(64 - amount), input, dest, temp);
+  } else {
+    ScratchRegisterScope scratch(*this);
+    if (amount == 0) {
+      ma_move(dest.low, input.low);
+      ma_move(dest.high, input.high);
+    } else if (amount == 32) {
+      // Exchange the halves (scratch protects against dest aliasing input).
+      ma_move(scratch, input.low);
+      ma_move(dest.low, input.high);
+      ma_move(dest.high, scratch);
+    } else {
+      MOZ_ASSERT(0 < amount && amount < 32);
+      // Save input.high in case dest.high aliases it.
+      ma_move(scratch, input.high);
+      ma_srl(dest.high, input.high, Imm32(amount));
+      ma_sll(SecondScratchReg, input.low, Imm32(32 - amount));
+      as_or(dest.high, dest.high, SecondScratchReg);
+      ma_srl(dest.low, input.low, Imm32(amount));
+      ma_sll(SecondScratchReg, scratch, Imm32(32 - amount));
+      as_or(dest.low, dest.low, SecondScratchReg);
+    }
+  }
+}
+
+// 64-bit rotate right by a register; same scheme as rotateLeft64(Register)
+// with the shift directions reversed, including the movn half-swap when
+// bit 5 of the masked amount is set.
+void MacroAssembler::rotateRight64(Register shift, Register64 src,
+                                   Register64 dest, Register temp) {
+  MOZ_ASSERT(temp != src.low && temp != src.high);
+  MOZ_ASSERT(shift != src.low && shift != src.high);
+  MOZ_ASSERT(temp != InvalidReg);
+
+  ScratchRegisterScope scratch(*this);
+
+  ma_and(scratch, shift, Imm32(0x3f));
+  as_nor(SecondScratchReg, zero, scratch);
+  ma_srl(temp, src.low, scratch);
+  ma_move(scratch, src.low);
+  as_sll(dest.low, src.high, 1);
+  ma_sll(dest.low, dest.low, SecondScratchReg);
+  as_or(dest.low, dest.low, temp);
+  ma_move(SecondScratchReg, src.high);
+  as_sll(dest.high, scratch, 1);
+  ma_and(scratch, shift, Imm32(0x3f));
+  ma_srl(temp, SecondScratchReg, scratch);
+  as_nor(SecondScratchReg, zero, scratch);
+  ma_sll(dest.high, dest.high, SecondScratchReg);
+  as_or(dest.high, dest.high, temp);
+  // Bit 5 of the amount set means the halves must be swapped.
+  ma_and(temp, scratch, Imm32(32));
+  as_movn(SecondScratchReg, dest.high, temp);
+  as_movn(dest.high, dest.low, temp);
+  as_movn(dest.low, SecondScratchReg, temp);
+}
+
+// Sets dest to the boolean result of comparing lhs and rhs under `cond`;
+// pointer-width variant (pointers are 32-bit here, so both forward to the
+// same ma_cmp_set helper).
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+  ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// 32-bit compare-and-set; see cmpPtrSet above.
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+  ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// ===============================================================
+// Bit counting functions
+
+// Count leading zeros of a 64-bit value: clz(high), plus clz(low) only
+// when the high word is zero (movn clears the low-word count otherwise;
+// clz of a zero high word is 32, giving 32 + clz(low)).
+void MacroAssembler::clz64(Register64 src, Register dest) {
+  as_clz(ScratchRegister, src.high);
+  as_clz(SecondScratchReg, src.low);
+  as_movn(SecondScratchReg, zero, src.high);
+  as_addu(dest, ScratchRegister, SecondScratchReg);
+}
+
+// Count trailing zeros of a 64-bit value: select low word if nonzero,
+// else the high word (movz/movn pair), take its ctz, and add 32 when the
+// low word was zero.
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+  as_movz(SecondScratchReg, src.high, src.low);
+  as_movn(SecondScratchReg, src.low, src.low);
+  ma_ctz(SecondScratchReg, SecondScratchReg);
+  ma_li(ScratchRegister, Imm32(0x20));
+  as_movn(ScratchRegister, zero, src.low);
+  as_addu(dest, SecondScratchReg, ScratchRegister);
+}
+
+// 64-bit population count using the classic SWAR bit tricks on each word
+// (0x55..., 0x33..., 0x0F0F... masks), summing both halves before the
+// final 0x01010101 multiply; the 64-bit result's high word is zero.
+void MacroAssembler::popcnt64(Register64 src, Register64 dest, Register tmp) {
+  MOZ_ASSERT(dest.low != tmp);
+  MOZ_ASSERT(dest.high != tmp);
+  MOZ_ASSERT(dest.low != dest.high);
+
+  // Pairwise bit sums: x - ((x >> 1) & 0x55555555), for each word.
+  as_srl(tmp, src.low, 1);
+  as_srl(SecondScratchReg, src.high, 1);
+  ma_li(ScratchRegister, Imm32(0x55555555));
+  as_and(tmp, tmp, ScratchRegister);
+  as_subu(tmp, src.low, tmp);
+  as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+  as_subu(SecondScratchReg, src.high, SecondScratchReg);
+  // Nibble sums with the 0x33333333 mask, per word.
+  ma_li(ScratchRegister, Imm32(0x33333333));
+  as_and(dest.low, tmp, ScratchRegister);
+  as_srl(tmp, tmp, 2);
+  as_and(tmp, tmp, ScratchRegister);
+  as_addu(tmp, dest.low, tmp);
+  as_and(dest.high, SecondScratchReg, ScratchRegister);
+  as_srl(SecondScratchReg, SecondScratchReg, 2);
+  as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+  as_addu(SecondScratchReg, dest.high, SecondScratchReg);
+  // Byte sums: fold the two words together, mask with 0x0F0F0F0F, then
+  // sum all bytes via the 0x01010101 multiply and take the top byte.
+  ma_li(ScratchRegister, Imm32(0x0F0F0F0F));
+  as_addu(tmp, SecondScratchReg, tmp);
+  as_srl(dest.low, tmp, 4);
+  as_and(dest.low, dest.low, ScratchRegister);
+  as_and(tmp, tmp, ScratchRegister);
+  as_addu(dest.low, dest.low, tmp);
+  ma_mul(dest.low, dest.low, Imm32(0x01010101));
+  as_srl(dest.low, dest.low, 24);
+  ma_move(dest.high, zero);
+}
+
+// ===============================================================
+// Branch functions
+
+// Branch if the 64-bit value at lhs differs from the immediate val.
+// Only NotEqual is supported: the two 32-bit halves can then be tested
+// independently, taking the branch as soon as either half differs.
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+                              Label* label) {
+  MOZ_ASSERT(cond == Assembler::NotEqual,
+             "other condition codes not supported");
+
+  branch32(cond, lhs, val.firstHalf(), label);
+  branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)),
+           val.secondHalf(), label);
+}
+
+// Branch if the 64-bit values at lhs and rhs differ, comparing the two
+// 32-bit halves independently through the caller-provided scratch register.
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+                              const Address& rhs, Register scratch,
+                              Label* label) {
+  MOZ_ASSERT(cond == Assembler::NotEqual,
+             "other condition codes not supported");
+  // scratch must not alias either base, or the loads below would clobber it.
+  MOZ_ASSERT(lhs.base != scratch);
+  MOZ_ASSERT(rhs.base != scratch);
+
+  load32(rhs, scratch);
+  branch32(cond, lhs, scratch, label);
+
+  load32(Address(rhs.base, rhs.offset + sizeof(uint32_t)), scratch);
+  branch32(cond, Address(lhs.base, lhs.offset + sizeof(uint32_t)), scratch,
+           label);
+}
+
+// Branch to `success` if `lhs cond val` holds for the 64-bit pair lhs, and
+// optionally to `fail` otherwise. Comparisons against zero are specialized
+// to shorter sequences; the general case goes through ma_cmp64, which
+// leaves a zero/non-zero flag value in SecondScratchReg.
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+                              Label* success, Label* fail) {
+  if (val.value == 0) {
+    switch (cond) {
+      case Assembler::Equal:
+      case Assembler::BelowOrEqual:
+      case Assembler::NotEqual:
+      case Assembler::Above:
+        // Against zero these reduce to testing whether any bit is set.
+        as_or(ScratchRegister, lhs.high, lhs.low);
+        ma_b(ScratchRegister, ScratchRegister, success,
+             (cond == Assembler::Equal || cond == Assembler::BelowOrEqual)
+                 ? Assembler::Zero
+                 : Assembler::NonZero);
+        break;
+      case Assembler::LessThan:
+      case Assembler::GreaterThanOrEqual:
+        // Sign of the high word alone decides < 0 / >= 0.
+        ma_b(lhs.high, Imm32(0), success, cond);
+        break;
+      case Assembler::LessThanOrEqual:
+      case Assembler::GreaterThan:
+        // lhs > 0 iff the sign mask (0 or -1 from sra) is unsigned-less
+        // than (high | low); <= 0 is the inverse.
+        as_or(SecondScratchReg, lhs.high, lhs.low);
+        as_sra(ScratchRegister, lhs.high, 31);
+        as_sltu(ScratchRegister, ScratchRegister, SecondScratchReg);
+        ma_b(ScratchRegister, ScratchRegister, success,
+             (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
+                                                  : Assembler::NonZero);
+        break;
+      case Assembler::Below:
+        // This condition is always false. No branch required.
+        break;
+      case Assembler::AboveOrEqual:
+        // Unsigned >= 0 is always true.
+        ma_b(success);
+        break;
+      default:
+        MOZ_CRASH("Condition code not supported");
+    }
+    return;
+  }
+
+  Condition c = ma_cmp64(cond, lhs, val, SecondScratchReg);
+  ma_b(SecondScratchReg, SecondScratchReg, success, c);
+  if (fail) {
+    jump(fail);
+  }
+}
+
+// Register/register variant: ma_cmp64 materializes the comparison as a
+// zero/non-zero value in SecondScratchReg and returns which of Zero/NonZero
+// means "condition holds".
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+                              Label* success, Label* fail) {
+  Condition c = ma_cmp64(cond, lhs, rhs, SecondScratchReg);
+  ma_b(SecondScratchReg, SecondScratchReg, success, c);
+  if (fail) {
+    jump(fail);
+  }
+}
+
+// Private values are stored as plain pointers on this platform, so a
+// private-pointer compare is just a pointer compare.
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+                                      Register rhs, Label* label) {
+  branchPtr(cond, lhs, rhs, label);
+}
+
+// Branch on `lhs & rhs` for 64-bit operands. Only self-tests are supported
+// for Zero/NonZero (lhs must equal rhs), which reduces to OR-ing the two
+// halves; Signed/NotSigned only need the sign of the high word.
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+                                  Register64 rhs, Register temp, L label) {
+  if (cond == Assembler::Zero || cond == Assembler::NonZero) {
+    MOZ_ASSERT(lhs.low == rhs.low);
+    MOZ_ASSERT(lhs.high == rhs.high);
+    as_or(ScratchRegister, lhs.low, lhs.high);
+    ma_b(ScratchRegister, ScratchRegister, label, cond);
+  } else if (cond == Assembler::Signed || cond == Assembler::NotSigned) {
+    branchTest32(cond, lhs.high, rhs.high, label);
+  } else {
+    MOZ_CRASH("Unsupported condition");
+  }
+}
+
+// On the nunbox32 layout a Value is split across two registers, so the
+// type-based tests below simply forward the tag register to the
+// corresponding tag-test overload.
+void MacroAssembler::branchTestUndefined(Condition cond,
+                                         const ValueOperand& value,
+                                         Label* label) {
+  branchTestUndefined(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+                                     Label* label) {
+  branchTestInt32(cond, value.typeReg(), label);
+}
+
+// An int32 Value is truthy iff its payload word is non-zero.
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+                                           Label* label) {
+  ScratchRegisterScope scratch(*this);
+  as_and(scratch, value.payloadReg(), value.payloadReg());
+  ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+// Doubles occupy every tag value below JSVAL_TAG_CLEAR, so "is a double"
+// becomes an unsigned Below comparison on the tag (AboveOrEqual for the
+// negated test).
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  Condition actual = (cond == Equal) ? Below : AboveOrEqual;
+  ma_b(tag, ImmTag(JSVAL_TAG_CLEAR), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  branchTestDouble(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  branchTestNumber(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+                                       const ValueOperand& value,
+                                       Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(value.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), label, cond);
+}
+
+// A boolean Value is truthy iff its payload (0 or 1) is non-zero.
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+                                             Label* label) {
+  ma_b(value.payloadReg(), value.payloadReg(), label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  branchTestString(cond, value.typeReg(), label);
+}
+
+// A string is truthy iff its length is non-zero; load the length field
+// from the JSString payload and compare against 0.
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+                                            Label* label) {
+  Register string = value.payloadReg();
+  SecondScratchRegisterScope scratch2(*this);
+  ma_lw(scratch2, Address(string, JSString::offsetOfLength()));
+  ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  branchTestSymbol(cond, value.typeReg(), label);
+}
+
+// BaseIndex variant: extract the tag into the second scratch register
+// first, then dispatch to the tag-based test.
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  branchTestBigInt(cond, value.typeReg(), label);
+}
+
+// A BigInt is truthy iff it is non-zero, i.e. its digit length is non-zero.
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+                                            Label* label) {
+  Register bi = value.payloadReg();
+  SecondScratchRegisterScope scratch2(*this);
+  ma_lw(scratch2, Address(bi, BigInt::offsetOfDigitLength()));
+  ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+                                    Label* label) {
+  branchTestNull(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+                                      Label* label) {
+  branchTestObject(cond, value.typeReg(), label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+                                         const ValueOperand& value,
+                                         Label* label) {
+  branchTestPrimitive(cond, value.typeReg(), label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+                                     L label) {
+  ma_b(value.typeReg(), ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+// Test a magic Value for a specific JSWhyMagic reason. Both the tag and the
+// payload (the `why` word) must be checked: for Equal, a non-magic tag
+// short-circuits to notMagic; for NotEqual, a non-magic tag already makes
+// the test succeed.
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+                                     JSWhyMagic why, Label* label) {
+  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+
+  Label notMagic;
+  if (cond == Assembler::Equal) {
+    branchTestMagic(Assembler::NotEqual, valaddr, &notMagic);
+  } else {
+    branchTestMagic(Assembler::NotEqual, valaddr, label);
+  }
+
+  branch32(cond, ToPayload(valaddr), Imm32(why), label);
+  bind(&notMagic);
+}
+
+// Truncate a double toward zero into dest, branching to fail if the FPU
+// reports an invalid operation (CauseV in FCSR), i.e. NaN or a value out of
+// int32 range. Inexact results are accepted; hence "MaybeMod" semantics.
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+                                                        Register dest,
+                                                        Label* fail) {
+  as_truncwd(ScratchFloat32Reg, src);
+  as_cfc1(ScratchRegister, Assembler::FCSR);
+  moveFromFloat32(ScratchFloat32Reg, dest);
+  // Extract the single CauseV (invalid-operation) bit from FCSR.
+  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+// Same as above for a float32 source.
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+                                                         Register dest,
+                                                         Label* fail) {
+  as_truncws(ScratchFloat32Reg, src);
+  as_cfc1(ScratchRegister, Assembler::FCSR);
+  moveFromFloat32(ScratchFloat32Reg, dest);
+  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// Add 1 to the payload word of the int32 Value stored at addr.
+void MacroAssemblerMIPSCompat::incrementInt32Value(const Address& addr) {
+  asMasm().add32(Imm32(1), ToPayload(addr));
+}
+
+// dest = base + (index << scale) + offset.
+void MacroAssemblerMIPSCompat::computeEffectiveAddress(const BaseIndex& address,
+                                                       Register dest) {
+  computeScaledAddress(address, dest);
+  if (address.offset) {
+    asMasm().addPtr(Imm32(address.offset), dest);
+  }
+}
+
+// Return and pop n bytes of stack: load the return address from [sp],
+// release the frame, then jump through ra (with the mandatory delay-slot
+// nop after jr).
+void MacroAssemblerMIPSCompat::retn(Imm32 n) {
+  // pc <- [sp]; sp += n
+  loadPtr(Address(StackPointer, 0), ra);
+  asMasm().addPtr(n, StackPointer);
+  as_jr(ra);
+  as_nop();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_inl_h */
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.cpp b/js/src/jit/mips32/MacroAssembler-mips32.cpp
new file mode 100644
index 0000000000..6a07fff2c1
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -0,0 +1,2787 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MacroAssembler-mips32.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EndianUtils.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/MacroAssembler.h"
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/MoveEmitter.h"
+#include "jit/SharedICRegisters.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace jit;
+
+using mozilla::Abs;
+
+static const int32_t PAYLOAD_OFFSET = NUNBOX32_PAYLOAD_OFFSET;
+static const int32_t TAG_OFFSET = NUNBOX32_TYPE_OFFSET;
+
+static_assert(sizeof(intptr_t) == 4, "Not 64-bit clean.");
+
+void MacroAssemblerMIPSCompat::convertBoolToInt32(Register src, Register dest) {
+  // Note that C++ bool is only 1 byte, so zero extend it to clear the
+  // higher-order bits.
+  ma_and(dest, src, Imm32(0xff));
+}
+
+// Move the integer into the FPU and convert word -> double (cvt.d.w).
+void MacroAssemblerMIPSCompat::convertInt32ToDouble(Register src,
+                                                    FloatRegister dest) {
+  as_mtc1(src, dest);
+  as_cvtdw(dest, dest);
+}
+
+// Memory variant: load the word straight into the FPU, then convert.
+void MacroAssemblerMIPSCompat::convertInt32ToDouble(const Address& src,
+                                                    FloatRegister dest) {
+  ma_ls(dest, src);
+  as_cvtdw(dest, dest);
+}
+
+// BaseIndex variant: fold base + scaled index into ScratchRegister first.
+void MacroAssemblerMIPSCompat::convertInt32ToDouble(const BaseIndex& src,
+                                                    FloatRegister dest) {
+  computeScaledAddress(src, ScratchRegister);
+  convertInt32ToDouble(Address(ScratchRegister, src.offset), dest);
+}
+
+// Convert an unsigned 32-bit integer to double. The FPU convert treats the
+// word as signed, so values with the sign bit set are handled by building
+// the IEEE-754 double bit pattern directly: such values have bit 31 set,
+// which becomes the implicit leading 1 of the mantissa, so the exponent is
+// exactly 31 + bias and the remaining bits of src form the mantissa.
+void MacroAssemblerMIPSCompat::convertUInt32ToDouble(Register src,
+                                                     FloatRegister dest) {
+  Label positive, done;
+  ma_b(src, src, &positive, NotSigned, ShortJump);
+
+  const uint32_t kExponentShift =
+      mozilla::FloatingPoint<double>::kExponentShift - 32;
+  const uint32_t kExponent =
+      (31 + mozilla::FloatingPoint<double>::kExponentBias);
+
+  // High word: exponent field plus the top mantissa bits extracted from src.
+  ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
+  ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
+  ma_or(SecondScratchReg, ScratchRegister);
+  // Low word: the remaining mantissa bits (leading 1 dropped as implicit).
+  ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
+  moveToDoubleHi(SecondScratchReg, dest);
+  moveToDoubleLo(ScratchRegister, dest);
+
+  ma_b(&done, ShortJump);
+
+  bind(&positive);
+  convertInt32ToDouble(src, dest);
+
+  bind(&done);
+}
+
+// Same strategy as convertUInt32ToDouble: build an exact double first, then
+// round it to float32 with a single convert, so the result is correctly
+// rounded from the full 32-bit value.
+void MacroAssemblerMIPSCompat::convertUInt32ToFloat32(Register src,
+                                                      FloatRegister dest) {
+  Label positive, done;
+  ma_b(src, src, &positive, NotSigned, ShortJump);
+
+  const uint32_t kExponentShift =
+      mozilla::FloatingPoint<double>::kExponentShift - 32;
+  const uint32_t kExponent =
+      (31 + mozilla::FloatingPoint<double>::kExponentBias);
+
+  ma_ext(SecondScratchReg, src, 31 - kExponentShift, kExponentShift);
+  ma_li(ScratchRegister, Imm32(kExponent << kExponentShift));
+  ma_or(SecondScratchReg, ScratchRegister);
+  ma_sll(ScratchRegister, src, Imm32(kExponentShift + 1));
+  FloatRegister destDouble = dest.asDouble();
+  moveToDoubleHi(SecondScratchReg, destDouble);
+  moveToDoubleLo(ScratchRegister, destDouble);
+
+  convertDoubleToFloat32(destDouble, dest);
+
+  ma_b(&done, ShortJump);
+
+  bind(&positive);
+  convertInt32ToFloat32(src, dest);
+
+  bind(&done);
+}
+
+// Narrow a double to float32 (cvt.s.d).
+void MacroAssemblerMIPSCompat::convertDoubleToFloat32(FloatRegister src,
+                                                      FloatRegister dest) {
+  as_cvtsd(dest, src);
+}
+
+// Layout of the FP exception "cause" field inside FCSR: position of the
+// first (Inexact) cause bit, the number of cause bits up to and including
+// Invalid, and a mask selecting just Inexact|Invalid relative to CauseI
+// (i.e. after the field has been right-shifted to bit 0 by ma_ext).
+const int CauseBitPos = int(Assembler::CauseI);
+const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
+const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
+                           (1 << int(Assembler::CauseV))) >>
+                          int(Assembler::CauseI);
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+// When negativeZeroCheck is set, -0.0 also fails: it is detected by
+// comparing the raw bit pattern (hi ^ INT32_MIN) | lo against zero, which
+// is zero exactly for hi == 0x80000000, lo == 0.
+void MacroAssemblerMIPSCompat::convertDoubleToInt32(FloatRegister src,
+                                                    Register dest, Label* fail,
+                                                    bool negativeZeroCheck) {
+  if (negativeZeroCheck) {
+    moveFromDoubleHi(src, dest);
+    moveFromDoubleLo(src, SecondScratchReg);
+    ma_xor(dest, Imm32(INT32_MIN));
+    ma_or(dest, SecondScratchReg);
+    ma_b(dest, Imm32(0), fail, Assembler::Equal);
+  }
+
+  // Truncate double to int ; if result is inexact or invalid fail.
+  as_truncwd(ScratchFloat32Reg, src);
+  as_cfc1(ScratchRegister, Assembler::FCSR);
+  moveFromFloat32(ScratchFloat32Reg, dest);
+  ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+  // Here adding the masking andi instruction just for a precaution.
+  // For the instruction of trunc.*.*, the Floating Point Exceptions can be
+  // only Inexact, Invalid Operation, Unimplemented Operation.
+  // Leaving it maybe is also ok.
+  as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, a bailout is taken to
+// the given snapshot. This function overwrites the scratch float register.
+// For float32, -0.0 is a single word whose bit pattern equals INT32_MIN.
+void MacroAssemblerMIPSCompat::convertFloat32ToInt32(FloatRegister src,
+                                                     Register dest, Label* fail,
+                                                     bool negativeZeroCheck) {
+  if (negativeZeroCheck) {
+    moveFromFloat32(src, dest);
+    ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+  }
+
+  as_truncws(ScratchFloat32Reg, src);
+  as_cfc1(ScratchRegister, Assembler::FCSR);
+  moveFromFloat32(ScratchFloat32Reg, dest);
+  ma_ext(ScratchRegister, ScratchRegister, CauseBitPos, CauseBitCount);
+  as_andi(ScratchRegister, ScratchRegister, CauseIOrVMask);
+  ma_b(ScratchRegister, Imm32(0), fail, Assembler::NotEqual);
+}
+
+// Widen a float32 to double (cvt.d.s).
+void MacroAssemblerMIPSCompat::convertFloat32ToDouble(FloatRegister src,
+                                                      FloatRegister dest) {
+  as_cvtds(dest, src);
+}
+
+// Move the integer into the FPU and convert word -> single (cvt.s.w).
+void MacroAssemblerMIPSCompat::convertInt32ToFloat32(Register src,
+                                                     FloatRegister dest) {
+  as_mtc1(src, dest);
+  as_cvtsw(dest, dest);
+}
+
+void MacroAssemblerMIPSCompat::convertInt32ToFloat32(const Address& src,
+                                                     FloatRegister dest) {
+  ma_ls(dest, src);
+  as_cvtsw(dest, dest);
+}
+
+// Load the (not yet known) address of a CodeLabel into dest. Emits a
+// patchable immediate-load sequence and records its buffer offset on the
+// label so the final address can be patched in later.
+void MacroAssemblerMIPS::ma_li(Register dest, CodeLabel* label) {
+  BufferOffset bo = m_buffer.nextOffset();
+  ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+  label->patchAt()->bind(bo.getOffset());
+  label->setLinkMode(CodeLabel::MoveImmediate);
+}
+
+// On mips32 a word is 32 bits, so ImmWord loads reduce to Imm32 loads.
+void MacroAssemblerMIPS::ma_li(Register dest, ImmWord imm) {
+  ma_li(dest, Imm32(uint32_t(imm.value)));
+}
+
+void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmPtr imm) {
+  ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerMIPS::ma_liPatchable(Register dest, ImmWord imm) {
+  ma_liPatchable(dest, Imm32(int32_t(imm.value)));
+}
+
+// Arithmetic-based ops.
+
+// Add.
+// rd = rs + rt, branching to overflow on signed 32-bit overflow.
+// Signed overflow occurs iff the operands have the same sign and the
+// result's sign differs from them.
+void MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs,
+                                            Register rt, Label* overflow) {
+  MOZ_ASSERT_IF(rs == rd, rs != rt);
+  MOZ_ASSERT(rs != ScratchRegister);
+  MOZ_ASSERT(rt != ScratchRegister);
+  MOZ_ASSERT(rd != rt);
+  MOZ_ASSERT(rd != ScratchRegister);
+  MOZ_ASSERT(rd != SecondScratchReg);
+
+  if (rs == rt) {
+    // Doubling: overflow iff the result's sign differs from the operand's,
+    // i.e. (rs ^ rd) has its sign bit set.
+    as_addu(rd, rs, rs);
+    as_xor(SecondScratchReg, rs, rd);
+    ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+    return;
+  }
+
+  // If different sign, no overflow
+  as_xor(ScratchRegister, rs, rt);
+
+  as_addu(rd, rs, rt);
+  as_nor(ScratchRegister, ScratchRegister, zero);
+  // If different sign, then overflow
+  as_xor(SecondScratchReg, rt, rd);
+  as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+  // Both conditions hold iff the combined sign bit is set (value < 0).
+  ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+}
+
+// rd = rs + imm, branching to overflow on signed 32-bit overflow.
+// For imm > 0 overflow requires rs >= 0 and rd < 0: ~rs & rd has its sign
+// bit set exactly then. For imm <= 0 overflow requires rs < 0 and rd >= 0:
+// rs & ~rd has its sign bit set exactly then.
+void MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm,
+                                            Label* overflow) {
+  MOZ_ASSERT(rs != ScratchRegister);
+  MOZ_ASSERT(rs != SecondScratchReg);
+  MOZ_ASSERT(rd != ScratchRegister);
+  MOZ_ASSERT(rd != SecondScratchReg);
+
+  Register rs_copy = rs;
+
+  if (imm.value > 0) {
+    as_nor(ScratchRegister, rs, zero);
+  } else if (rs == rd) {
+    // rd will clobber rs below; keep the original value for the sign test.
+    ma_move(ScratchRegister, rs);
+    rs_copy = ScratchRegister;
+  }
+
+  if (Imm16::IsInSignedRange(imm.value)) {
+    as_addiu(rd, rs, imm.value);
+  } else {
+    ma_li(SecondScratchReg, imm);
+    as_addu(rd, rs, SecondScratchReg);
+  }
+
+  if (imm.value > 0) {
+    as_and(ScratchRegister, ScratchRegister, rd);
+  } else {
+    as_nor(SecondScratchReg, rd, zero);
+    as_and(ScratchRegister, rs_copy, SecondScratchReg);
+  }
+
+  ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
+}
+
+// Subtract.
+// rd = rs - rt, branching to overflow on signed 32-bit overflow.
+// Subtraction overflows iff the operands have different signs and the
+// result's sign differs from the minuend's.
+void MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs,
+                                            Register rt, Label* overflow) {
+  // The rs == rt case should probably be folded at MIR stage.
+  // Happens for Number_isInteger*. Not worth specializing here.
+  MOZ_ASSERT_IF(rs == rd, rs != rt);
+  MOZ_ASSERT(rs != SecondScratchReg);
+  MOZ_ASSERT(rt != SecondScratchReg);
+  MOZ_ASSERT(rd != rt);
+  MOZ_ASSERT(rd != ScratchRegister);
+  MOZ_ASSERT(rd != SecondScratchReg);
+
+  Register rs_copy = rs;
+
+  if (rs == rd) {
+    // rd will clobber rs; preserve the minuend for the sign tests below.
+    ma_move(SecondScratchReg, rs);
+    rs_copy = SecondScratchReg;
+  }
+
+  as_subu(rd, rs, rt);
+  // If same sign, no overflow
+  as_xor(ScratchRegister, rs_copy, rt);
+  // If different sign, then overflow
+  as_xor(SecondScratchReg, rs_copy, rd);
+  as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+  ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+}
+
+// Memory.
+
+// Load `size` bytes from address into dest, with the given sign/zero
+// extension. Offsets outside the signed-16-bit range of the base
+// instructions are handled either via Loongson's indexed loads or by
+// folding the offset into ScratchRegister first.
+void MacroAssemblerMIPS::ma_load(Register dest, Address address,
+                                 LoadStoreSize size,
+                                 LoadStoreExtension extension) {
+  int16_t encodedOffset;
+  Register base;
+
+  // Loongson cores provide register-indexed loads (gsl*x), which avoid the
+  // extra addu; only the sign-extending forms exist, hence the
+  // ZeroExtend exclusion.
+  if (isLoongson() && ZeroExtend != extension &&
+      !Imm16::IsInSignedRange(address.offset)) {
+    ma_li(ScratchRegister, Imm32(address.offset));
+    base = address.base;
+
+    switch (size) {
+      case SizeByte:
+        as_gslbx(dest, base, ScratchRegister, 0);
+        break;
+      case SizeHalfWord:
+        as_gslhx(dest, base, ScratchRegister, 0);
+        break;
+      case SizeWord:
+        as_gslwx(dest, base, ScratchRegister, 0);
+        break;
+      case SizeDouble:
+        as_gsldx(dest, base, ScratchRegister, 0);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_load");
+    }
+    return;
+  }
+
+  // Generic path: materialize an out-of-range offset into the scratch
+  // register so the load itself uses offset 0.
+  if (!Imm16::IsInSignedRange(address.offset)) {
+    ma_li(ScratchRegister, Imm32(address.offset));
+    as_addu(ScratchRegister, address.base, ScratchRegister);
+    base = ScratchRegister;
+    encodedOffset = Imm16(0).encode();
+  } else {
+    encodedOffset = Imm16(address.offset).encode();
+    base = address.base;
+  }
+
+  switch (size) {
+    case SizeByte:
+      if (ZeroExtend == extension) {
+        as_lbu(dest, base, encodedOffset);
+      } else {
+        as_lb(dest, base, encodedOffset);
+      }
+      break;
+    case SizeHalfWord:
+      if (ZeroExtend == extension) {
+        as_lhu(dest, base, encodedOffset);
+      } else {
+        as_lh(dest, base, encodedOffset);
+      }
+      break;
+    case SizeWord:
+      as_lw(dest, base, encodedOffset);
+      break;
+    default:
+      MOZ_CRASH("Invalid argument for ma_load");
+  }
+}
+
+// Store `size` bytes of data to address. Mirrors ma_load: Loongson cores
+// use indexed stores (gss*x) for out-of-range offsets; otherwise the
+// offset is folded into ScratchRegister. The extension argument is unused
+// for stores.
+void MacroAssemblerMIPS::ma_store(Register data, Address address,
+                                  LoadStoreSize size,
+                                  LoadStoreExtension extension) {
+  int16_t encodedOffset;
+  Register base;
+
+  if (isLoongson() && !Imm16::IsInSignedRange(address.offset)) {
+    ma_li(ScratchRegister, Imm32(address.offset));
+    base = address.base;
+
+    switch (size) {
+      case SizeByte:
+        as_gssbx(data, base, ScratchRegister, 0);
+        break;
+      case SizeHalfWord:
+        as_gsshx(data, base, ScratchRegister, 0);
+        break;
+      case SizeWord:
+        as_gsswx(data, base, ScratchRegister, 0);
+        break;
+      case SizeDouble:
+        as_gssdx(data, base, ScratchRegister, 0);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_store");
+    }
+    return;
+  }
+
+  if (!Imm16::IsInSignedRange(address.offset)) {
+    ma_li(ScratchRegister, Imm32(address.offset));
+    as_addu(ScratchRegister, address.base, ScratchRegister);
+    base = ScratchRegister;
+    encodedOffset = Imm16(0).encode();
+  } else {
+    encodedOffset = Imm16(address.offset).encode();
+    base = address.base;
+  }
+
+  switch (size) {
+    case SizeByte:
+      as_sb(data, base, encodedOffset);
+      break;
+    case SizeHalfWord:
+      as_sh(data, base, encodedOffset);
+      break;
+    case SizeWord:
+      as_sw(data, base, encodedOffset);
+      break;
+    default:
+      MOZ_CRASH("Invalid argument for ma_store");
+  }
+}
+
+// dest = base + (index << scale). The BaseIndex offset is NOT applied here;
+// callers fold it in separately (see computeEffectiveAddress).
+void MacroAssemblerMIPSCompat::computeScaledAddress(const BaseIndex& address,
+                                                    Register dest) {
+  int32_t shift = Imm32::ShiftOf(address.scale).value;
+  if (shift) {
+    ma_sll(ScratchRegister, address.index, Imm32(shift));
+    as_addu(dest, address.base, ScratchRegister);
+  } else {
+    // TimesOne: skip the shift entirely.
+    as_addu(dest, address.base, address.index);
+  }
+}
+
+// Shortcut for when we know we're transferring 32 bits of data.
+void MacroAssemblerMIPS::ma_lw(Register data, Address address) {
+  ma_load(data, address, SizeWord);
+}
+
+void MacroAssemblerMIPS::ma_sw(Register data, Address address) {
+  ma_store(data, address, SizeWord);
+}
+
+// Store an immediate word: materialize it in ScratchRegister, then store,
+// using SecondScratchReg for the address when the offset exceeds the
+// signed-16-bit range of sw.
+void MacroAssemblerMIPS::ma_sw(Imm32 imm, Address address) {
+  MOZ_ASSERT(address.base != ScratchRegister);
+  ma_li(ScratchRegister, imm);
+
+  if (Imm16::IsInSignedRange(address.offset)) {
+    as_sw(ScratchRegister, address.base, address.offset);
+  } else {
+    MOZ_ASSERT(address.base != SecondScratchReg);
+
+    ma_li(SecondScratchReg, Imm32(address.offset));
+    as_addu(SecondScratchReg, address.base, SecondScratchReg);
+    as_sw(ScratchRegister, SecondScratchReg, 0);
+  }
+}
+
+void MacroAssemblerMIPS::ma_sw(Register data, BaseIndex& address) {
+  ma_store(data, address, SizeWord);
+}
+
+// Pop one word: load from [sp], then release the slot.
+void MacroAssemblerMIPS::ma_pop(Register r) {
+  as_lw(r, StackPointer, 0);
+  as_addiu(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+// Push one word: reserve a slot, then store. Pushing sp itself must store
+// the pre-decrement value, so it is copied to the scratch register first.
+void MacroAssemblerMIPS::ma_push(Register r) {
+  if (r == sp) {
+    // Pushing sp requires one more instruction.
+    ma_move(ScratchRegister, sp);
+    r = ScratchRegister;
+  }
+
+  as_addiu(StackPointer, StackPointer, -sizeof(intptr_t));
+  as_sw(r, StackPointer, 0);
+}
+
+// Branches when done from within mips-specific code.
+// Branch overloads with a memory operand: load the word into a scratch
+// register, then dispatch to the register/register (or register/immediate)
+// branch.
+void MacroAssemblerMIPS::ma_b(Register lhs, Address addr, Label* label,
+                              Condition c, JumpKind jumpKind) {
+  // The load below clobbers ScratchRegister, so lhs must not alias it.
+  MOZ_ASSERT(lhs != ScratchRegister);
+  ma_lw(ScratchRegister, addr);
+  ma_b(lhs, ScratchRegister, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS::ma_b(Address addr, Imm32 imm, Label* label,
+                              Condition c, JumpKind jumpKind) {
+  // SecondScratchReg is used here so the imm overload can use the primary
+  // scratch register internally.
+  ma_lw(SecondScratchReg, addr);
+  ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+void MacroAssemblerMIPS::ma_b(Address addr, ImmGCPtr imm, Label* label,
+                              Condition c, JumpKind jumpKind) {
+  ma_lw(SecondScratchReg, addr);
+  ma_b(SecondScratchReg, imm, label, c, jumpKind);
+}
+
+// Branch-and-link (call) to label. Bound labels always get the long-jump
+// form (lui/ori + jalr) so the return address lands after a fixed-size
+// block; unbound labels emit an open branch linked into the label's patch
+// chain, with nops reserving space for the eventual long jump.
+void MacroAssemblerMIPS::ma_bal(Label* label, DelaySlotFill delaySlotFill) {
+  spew("branch .Llabel %p\n", label);
+  if (label->bound()) {
+    // Generate the long jump for calls because return address has to be
+    // the address after the reserved block.
+    addLongJump(nextOffset(), BufferOffset(label->offset()));
+    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
+    as_jalr(ScratchRegister);
+    if (delaySlotFill == FillDelaySlot) {
+      as_nop();
+    }
+    return;
+  }
+
+  // Second word holds a pointer to the next branch in label's chain.
+  uint32_t nextInChain =
+      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+  // Make the whole branch continous in the buffer.
+  m_buffer.ensureSpace(4 * sizeof(uint32_t));
+
+  spew("bal .Llabel %p\n", label);
+  BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+  writeInst(nextInChain);
+  if (!oom()) {
+    label->use(bo.getOffset());
+  }
+  // Leave space for long jump.
+  as_nop();
+  if (delaySlotFill == FillDelaySlot) {
+    as_nop();
+  }
+}
+
+// Core branch emitter. `code` is a branch instruction with a zero offset.
+// Bound labels within 18-bit signed range use the short form (branch +
+// delay-slot nop); unconditional long jumps use lui/ori + jr; conditional
+// long jumps invert the condition to skip over the jump sequence. Unbound
+// labels emit an open branch threaded onto the label's chain, reserving
+// enough nops that the branch can later be patched to any of these forms.
+void MacroAssemblerMIPS::branchWithCode(InstImm code, Label* label,
+                                        JumpKind jumpKind) {
+  spew("branch .Llabel %p", label);
+  // bgezal (branch-and-link) must go through ma_bal, not here.
+  MOZ_ASSERT(code.encode() !=
+             InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0)).encode());
+  InstImm inst_beq = InstImm(op_beq, zero, zero, BOffImm16(0));
+
+  if (label->bound()) {
+    int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+    if (BOffImm16::IsInRange(offset)) {
+      jumpKind = ShortJump;
+    }
+
+    if (jumpKind == ShortJump) {
+      MOZ_ASSERT(BOffImm16::IsInRange(offset));
+      code.setBOffImm16(BOffImm16(offset));
+#ifdef JS_JITSPEW
+      decodeBranchInstAndSpew(code);
+#endif
+      writeInst(code.encode());
+      as_nop();
+      return;
+    }
+
+    if (code.encode() == inst_beq.encode()) {
+      // Handle long jump
+      addLongJump(nextOffset(), BufferOffset(label->offset()));
+      ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
+      as_jr(ScratchRegister);
+      as_nop();
+      return;
+    }
+
+    // Handle long conditional branch: emit the inverted branch skipping the
+    // 5-instruction jump sequence below.
+    spew("invert branch .Llabel %p", label);
+    InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
+#ifdef JS_JITSPEW
+    decodeBranchInstAndSpew(code_r);
+#endif
+    writeInst(code_r.encode());
+
+    // No need for a "nop" here because we can clobber scratch.
+    addLongJump(nextOffset(), BufferOffset(label->offset()));
+    ma_liPatchable(ScratchRegister, Imm32(LabelBase::INVALID_OFFSET));
+    as_jr(ScratchRegister);
+    as_nop();
+    return;
+  }
+
+  // Generate open jump and link it to a label.
+
+  // Second word holds a pointer to the next branch in label's chain.
+  uint32_t nextInChain =
+      label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+  if (jumpKind == ShortJump) {
+    // Make the whole branch continous in the buffer.
+    m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+    // Indicate that this is short jump with offset 4.
+    code.setBOffImm16(BOffImm16(4));
+#ifdef JS_JITSPEW
+    decodeBranchInstAndSpew(code);
+#endif
+    BufferOffset bo = writeInst(code.encode());
+    writeInst(nextInChain);
+    if (!oom()) {
+      label->use(bo.getOffset());
+    }
+    return;
+  }
+
+  bool conditional = code.encode() != inst_beq.encode();
+
+  // Make the whole branch continous in the buffer.
+  m_buffer.ensureSpace((conditional ? 5 : 4) * sizeof(uint32_t));
+
+#ifdef JS_JITSPEW
+  decodeBranchInstAndSpew(code);
+#endif
+  BufferOffset bo = writeInst(code.encode());
+  writeInst(nextInChain);
+  if (!oom()) {
+    label->use(bo.getOffset());
+  }
+  // Leave space for potential long jump: conditional branches need one more
+  // slot for the inverted-branch form.
+  as_nop();
+  as_nop();
+  if (conditional) {
+    as_nop();
+  }
+}
+
+// dest = (lhs cond val) ? 1 : 0 for a 64-bit comparison against an
+// immediate. Comparisons against zero get dedicated short sequences; the
+// general case normalizes ma_cmp64's raw output into a boolean.
+void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
+                                        Imm64 val, Register dest) {
+  if (val.value == 0) {
+    switch (cond) {
+      case Assembler::Equal:
+      case Assembler::BelowOrEqual:
+        // (high | low) == 0, via sltiu x, 1.
+        as_or(dest, lhs.high, lhs.low);
+        as_sltiu(dest, dest, 1);
+        break;
+      case Assembler::NotEqual:
+      case Assembler::Above:
+        // (high | low) != 0, via sltu zero < x.
+        as_or(dest, lhs.high, lhs.low);
+        as_sltu(dest, zero, dest);
+        break;
+      case Assembler::LessThan:
+      case Assembler::GreaterThanOrEqual:
+        // Sign of the high word decides; invert for >= 0.
+        as_slt(dest, lhs.high, zero);
+        if (cond == Assembler::GreaterThanOrEqual) {
+          as_xori(dest, dest, 1);
+        }
+        break;
+      case Assembler::GreaterThan:
+      case Assembler::LessThanOrEqual:
+        // lhs > 0 iff sign mask (0 or -1) is unsigned-less than high|low.
+        as_or(SecondScratchReg, lhs.high, lhs.low);
+        as_sra(ScratchRegister, lhs.high, 31);
+        as_sltu(dest, ScratchRegister, SecondScratchReg);
+        if (cond == Assembler::LessThanOrEqual) {
+          as_xori(dest, dest, 1);
+        }
+        break;
+      case Assembler::Below:
+      case Assembler::AboveOrEqual:
+        // Unsigned < 0 is always false; unsigned >= 0 is always true.
+        as_ori(dest, zero, cond == Assembler::AboveOrEqual ? 1 : 0);
+        break;
+      default:
+        MOZ_CRASH("Condition code not supported");
+        break;
+    }
+    return;
+  }
+
+  Condition c = ma_cmp64(cond, lhs, val, dest);
+
+  switch (cond) {
+    // For Equal/NotEqual cond ma_cmp64 dest holds non boolean result.
+    case Assembler::Equal:
+      as_sltiu(dest, dest, 1);
+      break;
+    case Assembler::NotEqual:
+      as_sltu(dest, zero, dest);
+      break;
+    default:
+      // ma_cmp64 produced a 0/1 value; flip it when Zero means "true".
+      if (c == Assembler::Zero) as_xori(dest, dest, 1);
+      break;
+  }
+}
+
+// dest = (lhs cond rhs) ? 1 : 0 for a 64-bit register/register comparison,
+// normalizing ma_cmp64's raw output into a boolean.
+void MacroAssemblerMIPSCompat::cmp64Set(Condition cond, Register64 lhs,
+                                        Register64 rhs, Register dest) {
+  Condition c = ma_cmp64(cond, lhs, rhs, dest);
+
+  switch (cond) {
+    // For Equal/NotEqual cond ma_cmp64 dest holds non boolean result.
+    case Assembler::Equal:
+      as_sltiu(dest, dest, 1);
+      break;
+    case Assembler::NotEqual:
+      as_sltu(dest, zero, dest);
+      break;
+    default:
+      // ma_cmp64 produced a 0/1 value; flip it when Zero means "true".
+      if (c == Assembler::Zero) as_xori(dest, dest, 1);
+      break;
+  }
+}
+
+// Evaluate a 64-bit comparison into dest and return which condition
+// (Zero/NonZero on dest) means the comparison holds. For Equal/NotEqual
+// dest is the OR of the half-word XORs (any non-zero value, not 0/1); for
+// the ordered conditions dest is 0 or 1: the high words are compared with
+// the appropriate signedness, and the low words (always unsigned) break
+// the tie when the high words are equal.
+Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
+                                                        Register64 lhs,
+                                                        Register64 rhs,
+                                                        Register dest) {
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+      as_xor(SecondScratchReg, lhs.high, rhs.high);
+      as_xor(ScratchRegister, lhs.low, rhs.low);
+      as_or(dest, SecondScratchReg, ScratchRegister);
+      return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
+      break;
+    case Assembler::LessThan:
+    case Assembler::GreaterThanOrEqual:
+      // dest = (lhs.high < rhs.high) | (highs equal & lhs.low <u rhs.low).
+      as_slt(SecondScratchReg, rhs.high, lhs.high);
+      as_sltu(ScratchRegister, lhs.low, rhs.low);
+      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      as_slt(ScratchRegister, lhs.high, rhs.high);
+      as_or(dest, ScratchRegister, SecondScratchReg);
+      return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
+                                                     : Assembler::NonZero;
+      break;
+    case Assembler::GreaterThan:
+    case Assembler::LessThanOrEqual:
+      // Mirror of the case above with operands swapped.
+      as_slt(SecondScratchReg, lhs.high, rhs.high);
+      as_sltu(ScratchRegister, rhs.low, lhs.low);
+      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      as_slt(ScratchRegister, rhs.high, lhs.high);
+      as_or(dest, ScratchRegister, SecondScratchReg);
+      return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
+                                                  : Assembler::NonZero;
+      break;
+    case Assembler::Below:
+    case Assembler::AboveOrEqual:
+      // Unsigned variant: high words compared with sltu instead of slt.
+      as_sltu(SecondScratchReg, rhs.high, lhs.high);
+      as_sltu(ScratchRegister, lhs.low, rhs.low);
+      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      as_sltu(ScratchRegister, lhs.high, rhs.high);
+      as_or(dest, ScratchRegister, SecondScratchReg);
+      return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
+                                               : Assembler::NonZero;
+      break;
+    case Assembler::Above:
+    case Assembler::BelowOrEqual:
+      as_sltu(SecondScratchReg, lhs.high, rhs.high);
+      as_sltu(ScratchRegister, rhs.low, lhs.low);
+      as_slt(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      as_sltu(ScratchRegister, rhs.high, lhs.high);
+      as_or(dest, ScratchRegister, SecondScratchReg);
+      return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
+                                               : Assembler::NonZero;
+      break;
+    default:
+      MOZ_CRASH("Condition code not supported");
+      break;
+  }
+}
+
+// Branchless compare of a 64-bit register pair against a 64-bit immediate.
+// Materializes a comparison result in |dest| and returns the condition
+// (Zero or NonZero) the caller must test |dest| against to realize |cond|.
+// Clobbers ScratchRegister and SecondScratchReg.  |val| must be non-zero
+// (asserted below).
+Assembler::Condition MacroAssemblerMIPSCompat::ma_cmp64(Condition cond,
+                                                        Register64 lhs,
+                                                        Imm64 val,
+                                                        Register dest) {
+  MOZ_ASSERT(val.value != 0);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+      // dest = (lhs.high ^ val.hi) | (lhs.low ^ val.low); zero iff equal.
+      ma_xor(SecondScratchReg, lhs.high, val.hi());
+      ma_xor(ScratchRegister, lhs.low, val.low());
+      as_or(dest, SecondScratchReg, ScratchRegister);
+      return (cond == Assembler::Equal) ? Assembler::Zero : Assembler::NonZero;
+      break;
+    case Assembler::LessThan:
+    case Assembler::GreaterThanOrEqual:
+      // SecondScratchReg = sign(lhs.high - val.hi) in {-1, 0, 1} (two slt's
+      // and a subtract).  When the high words are equal (0), fall through to
+      // the unsigned low-word comparison: dest = highCmp < lowCmp.
+      ma_li(SecondScratchReg, val.hi());
+      as_slt(ScratchRegister, lhs.high, SecondScratchReg);
+      as_slt(SecondScratchReg, SecondScratchReg, lhs.high);
+      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      ma_li(ScratchRegister, val.low());
+      as_sltu(ScratchRegister, lhs.low, ScratchRegister);
+      as_slt(dest, SecondScratchReg, ScratchRegister);
+      return (cond == Assembler::GreaterThanOrEqual) ? Assembler::Zero
+                                                     : Assembler::NonZero;
+      break;
+    case Assembler::GreaterThan:
+    case Assembler::LessThanOrEqual:
+      // Same scheme with the operand order reversed: sign(val.hi - lhs.high)
+      // combined with the reversed unsigned low-word comparison.
+      ma_li(SecondScratchReg, val.hi());
+      as_slt(ScratchRegister, SecondScratchReg, lhs.high);
+      as_slt(SecondScratchReg, lhs.high, SecondScratchReg);
+      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      ma_li(ScratchRegister, val.low());
+      as_sltu(ScratchRegister, ScratchRegister, lhs.low);
+      as_slt(dest, SecondScratchReg, ScratchRegister);
+      return (cond == Assembler::LessThanOrEqual) ? Assembler::Zero
+                                                  : Assembler::NonZero;
+      break;
+    case Assembler::Below:
+    case Assembler::AboveOrEqual:
+      // Unsigned variant of LessThan/GreaterThanOrEqual: high words compared
+      // with sltu instead of slt.
+      ma_li(SecondScratchReg, val.hi());
+      as_sltu(ScratchRegister, lhs.high, SecondScratchReg);
+      as_sltu(SecondScratchReg, SecondScratchReg, lhs.high);
+      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      ma_li(ScratchRegister, val.low());
+      as_sltu(ScratchRegister, lhs.low, ScratchRegister);
+      as_slt(dest, SecondScratchReg, ScratchRegister);
+      return (cond == Assembler::AboveOrEqual) ? Assembler::Zero
+                                               : Assembler::NonZero;
+      break;
+    case Assembler::Above:
+    case Assembler::BelowOrEqual:
+      // Unsigned variant of GreaterThan/LessThanOrEqual.
+      ma_li(SecondScratchReg, val.hi());
+      as_sltu(ScratchRegister, SecondScratchReg, lhs.high);
+      as_sltu(SecondScratchReg, lhs.high, SecondScratchReg);
+      as_subu(SecondScratchReg, SecondScratchReg, ScratchRegister);
+      ma_li(ScratchRegister, val.low());
+      as_sltu(ScratchRegister, ScratchRegister, lhs.low);
+      as_slt(dest, SecondScratchReg, ScratchRegister);
+      return (cond == Assembler::BelowOrEqual) ? Assembler::Zero
+                                               : Assembler::NonZero;
+      break;
+    default:
+      MOZ_CRASH("Condition code not supported");
+      break;
+  }
+}
+
+// fp instructions
+// Load the IEEE-754 bit pattern of |value| into the double register |dest|,
+// one 32-bit half at a time.  Zero halves are moved from $zero directly,
+// skipping the immediate materialization.  Clobbers ScratchRegister.
+void MacroAssemblerMIPS::ma_lid(FloatRegister dest, double value) {
+  struct DoubleStruct {
+    uint32_t lo;
+    uint32_t hi;
+  };
+  DoubleStruct intStruct = mozilla::BitwiseCast<DoubleStruct>(value);
+#if MOZ_BIG_ENDIAN()
+  // The struct above assumes little-endian layout of the double's words.
+  std::swap(intStruct.hi, intStruct.lo);
+#endif
+
+  // put hi part of 64 bit value into the odd register
+  if (intStruct.hi == 0) {
+    moveToDoubleHi(zero, dest);
+  } else {
+    ma_li(ScratchRegister, Imm32(intStruct.hi));
+    moveToDoubleHi(ScratchRegister, dest);
+  }
+
+  // put low part of 64 bit value into the even register
+  if (intStruct.lo == 0) {
+    moveToDoubleLo(zero, dest);
+  } else {
+    ma_li(ScratchRegister, Imm32(intStruct.lo));
+    moveToDoubleLo(ScratchRegister, dest);
+  }
+}
+
+// Move a double's raw bits into a nunbox Value operand: low word becomes the
+// payload register, high word the type register.
+void MacroAssemblerMIPS::ma_mv(FloatRegister src, ValueOperand dest) {
+  moveFromDoubleLo(src, dest.payloadReg());
+  moveFromDoubleHi(src, dest.typeReg());
+}
+
+// Inverse of the above: reassemble a double from a Value operand's two words.
+void MacroAssemblerMIPS::ma_mv(ValueOperand src, FloatRegister dest) {
+  moveToDoubleLo(src.payloadReg(), dest);
+  moveToDoubleHi(src.typeReg(), dest);
+}
+
+// FP load/store helpers.  Each uses the 16-bit-immediate instruction form
+// when the offset fits; otherwise it materializes the offset in
+// ScratchRegister and either issues a Loongson indexed access or adds the
+// offset into ScratchRegister and accesses with displacement 0.
+
+// Load a single-precision float.
+void MacroAssemblerMIPS::ma_ls(FloatRegister ft, Address address) {
+  if (Imm16::IsInSignedRange(address.offset)) {
+    as_lwc1(ft, address.base, address.offset);
+  } else {
+    MOZ_ASSERT(address.base != ScratchRegister);
+    ma_li(ScratchRegister, Imm32(address.offset));
+    if (isLoongson()) {
+      as_gslsx(ft, address.base, ScratchRegister, 0);
+    } else {
+      as_addu(ScratchRegister, address.base, ScratchRegister);
+      as_lwc1(ft, ScratchRegister, 0);
+    }
+  }
+}
+
+// Load a double-precision float.
+void MacroAssemblerMIPS::ma_ld(FloatRegister ft, Address address) {
+  if (Imm16::IsInSignedRange(address.offset)) {
+    as_ldc1(ft, address.base, address.offset);
+  } else {
+    MOZ_ASSERT(address.base != ScratchRegister);
+    ma_li(ScratchRegister, Imm32(address.offset));
+    if (isLoongson()) {
+      as_gsldx(ft, address.base, ScratchRegister, 0);
+    } else {
+      as_addu(ScratchRegister, address.base, ScratchRegister);
+      as_ldc1(ft, ScratchRegister, 0);
+    }
+  }
+}
+
+// Store a double-precision float.
+void MacroAssemblerMIPS::ma_sd(FloatRegister ft, Address address) {
+  if (Imm16::IsInSignedRange(address.offset)) {
+    as_sdc1(ft, address.base, address.offset);
+  } else {
+    MOZ_ASSERT(address.base != ScratchRegister);
+    ma_li(ScratchRegister, Imm32(address.offset));
+    if (isLoongson()) {
+      as_gssdx(ft, address.base, ScratchRegister, 0);
+    } else {
+      as_addu(ScratchRegister, address.base, ScratchRegister);
+      as_sdc1(ft, ScratchRegister, 0);
+    }
+  }
+}
+
+// Store a single-precision float.
+void MacroAssemblerMIPS::ma_ss(FloatRegister ft, Address address) {
+  if (Imm16::IsInSignedRange(address.offset)) {
+    as_swc1(ft, address.base, address.offset);
+  } else {
+    MOZ_ASSERT(address.base != ScratchRegister);
+    ma_li(ScratchRegister, Imm32(address.offset));
+    if (isLoongson()) {
+      as_gsssx(ft, address.base, ScratchRegister, 0);
+    } else {
+      as_addu(ScratchRegister, address.base, ScratchRegister);
+      as_swc1(ft, ScratchRegister, 0);
+    }
+  }
+}
+
+// Load a double as two 32-bit word accesses into the even/odd single-float
+// register pair.  Both word offsets must fit the 16-bit immediate (asserted).
+void MacroAssemblerMIPS::ma_ldc1WordAligned(FloatRegister ft, Register base,
+                                            int32_t off) {
+  MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
+             Imm16::IsInSignedRange(off + TAG_OFFSET));
+
+  as_lwc1(ft, base, off + PAYLOAD_OFFSET);
+  as_lwc1(getOddPair(ft), base, off + TAG_OFFSET);
+}
+
+// Store a double as two 32-bit word accesses from the even/odd register pair.
+void MacroAssemblerMIPS::ma_sdc1WordAligned(FloatRegister ft, Register base,
+                                            int32_t off) {
+  MOZ_ASSERT(Imm16::IsInSignedRange(off + PAYLOAD_OFFSET) &&
+             Imm16::IsInSignedRange(off + TAG_OFFSET));
+
+  as_swc1(ft, base, off + PAYLOAD_OFFSET);
+  as_swc1(getOddPair(ft), base, off + TAG_OFFSET);
+}
+
+// Pop an FP register from the stack, sized by the register kind (double vs.
+// single), then free the slot.
+void MacroAssemblerMIPS::ma_pop(FloatRegister f) {
+  if (f.isDouble()) {
+    ma_ldc1WordAligned(f, StackPointer, 0);
+  } else {
+    as_lwc1(f, StackPointer, 0);
+  }
+
+  as_addiu(StackPointer, StackPointer, f.size());
+}
+
+// Push an FP register: reserve f.size() bytes, then store.
+void MacroAssemblerMIPS::ma_push(FloatRegister f) {
+  as_addiu(StackPointer, StackPointer, -f.size());
+
+  if (f.isDouble()) {
+    ma_sdc1WordAligned(f, StackPointer, 0);
+  } else {
+    as_swc1(f, StackPointer, 0);
+  }
+}
+
+// Push a minimal fake exit frame (descriptor + return address) for
+// out-of-line code.  Always succeeds.
+bool MacroAssemblerMIPSCompat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+  uint32_t descriptor = MakeFrameDescriptor(
+      asMasm().framePushed(), FrameType::IonJS, ExitFrameLayout::Size());
+
+  asMasm().Push(Imm32(descriptor));  // descriptor_
+  asMasm().Push(ImmPtr(fakeReturnAddr));
+
+  return true;
+}
+
+// Register/immediate move wrappers.  On mips32 a pointer is a 32-bit word,
+// so movePtr and move32 bottom out in the same ma_li/ma_move helpers.
+void MacroAssemblerMIPSCompat::move32(Imm32 imm, Register dest) {
+  ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPSCompat::move32(Register src, Register dest) {
+  ma_move(dest, src);
+}
+
+void MacroAssemblerMIPSCompat::movePtr(Register src, Register dest) {
+  ma_move(dest, src);
+}
+void MacroAssemblerMIPSCompat::movePtr(ImmWord imm, Register dest) {
+  ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPSCompat::movePtr(ImmGCPtr imm, Register dest) {
+  ma_li(dest, imm);
+}
+
+void MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest) {
+  movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+// Record a symbolic-address relocation and emit a patchable immediate load;
+// the -1 placeholder is rewritten when the address is resolved.
+void MacroAssemblerMIPSCompat::movePtr(wasm::SymbolicAddress imm,
+                                       Register dest) {
+  append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+  ma_liPatchable(dest, ImmWord(-1));
+}
+
+// Sized-load wrappers: each forwards to ma_load with the matching access
+// size and (for sub-word loads) extension mode.
+void MacroAssemblerMIPSCompat::load8ZeroExtend(const Address& address,
+                                               Register dest) {
+  ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load8ZeroExtend(const BaseIndex& src,
+                                               Register dest) {
+  ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load8SignExtend(const Address& address,
+                                               Register dest) {
+  ma_load(dest, address, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load8SignExtend(const BaseIndex& src,
+                                               Register dest) {
+  ma_load(dest, src, SizeByte, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16ZeroExtend(const Address& address,
+                                                Register dest) {
+  ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16ZeroExtend(const BaseIndex& src,
+                                                Register dest) {
+  ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16SignExtend(const Address& address,
+                                                Register dest) {
+  ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load16SignExtend(const BaseIndex& src,
+                                                Register dest) {
+  ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+void MacroAssemblerMIPSCompat::load32(const Address& address, Register dest) {
+  ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::load32(const BaseIndex& address, Register dest) {
+  ma_load(dest, address, SizeWord);
+}
+
+// Absolute/symbolic forms first materialize the address in ScratchRegister.
+void MacroAssemblerMIPSCompat::load32(AbsoluteAddress address, Register dest) {
+  movePtr(ImmPtr(address.addr), ScratchRegister);
+  load32(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPSCompat::load32(wasm::SymbolicAddress address,
+                                      Register dest) {
+  movePtr(address, ScratchRegister);
+  load32(Address(ScratchRegister, 0), dest);
+}
+
+// Pointer loads: a pointer is one 32-bit word on mips32, so these mirror
+// the load32 variants.
+void MacroAssemblerMIPSCompat::loadPtr(const Address& address, Register dest) {
+  ma_load(dest, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::loadPtr(const BaseIndex& src, Register dest) {
+  ma_load(dest, src, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::loadPtr(AbsoluteAddress address, Register dest) {
+  movePtr(ImmPtr(address.addr), ScratchRegister);
+  loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+void MacroAssemblerMIPSCompat::loadPtr(wasm::SymbolicAddress address,
+                                       Register dest) {
+  movePtr(address, ScratchRegister);
+  loadPtr(Address(ScratchRegister, 0), dest);
+}
+
+// Load only the payload word of the Value stored at |address|.
+void MacroAssemblerMIPSCompat::loadPrivate(const Address& address,
+                                           Register dest) {
+  ma_lw(dest, Address(address.base, address.offset + PAYLOAD_OFFSET));
+}
+
+// Load a possibly-unaligned 64-bit double using lwl/lwr word pairs, routing
+// each 32-bit half through |temp| into the FP register halves.  Each lwl is
+// recorded as the faulting instruction for the wasm memory access metadata.
+void MacroAssemblerMIPSCompat::loadUnalignedDouble(
+    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+    FloatRegister dest) {
+  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+  computeScaledAddress(src, SecondScratchReg);
+
+  BufferOffset load;
+  if (Imm16::IsInSignedRange(src.offset) &&
+      Imm16::IsInSignedRange(src.offset + 7)) {
+    load = as_lwl(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET + 3);
+    as_lwr(temp, SecondScratchReg, src.offset + INT64LOW_OFFSET);
+    append(access, load.getOffset());
+    moveToDoubleLo(temp, dest);
+    load = as_lwl(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET + 3);
+    as_lwr(temp, SecondScratchReg, src.offset + INT64HIGH_OFFSET);
+    append(access, load.getOffset());
+    moveToDoubleHi(temp, dest);
+  } else {
+    ma_li(ScratchRegister, Imm32(src.offset));
+    // NOTE(review): as_daddu is the 64-bit add form; on a mips32 target this
+    // looks like it should be as_addu — confirm against the Assembler's
+    // handling of daddu before changing.
+    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+    load = as_lwl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+    as_lwr(temp, ScratchRegister, INT64LOW_OFFSET);
+    append(access, load.getOffset());
+    moveToDoubleLo(temp, dest);
+    load = as_lwl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+    as_lwr(temp, ScratchRegister, INT64HIGH_OFFSET);
+    append(access, load.getOffset());
+    moveToDoubleHi(temp, dest);
+  }
+}
+
+// Load a possibly-unaligned 32-bit float via an lwl/lwr pair through |temp|,
+// recording the lwl for wasm memory-access metadata.
+void MacroAssemblerMIPSCompat::loadUnalignedFloat32(
+    const wasm::MemoryAccessDesc& access, const BaseIndex& src, Register temp,
+    FloatRegister dest) {
+  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+  computeScaledAddress(src, SecondScratchReg);
+  BufferOffset load;
+  if (Imm16::IsInSignedRange(src.offset) &&
+      Imm16::IsInSignedRange(src.offset + 3)) {
+    load = as_lwl(temp, SecondScratchReg, src.offset + 3);
+    as_lwr(temp, SecondScratchReg, src.offset);
+  } else {
+    ma_li(ScratchRegister, Imm32(src.offset));
+    // NOTE(review): as_daddu on a mips32 target — presumably intended as
+    // as_addu; confirm (see loadUnalignedDouble).
+    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+    load = as_lwl(temp, ScratchRegister, 3);
+    as_lwr(temp, ScratchRegister, 0);
+  }
+  append(access, load.getOffset());
+  moveToFloat32(temp, dest);
+}
+
+// Byte/half-word store wrappers.  Immediate-to-Address forms stage the
+// immediate in SecondScratchReg; BaseIndex immediate forms let ma_store
+// handle the staging.
+void MacroAssemblerMIPSCompat::store8(Imm32 imm, const Address& address) {
+  ma_li(SecondScratchReg, imm);
+  ma_store(SecondScratchReg, address, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store8(Register src, const Address& address) {
+  ma_store(src, address, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store8(Imm32 imm, const BaseIndex& dest) {
+  ma_store(imm, dest, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store8(Register src, const BaseIndex& dest) {
+  ma_store(src, dest, SizeByte);
+}
+
+void MacroAssemblerMIPSCompat::store16(Imm32 imm, const Address& address) {
+  ma_li(SecondScratchReg, imm);
+  ma_store(SecondScratchReg, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPSCompat::store16(Register src, const Address& address) {
+  ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerMIPSCompat::store16(Imm32 imm, const BaseIndex& dest) {
+  ma_store(imm, dest, SizeHalfWord);
+}
+
+void MacroAssemblerMIPSCompat::store16(Register src, const BaseIndex& address) {
+  ma_store(src, address, SizeHalfWord);
+}
+
+// Word store wrappers.  The AbsoluteAddress form materializes the address in
+// ScratchRegister; the immediate-to-Address form stages the value in
+// SecondScratchReg.
+void MacroAssemblerMIPSCompat::store32(Register src, AbsoluteAddress address) {
+  movePtr(ImmPtr(address.addr), ScratchRegister);
+  store32(src, Address(ScratchRegister, 0));
+}
+
+void MacroAssemblerMIPSCompat::store32(Register src, const Address& address) {
+  ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::store32(Imm32 src, const Address& address) {
+  move32(src, SecondScratchReg);
+  ma_store(SecondScratchReg, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::store32(Imm32 imm, const BaseIndex& dest) {
+  ma_store(imm, dest, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::store32(Register src, const BaseIndex& dest) {
+  ma_store(src, dest, SizeWord);
+}
+
+// Immediate pointer stores, templated over the destination addressing mode
+// (Address or BaseIndex) and explicitly instantiated for both.  ImmWord is
+// staged through SecondScratchReg; ImmPtr reuses the ImmWord path; ImmGCPtr
+// goes via movePtr so the GC pointer is recorded for relocation.
+template <typename T>
+void MacroAssemblerMIPSCompat::storePtr(ImmWord imm, T address) {
+  ma_li(SecondScratchReg, imm);
+  ma_store(SecondScratchReg, address, SizeWord);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmWord imm,
+                                                          Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmWord imm,
+                                                            BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPSCompat::storePtr(ImmPtr imm, T address) {
+  storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmPtr imm,
+                                                          Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmPtr imm,
+                                                            BaseIndex address);
+
+template <typename T>
+void MacroAssemblerMIPSCompat::storePtr(ImmGCPtr imm, T address) {
+  movePtr(imm, SecondScratchReg);
+  storePtr(SecondScratchReg, address);
+}
+
+template void MacroAssemblerMIPSCompat::storePtr<Address>(ImmGCPtr imm,
+                                                          Address address);
+template void MacroAssemblerMIPSCompat::storePtr<BaseIndex>(ImmGCPtr imm,
+                                                            BaseIndex address);
+
+// Register pointer stores; a pointer is one word on mips32.
+void MacroAssemblerMIPSCompat::storePtr(Register src, const Address& address) {
+  ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::storePtr(Register src,
+                                        const BaseIndex& address) {
+  ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerMIPSCompat::storePtr(Register src, AbsoluteAddress dest) {
+  movePtr(ImmPtr(dest.addr), ScratchRegister);
+  storePtr(src, Address(ScratchRegister, 0));
+}
+
+// Store a possibly-unaligned 32-bit float via an swl/swr pair through |temp|,
+// recording the swl for wasm memory-access metadata.
+void MacroAssemblerMIPSCompat::storeUnalignedFloat32(
+    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+    const BaseIndex& dest) {
+  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+  computeScaledAddress(dest, SecondScratchReg);
+  moveFromFloat32(src, temp);
+
+  BufferOffset store;
+  if (Imm16::IsInSignedRange(dest.offset) &&
+      Imm16::IsInSignedRange(dest.offset + 3)) {
+    store = as_swl(temp, SecondScratchReg, dest.offset + 3);
+    as_swr(temp, SecondScratchReg, dest.offset);
+  } else {
+    ma_li(ScratchRegister, Imm32(dest.offset));
+    // NOTE(review): as_daddu on a mips32 target — presumably intended as
+    // as_addu; confirm (see loadUnalignedDouble).
+    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+    store = as_swl(temp, ScratchRegister, 3);
+    as_swr(temp, ScratchRegister, 0);
+  }
+  append(access, store.getOffset());
+}
+
+// Store a possibly-unaligned 64-bit double as two swl/swr word pairs routed
+// through |temp|.  Only the first swl (the high word's) is recorded in the
+// wasm memory-access metadata — NOTE(review): the load counterpart records
+// both halves; confirm a single record is sufficient here.
+void MacroAssemblerMIPSCompat::storeUnalignedDouble(
+    const wasm::MemoryAccessDesc& access, FloatRegister src, Register temp,
+    const BaseIndex& dest) {
+  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+  computeScaledAddress(dest, SecondScratchReg);
+
+  BufferOffset store;
+  if (Imm16::IsInSignedRange(dest.offset) &&
+      Imm16::IsInSignedRange(dest.offset + 7)) {
+    moveFromDoubleHi(src, temp);
+    store = as_swl(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET + 3);
+    as_swr(temp, SecondScratchReg, dest.offset + INT64HIGH_OFFSET);
+    moveFromDoubleLo(src, temp);
+    as_swl(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET + 3);
+    as_swr(temp, SecondScratchReg, dest.offset + INT64LOW_OFFSET);
+
+  } else {
+    ma_li(ScratchRegister, Imm32(dest.offset));
+    // NOTE(review): as_daddu on a mips32 target — presumably intended as
+    // as_addu; confirm (see loadUnalignedDouble).
+    as_daddu(ScratchRegister, SecondScratchReg, ScratchRegister);
+    moveFromDoubleHi(src, temp);
+    store = as_swl(temp, ScratchRegister, INT64HIGH_OFFSET + 3);
+    as_swr(temp, ScratchRegister, INT64HIGH_OFFSET);
+    moveFromDoubleLo(src, temp);
+    as_swl(temp, ScratchRegister, INT64LOW_OFFSET + 3);
+    as_swr(temp, ScratchRegister, INT64LOW_OFFSET);
+  }
+  append(access, store.getOffset());
+}
+
+// Clamp a double to [0, 255], rounding to the nearest integer, using
+// conditional moves instead of branches.  Clobbers ScratchRegister,
+// SecondScratchReg and ScratchDoubleReg.
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+  as_roundwd(ScratchDoubleReg, input);
+  ma_li(ScratchRegister, Imm32(255));
+  as_mfc1(output, ScratchDoubleReg);
+  zeroDouble(ScratchDoubleReg);
+  // SecondScratchReg = (output < 255), unsigned.
+  as_sltiu(SecondScratchReg, output, 255);
+  // FP condition flag = (0.0 < input).
+  as_colt(DoubleFloat, ScratchDoubleReg, input);
+  // If !(output < 255), clamp to 255.
+  as_movz(output, ScratchRegister, SecondScratchReg);
+  // If !(input > 0) — negatives and NaN — clamp to 0.
+  as_movf(output, zero);
+}
+
+// higher level tag testing code
+// Rebase an Operand onto the payload / type word of the nunbox Value it
+// addresses.
+Operand MacroAssemblerMIPSCompat::ToPayload(Operand base) {
+  return Operand(Register::FromCode(base.base()), base.disp() + PAYLOAD_OFFSET);
+}
+
+Operand MacroAssemblerMIPSCompat::ToType(Operand base) {
+  return Operand(Register::FromCode(base.base()), base.disp() + TAG_OFFSET);
+}
+
+// Set |dest| to the boolean result of comparing a Value's type tag against a
+// fixed JSValueType.  Only Equal/NotEqual are meaningful (asserted).
+void MacroAssemblerMIPSCompat::testNullSet(Condition cond,
+                                           const ValueOperand& value,
+                                           Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_NULL), cond);
+}
+
+void MacroAssemblerMIPSCompat::testObjectSet(Condition cond,
+                                             const ValueOperand& value,
+                                             Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_OBJECT), cond);
+}
+
+void MacroAssemblerMIPSCompat::testUndefinedSet(Condition cond,
+                                                const ValueOperand& value,
+                                                Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_cmp_set(dest, value.typeReg(), ImmType(JSVAL_TYPE_UNDEFINED), cond);
+}
+
+// unboxing code
+// Unboxing: on the 32-bit nunbox layout the payload of every non-double
+// value is its own 32-bit word, so unboxing a register Value is a plain move
+// of the payload register and unboxing from memory is a load of the payload
+// word.  The JSValueType parameter is unused here (it matters on punbox64).
+void MacroAssemblerMIPSCompat::unboxNonDouble(const ValueOperand& operand,
+                                              Register dest, JSValueType) {
+  if (operand.payloadReg() != dest) {
+    ma_move(dest, operand.payloadReg());
+  }
+}
+
+void MacroAssemblerMIPSCompat::unboxNonDouble(const Address& src, Register dest,
+                                              JSValueType) {
+  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxNonDouble(const BaseIndex& src,
+                                              Register dest, JSValueType) {
+  computeScaledAddress(src, SecondScratchReg);
+  ma_lw(dest, Address(SecondScratchReg, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxInt32(const ValueOperand& operand,
+                                          Register dest) {
+  ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxInt32(const Address& src, Register dest) {
+  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxBoolean(const ValueOperand& operand,
+                                            Register dest) {
+  ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxBoolean(const Address& src, Register dest) {
+  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+// Doubles occupy both words: payload is the low half, tag the high half.
+void MacroAssemblerMIPSCompat::unboxDouble(const ValueOperand& operand,
+                                           FloatRegister dest) {
+  moveToDoubleLo(operand.payloadReg(), dest);
+  moveToDoubleHi(operand.typeReg(), dest);
+}
+
+void MacroAssemblerMIPSCompat::unboxDouble(const Address& src,
+                                           FloatRegister dest) {
+  ma_lw(ScratchRegister, Address(src.base, src.offset + PAYLOAD_OFFSET));
+  moveToDoubleLo(ScratchRegister, dest);
+  ma_lw(ScratchRegister, Address(src.base, src.offset + TAG_OFFSET));
+  moveToDoubleHi(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPSCompat::unboxDouble(const BaseIndex& src,
+                                           FloatRegister dest) {
+  loadDouble(src, dest);
+}
+
+void MacroAssemblerMIPSCompat::unboxString(const ValueOperand& operand,
+                                           Register dest) {
+  ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxString(const Address& src, Register dest) {
+  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxBigInt(const ValueOperand& operand,
+                                           Register dest) {
+  ma_move(dest, operand.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxBigInt(const Address& src, Register dest) {
+  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxObject(const ValueOperand& src,
+                                           Register dest) {
+  ma_move(dest, src.payloadReg());
+}
+
+void MacroAssemblerMIPSCompat::unboxObject(const Address& src, Register dest) {
+  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::unboxObjectOrNull(const Address& src,
+                                                 Register dest) {
+  ma_lw(dest, Address(src.base, src.offset + PAYLOAD_OFFSET));
+}
+
+// Unbox into an AnyRegister.  For an FP destination the value may be either
+// an int32 (converted to double) or a double (reassembled from the two
+// words); for a GPR destination the payload register is moved if needed.
+void MacroAssemblerMIPSCompat::unboxValue(const ValueOperand& src,
+                                          AnyRegister dest, JSValueType) {
+  if (dest.isFloat()) {
+    Label notInt32, end;
+    asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+    convertInt32ToDouble(src.payloadReg(), dest.fpu());
+    ma_b(&end, ShortJump);
+    bind(&notInt32);
+    unboxDouble(src, dest.fpu());
+    bind(&end);
+  } else if (src.payloadReg() != dest.gpr()) {
+    ma_move(dest.gpr(), src.payloadReg());
+  }
+}
+
+// Box a double: split its raw bits across the payload (low word) and type
+// (high word) registers.  The spare FloatRegister parameter is unused here.
+void MacroAssemblerMIPSCompat::boxDouble(FloatRegister src,
+                                         const ValueOperand& dest,
+                                         FloatRegister) {
+  moveFromDoubleLo(src, dest.payloadReg());
+  moveFromDoubleHi(src, dest.typeReg());
+}
+
+// Box a non-double: payload is the source register, type register gets the
+// tag for |type|.
+void MacroAssemblerMIPSCompat::boxNonDouble(JSValueType type, Register src,
+                                            const ValueOperand& dest) {
+  if (src != dest.payloadReg()) {
+    ma_move(dest.payloadReg(), src);
+  }
+  ma_li(dest.typeReg(), ImmType(type));
+}
+
+// Numeric conversions from boxed payloads.  Bool payloads are normalized to
+// 0/1 via convertBoolToInt32 (through ScratchRegister) before the FP
+// conversion; int32 payloads convert directly.
+void MacroAssemblerMIPSCompat::boolValueToDouble(const ValueOperand& operand,
+                                                 FloatRegister dest) {
+  convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+  convertInt32ToDouble(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPSCompat::int32ValueToDouble(const ValueOperand& operand,
+                                                  FloatRegister dest) {
+  convertInt32ToDouble(operand.payloadReg(), dest);
+}
+
+void MacroAssemblerMIPSCompat::boolValueToFloat32(const ValueOperand& operand,
+                                                  FloatRegister dest) {
+  convertBoolToInt32(operand.payloadReg(), ScratchRegister);
+  convertInt32ToFloat32(ScratchRegister, dest);
+}
+
+void MacroAssemblerMIPSCompat::int32ValueToFloat32(const ValueOperand& operand,
+                                                   FloatRegister dest) {
+  convertInt32ToFloat32(operand.payloadReg(), dest);
+}
+
+// Load a float32 constant into |dest|.
+void MacroAssemblerMIPSCompat::loadConstantFloat32(float f,
+                                                   FloatRegister dest) {
+  ma_lis(dest, f);
+}
+
+// Load a Value that is known to be numeric: if its tag says int32, convert
+// the payload to double; otherwise load the whole slot as a double.
+void MacroAssemblerMIPSCompat::loadInt32OrDouble(const Address& src,
+                                                 FloatRegister dest) {
+  Label notInt32, end;
+  // If it's an int, convert it to double.
+  ma_lw(SecondScratchReg, Address(src.base, src.offset + TAG_OFFSET));
+  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+  ma_lw(SecondScratchReg, Address(src.base, src.offset + PAYLOAD_OFFSET));
+  convertInt32ToDouble(SecondScratchReg, dest);
+  ma_b(&end, ShortJump);
+
+  // Not an int, just load as double.
+  bind(&notInt32);
+  ma_ld(dest, src);
+  bind(&end);
+}
+
+// BaseIndex variant of loadInt32OrDouble.  The scaled address lives in
+// SecondScratchReg, which also has to hold the loaded tag/payload, so the
+// address is recomputed after each destructive load.
+void MacroAssemblerMIPSCompat::loadInt32OrDouble(Register base, Register index,
+                                                 FloatRegister dest,
+                                                 int32_t shift) {
+  Label notInt32, end;
+
+  // If it's an int, convert it to double.
+
+  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
+                       SecondScratchReg);
+  // Since we only have one scratch, we need to stomp over it with the tag.
+  load32(Address(SecondScratchReg, TAG_OFFSET), SecondScratchReg);
+  asMasm().branchTestInt32(Assembler::NotEqual, SecondScratchReg, &notInt32);
+
+  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
+                       SecondScratchReg);
+  load32(Address(SecondScratchReg, PAYLOAD_OFFSET), SecondScratchReg);
+  convertInt32ToDouble(SecondScratchReg, dest);
+  ma_b(&end, ShortJump);
+
+  // Not an int, just load as double.
+  bind(&notInt32);
+  // First, recompute the offset that had been stored in the scratch register
+  // since the scratch register was overwritten loading in the type.
+  computeScaledAddress(BaseIndex(base, index, ShiftToScale(shift)),
+                       SecondScratchReg);
+  loadDouble(Address(SecondScratchReg, 0), dest);
+  bind(&end);
+}
+
+// Load a double constant into |dest| (via ma_lid's word-by-word move).
+void MacroAssemblerMIPSCompat::loadConstantDouble(double dp,
+                                                  FloatRegister dest) {
+  ma_lid(dest, dp);
+}
+
+// Extract the payload (object pointer) of the Value at |address| into
+// |scratch| and return it.
+Register MacroAssemblerMIPSCompat::extractObject(const Address& address,
+                                                 Register scratch) {
+  ma_lw(scratch, Address(address.base, address.offset + PAYLOAD_OFFSET));
+  return scratch;
+}
+
+// Extract the type tag of the Value at |address| into |scratch|.
+Register MacroAssemblerMIPSCompat::extractTag(const Address& address,
+                                              Register scratch) {
+  ma_lw(scratch, Address(address.base, address.offset + TAG_OFFSET));
+  return scratch;
+}
+
+// BaseIndex form: compute the scaled address into |scratch| first.
+Register MacroAssemblerMIPSCompat::extractTag(const BaseIndex& address,
+                                              Register scratch) {
+  computeScaledAddress(address, scratch);
+  return extractTag(Address(scratch, address.offset), scratch);
+}
+
+// Compile-time helper: the nunbox tag word of a constant Value.
+uint32_t MacroAssemblerMIPSCompat::getType(const Value& val) {
+  return val.toNunboxTag();
+}
+
+// Materialize a constant Value's payload word in |data|.  GC things go
+// through ImmGCPtr so the pointer is traced/relocated.
+void MacroAssemblerMIPSCompat::moveData(const Value& val, Register data) {
+  if (val.isGCThing()) {
+    ma_li(data, ImmGCPtr(val.toGCThing()));
+  } else {
+    ma_li(data, Imm32(val.toNunboxPayload()));
+  }
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/MIPS interface.
+/////////////////////////////////////////////////////////////////
+// storeValue family: write a boxed Value (payload word + tag word) to
+// memory.  BaseIndex forms pre-compute the scaled address into a scratch
+// register; when the offset does not fit a 16-bit immediate it is folded
+// into the address so ma_sw never needs the scratch register itself.
+void MacroAssemblerMIPSCompat::storeValue(ValueOperand val, Operand dst) {
+  storeValue(val, Address(Register::FromCode(dst.base()), dst.disp()));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
+                                          const BaseIndex& dest) {
+  computeScaledAddress(dest, SecondScratchReg);
+  storeValue(val, Address(SecondScratchReg, dest.offset));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
+                                          BaseIndex dest) {
+  computeScaledAddress(dest, ScratchRegister);
+
+  // Make sure that ma_sw doesn't clobber ScratchRegister
+  int32_t offset = dest.offset;
+  if (!Imm16::IsInSignedRange(offset)) {
+    ma_li(SecondScratchReg, Imm32(offset));
+    as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+    offset = 0;
+  }
+
+  storeValue(type, reg, Address(ScratchRegister, offset));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(ValueOperand val,
+                                          const Address& dest) {
+  ma_sw(val.payloadReg(), Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+  ma_sw(val.typeReg(), Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(JSValueType type, Register reg,
+                                          Address dest) {
+  MOZ_ASSERT(dest.base != SecondScratchReg);
+
+  ma_sw(reg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+  ma_li(SecondScratchReg, ImmTag(JSVAL_TYPE_TO_TAG(type)));
+  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(const Value& val, Address dest) {
+  MOZ_ASSERT(dest.base != SecondScratchReg);
+
+  ma_li(SecondScratchReg, Imm32(getType(val)));
+  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+  moveData(val, SecondScratchReg);
+  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+void MacroAssemblerMIPSCompat::storeValue(const Value& val, BaseIndex dest) {
+  computeScaledAddress(dest, ScratchRegister);
+
+  // Make sure that ma_sw doesn't clobber ScratchRegister
+  int32_t offset = dest.offset;
+  if (!Imm16::IsInSignedRange(offset)) {
+    ma_li(SecondScratchReg, Imm32(offset));
+    as_addu(ScratchRegister, ScratchRegister, SecondScratchReg);
+    offset = 0;
+  }
+  storeValue(val, Address(ScratchRegister, offset));
+}
+
+// Load a boxed Value from memory into a ValueOperand register pair.
+void MacroAssemblerMIPSCompat::loadValue(const BaseIndex& addr,
+                                         ValueOperand val) {
+  computeScaledAddress(addr, SecondScratchReg);
+  loadValue(Address(SecondScratchReg, addr.offset), val);
+}
+
+// The load order depends on aliasing: if the base register is also the
+// destination payload register, load the tag first so the base survives
+// until the second load.
+void MacroAssemblerMIPSCompat::loadValue(Address src, ValueOperand val) {
+  // Ensure that loading the payload does not erase the pointer to the
+  // Value in memory.
+  if (src.base != val.payloadReg()) {
+    ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+    ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+  } else {
+    ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
+    ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
+  }
+}
+
+// Box |payload| with the tag for |type| into |dest|.  The payload is moved
+// first; requires distinct type/payload registers (asserted).
+void MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload,
+                                        ValueOperand dest) {
+  MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
+  if (payload != dest.payloadReg()) {
+    ma_move(dest.payloadReg(), payload);
+  }
+  ma_li(dest.typeReg(), ImmType(type));
+}
+
+// Push a boxed Value (both words) onto the stack.
+void MacroAssemblerMIPSCompat::pushValue(ValueOperand val) {
+  // Allocate stack slots for type and payload. One for each.
+  asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
+  // Store type and payload.
+  storeValue(val, Address(StackPointer, 0));
+}
+
+// Push the Value stored at |addr|, copying word by word through
+// ScratchRegister.
+void MacroAssemblerMIPSCompat::pushValue(const Address& addr) {
+  // Allocate stack slots for type and payload. One for each.
+  ma_subu(StackPointer, StackPointer, Imm32(sizeof(Value)));
+  // If address is based on StackPointer its offset needs to be adjusted
+  // to accommodate for previous stack allocation.
+  int32_t offset =
+      addr.base != StackPointer ? addr.offset : addr.offset + sizeof(Value);
+  // Store type and payload.
+  ma_lw(ScratchRegister, Address(addr.base, offset + TAG_OFFSET));
+  ma_sw(ScratchRegister, Address(StackPointer, TAG_OFFSET));
+  ma_lw(ScratchRegister, Address(addr.base, offset + PAYLOAD_OFFSET));
+  ma_sw(ScratchRegister, Address(StackPointer, PAYLOAD_OFFSET));
+}
+
+// Pop a boxed Value from the stack into |val| and free the slot.
+void MacroAssemblerMIPSCompat::popValue(ValueOperand val) {
+  // Load payload and type.
+  as_lw(val.payloadReg(), StackPointer, PAYLOAD_OFFSET);
+  as_lw(val.typeReg(), StackPointer, TAG_OFFSET);
+  // Free stack.
+  as_addiu(StackPointer, StackPointer, sizeof(Value));
+}
+
+// Store only the payload word of constant |val| at |dest| (tag untouched).
+void MacroAssemblerMIPSCompat::storePayload(const Value& val, Address dest) {
+  moveData(val, SecondScratchReg);
+  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+// Store only the payload word from |src| at |dest| (tag untouched).
+// (Dropped a stray trailing `return;` — dead code in a void function and
+// inconsistent with the sibling overloads.)
+void MacroAssemblerMIPSCompat::storePayload(Register src, Address dest) {
+  ma_sw(src, Address(dest.base, dest.offset + PAYLOAD_OFFSET));
+}
+
+// BaseIndex payload stores.  Only a zero displacement is supported
+// (asserted); the scaled address is computed into SecondScratchReg and the
+// payload stored at NUNBOX32_PAYLOAD_OFFSET.
+void MacroAssemblerMIPSCompat::storePayload(const Value& val,
+                                            const BaseIndex& dest) {
+  MOZ_ASSERT(dest.offset == 0);
+
+  computeScaledAddress(dest, SecondScratchReg);
+
+  moveData(val, ScratchRegister);
+
+  as_sw(ScratchRegister, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+void MacroAssemblerMIPSCompat::storePayload(Register src,
+                                            const BaseIndex& dest) {
+  MOZ_ASSERT(dest.offset == 0);
+
+  computeScaledAddress(dest, SecondScratchReg);
+  as_sw(src, SecondScratchReg, NUNBOX32_PAYLOAD_OFFSET);
+}
+
+// Store only the tag word of a Value (payload untouched).
+void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Address dest) {
+  ma_li(SecondScratchReg, tag);
+  ma_sw(SecondScratchReg, Address(dest.base, dest.offset + TAG_OFFSET));
+}
+
+// BaseIndex form supports only a zero displacement (asserted).
+void MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, const BaseIndex& dest) {
+  MOZ_ASSERT(dest.offset == 0);
+
+  computeScaledAddress(dest, SecondScratchReg);
+  ma_li(ScratchRegister, tag);
+  as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
+}
+
// Emit a MIPS break instruction (code 0) to trap execution.
void MacroAssemblerMIPSCompat::breakpoint() { as_break(0); }
+
// Unbox |source| into the double register |dest|: doubles are unboxed
// directly, int32s are converted, and any other type branches to |failure|.
void MacroAssemblerMIPSCompat::ensureDouble(const ValueOperand& source,
                                            FloatRegister dest,
                                            Label* failure) {
  Label isDouble, done;
  asMasm().branchTestDouble(Assembler::Equal, source.typeReg(), &isDouble);
  asMasm().branchTestInt32(Assembler::NotEqual, source.typeReg(), failure);

  // Not a double, so it must be an int32: widen the payload.
  convertInt32ToDouble(source.payloadReg(), dest);
  jump(&done);

  bind(&isDouble);
  unboxDouble(source, dest);

  bind(&done);
}
+
// Debug-only runtime check that sp is ABI-aligned; traps with
// BREAK_STACK_UNALIGNED if any low alignment bits are set. No-op in release
// builds.
void MacroAssemblerMIPSCompat::checkStackAlignment() {
#ifdef DEBUG
  Label aligned;
  as_andi(ScratchRegister, sp, ABIStackAlignment - 1);
  ma_b(ScratchRegister, zero, &aligned, Equal, ShortJump);
  as_break(BREAK_STACK_UNALIGNED);
  bind(&aligned);
#endif
}
+
// Force sp down to an ABI-aligned boundary, saving the original sp at the
// new top of stack so restoreStackPointer() can undo this. Clobbers
// SecondScratchReg.
void MacroAssemblerMIPSCompat::alignStackPointer() {
  movePtr(StackPointer, SecondScratchReg);
  // Reserve a slot for the saved sp, then round down to the alignment.
  asMasm().subPtr(Imm32(sizeof(intptr_t)), StackPointer);
  asMasm().andPtr(Imm32(~(ABIStackAlignment - 1)), StackPointer);
  storePtr(SecondScratchReg, Address(StackPointer, 0));
}
+
// Undo alignStackPointer(): reload the original sp saved at the top of stack.
void MacroAssemblerMIPSCompat::restoreStackPointer() {
  loadPtr(Address(StackPointer, 0), StackPointer);
}
+
// Exception tail: calls HandleException with a stack-allocated
// ResumeFromException, then dispatches on the resume kind it fills in.
// Everything below the HandleException call reads fields out of that
// structure, which still lives at the current sp.
void MacroAssemblerMIPSCompat::handleFailureWithHandlerTail(
    Label* profilerExitTail) {
  // Reserve space for exception information.
  int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
             ~(ABIStackAlignment - 1);
  asMasm().subPtr(Imm32(size), StackPointer);
  ma_move(a0, StackPointer);  // Use a0 since it is a first function argument

  // Call the handler.
  using Fn = void (*)(ResumeFromException * rfe);
  asMasm().setupUnalignedABICall(a1);
  asMasm().passABIArg(a0);
  asMasm().callWithABI<Fn, HandleException>(
      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);

  Label entryFrame;
  Label catch_;
  Label finally;
  Label return_;
  Label bailout;
  Label wasm;

  // Already clobbered a0, so use it...
  load32(Address(StackPointer, offsetof(ResumeFromException, kind)), a0);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_ENTRY_FRAME),
                    &entryFrame);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_CATCH), &catch_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FINALLY), &finally);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_FORCED_RETURN), &return_);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_BAILOUT), &bailout);
  asMasm().branch32(Assembler::Equal, a0,
                    Imm32(ResumeFromException::RESUME_WASM), &wasm);

  breakpoint();  // Invalid kind.

  // No exception handler. Load the error value, load the new stack pointer
  // and return from the entry frame.
  bind(&entryFrame);
  asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);

  // We're going to be returning by the ion calling convention
  ma_pop(ra);
  as_jr(ra);
  as_nop();  // branch-delay slot

  // If we found a catch handler, this must be a baseline frame. Restore
  // state and jump to the catch block.
  bind(&catch_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  jump(a0);

  // If we found a finally block, this must be a baseline frame. Push
  // two values expected by JSOp::Retsub: BooleanValue(true) and the
  // exception.
  bind(&finally);
  ValueOperand exception = ValueOperand(a1, a2);
  // Read all rfe fields before restoring sp, since sp points at the rfe.
  loadValue(Address(sp, offsetof(ResumeFromException, exception)), exception);

  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a0);
  loadPtr(Address(sp, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(sp, offsetof(ResumeFromException, stackPointer)), sp);

  pushValue(BooleanValue(true));
  pushValue(exception);
  jump(a0);

  // Only used in debug mode. Return BaselineFrame->returnValue() to the
  // caller.
  bind(&return_);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          BaselineFrameReg);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  loadValue(
      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfReturnValue()),
      JSReturnOperand);
  // Pop the baseline frame: sp takes the frame pointer, then restore the
  // caller's frame pointer.
  ma_move(StackPointer, BaselineFrameReg);
  pop(BaselineFrameReg);

  // If profiling is enabled, then update the lastProfilingFrame to refer to
  // caller frame before returning.
  {
    Label skipProfilingInstrumentation;
    // Test if profiler enabled.
    AbsoluteAddress addressOfEnabled(
        GetJitContext()->runtime->geckoProfiler().addressOfEnabled());
    asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
                      &skipProfilingInstrumentation);
    jump(profilerExitTail);
    bind(&skipProfilingInstrumentation);
  }

  ret();

  // If we are bailing out to baseline to handle an exception, jump to
  // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
  bind(&bailout);
  loadPtr(Address(sp, offsetof(ResumeFromException, bailoutInfo)), a2);
  ma_li(ReturnReg, Imm32(1));
  loadPtr(Address(sp, offsetof(ResumeFromException, target)), a1);
  jump(a1);

  // If we are throwing and the innermost frame was a wasm frame, reset SP and
  // FP; SP is pointing to the unwound return address to the wasm entry, so
  // we can just ret().
  bind(&wasm);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, framePointer)),
          FramePointer);
  loadPtr(Address(StackPointer, offsetof(ResumeFromException, stackPointer)),
          StackPointer);
  ret();
}
+
// Emit a jump whose location is returned so it can later be toggled
// (enabled/disabled) by patching.
CodeOffset MacroAssemblerMIPSCompat::toggledJump(Label* label) {
  CodeOffset ret(nextOffset().getOffset());
  ma_b(label);
  return ret;
}
+
// Emit a patchable call to |target| that can be toggled on/off. The emitted
// sequence always has size ToggledCallSize(): a patchable load of the target
// followed by either jalr+nop (enabled) or two nops (disabled), so toggling
// only rewrites the final two instructions.
CodeOffset MacroAssemblerMIPSCompat::toggledCall(JitCode* target,
                                                 bool enabled) {
  BufferOffset bo = nextOffset();
  CodeOffset offset(bo.getOffset());
  addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
  ma_liPatchable(ScratchRegister, ImmPtr(target->raw()));
  if (enabled) {
    as_jalr(ScratchRegister);
    as_nop();
  } else {
    as_nop();
    as_nop();
  }
  MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
                            ToggledCallSize(nullptr));
  return offset;
}
+
// Record |framePtr| as the current JitActivation's last profiling frame and
// clear the last profiling call site. |scratch| is clobbered.
void MacroAssemblerMIPSCompat::profilerEnterFrame(Register framePtr,
                                                  Register scratch) {
  asMasm().loadJSContext(scratch);
  loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
  storePtr(framePtr,
           Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
  storePtr(ImmPtr(nullptr),
           Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
}
+
// Tail-jump to the shared profiler exit-frame trampoline.
void MacroAssemblerMIPSCompat::profilerExitFrame() {
  jump(GetJitContext()->runtime->jitRuntime()->getProfilerExitFrameTail());
}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+ asMasm().subPtr(imm32, StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// Stack manipulation functions.
+
// Push all GPRs then all FPRs in |set| onto the stack. GPRs are stored at
// descending offsets within a single reservation; doubles are stored at
// 8-byte-aligned slots relative to the pre-reservation (aligned-down) sp.
void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
  int32_t diffF = set.fpus().getPushSizeInBytes();
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);

  reserveStack(diffG);
  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    storePtr(*iter, Address(StackPointer, diffG));
  }
  MOZ_ASSERT(diffG == 0);

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  if (diffF > 0) {
    // Double values have to be aligned. We reserve extra space so that we can
    // start writing from the first aligned location.
    // We reserve a whole extra double so that the buffer has even size.
    ma_and(SecondScratchReg, sp, Imm32(~(ABIStackAlignment - 1)));
    reserveStack(diffF);

    diffF -= sizeof(double);

    // Store each double at a negative offset from the aligned base computed
    // above; the offsets walk down through the freshly reserved area.
    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      as_sdc1(*iter, SecondScratchReg, -diffF);
      diffF -= sizeof(double);
    }

    MOZ_ASSERT(diffF == 0);
  }
}
+
// Inverse of PushRegsInMask: reload the registers in |set| from the stack,
// skipping (but still stepping over) any register in |ignore|, then free the
// two reservations.
void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
                                         LiveRegisterSet ignore) {
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);
  int32_t diffF = set.fpus().getPushSizeInBytes();
  const int32_t reservedG = diffG;
  const int32_t reservedF = diffF;

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  if (reservedF > 0) {
    // Read the buffer form the first aligned location.
    // Recompute the aligned base PushRegsInMask used: sp before the FP
    // reservation, rounded down to the ABI alignment.
    ma_addu(SecondScratchReg, sp, Imm32(reservedF));
    ma_and(SecondScratchReg, SecondScratchReg, Imm32(~(ABIStackAlignment - 1)));

    diffF -= sizeof(double);

    LiveFloatRegisterSet fpignore(ignore.fpus().reduceSetForPush());
    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      if (!ignore.has(*iter)) {
        as_ldc1(*iter, SecondScratchReg, -diffF);
      }
      diffF -= sizeof(double);
    }
    freeStack(reservedF);
    MOZ_ASSERT(diffF == 0);
  }

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    if (!ignore.has(*iter)) {
      loadPtr(Address(StackPointer, diffG), *iter);
    }
  }
  freeStack(reservedG);
  MOZ_ASSERT(diffG == 0);
}
+
// Store the registers in |set| into an existing stack area ending at |dest|
// (which must be sp-relative and large enough), without adjusting sp.
// |scratch| is clobbered when floating-point registers are present.
void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
                                     Register scratch) {
  int32_t diffF = set.fpus().getPushSizeInBytes();
  int32_t diffG = set.gprs().size() * sizeof(intptr_t);

  MOZ_ASSERT(dest.offset >= diffG + diffF);
  MOZ_ASSERT(dest.base == StackPointer);

  for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
    diffG -= sizeof(intptr_t);
    dest.offset -= sizeof(intptr_t);
    storePtr(*iter, dest);
  }
  MOZ_ASSERT(diffG == 0);

#ifdef ENABLE_WASM_SIMD
#  error "Needs more careful logic if SIMD is enabled"
#endif

  if (diffF > 0) {
    // Align the remaining destination downward so doubles land on
    // 8-byte-aligned slots, mirroring PushRegsInMask.
    computeEffectiveAddress(dest, scratch);
    ma_and(scratch, scratch, Imm32(~(ABIStackAlignment - 1)));

    diffF -= sizeof(double);

    for (FloatRegisterForwardIterator iter(set.fpus().reduceSetForPush());
         iter.more(); ++iter) {
      as_sdc1(*iter, scratch, -diffF);
      diffF -= sizeof(double);
    }
    MOZ_ASSERT(diffF == 0);
  }
}
+// ===============================================================
+// ABI function calls.
+
// Begin an ABI call from a context whose stack alignment is unknown: save the
// current sp (via |scratch|) at the new top of stack so callWithABIPost can
// restore it, then force sp to the ABI alignment.
void MacroAssembler::setupUnalignedABICall(Register scratch) {
  MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
  setupNativeABICall();
  dynamicAlignment_ = true;

  ma_move(scratch, StackPointer);

  // Force sp to be aligned
  asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
  ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
  storePtr(scratch, Address(StackPointer, 0));
}
+
// Reserve and populate the outgoing argument area for an ABI call: computes
// the aligned reservation (including a slot for $ra), saves $ra, and emits
// the resolved argument moves. |*stackAdjust| receives the reservation size
// for callWithABIPost.
void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
  MOZ_ASSERT(inCall_);
  uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();

  // Reserve place for $ra.
  stackForCall += sizeof(intptr_t);

  if (dynamicAlignment_) {
    stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
  } else {
    // Static alignment: account for what the frame (and, for wasm, the wasm
    // frame header) has already pushed.
    uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
    stackForCall += ComputeByteAlignment(
        stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
  }

  *stackAdjust = stackForCall;
  reserveStack(stackForCall);

  // Save $ra because call is going to clobber it. Restore it in
  // callWithABIPost. NOTE: This is needed for calls from SharedIC.
  // Maybe we can do this differently.
  storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));

  // Position all arguments.
  {
    enoughMemory_ &= moveResolver_.resolve();
    if (!enoughMemory_) {
      return;
    }

    MoveEmitter emitter(*this);
    emitter.emit(moveResolver_);
    emitter.finish();
  }

  assertStackAlignment(ABIStackAlignment);
}
+
// Tear down an ABI call: restore $ra from the reservation made by
// callWithABIPre, then undo the stack adjustment (or reload the saved sp if
// the call used dynamic alignment).
void MacroAssembler::callWithABIPost(uint32_t stackAdjust, MoveOp::Type result,
                                     bool callFromWasm) {
  // Restore ra value (as stored in callWithABIPre()).
  loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);

  if (dynamicAlignment_) {
    // Restore sp value from stack (as stored in setupUnalignedABICall()).
    loadPtr(Address(StackPointer, stackAdjust), StackPointer);
    // Use adjustFrame instead of freeStack because we already restored sp.
    adjustFrame(-stackAdjust);
  } else {
    freeStack(stackAdjust);
  }

#ifdef DEBUG
  MOZ_ASSERT(inCall_);
  inCall_ = false;
#endif
}
+
// ABI call through a register callee. The callee is staged in t9 (the MIPS
// PIC convention register) before argument moves can clobber it.
void MacroAssembler::callWithABINoProfiler(Register fun, MoveOp::Type result) {
  // Load the callee in t9, no instruction between the lw and call
  // should clobber it. Note that we can't use fun.base because it may
  // be one of the IntArg registers clobbered before the call.
  ma_move(t9, fun);
  uint32_t stackAdjust;
  callWithABIPre(&stackAdjust);
  call(t9);
  callWithABIPost(stackAdjust, result);
}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun,
+ MoveOp::Type result) {
+ // Load the callee in t9, as above.
+ loadPtr(Address(fun.base, fun.offset), t9);
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(t9);
+ callWithABIPost(stackAdjust, result);
+}
+// ===============================================================
+// Move instructions
+
// Box a typed-or-value register into |dest|. Non-float types get the static
// tag plus the payload; Float32 is first widened to double, then the double
// is boxed.
void MacroAssembler::moveValue(const TypedOrValueRegister& src,
                               const ValueOperand& dest) {
  if (src.hasValue()) {
    moveValue(src.valueReg(), dest);
    return;
  }

  MIRType type = src.type();
  AnyRegister reg = src.typedReg();

  if (!IsFloatingPointType(type)) {
    mov(ImmWord(MIRTypeToTag(type)), dest.typeReg());
    if (reg.gpr() != dest.payloadReg()) {
      move32(reg.gpr(), dest.payloadReg());
    }
    return;
  }

  ScratchDoubleScope scratch(*this);
  FloatRegister freg = reg.fpu();
  if (type == MIRType::Float32) {
    convertFloat32ToDouble(freg, scratch);
    freg = scratch;
  }
  boxDouble(freg, dest, scratch);
}
+
// Copy a boxed value between register pairs, handling every overlap pattern
// between the source and destination tag/payload registers.
void MacroAssembler::moveValue(const ValueOperand& src,
                               const ValueOperand& dest) {
  Register s0 = src.typeReg();
  Register s1 = src.payloadReg();
  Register d0 = dest.typeReg();
  Register d1 = dest.payloadReg();

  // Either one or both of the source registers could be the same as a
  // destination register.
  if (s1 == d0) {
    if (s0 == d1) {
      // If both are, this is just a swap of two registers.
      ScratchRegisterScope scratch(*this);
      MOZ_ASSERT(d1 != scratch);
      MOZ_ASSERT(d0 != scratch);
      move32(d1, scratch);
      move32(d0, d1);
      move32(scratch, d0);
      return;
    }
    // If only one is, copy that source first.
    std::swap(s0, s1);
    std::swap(d0, d1);
  }

  if (s0 != d0) {
    move32(s0, d0);
  }
  if (s1 != d1) {
    move32(s1, d1);
  }
}
+
// Materialize constant |src| into |dest|: tag as a 32-bit immediate; payload
// as a GC-traced pointer for GC things, a plain immediate otherwise.
void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
  move32(Imm32(src.toNunboxTag()), dest.typeReg());
  if (src.isGCThing()) {
    movePtr(ImmGCPtr(src.toGCThing()), dest.payloadReg());
  } else {
    move32(Imm32(src.toNunboxPayload()), dest.payloadReg());
  }
}
+
+// ===============================================================
+// Branch functions
+
// Branch to |label| if the value at |address| is (Equal) / is not (NotEqual)
// a cell in the nursery. Non-GC-thing values short-circuit: they can never be
// nursery cells. |temp| is clobbered.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              const Address& address,
                                              Register temp, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label done;

  branchTestGCThing(Assembler::NotEqual, address,
                    cond == Assembler::Equal ? &done : label);

  loadPtr(address, temp);
  branchPtrInNurseryChunk(cond, temp, InvalidReg, label);

  bind(&done);
}
+
// Register-pair overload of the above: tests |value|'s tag first, then its
// payload pointer against the nursery chunk. |temp| is clobbered.
void MacroAssembler::branchValueIsNurseryCell(Condition cond,
                                              ValueOperand value, Register temp,
                                              Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);

  Label done;

  branchTestGCThing(Assembler::NotEqual, value,
                    cond == Assembler::Equal ? &done : label);
  branchPtrInNurseryChunk(cond, value.payloadReg(), temp, label);

  bind(&done);
}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(*this);
+ moveData(rhs, scratch);
+
+ if (cond == Equal) {
+ Label done;
+ ma_b(lhs.payloadReg(), scratch, &done, NotEqual, ShortJump);
+ { ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, Equal); }
+ bind(&done);
+ } else {
+ ma_b(lhs.payloadReg(), scratch, label, NotEqual);
+
+ ma_b(lhs.typeReg(), Imm32(getType(rhs)), label, NotEqual);
+ }
+}
+
+// ========================================================================
+// Memory access primitives.
// Store an unboxed value of static type |valueType| into the Value slot
// |dest|. Doubles overwrite the whole slot; otherwise the tag is rewritten
// only when it differs from the slot's current static type, and the payload
// word is always written.
template <typename T>
void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                       MIRType valueType, const T& dest,
                                       MIRType slotType) {
  if (valueType == MIRType::Double) {
    storeDouble(value.reg().typedReg().fpu(), dest);
    return;
  }

  // Store the type tag if needed.
  if (valueType != slotType) {
    storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
  }

  // Store the payload.
  if (value.constant()) {
    storePayload(value.value(), dest);
  } else {
    storePayload(value.reg().typedReg().gpr(), dest);
  }
}

template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
                                                MIRType valueType,
                                                const Address& dest,
                                                MIRType slotType);
template void MacroAssembler::storeUnboxedValue(
    const ConstantOrRegister& value, MIRType valueType,
    const BaseObjectElementIndex& dest, MIRType slotType);
+
// Boxed doubles need no special handling on mips32: a plain push suffices.
void MacroAssembler::PushBoxed(FloatRegister reg) { Push(reg); }
+
// Wasm heap bounds check: branch to |label| when |index| compared against the
// limit register satisfies |cond|.
void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
                                       Register boundsCheckLimit,
                                       Label* label) {
  ma_b(index, boundsCheckLimit, label, cond);
}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ load32(boundsCheckLimit, SecondScratchReg);
+ ma_b(index, SecondScratchReg, label, cond);
+}
+
// Truncate |input| (double) to uint32 in |output|. trunc.w.d only covers the
// signed range, so values >= 2^31 are handled by truncating input - 2^31 and
// setting the top bit; the FCSR invalid-operation cause bit routes genuine
// out-of-range inputs to |oolEntry|.
void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  Label done;

  as_truncwd(ScratchFloat32Reg, input);
  ma_li(ScratchRegister, Imm32(INT32_MAX));
  moveFromFloat32(ScratchFloat32Reg, output);

  // For numbers in -1.[ : ]INT32_MAX range do nothing more
  ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);

  loadConstantDouble(double(INT32_MAX + 1ULL), ScratchDoubleReg);
  ma_li(ScratchRegister, Imm32(INT32_MIN));
  as_subd(ScratchDoubleReg, input, ScratchDoubleReg);
  as_truncwd(ScratchFloat32Reg, ScratchDoubleReg);
  // Read FCSR to pick up the invalid-operation (CauseV) bit of the truncate.
  as_cfc1(SecondScratchReg, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
  // Adding INT32_MIN (0x80000000) restores the subtracted 2^31.
  ma_addu(output, ScratchRegister);

  ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);
}
+
// Float32 variant of the uint32 truncation above; additionally treats a zero
// result on the high-range path as out-of-range, since float precision can
// collapse small negatives to zero after the 2^31 subtraction.
void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
                                                 Register output,
                                                 bool isSaturating,
                                                 Label* oolEntry) {
  Label done;

  as_truncws(ScratchFloat32Reg, input);
  ma_li(ScratchRegister, Imm32(INT32_MAX));
  moveFromFloat32(ScratchFloat32Reg, output);
  // For numbers in -1.[ : ]INT32_MAX range do nothing more
  ma_b(output, ScratchRegister, &done, Assembler::Below, ShortJump);

  loadConstantFloat32(float(INT32_MAX + 1ULL), ScratchFloat32Reg);
  ma_li(ScratchRegister, Imm32(INT32_MIN));
  as_subs(ScratchFloat32Reg, input, ScratchFloat32Reg);
  as_truncws(ScratchFloat32Reg, ScratchFloat32Reg);
  as_cfc1(SecondScratchReg, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(SecondScratchReg, SecondScratchReg, Assembler::CauseV, 1);
  ma_addu(output, ScratchRegister);

  // Guard against negative values that result in 0 due the precision loss.
  as_sltiu(ScratchRegister, output, 1);
  ma_or(SecondScratchReg, ScratchRegister);

  ma_b(SecondScratchReg, Imm32(0), oolEntry, Assembler::NotEqual);

  bind(&done);
}
+
// Aligned 64-bit wasm load; no unaligned-access temp is needed.
void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
                                 Register memoryBase, Register ptr,
                                 Register ptrScratch, Register64 output) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
}
+
// Unaligned 64-bit wasm load; |tmp| is required by the byte-wise load path.
void MacroAssembler::wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
                                          Register memoryBase, Register ptr,
                                          Register ptrScratch,
                                          Register64 output, Register tmp) {
  wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, tmp);
}
+
// Aligned 64-bit wasm store; no unaligned-access temp is needed.
void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
                                  Register64 value, Register memoryBase,
                                  Register ptr, Register ptrScratch) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
}
+
// Unaligned 64-bit wasm store; |tmp| is required by the byte-wise store path.
void MacroAssembler::wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
                                           Register64 value,
                                           Register memoryBase, Register ptr,
                                           Register ptrScratch, Register tmp) {
  wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, tmp);
}
+
+void MacroAssemblerMIPSCompat::wasmLoadI64Impl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ MOZ_ASSERT(!access.isZeroExtendSimd128Load());
+ MOZ_ASSERT(!access.isSplatSimd128Load());
+ MOZ_ASSERT(!access.isWidenSimd128Load());
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().movePtr(ptr, ptrScratch);
+ asMasm().addPtr(Imm32(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ if (byteSize <= 4) {
+ asMasm().ma_load_unaligned(access, output.low, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ if (!isSigned) {
+ asMasm().move32(Imm32(0), output.high);
+ } else {
+ asMasm().ma_sra(output.high, output.low, Imm32(31));
+ }
+ } else {
+ MOZ_ASSERT(output.low != ptr);
+ asMasm().ma_load_unaligned(access, output.low, address, tmp, SizeWord,
+ ZeroExtend);
+ asMasm().ma_load_unaligned(
+ access, output.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
+ SignExtend);
+ }
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ if (byteSize <= 4) {
+ asMasm().ma_load(output.low, address,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ asMasm().append(access, asMasm().size() - 4);
+ if (!isSigned) {
+ asMasm().move32(Imm32(0), output.high);
+ } else {
+ asMasm().ma_sra(output.high, output.low, Imm32(31));
+ }
+ } else {
+ MOZ_ASSERT(output.low != ptr);
+ asMasm().ma_load(output.low, BaseIndex(HeapReg, ptr, TimesOne), SizeWord);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().ma_load(output.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
+ SizeWord);
+ asMasm().append(access, asMasm().size() - 4);
+ }
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerMIPSCompat::wasmStoreI64Impl(
+ const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT(offset < asMasm().wasmMaxOffsetGuardLimit());
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(Imm32(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ unsigned byteSize = access.byteSize();
+ bool isSigned;
+ switch (access.type()) {
+ case Scalar::Int8:
+ isSigned = true;
+ break;
+ case Scalar::Uint8:
+ isSigned = false;
+ break;
+ case Scalar::Int16:
+ isSigned = true;
+ break;
+ case Scalar::Uint16:
+ isSigned = false;
+ break;
+ case Scalar::Int32:
+ isSigned = true;
+ break;
+ case Scalar::Uint32:
+ isSigned = false;
+ break;
+ case Scalar::Int64:
+ isSigned = true;
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ MOZ_ASSERT(INT64LOW_OFFSET == 0);
+ BaseIndex address(memoryBase, ptr, TimesOne);
+ if (IsUnaligned(access)) {
+ MOZ_ASSERT(tmp != InvalidReg);
+ if (byteSize <= 4) {
+ asMasm().ma_store_unaligned(access, value.low, address, tmp,
+ static_cast<LoadStoreSize>(8 * byteSize),
+ isSigned ? SignExtend : ZeroExtend);
+ } else {
+ asMasm().ma_store_unaligned(
+ access, value.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET), tmp, SizeWord,
+ SignExtend);
+ asMasm().ma_store_unaligned(access, value.low, address, tmp, SizeWord,
+ ZeroExtend);
+ }
+ return;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ if (byteSize <= 4) {
+ asMasm().ma_store(value.low, address,
+ static_cast<LoadStoreSize>(8 * byteSize));
+ asMasm().append(access, asMasm().size() - 4);
+ } else {
+ asMasm().ma_store(value.high,
+ BaseIndex(HeapReg, ptr, TimesOne, INT64HIGH_OFFSET),
+ SizeWord);
+ asMasm().append(access, asMasm().size() - 4);
+ asMasm().ma_store(value.low, address, SizeWord);
+ }
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
// Acquire the process-wide 64-bit-atomics spinlock with an ll/sc loop. A
// probe load of the access's last byte is emitted first so an invalid heap
// address traps before the lock is taken (otherwise the lock would be held
// across the trap).
static void EnterAtomic64Region(MacroAssembler& masm,
                                const wasm::MemoryAccessDesc& access,
                                Register addr, Register spinlock,
                                Register scratch) {
  masm.movePtr(wasm::SymbolicAddress::js_jit_gAtomic64Lock, spinlock);

  masm.append(access, masm.size());
  masm.as_lbu(
      zero, addr,
      7);  // Force memory trap on invalid access before we enter the spinlock.

  Label tryLock;

  masm.memoryBarrier(MembarFull);

  masm.bind(&tryLock);

  // Spin until we observe the lock word as 0 and our sc of 1 succeeds.
  masm.as_ll(scratch, spinlock, 0);
  masm.ma_b(scratch, scratch, &tryLock, Assembler::NonZero, ShortJump);
  masm.ma_li(scratch, Imm32(1));
  masm.as_sc(scratch, spinlock, 0);
  masm.ma_b(scratch, scratch, &tryLock, Assembler::Zero, ShortJump);

  masm.memoryBarrier(MembarFull);
}
+
// Release the 64-bit-atomics spinlock by storing 0, fenced on both sides.
static void ExitAtomic64Region(MacroAssembler& masm, Register spinlock) {
  masm.memoryBarrier(MembarFull);
  masm.as_sw(zero, spinlock, 0);
  masm.memoryBarrier(MembarFull);
}
+
// 64-bit atomic load under the spinlock. |temp| must be invalid (unused on
// this platform); output.low doubles as the lock-acquisition scratch.
template <typename T>
static void AtomicLoad64(MacroAssembler& masm,
                         const wasm::MemoryAccessDesc& access, const T& mem,
                         Register64 temp, Register64 output) {
  MOZ_ASSERT(temp.low == InvalidReg && temp.high == InvalidReg);

  masm.computeEffectiveAddress(mem, SecondScratchReg);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(Address(SecondScratchReg, 0), output);

  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
+
// Public 64-bit atomic load entry points; both forward to the spinlock
// helper.
void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                                      const Address& mem, Register64 temp,
                                      Register64 output) {
  AtomicLoad64(*this, access, mem, temp, output);
}

void MacroAssembler::wasmAtomicLoad64(const wasm::MemoryAccessDesc& access,
                                      const BaseIndex& mem, Register64 temp,
                                      Register64 output) {
  AtomicLoad64(*this, access, mem, temp, output);
}
+
// 64-bit atomic store under the spinlock; |temp| is the lock-acquisition
// scratch.
template <typename T>
void MacroAssemblerMIPSCompat::wasmAtomicStore64(
    const wasm::MemoryAccessDesc& access, const T& mem, Register temp,
    Register64 value) {
  computeEffectiveAddress(mem, SecondScratchReg);

  EnterAtomic64Region(asMasm(), access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister, /* scratch= */ temp);

  store64(value, Address(SecondScratchReg, 0));

  ExitAtomic64Region(asMasm(), /* spinlock= */ ScratchRegister);
}

template void MacroAssemblerMIPSCompat::wasmAtomicStore64(
    const wasm::MemoryAccessDesc& access, const Address& mem, Register temp,
    Register64 value);
template void MacroAssemblerMIPSCompat::wasmAtomicStore64(
    const wasm::MemoryAccessDesc& access, const BaseIndex& mem, Register temp,
    Register64 value);
+
// 64-bit compare-and-exchange under the spinlock: load the current value into
// |output|, and store |replace| only if both halves match |expect|. |output|
// always holds the previous memory contents on exit.
template <typename T>
static void WasmCompareExchange64(MacroAssembler& masm,
                                  const wasm::MemoryAccessDesc& access,
                                  const T& mem, Register64 expect,
                                  Register64 replace, Register64 output) {
  MOZ_ASSERT(output != expect);
  MOZ_ASSERT(output != replace);

  Label exit;

  masm.computeEffectiveAddress(mem, SecondScratchReg);
  Address addr(SecondScratchReg, 0);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(addr, output);

  masm.ma_b(output.low, expect.low, &exit, Assembler::NotEqual, ShortJump);
  masm.ma_b(output.high, expect.high, &exit, Assembler::NotEqual, ShortJump);
  masm.store64(replace, addr);
  masm.bind(&exit);
  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
+
// Public 64-bit compare-exchange entry points; both forward to the spinlock
// helper.
void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const Address& mem,
                                           Register64 expect,
                                           Register64 replace,
                                           Register64 output) {
  WasmCompareExchange64(*this, access, mem, expect, replace, output);
}

void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
                                           const BaseIndex& mem,
                                           Register64 expect,
                                           Register64 replace,
                                           Register64 output) {
  WasmCompareExchange64(*this, access, mem, expect, replace, output);
}
+
// 64-bit atomic exchange under the spinlock: |output| receives the old value,
// |src| is stored unconditionally.
template <typename T>
static void WasmAtomicExchange64(MacroAssembler& masm,
                                 const wasm::MemoryAccessDesc& access,
                                 const T& mem, Register64 src,
                                 Register64 output) {
  masm.computeEffectiveAddress(mem, SecondScratchReg);
  Address addr(SecondScratchReg, 0);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(addr, output);
  masm.store64(src, addr);

  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
+
// Public 64-bit atomic exchange entry points; both forward to the spinlock
// helper.
void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const Address& mem, Register64 src,
                                          Register64 output) {
  WasmAtomicExchange64(*this, access, mem, src, output);
}

void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
                                          const BaseIndex& mem, Register64 src,
                                          Register64 output) {
  WasmAtomicExchange64(*this, access, mem, src, output);
}
+
// 64-bit atomic read-modify-write under the spinlock: |output| receives the
// old value, |temp| the new value, which is stored back. Add/sub propagate
// carry/borrow between the 32-bit halves via sltu.
template <typename T>
static void AtomicFetchOp64(MacroAssembler& masm,
                            const wasm::MemoryAccessDesc& access, AtomicOp op,
                            Register64 value, const T& mem, Register64 temp,
                            Register64 output) {
  masm.computeEffectiveAddress(mem, SecondScratchReg);

  EnterAtomic64Region(masm, access, /* addr= */ SecondScratchReg,
                      /* spinlock= */ ScratchRegister,
                      /* scratch= */ output.low);

  masm.load64(Address(SecondScratchReg, 0), output);

  switch (op) {
    case AtomicFetchAddOp:
      // Carry out of the low word: temp.low < output.low iff it wrapped.
      masm.as_addu(temp.low, output.low, value.low);
      masm.as_sltu(temp.high, temp.low, output.low);
      masm.as_addu(temp.high, temp.high, output.high);
      masm.as_addu(temp.high, temp.high, value.high);
      break;
    case AtomicFetchSubOp:
      // Borrow from the high word when output.low < value.low.
      masm.as_sltu(temp.high, output.low, value.low);
      masm.as_subu(temp.high, output.high, temp.high);
      masm.as_subu(temp.low, output.low, value.low);
      masm.as_subu(temp.high, temp.high, value.high);
      break;
    case AtomicFetchAndOp:
      masm.as_and(temp.low, output.low, value.low);
      masm.as_and(temp.high, output.high, value.high);
      break;
    case AtomicFetchOrOp:
      masm.as_or(temp.low, output.low, value.low);
      masm.as_or(temp.high, output.high, value.high);
      break;
    case AtomicFetchXorOp:
      masm.as_xor(temp.low, output.low, value.low);
      masm.as_xor(temp.high, output.high, value.high);
      break;
    default:
      MOZ_CRASH();
  }

  masm.store64(temp, Address(SecondScratchReg, 0));

  ExitAtomic64Region(masm, /* spinlock= */ ScratchRegister);
}
+
// Public 64-bit atomic fetch-op entry points; both forward to the spinlock
// helper.
void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const Address& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, access, op, value, mem, temp, output);
}

void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
                                         AtomicOp op, Register64 value,
                                         const BaseIndex& mem, Register64 temp,
                                         Register64 output) {
  AtomicFetchOp64(*this, access, op, value, mem, temp, output);
}
+
+// ========================================================================
+// Convert floating point.
+
// 2^32 as a double: scale factor for the high word when assembling a 64-bit
// integer as high * 2^32 + low.
static const double TO_DOUBLE_HIGH_SCALE = 0x100000000;

// The mips32 implementation below needs no temp register.
bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
// dest = (double)src, treating src as unsigned 64-bit:
// dest = (double)high * 2^32 + (double)low, both words unsigned.
void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
                                           Register temp) {
  MOZ_ASSERT(temp == Register::Invalid());
  convertUInt32ToDouble(src.high, dest);
  loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
  mulDouble(ScratchDoubleReg, dest);
  convertUInt32ToDouble(src.low, ScratchDoubleReg);
  addDouble(ScratchDoubleReg, dest);
}
+
// dest = (double)src, treating src as signed 64-bit: the high word is
// converted signed (it carries the sign), the low word unsigned.
void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
  convertInt32ToDouble(src.high, dest);
  loadConstantDouble(TO_DOUBLE_HIGH_SCALE, ScratchDoubleReg);
  mulDouble(ScratchDoubleReg, dest);
  convertUInt32ToDouble(src.low, ScratchDoubleReg);
  addDouble(ScratchDoubleReg, dest);
}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
new file mode 100644
index 0000000000..142fcdc800
--- /dev/null
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -0,0 +1,802 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MacroAssembler_mips32_h
+#define jit_mips32_MacroAssembler_mips32_h
+
+#include "mozilla/EndianUtils.h"
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+#include "jit/MoveResolver.h"
+#include "vm/BytecodeUtil.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace jit {
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+struct ImmType : public ImmTag {
+ ImmType(JSValueType type) : ImmTag(JSVAL_TYPE_TO_TAG(type)) {}
+};
+
+static constexpr ValueOperand JSReturnOperand{JSReturnReg_Type,
+ JSReturnReg_Data};
+static const ValueOperand softfpReturnOperand = ValueOperand(v1, v0);
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value),
+ "The defaultShift is wrong");
+
+static const uint32_t LOW_32_MASK = (1LL << 32) - 1;
+#if MOZ_LITTLE_ENDIAN()
+static const int32_t LOW_32_OFFSET = 0;
+static const int32_t HIGH_32_OFFSET = 4;
+#else
+static const int32_t LOW_32_OFFSET = 4;
+static const int32_t HIGH_32_OFFSET = 0;
+#endif
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope {
+ const ValueOperand& v_;
+
+ public:
+ ScratchTagScope(MacroAssembler&, const ValueOperand& v) : v_(v) {}
+ operator Register() { return v_.typeReg(); }
+ void release() {}
+ void reacquire() {}
+};
+
+class ScratchTagScopeRelease {
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope*) {}
+};
+
+class MacroAssemblerMIPS : public MacroAssemblerMIPSShared {
+ public:
+ using MacroAssemblerMIPSShared::ma_b;
+ using MacroAssemblerMIPSShared::ma_cmp_set;
+ using MacroAssemblerMIPSShared::ma_ld;
+ using MacroAssemblerMIPSShared::ma_li;
+ using MacroAssemblerMIPSShared::ma_liPatchable;
+ using MacroAssemblerMIPSShared::ma_load;
+ using MacroAssemblerMIPSShared::ma_ls;
+ using MacroAssemblerMIPSShared::ma_sd;
+ using MacroAssemblerMIPSShared::ma_ss;
+ using MacroAssemblerMIPSShared::ma_store;
+ using MacroAssemblerMIPSShared::ma_subTestOverflow;
+
+ void ma_li(Register dest, CodeLabel* label);
+
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm);
+
+ // load
+ void ma_load(Register dest, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ void ma_store(Register data, Address address, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_addTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);
+
+ // subtract
+ void ma_subTestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+
+ // memory
+ // shortcut for when we know we're transferring 32 bits of data
+ void ma_lw(Register data, Address address);
+
+ void ma_sw(Register data, Address address);
+ void ma_sw(Imm32 imm, Address address);
+ void ma_sw(Register data, BaseIndex& address);
+
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
+ // branches when done from within mips-specific code
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+ void ma_b(Address addr, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ma_b(addr, Imm32(uint32_t(imm.value)), l, c, jumpKind);
+ }
+
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_b(ScratchRegister, rhs, l, c, jumpKind);
+ }
+
+ void ma_bal(Label* l, DelaySlotFill delaySlotFill = FillDelaySlot);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ void ma_ls(FloatRegister ft, Address address);
+ void ma_ld(FloatRegister ft, Address address);
+ void ma_sd(FloatRegister ft, Address address);
+ void ma_ss(FloatRegister ft, Address address);
+
+ void ma_ldc1WordAligned(FloatRegister ft, Register base, int32_t off);
+ void ma_sdc1WordAligned(FloatRegister ft, Register base, int32_t off);
+
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c) {
+ ma_cmp_set(dst, lhs, Imm32(uint32_t(imm.value)), c);
+ }
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c) {
+ ma_cmp_set(dst, lhs, ImmWord(uintptr_t(imm.value)), c);
+ }
+ void ma_cmp_set(Register dst, Register lhs, Address addr, Condition c) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_lw(ScratchRegister, addr);
+ ma_cmp_set(dst, lhs, ScratchRegister, c);
+ }
+ void ma_cmp_set(Register dst, Address lhs, Register rhs, Condition c) {
+ MOZ_ASSERT(rhs != ScratchRegister);
+ ma_lw(ScratchRegister, lhs);
+ ma_cmp_set(dst, ScratchRegister, rhs, c);
+ }
+ void ma_cmp_set(Register dst, Address lhs, ImmPtr imm, Condition c) {
+ ma_lw(SecondScratchReg, lhs);
+ ma_cmp_set(dst, SecondScratchReg, imm, c);
+ }
+
+ // These functions abstract the access to the high part of the double precision
+ // float register. It is intended to work on both 32 bit and 64 bit
+ // floating point coprocessor.
+ // :TODO: (Bug 985881) Modify this for N32 ABI to use mthc1 and mfhc1
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_mtc1(src, getOddPair(dest));
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_mfc1(dest, getOddPair(src));
+ }
+};
+
+class MacroAssembler;
+
+class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS {
+ public:
+ using MacroAssemblerMIPS::call;
+
+ MacroAssemblerMIPSCompat() {}
+
+ void convertBoolToInt32(Register source, Register dest);
+ void convertInt32ToDouble(Register src, FloatRegister dest);
+ void convertInt32ToDouble(const Address& src, FloatRegister dest);
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest);
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_addu(dest, address.base, Imm32(address.offset));
+ }
+
+ inline void computeEffectiveAddress(const BaseIndex& address, Register dest);
+
+ void j(Label* dest) { ma_b(dest); }
+
+ void mov(Register src, Register dest) { as_ori(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void branch(JitCode* c) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+ void branch(const Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void nop() { as_nop(); }
+ void ret() {
+ ma_pop(ra);
+ as_jr(ra);
+ as_nop();
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmWord imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(ImmGCPtr imm) {
+ ma_li(ScratchRegister, imm);
+ ma_push(ScratchRegister);
+ }
+ void push(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ ma_push(ScratchRegister);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+ // Emit a branch that can be toggled to a non-operation. On MIPS we use
+ // "andi" instruction to toggle the branch.
+ // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+ // Emit a "jalr" or "nop" instruction. ToggleCall can be used to patch
+ // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Four instructions used in: MacroAssemblerMIPSCompat::toggledCall
+ return 4 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ CodeOffset label = movWithPatch(imm, ScratchRegister);
+ ma_push(ScratchRegister);
+ return label;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset label = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return label;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ return movWithPatch(ImmWord(uintptr_t(imm.value)), dest);
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ BufferOffset off = writeInst(-1);
+ label->patchAt()->bind(off.getOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ }
+
+ void jump(Label* label) { ma_b(label); }
+ void jump(Register reg) {
+ as_jr(reg);
+ as_nop();
+ }
+ void jump(const Address& address) {
+ loadPtr(address, ScratchRegister);
+ as_jr(ScratchRegister);
+ as_nop();
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void negl(Register reg) { ma_negu(reg, reg); }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ MOZ_ASSERT(value.typeReg() == tag);
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest, JSValueType);
+ void unboxNonDouble(const Address& src, Register dest, JSValueType);
+ void unboxNonDouble(const BaseIndex& src, Register dest, JSValueType);
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxObjectOrNull(const Address& src, Register dest);
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType);
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ unboxObject(src, dest);
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ unboxObject(src, dest);
+ }
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.payloadReg(), val.payloadReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+ // Extended unboxing API. If the payload is already in a register, returns
+ // that register. Otherwise, provides a move to the given scratch register,
+ // and returns that.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ return value.payloadReg();
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ return value.typeReg();
+ }
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& address, FloatRegister dest);
+ void loadInt32OrDouble(Register base, Register index, FloatRegister dest,
+ int32_t shift = defaultShift);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ // higher level tag testing code
+ Operand ToPayload(Operand base);
+ Address ToPayload(Address base) {
+ return ToPayload(Operand(base)).toAddress();
+ }
+
+ BaseIndex ToPayload(BaseIndex base) {
+ return BaseIndex(base.base, base.index, base.scale,
+ base.offset + NUNBOX32_PAYLOAD_OFFSET);
+ }
+
+ protected:
+ Operand ToType(Operand base);
+ Address ToType(Address base) { return ToType(Operand(base)).toAddress(); }
+
+ uint32_t getType(const Value& val);
+ void moveData(const Value& val, Register data);
+
+ public:
+ void moveValue(const Value& val, Register type, Register data);
+
+ void loadUnboxedValue(Address address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ ma_lw(dest.gpr(), ToPayload(address));
+ }
+ }
+
+ void loadUnboxedValue(BaseIndex address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address.base, address.index, dest.fpu(), address.scale);
+ } else {
+ load32(ToPayload(address), dest.gpr());
+ }
+ }
+
+ template <typename T>
+ void storeUnboxedValue(ConstantOrRegister value, MIRType valueType,
+ const T& dest, MIRType slotType);
+
+ template <typename T>
+ void storeUnboxedPayload(ValueOperand value, T address, size_t nbytes,
+ JSValueType) {
+ switch (nbytes) {
+ case 4:
+ store32(value.payloadReg(), address);
+ return;
+ case 1:
+ store8(value.payloadReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void moveValue(const Value& val, const ValueOperand& dest);
+
+ void moveValue(const ValueOperand& src, const ValueOperand& dest) {
+ Register s0 = src.typeReg(), d0 = dest.typeReg(), s1 = src.payloadReg(),
+ d1 = dest.payloadReg();
+
+ // Either one or both of the source registers could be the same as a
+ // destination register.
+ if (s1 == d0) {
+ if (s0 == d1) {
+ // If both are, this is just a swap of two registers.
+ MOZ_ASSERT(d1 != ScratchRegister);
+ MOZ_ASSERT(d0 != ScratchRegister);
+ move32(d1, ScratchRegister);
+ move32(d0, d1);
+ move32(ScratchRegister, d0);
+ return;
+ }
+ // If only one is, copy that source first.
+ std::swap(s0, s1);
+ std::swap(d0, d1);
+ }
+
+ if (s0 != d0) {
+ move32(s0, d0);
+ }
+ if (s1 != d1) {
+ move32(s1, d1);
+ }
+ }
+
+ void storeValue(ValueOperand val, Operand dst);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ load32(ToType(src), temp);
+ store32(temp, ToType(dest));
+
+ load32(ToPayload(src), temp);
+ store32(temp, ToPayload(dest));
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(Operand dest, ValueOperand val) {
+ loadValue(dest.toAddress(), val);
+ }
+ void loadValue(const BaseIndex& addr, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+#if MOZ_LITTLE_ENDIAN()
+ void pushValue(const Value& val) {
+ push(Imm32(val.toNunboxTag()));
+ if (val.isGCThing()) {
+ push(ImmGCPtr(val.toGCThing()));
+ } else {
+ push(Imm32(val.toNunboxPayload()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ ma_push(reg);
+ }
+#else
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ push(ImmGCPtr(val.toGCThing()));
+ } else {
+ push(Imm32(val.toNunboxPayload()));
+ }
+ push(Imm32(val.toNunboxTag()));
+ }
+ void pushValue(JSValueType type, Register reg) {
+ ma_push(reg);
+ push(ImmTag(JSVAL_TYPE_TO_TAG(type)));
+ }
+#endif
+ void pushValue(const Address& addr);
+
+ void storePayload(const Value& val, Address dest);
+ void storePayload(Register src, Address dest);
+ void storePayload(const Value& val, const BaseIndex& dest);
+ void storePayload(Register src, const BaseIndex& dest);
+ void storeTypeTag(ImmTag tag, Address dest);
+ void storeTypeTag(ImmTag tag, const BaseIndex& dest);
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail);
+
+ template <typename T>
+ void wasmAtomicStore64(const wasm::MemoryAccessDesc& access, const T& mem,
+ Register temp, Register64 value);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ void load8SignExtend(const Address& address, Register dest);
+ void load8SignExtend(const BaseIndex& src, Register dest);
+
+ void load8ZeroExtend(const Address& address, Register dest);
+ void load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ void load16SignExtend(const Address& address, Register dest);
+ void load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, SignExtend);
+ }
+
+ void load16ZeroExtend(const Address& address, Register dest);
+ void load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ ma_load_unaligned(dest, src, SizeHalfWord, ZeroExtend);
+ }
+
+ void load32(const Address& address, Register dest);
+ void load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ ma_load_unaligned(dest, src);
+ }
+
+ void load64(const Address& address, Register64 dest) {
+ load32(LowWord(address), dest.low);
+ load32(HighWord(address), dest.high);
+ }
+ void load64(const BaseIndex& address, Register64 dest) {
+ load32(LowWord(address), dest.low);
+ load32(HighWord(address), dest.high);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ ma_load_unaligned(dest.low, LowWord(src));
+ ma_load_unaligned(dest.high, HighWord(src));
+ }
+
+ void loadPtr(const Address& address, Register dest);
+ void loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ void loadUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+
+ void loadUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& src, Register temp,
+ FloatRegister dest);
+
+ void store8(Register src, const Address& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ void store16(Register src, const Address& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest, SizeHalfWord);
+ }
+
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Register src, const Address& address);
+ void store32(Register src, const BaseIndex& address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ // NOTE: This will use second scratch on MIPS. Only ARM needs the
+ // implementation without second scratch.
+ void store32_NoSecondScratch(Imm32 src, const Address& address) {
+ store32(src, address);
+ }
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ ma_store_unaligned(src, dest);
+ }
+
+ void store64(Register64 src, Address address) {
+ store32(src.low, Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(src.high, Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+ void store64(Register64 src, const BaseIndex& address) {
+ store32(src.low, Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(src.high, Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+
+ void store64(Imm64 imm, Address address) {
+ store32(imm.low(), Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(imm.hi(), Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ store32(imm.low(), Address(address.base, address.offset + LOW_32_OFFSET));
+ store32(imm.hi(), Address(address.base, address.offset + HIGH_32_OFFSET));
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ ma_store_unaligned(src.low, LowWord(dest));
+ ma_store_unaligned(src.high, HighWord(dest));
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, const Address& address);
+ void storePtr(Register src, const BaseIndex& address);
+ void storePtr(Register src, AbsoluteAddress dest);
+
+ void storeUnalignedFloat32(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+ void storeUnalignedDouble(const wasm::MemoryAccessDesc& access,
+ FloatRegister src, Register temp,
+ const BaseIndex& dest);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) { as_movd(dest, src); }
+
+ void zeroDouble(FloatRegister reg) {
+ moveToDoubleLo(zero, reg);
+ moveToDoubleHi(zero, reg);
+ }
+
+ void breakpoint();
+
+ void checkStackAlignment();
+
+ void alignStackPointer();
+ void restoreStackPointer();
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ // If source is a double, load it into dest. If source is int32,
+ // convert it to double. Else, branch to failure.
+ void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void cmp64Set(Condition cond, Register64 lhs, Register64 rhs, Register dest);
+ void cmp64Set(Condition cond, Register64 lhs, Imm64 val, Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void enterAtomic64Region(Register addr, Register spinlock, Register tmp);
+ void exitAtomic64Region(Register spinlock);
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+ Condition ma_cmp64(Condition cond, Register64 lhs, Register64 rhs,
+ Register dest);
+ Condition ma_cmp64(Condition cond, Register64 lhs, Imm64 val, Register dest);
+
+ public:
+ void lea(Operand addr, Register dest) {
+ ma_addu(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
+ void abiret() {
+ as_jr(ra);
+ as_nop();
+ }
+
+ void ma_storeImm(Imm32 imm, const Address& addr) { ma_sw(imm, addr); }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_movs(dest, src);
+ }
+ void loadWasmGlobalPtr(uint32_t globalDataOffset, Register dest) {
+ loadPtr(Address(WasmTlsReg,
+ offsetof(wasm::TlsData, globalArea) + globalDataOffset),
+ dest);
+ }
+ void loadWasmPinnedRegsFromTls() {
+ loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, memoryBase)), HeapReg);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerMIPSCompat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MacroAssembler_mips32_h */
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.cpp b/js/src/jit/mips32/MoveEmitter-mips32.cpp
new file mode 100644
index 0000000000..2f52c73899
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.cpp
@@ -0,0 +1,152 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips32/MoveEmitter-mips32.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterMIPS::breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slotId) {
+ // This handles a two-move cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(getAdjustedAddress(to), temp);
+ // Since it is uncertain if the load will be aligned or not
+ // just fill both of them with the same value.
+ masm.storeFloat32(temp, cycleSlot(slotId, 0));
+ masm.storeFloat32(temp, cycleSlot(slotId, 4));
+ } else {
+ // Just always store the largest possible size.
+ masm.storeDouble(to.floatReg().doubleOverlay(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(getAdjustedAddress(to), temp);
+ masm.storeDouble(temp, cycleSlot(slotId, 0));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId, 0));
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ [[fallthrough]];
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(getAdjustedAddress(to), temp);
+ masm.storePtr(temp, cycleSlot(0, 0));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.storePtr(to.reg(), cycleSlot(0, 0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPS::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+ // This handles a two-move cycle of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+ // saved value of B, to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchFloat32Reg;
+ masm.loadFloat32(cycleSlot(slotId, 0), temp);
+ masm.storeFloat32(temp, getAdjustedAddress(to));
+ } else {
+ uint32_t offset = 0;
+ if (from.floatReg().numAlignedAliased() == 1) {
+ offset = sizeof(float);
+ }
+ masm.loadFloat32(cycleSlot(slotId, offset), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ FloatRegister temp = ScratchDoubleReg;
+ masm.loadDouble(cycleSlot(slotId, 0), temp);
+ masm.storeDouble(temp, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId, 0), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(sizeof(uintptr_t) == sizeof(int32_t));
+ [[fallthrough]];
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ Register temp = tempReg();
+ masm.loadPtr(cycleSlot(0, 0), temp);
+ masm.storePtr(temp, getAdjustedAddress(to));
+ } else {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(to.reg() != spilledReg_);
+ masm.loadPtr(cycleSlot(0, 0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPS::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ // Used for passing double parameter in a2,a3 register pair.
+ // Two moves are added for one double parameter by
+ // MacroAssembler::passABIArg
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.moveFromDoubleLo(from.floatReg(), a2);
+ masm.moveFromDoubleHi(from.floatReg(), a3);
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralRegPair()) {
+ // Used for passing double parameter in a2,a3 register pair.
+ // Two moves are added for one double parameter by
+ // MacroAssembler::passABIArg
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.evenReg() == a2 && to.oddReg() == a3,
+ "Invalid emitDoubleMove arguments.");
+ masm.loadPtr(getAdjustedAddress(from), a2);
+ masm.loadPtr(
+ Address(from.base(), getAdjustedOffset(from) + sizeof(uint32_t)), a3);
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadDouble(getAdjustedAddress(from), ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, getAdjustedAddress(to));
+ }
+}
diff --git a/js/src/jit/mips32/MoveEmitter-mips32.h b/js/src/jit/mips32/MoveEmitter-mips32.h
new file mode 100644
index 0000000000..4a669e9991
--- /dev/null
+++ b/js/src/jit/mips32/MoveEmitter-mips32.h
@@ -0,0 +1,31 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_MoveEmitter_mips32_h
+#define jit_mips32_MoveEmitter_mips32_h
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+// MIPS32-specific move emitter. The shared MIPS emitter drives move
+// resolution; this subclass supplies the 32-bit-only pieces: 64-bit double
+// moves and the breakCycle/completeCycle pair used to resolve circular
+// move dependencies via spill slots.
+// NOTE(review): the single-argument constructor could be marked explicit.
+class MoveEmitterMIPS : public MoveEmitterMIPSShared {
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ public:
+ MoveEmitterMIPS(MacroAssembler& masm) : MoveEmitterMIPSShared(masm) {}
+};
+
+typedef MoveEmitterMIPS MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_MoveEmitter_mips32_h */
diff --git a/js/src/jit/mips32/SharedICRegisters-mips32.h b/js/src/jit/mips32/SharedICRegisters-mips32.h
new file mode 100644
index 0000000000..b8686b3521
--- /dev/null
+++ b/js/src/jit/mips32/SharedICRegisters-mips32.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips32_SharedICRegisters_mips32_h
+#define jit_mips32_SharedICRegisters_mips32_h
+
+#include "jit/mips32/Assembler-mips32.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// Registers reserved for the Baseline interpreter/JIT frame and stack.
+static constexpr Register BaselineFrameReg = s5;
+static constexpr Register BaselineStackReg = sp;
+
+// Value operands used by shared IC stubs. On 32-bit MIPS a ValueOperand is
+// a register pair (presumably tag register first, payload second — matches
+// the two-register boxing used elsewhere on mips32; confirm against
+// ValueOperand's constructor).
+static constexpr ValueOperand R0(a3, a2);
+static constexpr ValueOperand R1(s7, s6);
+static constexpr ValueOperand R2(t7, t6);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t5;
+
+// Unused on this platform.
+static constexpr Register ExtractTemp0 = InvalidReg;
+static constexpr Register ExtractTemp1 = InvalidReg;
+
+// Register used internally by MacroAssemblerMIPS.
+static constexpr Register BaselineSecondScratchReg = SecondScratchReg;
+
+// Note that ICTailCallReg is actually just the link register.
+// In MIPS code emission, we do not clobber ICTailCallReg since we keep
+// the return address for calls there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f2;
+static constexpr FloatRegister FloatReg2 = f4;
+static constexpr FloatRegister FloatReg3 = f6;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips32_SharedICRegisters_mips32_h */
diff --git a/js/src/jit/mips32/Simulator-mips32.cpp b/js/src/jit/mips32/Simulator-mips32.cpp
new file mode 100644
index 0000000000..5efc0f20db
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.cpp
@@ -0,0 +1,3631 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/mips32/Simulator-mips32.h"
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include <float.h>
+
+#include "jit/AtomicOperations.h"
+#include "jit/mips32/Assembler-mips32.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#define I8(v) static_cast<int8_t>(v)
+#define I16(v) static_cast<int16_t>(v)
+#define U16(v) static_cast<uint16_t>(v)
+#define I32(v) static_cast<int32_t>(v)
+#define U32(v) static_cast<uint32_t>(v)
+
+namespace js {
+namespace jit {
+
+// The special `break` instruction (with MAX_BREAK_CODE) that the simulator
+// recognizes as a redirection to a host C++ function rather than a trap.
+static const Instr kCallRedirInstr =
+ op_special | MAX_BREAK_CODE << FunctionBits | ff_break;
+
+// Utils functions.
+// Two two's-complement values share a sign iff XOR of their sign bits is 0.
+static bool HaveSameSign(int32_t a, int32_t b) { return ((a ^ b) >= 0); }
+
+// Map an FPU condition-code index to its FCSR bit position: cc0 lives in
+// bit 23; cc1..cc7 live in bits 25..31 (bit 24 is skipped, hence 24 + cc).
+static uint32_t GetFCSRConditionBit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+// Saturation bounds for 32-bit register arithmetic (INT32_MAX/INT32_MIN).
+static const int32_t kRegisterskMaxValue = 0x7fffffff;
+static const int32_t kRegisterskMinValue = 0x80000000;
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+// Decoding wrapper laid directly over a 4-byte instruction word in memory:
+// a SimInstruction* is just the instruction's address reinterpreted, so the
+// class must never be constructed or copied (see the deleted members at the
+// bottom) and holds no state of its own.
+class SimInstruction {
+ public:
+ enum {
+ kInstrSize = 4,
+ // On MIPS PC cannot actually be directly accessed. We behave as if PC was
+ // always the value of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const { return (instructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits (inclusive [hi, lo] range;
+ // the mask (2 << (hi - lo)) - 1 covers hi - lo + 1 bits).
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+ // Accessors for the different named fields used in the MIPS encoding.
+ inline OpcodeField opcodeValue() const {
+ return static_cast<OpcodeField>(
+ bits(OpcodeShift + OpcodeBits - 1, OpcodeShift));
+ }
+
+ inline int rsValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RSShift + RSBits - 1, RSShift);
+ }
+
+ inline int rtValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(RTShift + RTBits - 1, RTShift);
+ }
+
+ inline int rdValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(RDShift + RDBits - 1, RDShift);
+ }
+
+ inline int saValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return bits(SAShift + SABits - 1, SAShift);
+ }
+
+ inline int functionValue() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return bits(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+
+ // FPU register fields (no type assertion: valid for cop1/cop1x forms).
+ inline int fdValue() const { return bits(FDShift + FDBits - 1, FDShift); }
+
+ inline int fsValue() const { return bits(FSShift + FSBits - 1, FSShift); }
+
+ inline int ftValue() const { return bits(FTShift + FTBits - 1, FTShift); }
+
+ inline int frValue() const { return bits(FRShift + FRBits - 1, FRShift); }
+
+ // Float Compare condition code instruction bits.
+ inline int fcccValue() const {
+ return bits(FCccShift + FCccBits - 1, FCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int fbccValue() const {
+ return bits(FBccShift + FBccBits - 1, FBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int fbtrueValue() const {
+ return bits(FBtrueShift + FBtrueBits - 1, FBtrueShift);
+ }
+
+ // Return the fields at their original place in the instruction encoding.
+ inline OpcodeField opcodeFieldRaw() const {
+ return static_cast<OpcodeField>(instructionBits() & OpcodeMask);
+ }
+
+ inline int rsFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RSMask;
+ }
+
+ // Same as above function, but safe to call within instructionType().
+ inline int rsFieldRawNoAssert() const { return instructionBits() & RSMask; }
+
+ inline int rtFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType ||
+ instructionType() == kImmediateType);
+ return instructionBits() & RTMask;
+ }
+
+ inline int rdFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & RDMask;
+ }
+
+ inline int saFieldRaw() const {
+ MOZ_ASSERT(instructionType() == kRegisterType);
+ return instructionBits() & SAMask;
+ }
+
+ inline int functionFieldRaw() const {
+ return instructionBits() & FunctionMask;
+ }
+
+ // Get the secondary field according to the opcode. The secondary field
+ // disambiguates instructions that share a primary opcode; which bits hold
+ // it depends on that opcode (function, rs, or rt field).
+ inline int secondaryValue() const {
+ OpcodeField op = opcodeFieldRaw();
+ switch (op) {
+ case op_special:
+ case op_special2:
+ return functionValue();
+ case op_cop1:
+ return rsValue();
+ case op_regimm:
+ return rtValue();
+ default:
+ return ff_null;
+ }
+ }
+
+ inline int32_t imm16Value() const {
+ MOZ_ASSERT(instructionType() == kImmediateType);
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ MOZ_ASSERT(instructionType() == kJumpType);
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+ // Say if the instruction should not be used in a branch delay slot.
+ bool isForbiddenInBranchDelay() const;
+ // Say if the instruction 'links'. e.g. jal, bal.
+ bool isLinkingInstruction() const;
+ // Say if the instruction is a debugger break/trap.
+ bool isTrap() const;
+
+ private:
+ // Never instantiated: instances are only ever reinterpret_cast'ed from
+ // instruction addresses.
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+// Returns true for instructions that the architecture forbids in a branch
+// delay slot: all jumps/branches (including the regimm branch forms and the
+// register jumps jr/jalr).
+bool SimInstruction::isForbiddenInBranchDelay() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_j:
+ case op_jal:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bgez:
+ case rt_bltzal:
+ case rt_bgezal:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ break;
+ default:
+ return false;
+ }
+}
+
+// Returns true for instructions that write a return address into ra
+// (jal, the and-link regimm branches, and jalr).
+bool SimInstruction::isLinkingInstruction() const {
+ const int op = opcodeFieldRaw();
+ switch (op) {
+ case op_jal:
+ return true;
+ case op_regimm:
+ switch (rtFieldRaw()) {
+ case rt_bgezal:
+ case rt_bltzal:
+ return true;
+ default:
+ return false;
+ };
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jalr:
+ return true;
+ default:
+ return false;
+ };
+ default:
+ return false;
+ };
+}
+
+// Returns true if this is a debugger break/trap instruction, excluding the
+// two encodings the simulator reserves for itself: the C-call redirection
+// break (kCallRedirInstr) and trap instructions carrying kWasmTrapCode.
+bool SimInstruction::isTrap() const {
+ if (opcodeFieldRaw() != op_special) {
+ return false;
+ } else {
+ switch (functionFieldRaw()) {
+ case ff_break:
+ return instructionBits() != kCallRedirInstr;
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ // Bits 15..6 hold the trap code for conditional trap instructions.
+ return bits(15, 6) != kWasmTrapCode;
+ default:
+ return false;
+ };
+ }
+}
+
+// Classify the instruction into one of the three MIPS encoding formats
+// (register, 16-bit immediate, 26-bit jump), or kUnsupported for anything
+// the simulator does not implement.
+SimInstruction::Type SimInstruction::instructionType() const {
+ switch (opcodeFieldRaw()) {
+ case op_special:
+ switch (functionFieldRaw()) {
+ case ff_jr:
+ case ff_jalr:
+ case ff_break:
+ case ff_sll:
+ case ff_srl:
+ case ff_sra:
+ case ff_sllv:
+ case ff_srlv:
+ case ff_srav:
+ case ff_mfhi:
+ case ff_mflo:
+ case ff_mult:
+ case ff_multu:
+ case ff_div:
+ case ff_divu:
+ case ff_add:
+ case ff_addu:
+ case ff_sub:
+ case ff_subu:
+ case ff_and:
+ case ff_or:
+ case ff_xor:
+ case ff_nor:
+ case ff_slt:
+ case ff_sltu:
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ case ff_movz:
+ case ff_movn:
+ case ff_movci:
+ case ff_sync:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special2:
+ switch (functionFieldRaw()) {
+ case ff_mul:
+ case ff_madd:
+ case ff_maddu:
+ case ff_clz:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_special3:
+ switch (functionFieldRaw()) {
+ case ff_ins:
+ case ff_ext:
+ case ff_bshfl:
+ return kRegisterType;
+ default:
+ return kUnsupported;
+ };
+ break;
+ case op_cop1: // Coprocessor instructions.
+ switch (rsFieldRawNoAssert()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ return kImmediateType;
+ default:
+ return kRegisterType;
+ };
+ break;
+ case op_cop1x:
+ return kRegisterType;
+ // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16.
+ case op_regimm:
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ case op_beql:
+ case op_bnel:
+ case op_blezl:
+ case op_bgtzl:
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ case op_sb:
+ case op_sh:
+ case op_swl:
+ case op_sw:
+ case op_swr:
+ case op_lwc1:
+ case op_ldc1:
+ case op_swc1:
+ case op_sdc1:
+ case op_ll:
+ case op_sc:
+ return kImmediateType;
+ // 26 bits immediate type instructions. e.g.: j imm26.
+ case op_j:
+ case op_jal:
+ return kJumpType;
+ default:
+ return kUnsupported;
+ }
+ // Not reached: every switch arm above returns. Kept to satisfy compilers
+ // that cannot prove the switch is exhaustive.
+ return kUnsupported;
+}
+
+// C/C++ argument slots size. The O32 calling convention reserves four
+// argument slots on the stack for calls into C/C++ code.
+const int kCArgSlotCount = 4;
+const int kCArgsSlotsSize = kCArgSlotCount * SimInstruction::kInstrSize;
+// Offset past a branch and its delay slot.
+const int kBranchReturnOffset = 2 * SimInstruction::kInstrSize;
+
+// One 4KB page of the simulated instruction cache: a copy of the page's
+// data plus a per-line validity map (one byte per 4-byte line) used to
+// detect execution of stale code after unflushed writes.
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ // All lines start out invalid.
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+// RAII guard over the process-wide simulator cache lock; unlocks on scope
+// exit via the LockGuard base.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ AutoLockSimulatorCache() : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+// Process-wide simulator state definitions.
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+// -1 means "never stop"; may be overridden via MIPS_SIM_STOP_AT (see
+// Simulator::Create).
+int Simulator::StopSimAt = -1;
+
+// Allocate and initialize a Simulator instance, returning nullptr on OOM or
+// failed init. Honors the MIPS_SIM_STOP_AT environment variable, which sets
+// the instruction count at which simulation drops into the debugger.
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ char* stopAtStr = getenv("MIPS_SIM_STOP_AT");
+ int64_t stopAt;
+ // NOTE(review): "%lld" scans a long long; int64_t is only guaranteed to
+ // match on platforms where int64_t is long long. "%" SCNd64 would be the
+ // portable spelling — confirm target platforms before relying on this.
+ if (stopAtStr && sscanf(stopAtStr, "%lld", &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %lld\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim.release();
+}
+
+// Counterpart of Create(); js_delete accepts null.
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code. It implements an interactive gdb-like shell (see debug()) with a
+// single breakpoint slot and helpers for inspecting simulated registers.
+class MipsDebugger {
+ public:
+ explicit MipsDebugger(Simulator* sim) : sim_(sim) {}
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with a nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xfffff to easily recognize it.
+ static const Instr kBreakpointInstr = op_special | ff_break | 0xfffff << 6;
+ static const Instr kNopInstr = op_special | ff_sll;
+
+ Simulator* sim_;
+
+ // Simulated register accessors; regnum may be kPCRegister for the PC.
+ int32_t getRegisterValue(int regnum);
+ int32_t getFPURegisterValueInt(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ // Parse a register name, 0x-prefixed hex, or decimal literal into *value.
+ bool getValue(const char* desc, int32_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+// Abort the process on an instruction the simulator does not implement.
+static void UNSUPPORTED() {
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+// Handle a stop instruction: record its message, print it, skip the
+// two-instruction stop sequence (the break plus the embedded message
+// pointer that follows it), and enter the debugger shell.
+void MipsDebugger::stop(SimInstruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+// Read a general-purpose register, treating the pseudo-register number
+// kPCRegister as the simulated PC.
+int32_t MipsDebugger::getRegisterValue(int regnum) {
+ if (regnum == kPCRegister) {
+ return sim_->get_pc();
+ }
+ return sim_->getRegister(regnum);
+}
+
+// Thin forwards to the simulator's FPU register file, one per view
+// (raw 32-bit, raw 64-bit, float, double).
+int32_t MipsDebugger::getFPURegisterValueInt(int regnum) {
+ return sim_->getFpuRegister(regnum);
+}
+
+int64_t MipsDebugger::getFPURegisterValueLong(int regnum) {
+ return sim_->getFpuRegisterLong(regnum);
+}
+
+float MipsDebugger::getFPURegisterValueFloat(int regnum) {
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double MipsDebugger::getFPURegisterValueDouble(int regnum) {
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+// Parse a debugger operand: first try a register name, then a 0x-prefixed
+// hex literal, then a plain decimal/"%i" literal. Returns false if nothing
+// matches.
+bool MipsDebugger::getValue(const char* desc, int32_t* value) {
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%i", value) == 1;
+}
+
+// Record a single breakpoint (only one may be active at a time; fails if
+// one is already set). The break instruction itself is patched in later by
+// redoBreakpoints() when the shell resumes execution.
+bool MipsDebugger::setBreakpoint(SimInstruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+// Remove the (single) active breakpoint, restoring the original
+// instruction. NOTE(review): the breakpc parameter is ignored — with only
+// one breakpoint slot there is nothing to select, and callers pass nullptr.
+bool MipsDebugger::deleteBreakpoint(SimInstruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+// Temporarily restore the original instruction at the breakpoint so the
+// shell can disassemble and single-step through it.
+void MipsDebugger::undoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+}
+
+// Re-install the break instruction before resuming normal execution.
+void MipsDebugger::redoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+ }
+}
+
+// Print every general-purpose register (two per output row) followed by
+// LO, HI and the PC.
+void MipsDebugger::printAllRegs() {
+ int32_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%08x %10d ", Registers::GetName(i), value, value);
+
+ if (i % 2) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::LO);
+ printf(" LO: 0x%08x %10d ", value, value);
+ value = getRegisterValue(Simulator::HI);
+ printf(" HI: 0x%08x %10d\n", value, value);
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%08x\n", value);
+}
+
+// Print the GPRs, then every FPU register. Even-numbered registers start a
+// double (even/odd pair), so those rows additionally show the double view.
+void MipsDebugger::printAllRegsIncludingFPU() {
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::RegisterIdLimit; i++) {
+ if (i & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n", FloatRegisters::GetName(i),
+ getFPURegisterValueInt(i), getFPURegisterValueFloat(i));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i), getFPURegisterValueInt(i),
+ getFPURegisterValueFloat(i), getFPURegisterValueDouble(i));
+ }
+ }
+}
+
+// Read one (possibly long) line from stdin, growing the buffer 256 bytes at
+// a time. Returns a heap-allocated NUL-terminated string the CALLER owns
+// (released to it via UniqueChars::release), or nullptr on EOF/error.
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+ // Since we read a new line we are done reading the line. This
+ // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+// Debugger-only helper: disassemble the 4-byte instruction at pc by piping
+// its bytes through an external llvm-mc invocation via system(). Purely
+// best-effort — prints a message if the pipeline fails.
+static void DisassembleInstruction(uint32_t pc) {
+ uint8_t* bytes = reinterpret_cast<uint8_t*>(pc);
+ char hexbytes[256];
+ sprintf(hexbytes, "0x%x 0x%x 0x%x 0x%x", bytes[0], bytes[1], bytes[2],
+ bytes[3]);
+ char llvmcmd[1024];
+ sprintf(llvmcmd,
+ "bash -c \"echo -n '%p'; echo '%s' | "
+ "llvm-mc -disassemble -arch=mipsel -mcpu=mips32r2 | "
+ "grep -v pure_instructions | grep -v .text\"",
+ static_cast<void*>(bytes), hexbytes);
+ if (system(llvmcmd)) {
+ printf("Cannot disassemble instruction.\n");
+ }
+}
+
+// Interactive debugger shell: reads commands from stdin and executes them
+// against the simulator until 'cont' is issued or simulation ends.
+// Breakpoints are undone on entry and re-installed on exit so stepping and
+// disassembly see the original instructions.
+void MipsDebugger::debug() {
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+// Stringification helpers used to build sscanf width specifiers below.
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ // Bare newline repeats the previous command.
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_;
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!instr->isTrap()) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int32_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Code fCode = FloatRegister::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+ printf("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (fCode != FloatRegisters::Invalid) {
+ // Odd FPU registers only have a float view; even ones start a
+ // double pair (same formatting as printAllRegsIncludingFPU).
+ if (fCode & 0x1) {
+ printf("%3s: 0x%08x\tflt: %-8.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode));
+ } else {
+ printf("%3s: 0x%08x\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fCode),
+ getFPURegisterValueInt(fCode),
+ getFPURegisterValueFloat(fCode),
+ getFPURegisterValueDouble(fCode));
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ printf("print <register> or print <fpu register> single\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = nullptr;
+ int32_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int32_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+ // Optional word count; defaults to 10.
+ int32_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%08x %10d", cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int32_t value1;
+ int32_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint32_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ // Host (x86) breakpoint: the simulator itself runs on non-MIPS
+ // hardware, so this drops the hosting process into gdb.
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int32_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) {
+ printf("setting breakpoint failed\n");
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ printf("No flags on MIPS !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int32_t value;
+ // The stop sequence we may be sitting just past is two instructions
+ // long (break + message pointer); back up to its start.
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address = reinterpret_cast<SimInstruction*>(
+ stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("printobject <register>\n");
+ printf(" print an object from a register (alias 'po')\n");
+ printf("stack [<words>]\n");
+ printf(" dump stack content, default dump 10 words)\n");
+ printf("mem <address> [<words>]\n");
+ printf(" dump memory content, default dump 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+ printf(" stop and and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+ printf(" - The Simulator keeps track of how many times they \n");
+ printf(" are met. (See the info command.) Going over a\n");
+ printf(" disabled stop still increases its counter. \n");
+ printf(" Commands:\n");
+ printf(" stop info all/<code> : print infos about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
// Flush from start up to and not including start + size.
// The range must be line-aligned and contained in one page; every cache line
// in it is marked invalid so the next fetch re-reads memory.
static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
                               intptr_t start, int size) {
  MOZ_ASSERT(size <= CachePage::kPageSize);
  MOZ_ASSERT(AllOnOnePage(start, size - 1));
  MOZ_ASSERT((start & CachePage::kLineMask) == 0);
  MOZ_ASSERT((size & CachePage::kLineMask) == 0);
  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
  int offset = (start & CachePage::kPageMask);
  CachePage* cache_page = GetCachePageLocked(i_cache, page);
  char* valid_bytemap = cache_page->validityByte(offset);
  // One validity byte per cache line: invalidate size/line-length entries.
  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
+
// Invalidates [start_addr, start_addr + size) in the simulated i-cache,
// widening the range to whole cache lines and splitting it at page
// boundaries (FlushOnePageLocked handles one page at a time).
static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
                              void* start_addr, size_t size) {
  intptr_t start = reinterpret_cast<intptr_t>(start_addr);
  // Round start down to a line boundary, growing size to compensate...
  int intra_line = (start & CachePage::kLineMask);
  start -= intra_line;
  size += intra_line;
  // ...then round size up to a whole number of lines.
  size = ((size - 1) | CachePage::kLineMask) + 1;
  int offset = (start & CachePage::kPageMask);
  // While the range still crosses a page boundary, flush up to the end of
  // the current page and advance.
  while (!AllOnOnePage(start, size - 1)) {
    int bytes_to_flush = CachePage::kPageSize - offset;
    FlushOnePageLocked(i_cache, start, bytes_to_flush);
    start += bytes_to_flush;
    size -= bytes_to_flush;
    MOZ_ASSERT((start & CachePage::kPageMask) == 0);
    offset = 0;
  }
  // Flush whatever remains inside the final page.
  if (size != 0) {
    FlushOnePageLocked(i_cache, start, size);
  }
}
+
/* static */
// Consistency check for the simulated i-cache: on a hit the cached bytes
// must match what is currently in memory; on a miss the containing line is
// loaded into the cache and marked valid.
void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
  intptr_t address = reinterpret_cast<intptr_t>(instr);
  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
  int offset = (address & CachePage::kPageMask);
  CachePage* cache_page = GetCachePageLocked(icache(), page);
  char* cache_valid_byte = cache_page->validityByte(offset);
  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
  char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);

  if (cache_hit) {
    // Check that the data in memory matches the contents of the I-cache.
    // cmpret is only consumed by the debug assertion below.
    int cmpret =
        memcmp(reinterpret_cast<void*>(instr), cache_page->cachedData(offset),
               SimInstruction::kInstrSize);
    MOZ_ASSERT(cmpret == 0);
  } else {
    // Cache miss. Load memory into the cache.
    memcpy(cached_line, line, CachePage::kLineLength);
    *cache_valid_byte = CachePage::LINE_VALID;
  }
}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
// Keys and lookups are page-aligned base addresses, so raw pointer equality
// is the right notion of a match.
bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
  MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
  MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
  return k == l;
}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
Simulator::Simulator() {
  // Set up simulator support first. Some of this information is needed to
  // setup the architecture state.

  // Note, allocation and anything that depends on allocated memory is
  // deferred until init(), in order to handle OOM properly.

  stack_ = nullptr;
  stackLimit_ = 0;
  pc_modified_ = false;
  icount_ = 0;
  break_count_ = 0;
  break_pc_ = nullptr;
  break_instr_ = 0;
  single_stepping_ = false;
  single_step_callback_ = nullptr;
  single_step_callback_arg_ = nullptr;

  // Set up architecture state.
  // All registers are initialized to zero to start with.
  for (int i = 0; i < Register::kNumSimuRegisters; i++) {
    registers_[i] = 0;
  }
  for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
    FPUregisters_[i] = 0;
  }
  // FPU control/status and load-linked reservation state start cleared.
  FCSR_ = 0;
  LLBit_ = false;
  LLAddr_ = 0;
  lastLLValue_ = 0;

  // The ra and pc are initialized to a known bad value that will cause an
  // access violation if the simulator ever tries to execute it.
  registers_[pc] = bad_ra;
  registers_[ra] = bad_ra;

  // Clear the per-exception counters.
  for (int i = 0; i < kNumExceptions; i++) {
    exceptions[i] = 0;
  }

  lastDebuggerInput_ = nullptr;
}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
// A linked-list entry pairing a host-native function with the embedded swi
// instruction that stands in for it in simulated code (see the comment
// above for the overall redirection scheme).
class Redirection {
  friend class SimulatorProcess;

  // sim's lock must already be held.
  // Links the new entry at the head of the process-wide list, flushing the
  // swi instruction's cache line so the simulator decodes it freshly.
  Redirection(void* nativeFunction, ABIFunctionType type)
      : nativeFunction_(nativeFunction),
        swiInstruction_(kCallRedirInstr),
        type_(type),
        next_(nullptr) {
    next_ = SimulatorProcess::redirection();
    if (!SimulatorProcess::ICacheCheckingDisableCount) {
      FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
                        SimInstruction::kInstrSize);
    }
    SimulatorProcess::setRedirection(this);
  }

 public:
  void* addressOfSwiInstruction() { return &swiInstruction_; }
  void* nativeFunction() const { return nativeFunction_; }
  ABIFunctionType type() const { return type_; }

  // Returns the (unique) redirection for |nativeFunction|, creating one on
  // first use. Takes the simulator cache lock.
  static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
    AutoLockSimulatorCache als;

    Redirection* current = SimulatorProcess::redirection();
    for (; current != nullptr; current = current->next_) {
      if (current->nativeFunction_ == nativeFunction) {
        MOZ_ASSERT(current->type() == type);
        return current;
      }
    }

    // Note: we can't use js_new here because the constructor is private.
    AutoEnterOOMUnsafeRegion oomUnsafe;
    Redirection* redir = js_pod_malloc<Redirection>(1);
    if (!redir) {
      oomUnsafe.crash("Simulator redirection");
    }
    new (redir) Redirection(nativeFunction, type);
    return redir;
  }

  // Recovers the owning Redirection from the address of its embedded swi
  // instruction (all the simulator has when the trap is hit).
  static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
    uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
    uint8_t* addrOfRedirection =
        addrOfSwi - offsetof(Redirection, swiInstruction_);
    return reinterpret_cast<Redirection*>(addrOfRedirection);
  }

 private:
  void* nativeFunction_;     // Host function the stub forwards to.
  uint32_t swiInstruction_;  // Trap instruction executed by the simulator.
  ABIFunctionType type_;     // Signature used to marshal args/results.
  Redirection* next_;        // Next entry in the process-wide list.
};
+
+Simulator::~Simulator() { js_free(stack_); }
+
SimulatorProcess::SimulatorProcess()
    : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
  // Opt into i-cache consistency checks via the environment by clearing the
  // disable count.
  if (getenv("MIPS_SIM_ICACHE_CHECKS")) {
    ICacheCheckingDisableCount = 0;
  }
}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
// Get the active Simulator for the current thread.
Simulator* Simulator::Current() {
  JSContext* cx = TlsContext.get();
  // The simulator hangs off the JSContext; only its owning thread may use it.
  MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
  return cx->simulator();
}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int32_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::setFpuRegister(int fpureg, int32_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, float value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, double value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters) &&
+ ((fpureg % 2) == 0));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int32_t Simulator::getRegister(int reg) const {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0) {
+ return 0;
+ }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+double Simulator::getDoubleFromRegisterPair(int reg) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters) &&
+ ((reg % 2) == 0));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ memcpy(&dm_val, &registers_[reg], sizeof(dm_val));
+ return (dm_val);
+}
+
+int32_t Simulator::getFpuRegister(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int64_t Simulator::getFpuRegisterLong(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters) &&
+ ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<int64_t*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+float Simulator::getFpuRegisterFloat(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+double Simulator::getFpuRegisterDouble(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters) &&
+ ((fpureg % 2) == 0));
+ return *mozilla::BitwiseCast<double*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are constructed here,
// from a0-a3 or f12 and f14.
void Simulator::getFpArgs(double* x, double* y, int32_t* z) {
  // O32 hard-float convention: doubles arrive in f12/f14, the integer in a2.
  *x = getFpuRegisterDouble(12);
  *y = getFpuRegisterDouble(14);
  *z = getRegister(a2);
}
+
// Reads a double out of two consecutive 32-bit stack slots at |stack|.
void Simulator::getFpFromStack(int32_t* stack, double* x) {
  MOZ_ASSERT(stack);
  MOZ_ASSERT(x);
  memcpy(x, stack, sizeof(double));
}
+
// Double results are returned in f0 under the O32 hard-float convention.
void Simulator::setCallResultDouble(double result) {
  setFpuRegisterDouble(f0, result);
}
+
// Float results are returned in f0 under the O32 hard-float convention.
void Simulator::setCallResultFloat(float result) {
  setFpuRegisterFloat(f0, result);
}
+
// A 64-bit integer result is split across the v0 (low word) / v1 (high
// word) register pair.
void Simulator::setCallResult(int64_t res) {
  setRegister(v0, static_cast<int32_t>(res));
  setRegister(v1, static_cast<int32_t>(res >> 32));
}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::setFCSRBit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
// Sets the rounding error codes in FCSR based on the result of the rounding.
// Returns true if the operation was invalid.
bool Simulator::setFCSRRoundError(double original, double rounded) {
  bool ret = false;

  // Cause bits reflect only the most recent operation, so clear them first.
  setFCSRBit(kFCSRInexactCauseBit, false);
  setFCSRBit(kFCSRUnderflowCauseBit, false);
  setFCSRBit(kFCSROverflowCauseBit, false);
  setFCSRBit(kFCSRInvalidOpCauseBit, false);

  // NaN or infinity in either input or result: invalid operation.
  if (!std::isfinite(original) || !std::isfinite(rounded)) {
    setFCSRBit(kFCSRInvalidOpFlagBit, true);
    setFCSRBit(kFCSRInvalidOpCauseBit, true);
    ret = true;
  }

  // Any change of value marks the result inexact (not itself an error, so
  // |ret| is untouched here).
  if (original != rounded) {
    setFCSRBit(kFCSRInexactFlagBit, true);
    setFCSRBit(kFCSRInexactCauseBit, true);
  }

  // Nonzero magnitude below the smallest normal double: underflow.
  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
    setFCSRBit(kFCSRUnderflowFlagBit, true);
    setFCSRBit(kFCSRUnderflowCauseBit, true);
    ret = true;
  }

  // Result out of int32 range: overflow.
  if (rounded > INT_MAX || rounded < INT_MIN) {
    setFCSRBit(kFCSROverflowFlagBit, true);
    setFCSRBit(kFCSROverflowCauseBit, true);
    // The reference is not really clear but it seems this is required:
    setFCSRBit(kFCSRInvalidOpFlagBit, true);
    setFCSRBit(kFCSRInvalidOpCauseBit, true);
    ret = true;
  }

  return ret;
}
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
  // Record that the PC was explicitly written so the dispatch loop does not
  // advance it again past this instruction.
  pc_modified_ = true;
  registers_[pc] = value;
}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
// Raw access to the PC register without the special adjustment when reading.
// (Compare getRegister(pc), which adds SimInstruction::kPCReadOffset.)
int32_t Simulator::get_pc() const { return registers_[pc]; }
+
// Snapshots pc/fp/sp/ra for the profiler's frame iterator.
JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
  wasm::RegisterState state;
  state.pc = (void*)get_pc();
  state.fp = (void*)getRegister(fp);
  state.sp = (void*)getRegister(sp);
  state.lr = (void*)getRegister(ra);
  return state;
}
+
+// MIPS memory instructions (except lwl/r and swl/r) trap on unaligned memory
+// access enabling the OS to handle them via trap-and-emulate.
+// Note that simulator runs have the runtime system running directly on the host
+// system and only generated code is executed in the simulator.
+// Since the host is typically IA32 it will not trap on unaligned memory access.
// We assume that executing correct generated code will not produce
+// unaligned memory access, so we explicitly check for address alignment and
+// trap. Note that trapping does not occur when executing wasm code, which
+// requires that unaligned memory access provides correct result.
+int Simulator::readW(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeW(uint32_t addr, int value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+double Simulator::readD(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return NAN;
+ }
+
+ if ((addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned (double) read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeD(uint32_t addr, double value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ if ((addr & kDoubleAlignmentMask) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ double* ptr = reinterpret_cast<double*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned (double) write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint16_t Simulator::readHU(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return 0xffff;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int16_t Simulator::readH(uint32_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+ }
+ printf("Unaligned signed halfword read at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+void Simulator::writeH(uint32_t addr, uint16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+void Simulator::writeH(uint32_t addr, int16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ if ((addr & 1) == 0 ||
+ wasm::InCompiledCode(reinterpret_cast<void*>(get_pc()))) {
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+ }
+ printf("Unaligned halfword write at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+}
+
+uint32_t Simulator::readBU(uint32_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return 0xff;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int32_t Simulator::readB(uint32_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeB(uint32_t addr, uint8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+}
+
+void Simulator::writeB(uint32_t addr, int8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+}
+
+int Simulator::loadLinkedW(uint32_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+ printf("Unaligned read at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
// Simulates the MIPS sc (store-conditional) instruction. Returns 1 when the
// store succeeds and 0 when the LL reservation has been lost.
int Simulator::storeConditionalW(uint32_t addr, int value,
                                 SimInstruction* instr) {
  // Correct behavior in this case, as defined by architecture, is to just
  // return 0, but there is no point in allowing that. It is certainly an
  // indicator of a bug.
  if (addr != LLAddr_) {
    printf("SC to bad address: 0x%08x, pc=0x%08" PRIxPTR ", expected: 0x%08x\n",
           addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
    MOZ_CRASH();
  }

  if ((addr & kPointerAlignmentMask) == 0) {
    SharedMem<int32_t*> ptr =
        SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));

    // The reservation was cleared (e.g. by an intervening store): fail.
    if (!LLBit_) {
      return 0;
    }

    // Consume the reservation, then emulate it with a compare-and-swap
    // against the value the ll observed.
    LLBit_ = false;
    LLAddr_ = 0;
    int32_t expected = lastLLValue_;
    int32_t old =
        AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
    return (old == expected) ? 1 : 0;
  }
  printf("Unaligned SC at 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
         reinterpret_cast<intptr_t>(instr));
  MOZ_CRASH();
  return 0;
}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = getRegister(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
// Unsupported instructions use format to print an error and stop execution.
// (The |format| parameter intentionally shadows the method name; it is the
// textual description of the unsupported encoding.)
void Simulator::format(SimInstruction* instr, const char* format) {
  printf("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
         reinterpret_cast<intptr_t>(instr), format);
  MOZ_CRASH();
}
+
// Note: With the code below we assume that all runtime calls return a 64-bit
// result. If they don't, the v1 result register contains a bogus value, which
// is fine because it is caller-saved.

// General (integer) calls, by argument count.
typedef int64_t (*Prototype_General0)();
typedef int64_t (*Prototype_General1)(int32_t arg0);
typedef int64_t (*Prototype_General2)(int32_t arg0, int32_t arg1);
typedef int64_t (*Prototype_General3)(int32_t arg0, int32_t arg1, int32_t arg2);
typedef int64_t (*Prototype_General4)(int32_t arg0, int32_t arg1, int32_t arg2,
                                      int32_t arg3);
typedef int64_t (*Prototype_General5)(int32_t arg0, int32_t arg1, int32_t arg2,
                                      int32_t arg3, int32_t arg4);
typedef int64_t (*Prototype_General6)(int32_t arg0, int32_t arg1, int32_t arg2,
                                      int32_t arg3, int32_t arg4, int32_t arg5);
typedef int64_t (*Prototype_General7)(int32_t arg0, int32_t arg1, int32_t arg2,
                                      int32_t arg3, int32_t arg4, int32_t arg5,
                                      int32_t arg6);
typedef int64_t (*Prototype_General8)(int32_t arg0, int32_t arg1, int32_t arg2,
                                      int32_t arg3, int32_t arg4, int32_t arg5,
                                      int32_t arg6, int32_t arg7);
// Calls taking 64-bit integer arguments (passed as 32-bit register pairs).
typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int32_t arg0,
                                                        int32_t arg1,
                                                        int32_t arg2,
                                                        int64_t arg3);
typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int32_t arg0,
                                                      int32_t arg1,
                                                      int64_t arg2,
                                                      int64_t arg3);

// Floating-point calls (doubles in f12/f14, floats in f12/f14 as well).
typedef double (*Prototype_Double_None)();
typedef double (*Prototype_Double_Double)(double arg0);
typedef double (*Prototype_Double_Int)(int32_t arg0);
typedef int32_t (*Prototype_Int_Double)(double arg0);
typedef int64_t (*Prototype_Int64_Double)(double arg0);
typedef int32_t (*Prototype_Int_DoubleIntInt)(double arg0, int32_t arg1,
                                              int32_t arg2);
typedef int32_t (*Prototype_Int_IntDoubleIntInt)(int32_t arg0, double arg1,
                                                 int32_t arg2, int32_t arg3);
typedef float (*Prototype_Float32_Float32)(float arg0);
typedef int32_t (*Prototype_Int_Float32)(float arg0);
typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
typedef float (*Prototype_Float32_IntInt)(int arg0, int arg1);

// Mixed integer/double calls.
typedef double (*Prototype_Double_DoubleInt)(double arg0, int32_t arg1);
typedef double (*Prototype_Double_IntInt)(int32_t arg0, int32_t arg1);
typedef double (*Prototype_Double_IntDouble)(int32_t arg0, double arg1);
typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
typedef int32_t (*Prototype_Int_IntDouble)(int32_t arg0, double arg1);

// Three- and four-double calls; extra doubles are read off the stack.
typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1,
                                                      double arg2);
typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0,
                                                            double arg1,
                                                            double arg2,
                                                            double arg3);
+
// Combines two 32-bit register halves into one 64-bit value. Little-endian
// order: |first| supplies the low word, |second| the high word.
static int64_t MakeInt64(int32_t first, int32_t second) {
  const uint64_t lo = static_cast<uint32_t>(first);
  const uint64_t hi = static_cast<uint64_t>(static_cast<uint32_t>(second))
                      << 32;
  return static_cast<int64_t>(hi | lo);
}
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void Simulator::softwareInterrupt(SimInstruction* instr) {
+ int32_t func = instr->functionFieldRaw();
+ uint32_t code = (func == ff_break) ? instr->bits(25, 6) : -1;
+
+ // We first check if we met a call_rt_redirected.
+ if (instr->instructionBits() == kCallRedirInstr) {
+#if !defined(USES_O32_ABI)
+ MOZ_CRASH("Only O32 ABI supported.");
+#else
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ int32_t arg0 = getRegister(a0);
+ int32_t arg1 = getRegister(a1);
+ int32_t arg2 = getRegister(a2);
+ int32_t arg3 = getRegister(a3);
+
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(getRegister(sp));
+ // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+ int32_t arg4 = stack_pointer[4];
+ int32_t arg5 = stack_pointer[5];
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int32_t saved_ra = getRegister(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->nativeFunction());
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ switch (redirection->type()) {
+ case Args_General0: {
+ Prototype_General0 target =
+ reinterpret_cast<Prototype_General0>(external);
+ int64_t result = target();
+ setCallResult(result);
+ break;
+ }
+ case Args_General1: {
+ Prototype_General1 target =
+ reinterpret_cast<Prototype_General1>(external);
+ int64_t result = target(arg0);
+ setCallResult(result);
+ break;
+ }
+ case Args_General2: {
+ Prototype_General2 target =
+ reinterpret_cast<Prototype_General2>(external);
+ int64_t result = target(arg0, arg1);
+ setCallResult(result);
+ break;
+ }
+ case Args_General3: {
+ Prototype_General3 target =
+ reinterpret_cast<Prototype_General3>(external);
+ int64_t result = target(arg0, arg1, arg2);
+ setCallResult(result);
+ break;
+ }
+ case Args_General4: {
+ Prototype_General4 target =
+ reinterpret_cast<Prototype_General4>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3);
+ setCallResult(result);
+ break;
+ }
+ case Args_General5: {
+ Prototype_General5 target =
+ reinterpret_cast<Prototype_General5>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4);
+ setCallResult(result);
+ break;
+ }
+ case Args_General6: {
+ Prototype_General6 target =
+ reinterpret_cast<Prototype_General6>(external);
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ setCallResult(result);
+ break;
+ }
+ case Args_General7: {
+ Prototype_General7 target =
+ reinterpret_cast<Prototype_General7>(external);
+ int32_t arg6 = stack_pointer[6];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6);
+ setCallResult(result);
+ break;
+ }
+ case Args_General8: {
+ Prototype_General8 target =
+ reinterpret_cast<Prototype_General8>(external);
+ int32_t arg6 = stack_pointer[6];
+ int32_t arg7 = stack_pointer[7];
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+ setCallResult(result);
+ break;
+ }
+ case Args_Double_None: {
+ Prototype_Double_None target =
+ reinterpret_cast<Prototype_Double_None>(external);
+ double dresult = target();
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int_Double target =
+ reinterpret_cast<Prototype_Int_Double>(external);
+ int32_t res = target(dval0);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_GeneralGeneralGeneralInt64: {
+ Prototype_GeneralGeneralGeneralInt64 target =
+ reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(external);
+ // The int64 arg is not split across register and stack
+ int64_t result = target(arg0, arg1, arg2, MakeInt64(arg4, arg5));
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_GeneralGeneralInt64Int64: {
+ Prototype_GeneralGeneralInt64Int64 target =
+ reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(external);
+ int64_t result =
+ target(arg0, arg1, MakeInt64(arg2, arg3), MakeInt64(arg4, arg5));
+ setCallResult(result);
+ break;
+ }
+ case Args_Int64_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Int64_Double target =
+ reinterpret_cast<Prototype_Int64_Double>(external);
+ int64_t result = target(dval0);
+ setCallResult(result);
+ break;
+ }
+ case Args_Int_DoubleIntInt: {
+ double dval = getFpuRegisterDouble(12);
+ Prototype_Int_DoubleIntInt target =
+ reinterpret_cast<Prototype_Int_DoubleIntInt>(external);
+ int32_t res = target(dval, arg2, arg3);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Int_IntDoubleIntInt: {
+ double dval = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDoubleIntInt target =
+ reinterpret_cast<Prototype_Int_IntDoubleIntInt>(external);
+ int32_t res = target(arg0, dval, arg4, arg5);
+ setRegister(v0, res);
+ break;
+ }
+ case Args_Double_Double: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_Double target =
+ reinterpret_cast<Prototype_Double_Double>(external);
+ double dresult = target(dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Float32_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Float32_Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32>(external);
+ float fresult = target(fval0);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Int_Float32: {
+ float fval0;
+ fval0 = getFpuRegisterFloat(12);
+ Prototype_Int_Float32 target =
+ reinterpret_cast<Prototype_Int_Float32>(external);
+ int32_t result = target(fval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Float32_Float32Float32: {
+ float fval0;
+ float fval1;
+ fval0 = getFpuRegisterFloat(12);
+ fval1 = getFpuRegisterFloat(14);
+ Prototype_Float32_Float32Float32 target =
+ reinterpret_cast<Prototype_Float32_Float32Float32>(external);
+ float fresult = target(fval0, fval1);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Float32_IntInt: {
+ Prototype_Float32_IntInt target =
+ reinterpret_cast<Prototype_Float32_IntInt>(external);
+ float fresult = target(arg0, arg1);
+ setCallResultFloat(fresult);
+ break;
+ }
+ case Args_Double_Int: {
+ Prototype_Double_Int target =
+ reinterpret_cast<Prototype_Double_Int>(external);
+ double dresult = target(arg0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntInt: {
+ Prototype_Double_IntInt target =
+ reinterpret_cast<Prototype_Double_IntInt>(external);
+ double dresult = target(arg0, arg1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleInt: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleInt target =
+ reinterpret_cast<Prototype_Double_DoubleInt>(external);
+ double dresult = target(dval0, ival);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDouble: {
+ double dval0, dval1;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ Prototype_Double_DoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDouble>(external);
+ double dresult = target(dval0, dval1);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Double_IntDouble target =
+ reinterpret_cast<Prototype_Double_IntDouble>(external);
+ double dresult = target(ival, dval0);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Int_IntDouble: {
+ int32_t ival = getRegister(a0);
+ double dval0 = getDoubleFromRegisterPair(a2);
+ Prototype_Int_IntDouble target =
+ reinterpret_cast<Prototype_Int_IntDouble>(external);
+ int32_t result = target(ival, dval0);
+ setRegister(v0, result);
+ break;
+ }
+ case Args_Double_DoubleDoubleDouble: {
+ double dval0, dval1, dval2;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ // the last argument is on stack
+ getFpFromStack(stack_pointer + 4, &dval2);
+ Prototype_Double_DoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(external);
+ double dresult = target(dval0, dval1, dval2);
+ setCallResultDouble(dresult);
+ break;
+ }
+ case Args_Double_DoubleDoubleDoubleDouble: {
+ double dval0, dval1, dval2, dval3;
+ int32_t ival;
+ getFpArgs(&dval0, &dval1, &ival);
+ // the two last arguments are on stack
+ getFpFromStack(stack_pointer + 4, &dval2);
+ getFpFromStack(stack_pointer + 6, &dval3);
+ Prototype_Double_DoubleDoubleDoubleDouble target =
+ reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(
+ external);
+ double dresult = target(dval0, dval1, dval2, dval3);
+ setCallResultDouble(dresult);
+ break;
+ }
+ default:
+ MOZ_CRASH("call");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+#endif
+ } else if (func == ff_break && code <= kMaxStopCode) {
+ if (isWatchpoint(code)) {
+ printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ switch (func) {
+ case ff_tge:
+ case ff_tgeu:
+ case ff_tlt:
+ case ff_tltu:
+ case ff_teq:
+ case ff_tne:
+ if (instr->bits(15, 6) == kWasmTrapCode) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc(int32_t(newPC));
+ return;
+ }
+ }
+ };
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
+// A break code in the low range [0, kMaxWatchpointCode] denotes a
+// watchpoint rather than a stop.
+bool Simulator::isWatchpoint(uint32_t code) {
+  return code <= kMaxWatchpointCode;
+}
+
+// Print a break banner followed by the full register state when a
+// watchpoint break code is hit; execution then continues.
+void Simulator::printWatchpoint(uint32_t code) {
+  MipsDebugger dbg(this);
+  ++break_count_;
+  // |code| is unsigned, so print it with %u rather than %d to avoid a
+  // signed/unsigned printf conversion mismatch.
+  printf(
+      "\n---- break %u marker: %3d (instr count: %8d) ----------"
+      "----------------------------------",
+      code, break_count_, icount_);
+  dbg.printAllRegs();  // Print registers and continue running.
+}
+
+// If the stop identified by |code| is enabled, drop into the debugger;
+// otherwise skip over the stop instruction and the message address word
+// that follows it.
+void Simulator::handleStop(uint32_t code, SimInstruction* instr) {
+  if (!isEnabledStop(code)) {
+    set_pc(get_pc() + 2 * SimInstruction::kInstrSize);
+    return;
+  }
+  MipsDebugger dbg(this);
+  dbg.stop(instr);
+}
+
+// A stop is a break instruction whose encoded code lies strictly above
+// the watchpoint range but within the stop range.
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+  if (instr->functionFieldRaw() != ff_break) {
+    return false;
+  }
+  const uint32_t code = static_cast<uint32_t>(instr->bits(25, 6));
+  return code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
+
+// A stop is enabled unless its disabled bit is set in the per-code
+// counter word.
+bool Simulator::isEnabledStop(uint32_t code) {
+  MOZ_ASSERT(code <= kMaxStopCode);
+  MOZ_ASSERT(code > kMaxWatchpointCode);
+  return (watchedStops_[code].count_ & kStopDisabledBit) == 0;
+}
+
+// Clear the disabled bit for stop |code|; enabling an already-enabled
+// stop is a no-op.
+void Simulator::enableStop(uint32_t code) {
+  if (isEnabledStop(code)) {
+    return;
+  }
+  watchedStops_[code].count_ &= ~kStopDisabledBit;
+}
+
+// Set the disabled bit for stop |code|; disabling an already-disabled
+// stop is a no-op.
+void Simulator::disableStop(uint32_t code) {
+  if (!isEnabledStop(code)) {
+    return;
+  }
+  watchedStops_[code].count_ |= kStopDisabledBit;
+}
+
+// Bump the hit counter for stop |code|. When the 31-bit counter would
+// overflow, warn, zero it and re-enable the stop.
+void Simulator::increaseStopCounter(uint32_t code) {
+  MOZ_ASSERT(code <= kMaxStopCode);
+  // Mask off the disabled flag before testing the saturating counter.
+  // Use kStopDisabledBit (as printStopInfo does) instead of the original
+  // (1 << 31), which shifted a signed 1 into the sign bit — undefined
+  // behavior before C++20. Assumes kStopDisabledBit == 1u << 31; verify
+  // against its declaration.
+  if ((watchedStops_[code].count_ & ~kStopDisabledBit) == 0x7fffffff) {
+    printf(
+        "Stop counter for code %u has overflowed.\n"
+        "Enabling this code and resetting the counter to 0.\n",
+        code);
+    watchedStops_[code].count_ = 0;
+    enableStop(code);
+  } else {
+    watchedStops_[code].count_++;
+  }
+}
+
+// Print a stop status.
+// Report the enabled/disabled state, hit count and optional description
+// of stop |code|; watchpoint and out-of-range codes get a diagnostic
+// message instead.
+void Simulator::printStopInfo(uint32_t code) {
+  if (code <= kMaxWatchpointCode) {
+    printf("That is a watchpoint, not a stop.\n");
+    return;
+  }
+  if (code > kMaxStopCode) {
+    printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+    return;
+  }
+  int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+  if (count == 0) {
+    // Don't print the state of unused breakpoints.
+    return;
+  }
+  const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+  if (watchedStops_[code].desc_) {
+    printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+           count, watchedStops_[code].desc_);
+  } else {
+    printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state, count);
+  }
+}
+
+// Crash if any pending exception flag was raised during the
+// configuration step. Slot 0 (the "none" slot) is deliberately skipped.
+void Simulator::signalExceptions() {
+  for (int idx = 1; idx < kNumExceptions; idx++) {
+    if (exceptions[idx]) {
+      MOZ_CRASH("Error: Exception raised.");
+    }
+  }
+}
+
+// Handle execution based on instruction types.
+// Decode-time helper for register-type (R-type and coprocessor)
+// instructions: computes ALU results, 64-bit HI/LO products, the next
+// pc for jumps and the trap flag into the by-reference out-parameters
+// without committing any architectural state. decodeTypeRegister()
+// performs the actual register writes afterwards; exception flags set
+// here (exceptions[]) are checked by signalExceptions() in between.
+void Simulator::configureTypeRegister(SimInstruction* instr, int32_t& alu_out,
+                                      int64_t& i64hilo, uint64_t& u64hilo,
+                                      int32_t& next_pc,
+                                      int32_t& return_addr_reg,
+                                      bool& do_interrupt) {
+  // Every local variable declared here needs to be const.
+  // This is to make sure that changed values are sent back to
+  // decodeTypeRegister correctly.
+
+  // Instruction fields.
+  const OpcodeField op = instr->opcodeFieldRaw();
+  const int32_t rs_reg = instr->rsValue();
+  const int32_t rs = getRegister(rs_reg);
+  const uint32_t rs_u = static_cast<uint32_t>(rs);
+  const int32_t rt_reg = instr->rtValue();
+  const int32_t rt = getRegister(rt_reg);
+  const uint32_t rt_u = static_cast<uint32_t>(rt);
+  const int32_t rd_reg = instr->rdValue();
+  const uint32_t sa = instr->saValue();
+
+  const int32_t fs_reg = instr->fsValue();
+
+  // ---------- Configuration.
+  switch (op) {
+    case op_cop1:  // Coprocessor instructions.
+      switch (instr->rsFieldRaw()) {
+        case rs_bc1:  // Handled in DecodeTypeImmed, should never come here.
+          MOZ_CRASH();
+          break;
+        case rs_cfc1:
+          // At the moment only FCSR is supported.
+          MOZ_ASSERT(fs_reg == kFCSRRegister);
+          alu_out = FCSR_;
+          break;
+        case rs_mfc1:
+          alu_out = getFpuRegister(fs_reg);
+          break;
+        case rs_mfhc1:
+          MOZ_CRASH();
+          break;
+        case rs_ctc1:
+        case rs_mtc1:
+        case rs_mthc1:
+          // Do the store in the execution step.
+          break;
+        case rs_s:
+        case rs_d:
+        case rs_w:
+        case rs_l:
+        case rs_ps:
+          // Do everything in the execution step.
+          break;
+        default:
+          MOZ_CRASH();
+      }
+      break;
+    case op_cop1x:
+      // madd.s/madd.d etc. are fully handled in the execution step.
+      break;
+    case op_special:
+      switch (instr->functionFieldRaw()) {
+        case ff_jr:
+        case ff_jalr:
+          // Jump target comes from rs; rd receives the return address
+          // (written during execution for jalr only).
+          next_pc = getRegister(instr->rsValue());
+          return_addr_reg = instr->rdValue();
+          break;
+        case ff_sll:
+          alu_out = rt << sa;
+          break;
+        case ff_srl:
+          if (rs_reg == 0) {
+            // Regular logical right shift of a word by a fixed number of
+            // bits instruction. RS field is always equal to 0.
+            alu_out = rt_u >> sa;
+          } else {
+            // Logical right-rotate of a word by a fixed number of bits. This
+            // is special case of SRL instruction, added in MIPS32 Release 2.
+            // RS field is equal to 00001.
+            alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+          }
+          break;
+        case ff_sra:
+          alu_out = rt >> sa;
+          break;
+        case ff_sllv:
+          alu_out = rt << rs;
+          break;
+        case ff_srlv:
+          if (sa == 0) {
+            // Regular logical right-shift of a word by a variable number of
+            // bits instruction. SA field is always equal to 0.
+            alu_out = rt_u >> rs;
+          } else {
+            // Logical right-rotate of a word by a variable number of bits.
+            // This is special case od SRLV instruction, added in MIPS32
+            // Release 2. SA field is equal to 00001.
+            alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+          }
+          break;
+        case ff_srav:
+          alu_out = rt >> rs;
+          break;
+        case ff_mfhi:
+          alu_out = getRegister(HI);
+          break;
+        case ff_mflo:
+          alu_out = getRegister(LO);
+          break;
+        case ff_mult:
+          i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+          break;
+        case ff_multu:
+          u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+          break;
+        case ff_add:
+          // Signed add traps on overflow; flag it for signalExceptions().
+          if (HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue - rt);
+            }
+          }
+          alu_out = rs + rt;
+          break;
+        case ff_addu:
+          alu_out = rs + rt;
+          break;
+        case ff_sub:
+          if (!HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue + rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue + rt);
+            }
+          }
+          alu_out = rs - rt;
+          break;
+        case ff_subu:
+          alu_out = rs - rt;
+          break;
+        case ff_and:
+          alu_out = rs & rt;
+          break;
+        case ff_or:
+          alu_out = rs | rt;
+          break;
+        case ff_xor:
+          alu_out = rs ^ rt;
+          break;
+        case ff_nor:
+          alu_out = ~(rs | rt);
+          break;
+        case ff_slt:
+          alu_out = rs < rt ? 1 : 0;
+          break;
+        case ff_sltu:
+          alu_out = rs_u < rt_u ? 1 : 0;
+          break;
+        // Break and trap instructions.
+        case ff_break:
+          do_interrupt = true;
+          break;
+        case ff_tge:
+          do_interrupt = rs >= rt;
+          break;
+        case ff_tgeu:
+          do_interrupt = rs_u >= rt_u;
+          break;
+        case ff_tlt:
+          do_interrupt = rs < rt;
+          break;
+        case ff_tltu:
+          do_interrupt = rs_u < rt_u;
+          break;
+        case ff_teq:
+          do_interrupt = rs == rt;
+          break;
+        case ff_tne:
+          do_interrupt = rs != rt;
+          break;
+        case ff_movn:
+        case ff_movz:
+        case ff_movci:
+        case ff_sync:
+          // No action taken on decode.
+          break;
+        case ff_div:
+        case ff_divu:
+          // div and divu never raise exceptions.
+          break;
+        default:
+          MOZ_CRASH();
+      }
+      break;
+    case op_special2:
+      switch (instr->functionFieldRaw()) {
+        case ff_mul:
+          alu_out = rs_u * rt_u;  // Only the lower 32 bits are kept.
+          break;
+        case ff_mult:
+          i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+          break;
+        case ff_multu:
+          u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+          break;
+        case ff_madd:
+          // Accumulation into HI/LO is completed in the execution step;
+          // here i64hilo only holds this instruction's product.
+          i64hilo += static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+          break;
+        case ff_maddu:
+          u64hilo += static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+          break;
+        case ff_clz:
+          // __builtin_clz(0) is undefined, so 0 maps to 32 explicitly.
+          alu_out = rs_u ? __builtin_clz(rs_u) : 32;
+          break;
+        default:
+          MOZ_CRASH();
+      }
+      break;
+    case op_special3:
+      switch (instr->functionFieldRaw()) {
+        case ff_ins: {  // Mips32r2 instruction.
+          // Interpret rd field as 5-bit msb of insert.
+          uint16_t msb = rd_reg;
+          // Interpret sa field as 5-bit lsb of insert.
+          uint16_t lsb = sa;
+          uint16_t size = msb - lsb + 1;
+          // NOTE(review): if size == 32 (msb == 31, lsb == 0) the shift
+          // (1 << 32) is undefined behavior — TODO confirm encoders never
+          // emit a full-width INS.
+          uint32_t mask = (1 << size) - 1;
+          alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+          break;
+        }
+        case ff_ext: {  // Mips32r2 instruction.
+          // Interpret rd field as 5-bit msb of extract.
+          uint16_t msb = rd_reg;
+          // Interpret sa field as 5-bit lsb of extract.
+          uint16_t lsb = sa;
+          uint16_t size = msb + 1;
+          // NOTE(review): same (1 << 32) concern as ff_ins when msb == 31.
+          uint32_t mask = (1 << size) - 1;
+          alu_out = (rs_u & (mask << lsb)) >> lsb;
+          break;
+        }
+        case ff_bshfl: {  // Mips32r2 instruction.
+          // sa selects the sub-operation: 16 = seb (sign-extend byte),
+          // 24 = seh (sign-extend halfword).
+          if (16 == sa) {  // seb
+            alu_out = I32(I8(rt));
+          } else if (24 == sa) {  // seh
+            alu_out = I32(I16(rt));
+          } else {
+            MOZ_CRASH();
+          }
+          break;
+        }
+        default:
+          MOZ_CRASH();
+      }
+      break;
+    default:
+      MOZ_CRASH();
+  }
+}
+
+// Execute a register-type instruction: re-derives the operand fields,
+// asks configureTypeRegister() for the computed results (ALU value,
+// HI/LO products, jump target, trap flag), raises any pending
+// exceptions, then commits the results to GPRs/FPRs/HI/LO/FCSR and
+// handles jumps, traps, syncs and conditional moves.
+void Simulator::decodeTypeRegister(SimInstruction* instr) {
+  // Instruction fields.
+  const OpcodeField op = instr->opcodeFieldRaw();
+  const int32_t rs_reg = instr->rsValue();
+  const int32_t rs = getRegister(rs_reg);
+  const uint32_t rs_u = static_cast<uint32_t>(rs);
+  const int32_t rt_reg = instr->rtValue();
+  const int32_t rt = getRegister(rt_reg);
+  const uint32_t rt_u = static_cast<uint32_t>(rt);
+  const int32_t rd_reg = instr->rdValue();
+
+  const int32_t fr_reg = instr->frValue();
+  const int32_t fs_reg = instr->fsValue();
+  const int32_t ft_reg = instr->ftValue();
+  const int32_t fd_reg = instr->fdValue();
+  int64_t i64hilo = 0;
+  uint64_t u64hilo = 0;
+
+  // ALU output.
+  // It should not be used as is. Instructions using it should always
+  // initialize it first.
+  int32_t alu_out = 0x12345678;
+
+  // For break and trap instructions.
+  bool do_interrupt = false;
+
+  // For jr and jalr.
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Next pc
+  int32_t next_pc = 0;
+  int32_t return_addr_reg = 31;
+
+  // Set up the variables if needed before executing the instruction.
+  configureTypeRegister(instr, alu_out, i64hilo, u64hilo, next_pc,
+                        return_addr_reg, do_interrupt);
+
+  // ---------- Raise exceptions triggered.
+  signalExceptions();
+
+  // ---------- Execution.
+  switch (op) {
+    case op_cop1:
+      switch (instr->rsFieldRaw()) {
+        case rs_bc1:  // Branch on coprocessor condition.
+          MOZ_CRASH();
+          break;
+        case rs_cfc1:
+        case rs_mfc1:
+          // Move from coprocessor: value was fetched in the configure
+          // step; commit it to the GPR here.
+          setRegister(rt_reg, alu_out);
+          break;
+        case rs_mfhc1:
+          MOZ_CRASH();
+          break;
+        case rs_ctc1:
+          // At the moment only FCSR is supported.
+          MOZ_ASSERT(fs_reg == kFCSRRegister);
+          FCSR_ = registers_[rt_reg];
+          break;
+        case rs_mtc1:
+          FPUregisters_[fs_reg] = registers_[rt_reg];
+          break;
+        case rs_mthc1:
+          MOZ_CRASH();
+          break;
+        case rs_s:
+          // Single-precision FP arithmetic, compares (via FCSR condition
+          // bits), conversions and conditional moves.
+          float f, ft_value, fs_value;
+          uint32_t cc, fcsr_cc;
+          fs_value = getFpuRegisterFloat(fs_reg);
+          ft_value = getFpuRegisterFloat(ft_reg);
+          cc = instr->fcccValue();
+          fcsr_cc = GetFCSRConditionBit(cc);
+          switch (instr->functionFieldRaw()) {
+            case ff_add_fmt:
+              setFpuRegisterFloat(fd_reg, fs_value + ft_value);
+              break;
+            case ff_sub_fmt:
+              setFpuRegisterFloat(fd_reg, fs_value - ft_value);
+              break;
+            case ff_mul_fmt:
+              setFpuRegisterFloat(fd_reg, fs_value * ft_value);
+              break;
+            case ff_div_fmt:
+              setFpuRegisterFloat(fd_reg, fs_value / ft_value);
+              break;
+            case ff_abs_fmt:
+              setFpuRegisterFloat(fd_reg, fabsf(fs_value));
+              break;
+            case ff_mov_fmt:
+              setFpuRegisterFloat(fd_reg, fs_value);
+              break;
+            case ff_neg_fmt:
+              setFpuRegisterFloat(fd_reg, -fs_value);
+              break;
+            case ff_sqrt_fmt:
+              setFpuRegisterFloat(fd_reg, sqrtf(fs_value));
+              break;
+            case ff_c_un_fmt:
+              setFCSRBit(fcsr_cc,
+                         mozilla::IsNaN(fs_value) || mozilla::IsNaN(ft_value));
+              break;
+            case ff_c_eq_fmt:
+              setFCSRBit(fcsr_cc, (fs_value == ft_value));
+              break;
+            case ff_c_ueq_fmt:
+              setFCSRBit(fcsr_cc,
+                         (fs_value == ft_value) || (mozilla::IsNaN(fs_value) ||
+                                                    mozilla::IsNaN(ft_value)));
+              break;
+            case ff_c_olt_fmt:
+              setFCSRBit(fcsr_cc, (fs_value < ft_value));
+              break;
+            case ff_c_ult_fmt:
+              setFCSRBit(fcsr_cc,
+                         (fs_value < ft_value) || (mozilla::IsNaN(fs_value) ||
+                                                   mozilla::IsNaN(ft_value)));
+              break;
+            case ff_c_ole_fmt:
+              setFCSRBit(fcsr_cc, (fs_value <= ft_value));
+              break;
+            case ff_c_ule_fmt:
+              setFCSRBit(fcsr_cc,
+                         (fs_value <= ft_value) || (mozilla::IsNaN(fs_value) ||
+                                                    mozilla::IsNaN(ft_value)));
+              break;
+            case ff_cvt_d_fmt:
+              f = getFpuRegisterFloat(fs_reg);
+              setFpuRegisterDouble(fd_reg, static_cast<double>(f));
+              break;
+            case ff_cvt_w_fmt:  // Convert float to word.
+              // Rounding modes are not yet supported.
+              MOZ_ASSERT((FCSR_ & 3) == 0);
+              // In rounding mode 0 it should behave like ROUND.
+              [[fallthrough]];
+            case ff_round_w_fmt: {  // Round float to word (round half to
+                                    // even).
+              float rounded = std::floor(fs_value + 0.5);
+              int32_t result = static_cast<int32_t>(rounded);
+              if ((result & 1) != 0 && result - fs_value == 0.5) {
+                // If the number is halfway between two integers,
+                // round to the even one.
+                result--;
+              }
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(fs_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_trunc_w_fmt: {  // Truncate float to word (round towards 0).
+              float rounded = truncf(fs_value);
+              int32_t result = static_cast<int32_t>(rounded);
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(fs_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_floor_w_fmt: {  // Round float to word towards negative
+                                    // infinity.
+              float rounded = std::floor(fs_value);
+              int32_t result = static_cast<int32_t>(rounded);
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(fs_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_ceil_w_fmt: {  // Round float to word towards positive
+                                   // infinity.
+              float rounded = std::ceil(fs_value);
+              int32_t result = static_cast<int32_t>(rounded);
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(fs_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_cvt_l_fmt:
+            case ff_round_l_fmt:
+            case ff_trunc_l_fmt:
+            case ff_floor_l_fmt:
+            case ff_ceil_l_fmt:
+            case ff_cvt_ps_s:
+            case ff_c_f_fmt:
+              // 64-bit and paired-single forms are not simulated.
+              MOZ_CRASH();
+              break;
+            case ff_movf_fmt:
+              // location of cc field in MOVF is equal to float branch
+              // instructions
+              cc = instr->fbccValue();
+              fcsr_cc = GetFCSRConditionBit(cc);
+              if (testFCSRBit(fcsr_cc)) {
+                setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+              }
+              break;
+            case ff_movz_fmt:
+              if (rt == 0) {
+                setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+              }
+              break;
+            case ff_movn_fmt:
+              if (rt != 0) {
+                setFpuRegisterFloat(fd_reg, getFpuRegisterFloat(fs_reg));
+              }
+              break;
+            default:
+              MOZ_CRASH();
+          }
+          break;
+        case rs_d:
+          // Double-precision FP arithmetic, compares, conversions and
+          // conditional moves. dt_value is loaded lazily per-case since
+          // single-operand forms don't read ft.
+          double dt_value, ds_value;
+          ds_value = getFpuRegisterDouble(fs_reg);
+          cc = instr->fcccValue();
+          fcsr_cc = GetFCSRConditionBit(cc);
+          switch (instr->functionFieldRaw()) {
+            case ff_add_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFpuRegisterDouble(fd_reg, ds_value + dt_value);
+              break;
+            case ff_sub_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFpuRegisterDouble(fd_reg, ds_value - dt_value);
+              break;
+            case ff_mul_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFpuRegisterDouble(fd_reg, ds_value * dt_value);
+              break;
+            case ff_div_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFpuRegisterDouble(fd_reg, ds_value / dt_value);
+              break;
+            case ff_abs_fmt:
+              setFpuRegisterDouble(fd_reg, fabs(ds_value));
+              break;
+            case ff_mov_fmt:
+              setFpuRegisterDouble(fd_reg, ds_value);
+              break;
+            case ff_neg_fmt:
+              setFpuRegisterDouble(fd_reg, -ds_value);
+              break;
+            case ff_sqrt_fmt:
+              setFpuRegisterDouble(fd_reg, sqrt(ds_value));
+              break;
+            case ff_c_un_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFCSRBit(fcsr_cc,
+                         mozilla::IsNaN(ds_value) || mozilla::IsNaN(dt_value));
+              break;
+            case ff_c_eq_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFCSRBit(fcsr_cc, (ds_value == dt_value));
+              break;
+            case ff_c_ueq_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFCSRBit(fcsr_cc,
+                         (ds_value == dt_value) || (mozilla::IsNaN(ds_value) ||
+                                                    mozilla::IsNaN(dt_value)));
+              break;
+            case ff_c_olt_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFCSRBit(fcsr_cc, (ds_value < dt_value));
+              break;
+            case ff_c_ult_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFCSRBit(fcsr_cc,
+                         (ds_value < dt_value) || (mozilla::IsNaN(ds_value) ||
+                                                   mozilla::IsNaN(dt_value)));
+              break;
+            case ff_c_ole_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFCSRBit(fcsr_cc, (ds_value <= dt_value));
+              break;
+            case ff_c_ule_fmt:
+              dt_value = getFpuRegisterDouble(ft_reg);
+              setFCSRBit(fcsr_cc,
+                         (ds_value <= dt_value) || (mozilla::IsNaN(ds_value) ||
+                                                    mozilla::IsNaN(dt_value)));
+              break;
+            case ff_cvt_w_fmt:  // Convert double to word.
+              // Rounding modes are not yet supported.
+              MOZ_ASSERT((FCSR_ & 3) == 0);
+              // In rounding mode 0 it should behave like ROUND.
+              [[fallthrough]];
+            case ff_round_w_fmt: {  // Round double to word (round half to
+                                    // even).
+              double rounded = std::floor(ds_value + 0.5);
+              int32_t result = static_cast<int32_t>(rounded);
+              if ((result & 1) != 0 && result - ds_value == 0.5) {
+                // If the number is halfway between two integers,
+                // round to the even one.
+                result--;
+              }
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(ds_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_trunc_w_fmt: {  // Truncate double to word (round towards
+                                    // 0).
+              double rounded = trunc(ds_value);
+              int32_t result = static_cast<int32_t>(rounded);
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(ds_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_floor_w_fmt: {  // Round double to word towards negative
+                                    // infinity.
+              double rounded = std::floor(ds_value);
+              int32_t result = static_cast<int32_t>(rounded);
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(ds_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_ceil_w_fmt: {  // Round double to word towards positive
+                                   // infinity.
+              double rounded = std::ceil(ds_value);
+              int32_t result = static_cast<int32_t>(rounded);
+              setFpuRegister(fd_reg, result);
+              if (setFCSRRoundError(ds_value, rounded)) {
+                setFpuRegister(fd_reg, kFPUInvalidResult);
+              }
+              break;
+            }
+            case ff_cvt_s_fmt:  // Convert double to float (single).
+              setFpuRegisterFloat(fd_reg, static_cast<float>(ds_value));
+              break;
+            case ff_cvt_l_fmt:
+            case ff_trunc_l_fmt:
+            case ff_round_l_fmt:
+            case ff_floor_l_fmt:
+            case ff_ceil_l_fmt:
+            case ff_c_f_fmt:
+              MOZ_CRASH();
+              break;
+            case ff_movf_fmt:
+              // location of cc field in MOVF is equal to float branch
+              // instructions
+              cc = instr->fbccValue();
+              fcsr_cc = GetFCSRConditionBit(cc);
+              if (testFCSRBit(fcsr_cc)) {
+                setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+              }
+              break;
+            case ff_movz_fmt:
+              if (rt == 0) {
+                setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+              }
+              break;
+            case ff_movn_fmt:
+              if (rt != 0) {
+                setFpuRegisterDouble(fd_reg, getFpuRegisterDouble(fs_reg));
+              }
+              break;
+            default:
+              MOZ_CRASH();
+          }
+          break;
+        case rs_w:
+          // Fixed-point word source: convert to single or double.
+          switch (instr->functionFieldRaw()) {
+            case ff_cvt_s_fmt:  // Convert word to float (single).
+              alu_out = getFpuRegister(fs_reg);
+              setFpuRegisterFloat(fd_reg, static_cast<float>(alu_out));
+              break;
+            case ff_cvt_d_fmt:  // Convert word to double.
+              alu_out = getFpuRegister(fs_reg);
+              setFpuRegisterDouble(fd_reg, static_cast<double>(alu_out));
+              break;
+            default:
+              MOZ_CRASH();
+          }
+          break;
+        case rs_l:
+          switch (instr->functionFieldRaw()) {
+            case ff_cvt_d_fmt:
+            case ff_cvt_s_fmt:
+              MOZ_CRASH();
+              break;
+            default:
+              MOZ_CRASH();
+          }
+          break;
+        case rs_ps:
+          break;
+        default:
+          MOZ_CRASH();
+      }
+      break;
+    case op_cop1x:
+      // Fused multiply-add: fd = fs * ft + fr.
+      switch (instr->functionFieldRaw()) {
+        case ff_madd_s:
+          float fr, ft, fs;
+          fr = getFpuRegisterFloat(fr_reg);
+          fs = getFpuRegisterFloat(fs_reg);
+          ft = getFpuRegisterFloat(ft_reg);
+          setFpuRegisterFloat(fd_reg, fs * ft + fr);
+          break;
+        case ff_madd_d:
+          double dr, dt, ds;
+          dr = getFpuRegisterDouble(fr_reg);
+          ds = getFpuRegisterDouble(fs_reg);
+          dt = getFpuRegisterDouble(ft_reg);
+          setFpuRegisterDouble(fd_reg, ds * dt + dr);
+          break;
+        default:
+          MOZ_CRASH();
+      }
+      break;
+    case op_special:
+      switch (instr->functionFieldRaw()) {
+        case ff_jr: {
+          // Execute the branch-delay-slot instruction before redirecting
+          // the pc to the jump target computed in the configure step.
+          SimInstruction* branch_delay_instr =
+              reinterpret_cast<SimInstruction*>(current_pc +
+                                                SimInstruction::kInstrSize);
+          branchDelayInstructionDecode(branch_delay_instr);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        case ff_jalr: {
+          // Like jr, but also link: return_addr_reg receives the address
+          // of the instruction after the delay slot.
+          SimInstruction* branch_delay_instr =
+              reinterpret_cast<SimInstruction*>(current_pc +
+                                                SimInstruction::kInstrSize);
+          setRegister(return_addr_reg,
+                      current_pc + 2 * SimInstruction::kInstrSize);
+          branchDelayInstructionDecode(branch_delay_instr);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        // Instructions using HI and LO registers.
+        case ff_mult:
+          setRegister(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+          setRegister(HI, static_cast<int32_t>(i64hilo >> 32));
+          break;
+        case ff_multu:
+          setRegister(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+          setRegister(HI, static_cast<int32_t>(u64hilo >> 32));
+          break;
+        case ff_div:
+          // Divide by zero and overflow was not checked in the configuration
+          // step - div and divu do not raise exceptions. On division by 0
+          // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
+          // return INT_MIN which is what the hardware does.
+          if (rs == INT_MIN && rt == -1) {
+            setRegister(LO, INT_MIN);
+            setRegister(HI, 0);
+          } else if (rt != 0) {
+            setRegister(LO, rs / rt);
+            setRegister(HI, rs % rt);
+          }
+          break;
+        case ff_divu:
+          if (rt_u != 0) {
+            setRegister(LO, rs_u / rt_u);
+            setRegister(HI, rs_u % rt_u);
+          }
+          break;
+        // Break and trap instructions.
+        case ff_break:
+        case ff_tge:
+        case ff_tgeu:
+        case ff_tlt:
+        case ff_tltu:
+        case ff_teq:
+        case ff_tne:
+          // do_interrupt was decided in the configure step (break is
+          // unconditional; traps depend on the rs/rt comparison).
+          if (do_interrupt) {
+            softwareInterrupt(instr);
+          }
+          break;
+        case ff_sync:
+          // All supported sync variants are treated as a full seq-cst
+          // fence.
+          switch (instr->bits(10, 6)) {
+            case 0x0:
+            case 0x4:
+            case 0x10:
+            case 0x11:
+            case 0x12:
+            case 0x13:
+              AtomicOperations::fenceSeqCst();
+              break;
+            default:
+              MOZ_CRASH();
+          }
+          break;
+        // Conditional moves.
+        case ff_movn:
+          if (rt) setRegister(rd_reg, rs);
+          break;
+        case ff_movci: {
+          uint32_t cc = instr->fbccValue();
+          uint32_t fcsr_cc = GetFCSRConditionBit(cc);
+          if (instr->bit(16)) {  // Read Tf bit.
+            if (testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+          } else {
+            if (!testFCSRBit(fcsr_cc)) setRegister(rd_reg, rs);
+          }
+          break;
+        }
+        case ff_movz:
+          if (!rt) setRegister(rd_reg, rs);
+          break;
+        default:  // For other special opcodes we do the default operation.
+          setRegister(rd_reg, alu_out);
+      }
+      break;
+    case op_special2:
+      switch (instr->functionFieldRaw()) {
+        case ff_mul:
+          setRegister(rd_reg, alu_out);
+          // HI and LO are UNPREDICTABLE after the operation.
+          setRegister(LO, Unpredictable);
+          setRegister(HI, Unpredictable);
+          break;
+        case ff_madd:
+          // Accumulate the configure-step product into HI/LO.
+          setRegister(
+              LO, getRegister(LO) + static_cast<int32_t>(i64hilo & 0xffffffff));
+          setRegister(HI,
+                      getRegister(HI) + static_cast<int32_t>(i64hilo >> 32));
+          break;
+        case ff_maddu:
+          setRegister(
+              LO, getRegister(LO) + static_cast<int32_t>(u64hilo & 0xffffffff));
+          setRegister(HI,
+                      getRegister(HI) + static_cast<int32_t>(u64hilo >> 32));
+          break;
+        default:  // For other special2 opcodes we do the default operation.
+          setRegister(rd_reg, alu_out);
+      }
+      break;
+    case op_special3:
+      switch (instr->functionFieldRaw()) {
+        case ff_ins:
+          // Ins instr leaves result in Rt, rather than Rd.
+          setRegister(rt_reg, alu_out);
+          break;
+        case ff_ext:
+          // Ext instr leaves result in Rt, rather than Rd.
+          setRegister(rt_reg, alu_out);
+          break;
+        case ff_bshfl:
+          setRegister(rd_reg, alu_out);
+          break;
+        default:
+          MOZ_CRASH();
+      }
+      break;
+    // Unimplemented opcodes raised an error in the configuration step before,
+    // so we can use the default here to set the destination register in
+    // common cases.
+    default:
+      setRegister(rd_reg, alu_out);
+  }
+}
+
+// Type 2: instructions using a 16 bytes immediate. (e.g. addi, beq).
+void Simulator::decodeTypeImmediate(SimInstruction* instr) {
+ // Instruction fields.
+ OpcodeField op = instr->opcodeFieldRaw();
+ int32_t rs = getRegister(instr->rsValue());
+ uint32_t rs_u = static_cast<uint32_t>(rs);
+ int32_t rt_reg = instr->rtValue(); // Destination register.
+ int32_t rt = getRegister(rt_reg);
+ int16_t imm16 = instr->imm16Value();
+
+ int32_t ft_reg = instr->ftValue(); // Destination register.
+
+ // Zero extended immediate.
+ uint32_t oe_imm16 = 0xffff & imm16;
+ // Sign extended immediate.
+ int32_t se_imm16 = imm16;
+
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc.
+ int32_t next_pc = bad_ra;
+
+ // Used for conditional branch instructions.
+ bool do_branch = false;
+ bool execute_branch_delay_instruction = false;
+
+ // Used for arithmetic instructions.
+ int32_t alu_out = 0;
+ // Floating point.
+ double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
+
+ // Used for memory instructions.
+ uint32_t addr = 0x0;
+ // Value to be written in memory.
+ uint32_t mem_value = 0x0;
+
+ // ---------- Configuration (and execution for op_regimm).
+ switch (op) {
+ // ------------- op_cop1. Coprocessor instructions.
+ case op_cop1:
+ switch (instr->rsFieldRaw()) {
+ case rs_bc1: // Branch on coprocessor condition.
+ cc = instr->fbccValue();
+ fcsr_cc = GetFCSRConditionBit(cc);
+ cc_value = testFCSRBit(fcsr_cc);
+ do_branch = (instr->fbtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ break;
+ // ------------- op_regimm class.
+ case op_regimm:
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ do_branch = (rs < 0);
+ break;
+ case rt_bltzal:
+ do_branch = rs < 0;
+ break;
+ case rt_bgez:
+ do_branch = rs >= 0;
+ break;
+ case rt_bgezal:
+ do_branch = rs >= 0;
+ break;
+ default:
+ MOZ_CRASH();
+ }
+ switch (instr->rtFieldRaw()) {
+ case rt_bltz:
+ case rt_bltzal:
+ case rt_bgez:
+ case rt_bgezal:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + kBranchReturnOffset);
+ }
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
+ break;
+ default:
+ break;
+ }
+ break; // case op_regimm.
+ // ------------- Branch instructions.
+ // When comparing to zero, the encoding of rt field is always 0, so we
+ // don't need to replace rt with zero.
+ case op_beq:
+ do_branch = (rs == rt);
+ break;
+ case op_bne:
+ do_branch = rs != rt;
+ break;
+ case op_blez:
+ do_branch = rs <= 0;
+ break;
+ case op_bgtz:
+ do_branch = rs > 0;
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ if (HaveSameSign(rs, se_imm16)) {
+ if (rs > 0) {
+ exceptions[kIntegerOverflow] = rs > (kRegisterskMaxValue - se_imm16);
+ } else if (rs < 0) {
+ exceptions[kIntegerUnderflow] = rs < (kRegisterskMinValue - se_imm16);
+ }
+ }
+ alu_out = rs + se_imm16;
+ break;
+ case op_addiu:
+ alu_out = rs + se_imm16;
+ break;
+ case op_slti:
+ alu_out = (rs < se_imm16) ? 1 : 0;
+ break;
+ case op_sltiu:
+ alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+ break;
+ case op_andi:
+ alu_out = rs & oe_imm16;
+ break;
+ case op_ori:
+ alu_out = rs | oe_imm16;
+ break;
+ case op_xori:
+ alu_out = rs ^ oe_imm16;
+ break;
+ case op_lui:
+ alu_out = (oe_imm16 << 16);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ addr = rs + se_imm16;
+ alu_out = readB(addr);
+ break;
+ case op_lh:
+ addr = rs + se_imm16;
+ alu_out = readH(addr, instr);
+ break;
+ case op_lwl: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_lw:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_lbu:
+ addr = rs + se_imm16;
+ alu_out = readBU(addr);
+ break;
+ case op_lhu:
+ addr = rs + se_imm16;
+ alu_out = readHU(addr, instr);
+ break;
+ case op_lwr: {
+ // al_offset is offset of the effective address within an aligned word.
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = readW(addr, instr);
+ alu_out = static_cast<uint32_t>(alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
+ case op_sb:
+ addr = rs + se_imm16;
+ break;
+ case op_sh:
+ addr = rs + se_imm16;
+ break;
+ case op_swl: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
+ case op_sw:
+ addr = rs + se_imm16;
+ break;
+ case op_swr: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = readW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
+ case op_lwc1:
+ addr = rs + se_imm16;
+ alu_out = readW(addr, instr);
+ break;
+ case op_ldc1:
+ addr = rs + se_imm16;
+ fp_out = readD(addr, instr);
+ break;
+ case op_swc1:
+ case op_sdc1:
+ addr = rs + se_imm16;
+ break;
+ case op_ll:
+ addr = rs + se_imm16;
+ alu_out = loadLinkedW(addr, instr);
+ break;
+ case op_sc:
+ addr = rs + se_imm16;
+ alu_out = storeConditionalW(addr, rt, instr);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ // ---------- Raise exceptions triggered.
+ signalExceptions();
+
+ // ---------- Execution.
+ switch (op) {
+ // ------------- Branch instructions.
+ case op_beq:
+ case op_bne:
+ case op_blez:
+ case op_bgtz:
+ // Branch instructions common part.
+ execute_branch_delay_instruction = true;
+ // Set next_pc.
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + SimInstruction::kInstrSize;
+ if (instr->isLinkingInstruction()) {
+ setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+ }
+ } else {
+ next_pc = current_pc + 2 * SimInstruction::kInstrSize;
+ }
+ break;
+ // ------------- Arithmetic instructions.
+ case op_addi:
+ case op_addiu:
+ case op_slti:
+ case op_sltiu:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_lui:
+ setRegister(rt_reg, alu_out);
+ break;
+ // ------------- Memory instructions.
+ case op_lb:
+ case op_lh:
+ case op_lwl:
+ case op_lw:
+ case op_lbu:
+ case op_lhu:
+ case op_lwr:
+ case op_ll:
+ case op_sc:
+ setRegister(rt_reg, alu_out);
+ break;
+ case op_sb:
+ writeB(addr, static_cast<int8_t>(rt));
+ break;
+ case op_sh:
+ writeH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case op_swl:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_sw:
+ writeW(addr, rt, instr);
+ break;
+ case op_swr:
+ writeW(addr, mem_value, instr);
+ break;
+ case op_lwc1:
+ setFpuRegister(ft_reg, alu_out);
+ break;
+ case op_ldc1:
+ setFpuRegisterDouble(ft_reg, fp_out);
+ break;
+ case op_swc1:
+ addr = rs + se_imm16;
+ writeW(addr, getFpuRegister(ft_reg), instr);
+ break;
+ case op_sdc1:
+ addr = rs + se_imm16;
+ writeD(addr, getFpuRegisterDouble(ft_reg), instr);
+ break;
+ default:
+ break;
+ }
+
+ if (execute_branch_delay_instruction) {
+ // Execute branch delay slot
+ // We don't check for end_sim_pc. First it should not be met as the current
+ // pc is valid. Secondly a jump should always execute its branch delay slot.
+ SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+ current_pc + SimInstruction::kInstrSize);
+ branchDelayInstructionDecode(branch_delay_instr);
+ }
+
+ // If needed update pc after the branch delay execution.
+ if (next_pc != bad_ra) {
+ set_pc(next_pc);
+ }
+}
+
+// Type 3: instructions using a 26-bit immediate. (e.g. j, jal).
+void Simulator::decodeTypeJump(SimInstruction* instr) {
+  // Get current pc.
+  int32_t current_pc = get_pc();
+  // Get unchanged bits of pc: a MIPS jump keeps the top 4 bits of the
+  // address of the delay-slot instruction.
+  int32_t pc_high_bits = current_pc & 0xf0000000;
+  // Next pc: the 26-bit immediate is a word index, hence the << 2.
+  int32_t next_pc = pc_high_bits | (instr->imm26Value() << 2);
+
+  // Execute branch delay slot.
+  // We don't check for end_sim_pc. First it should not be met as the current pc
+  // is valid. Secondly a jump should always execute its branch delay slot.
+  SimInstruction* branch_delay_instr = reinterpret_cast<SimInstruction*>(
+      current_pc + SimInstruction::kInstrSize);
+  branchDelayInstructionDecode(branch_delay_instr);
+
+  // Update pc and ra if necessary.
+  // Do this after the branch delay execution.
+  if (instr->isLinkingInstruction()) {
+    // jal-style links: ra (r31) receives the address of the instruction
+    // following the delay slot.
+    setRegister(31, current_pc + 2 * SimInstruction::kInstrSize);
+  }
+  set_pc(next_pc);
+  pc_modified_ = true;
+}
+
+// Executes the current instruction.
+void Simulator::instructionDecode(SimInstruction* instr) {
+  // Validate the instruction against the simulated i-cache unless checking
+  // has been globally disabled.
+  if (!SimulatorProcess::ICacheCheckingDisableCount) {
+    AutoLockSimulatorCache als;
+    SimulatorProcess::checkICacheLocked(instr);
+  }
+  pc_modified_ = false;
+
+  // Dispatch on the three MIPS instruction encoding families.
+  switch (instr->instructionType()) {
+    case SimInstruction::kRegisterType:
+      decodeTypeRegister(instr);
+      break;
+    case SimInstruction::kImmediateType:
+      decodeTypeImmediate(instr);
+      break;
+    case SimInstruction::kJumpType:
+      decodeTypeJump(instr);
+      break;
+    default:
+      UNSUPPORTED();
+  }
+  // Advance to the next sequential instruction unless the decode above
+  // already set the pc (branch, jump, or trap).
+  if (!pc_modified_) {
+    setRegister(pc,
+                reinterpret_cast<int32_t>(instr) + SimInstruction::kInstrSize);
+  }
+}
+
+// Executes the single instruction sitting in a branch delay slot. Nops are
+// short-cut; instructions that are forbidden in a delay slot abort.
+void Simulator::branchDelayInstructionDecode(SimInstruction* instr) {
+  if (instr->instructionBits() == NopInst) {
+    // Short-cut generic nop instructions. They are always valid and they
+    // never change the simulator state.
+    return;
+  }
+
+  if (instr->isForbiddenInBranchDelay()) {
+    MOZ_CRASH("Error: Unexpected opcode in a branch delay slot.");
+  }
+  instructionDecode(instr);
+}
+
+// Installs a single-step callback and immediately reports the current pc.
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+  single_step_callback_ = cb;
+  single_step_callback_arg_ = arg;
+  single_stepping_ = true;
+  single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+// Tears down single stepping; delivers one final callback at the current pc.
+void Simulator::disable_single_stepping() {
+  if (!single_stepping_) {
+    // Nothing to do when stepping was never enabled.
+    return;
+  }
+  single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+  single_step_callback_arg_ = nullptr;
+  single_step_callback_ = nullptr;
+  single_stepping_ = false;
+}
+
+template <bool enableStopSimAt>
+void Simulator::execute() {
+  // Notify the single-step hook that a run is starting (null pc marker).
+  if (single_stepping_) {
+    single_step_callback_(single_step_callback_arg_, this, nullptr);
+  }
+
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  int program_counter = get_pc();
+
+  // Fetch/decode/execute until pc reaches the end_sim_pc sentinel that
+  // callInternal planted in ra.
+  while (program_counter != end_sim_pc) {
+    if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+      // Drop into the interactive debugger at the requested icount.
+      MipsDebugger dbg(this);
+      dbg.debug();
+    } else {
+      if (single_stepping_) {
+        single_step_callback_(single_step_callback_arg_, this,
+                              (void*)program_counter);
+      }
+      SimInstruction* instr =
+          reinterpret_cast<SimInstruction*>(program_counter);
+      instructionDecode(instr);
+      icount_++;
+    }
+    program_counter = get_pc();
+  }
+
+  // Final notification: a null pc signals the end of the run.
+  if (single_stepping_) {
+    single_step_callback_(single_step_callback_arg_, this, nullptr);
+  }
+}
+
+// Runs simulated code at |entry| until it "returns" to end_sim_pc, while
+// checking that the MIPS callee-saved registers are preserved across the run.
+void Simulator::callInternal(uint8_t* entry) {
+  // Prepare to execute the code at entry.
+  setRegister(pc, reinterpret_cast<int32_t>(entry));
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // the LR the simulation stops when returning to this call point.
+  setRegister(ra, end_sim_pc);
+
+  // Remember the values of callee-saved registers.
+  // NOTE(review): the "r9 as sb (static base)" remark that accompanied this
+  // code in other ports does not apply on MIPS; the callee-saved set handled
+  // here is s0-s7, gp, sp and fp.
+  int32_t s0_val = getRegister(s0);
+  int32_t s1_val = getRegister(s1);
+  int32_t s2_val = getRegister(s2);
+  int32_t s3_val = getRegister(s3);
+  int32_t s4_val = getRegister(s4);
+  int32_t s5_val = getRegister(s5);
+  int32_t s6_val = getRegister(s6);
+  int32_t s7_val = getRegister(s7);
+  int32_t gp_val = getRegister(gp);
+  int32_t sp_val = getRegister(sp);
+  int32_t fp_val = getRegister(fp);
+
+  // Set up the callee-saved registers with a known value. To be able to check
+  // that they are preserved properly across JS execution. (sp is deliberately
+  // excluded: it must keep its real value for the call.)
+  int32_t callee_saved_value = icount_;
+  setRegister(s0, callee_saved_value);
+  setRegister(s1, callee_saved_value);
+  setRegister(s2, callee_saved_value);
+  setRegister(s3, callee_saved_value);
+  setRegister(s4, callee_saved_value);
+  setRegister(s5, callee_saved_value);
+  setRegister(s6, callee_saved_value);
+  setRegister(s7, callee_saved_value);
+  setRegister(gp, callee_saved_value);
+  setRegister(fp, callee_saved_value);
+
+  // Start the simulation.
+  if (Simulator::StopSimAt != -1) {
+    execute<true>();
+  } else {
+    execute<false>();
+  }
+
+  // Check that the callee-saved registers have been preserved.
+  MOZ_ASSERT(callee_saved_value == getRegister(s0));
+  MOZ_ASSERT(callee_saved_value == getRegister(s1));
+  MOZ_ASSERT(callee_saved_value == getRegister(s2));
+  MOZ_ASSERT(callee_saved_value == getRegister(s3));
+  MOZ_ASSERT(callee_saved_value == getRegister(s4));
+  MOZ_ASSERT(callee_saved_value == getRegister(s5));
+  MOZ_ASSERT(callee_saved_value == getRegister(s6));
+  MOZ_ASSERT(callee_saved_value == getRegister(s7));
+  MOZ_ASSERT(callee_saved_value == getRegister(gp));
+  MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+  // Restore callee-saved registers with the original value.
+  setRegister(s0, s0_val);
+  setRegister(s1, s1_val);
+  setRegister(s2, s2_val);
+  setRegister(s3, s3_val);
+  setRegister(s4, s4_val);
+  setRegister(s5, s5_val);
+  setRegister(s6, s6_val);
+  setRegister(s7, s7_val);
+  setRegister(gp, gp_val);
+  setRegister(sp, sp_val);
+  setRegister(fp, fp_val);
+}
+
+// Marshals varargs C arguments into simulated argument registers and stack
+// slots, runs the code at |entry|, and returns the simulated v0.
+int32_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+
+  int original_stack = getRegister(sp);
+  // Compute position of stack on entry to generated code: reserve either the
+  // actual argument area or at least the minimum C argument slot area.
+  int entry_stack = original_stack;
+  if (argument_count > kCArgSlotCount) {
+    entry_stack = entry_stack - argument_count * sizeof(int32_t);
+  } else {
+    entry_stack = entry_stack - kCArgsSlotsSize;
+  }
+
+  // Keep the simulated stack ABI-aligned.
+  entry_stack &= ~(ABIStackAlignment - 1);
+
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+  // Setup the arguments: leading arguments go in registers, the rest land in
+  // their home slots on the stack.
+  for (int i = 0; i < argument_count; i++) {
+    js::jit::Register argReg;
+    if (GetIntArgReg(i, &argReg)) {
+      setRegister(argReg.code(), va_arg(parameters, int32_t));
+    } else {
+      stack_argument[i] = va_arg(parameters, int32_t);
+    }
+  }
+
+  va_end(parameters);
+  setRegister(sp, entry_stack);
+
+  callInternal(entry);
+
+  // Pop stack passed arguments; the callee must have restored sp.
+  MOZ_ASSERT(entry_stack == getRegister(sp));
+  setRegister(sp, original_stack);
+
+  int32_t result = getRegister(v0);
+  return result;
+}
+
+// Pushes |address| onto the simulated JS stack; returns the new sp.
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+  int updated_sp = getRegister(sp) - sizeof(uintptr_t);
+  *reinterpret_cast<uintptr_t*>(updated_sp) = address;
+  setRegister(sp, updated_sp);
+  return updated_sp;
+}
+
+// Pops and returns the address on top of the simulated JS stack.
+uintptr_t Simulator::popAddress() {
+  int top_sp = getRegister(sp);
+  uintptr_t value = *reinterpret_cast<uintptr_t*>(top_sp);
+  setRegister(sp, top_sp + sizeof(uintptr_t));
+  return value;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
diff --git a/js/src/jit/mips32/Simulator-mips32.h b/js/src/jit/mips32/Simulator-mips32.h
new file mode 100644
index 0000000000..79acef3e63
--- /dev/null
+++ b/js/src/jit/mips32/Simulator-mips32.h
@@ -0,0 +1,526 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_mips32_Simulator_mips32_h
+#define jit_mips32_Simulator_mips32_h
+
+#ifdef JS_SIMULATOR_MIPS32
+
+# include "mozilla/Atomics.h"
+
+# include "jit/IonTypes.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+// Alignment of a simulated pointer and the matching low-bits mask, used by
+// the unaligned lwl/lwr/swl/swr emulation.
+const intptr_t kPointerAlignment = 4;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 34th register.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactCauseBit = 12;
+const uint32_t kFCSRUnderflowCauseBit = 13;
+const uint32_t kFCSROverflowCauseBit = 14;
+const uint32_t kFCSRDivideByZeroCauseBit = 15;
+const uint32_t kFCSRInvalidOpCauseBit = 16;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+    kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+    kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+// All exception flags except "inexact", which is not treated as an error.
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+// Per thread simulator state.
+class Simulator {
+  friend class MipsDebugger;
+
+ public:
+  // Registers are declared in order. See "See MIPS Run Linux" chapter 2.
+  enum Register {
+    no_reg = -1,
+    zero_reg = 0,
+    at,
+    v0,
+    v1,
+    a0,
+    a1,
+    a2,
+    a3,
+    t0,
+    t1,
+    t2,
+    t3,
+    t4,
+    t5,
+    t6,
+    t7,
+    s0,
+    s1,
+    s2,
+    s3,
+    s4,
+    s5,
+    s6,
+    s7,
+    t8,
+    t9,
+    k0,
+    k1,
+    gp,
+    sp,
+    s8,
+    ra,
+    // LO, HI, and pc.
+    LO,
+    HI,
+    pc,  // pc must be the last register.
+    kNumSimuRegisters,
+    // aliases
+    fp = s8
+  };
+
+  // Coprocessor registers.
+  enum FPURegister {
+    f0,
+    f1,
+    f2,
+    f3,
+    f4,
+    f5,
+    f6,
+    f7,
+    f8,
+    f9,
+    f10,
+    f11,
+    f12,
+    f13,
+    f14,
+    f15,  // f12 and f14 are arguments FPURegisters.
+    f16,
+    f17,
+    f18,
+    f19,
+    f20,
+    f21,
+    f22,
+    f23,
+    f24,
+    f25,
+    f26,
+    f27,
+    f28,
+    f29,
+    f30,
+    f31,
+    kNumFPURegisters
+  };
+
+  // Returns nullptr on OOM.
+  static Simulator* Create();
+
+  static void Destroy(Simulator* simulator);
+
+  // Constructor/destructor are for internal use only; use the static methods
+  // above.
+  Simulator();
+  ~Simulator();
+
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* Current();
+
+  static inline uintptr_t StackLimit() {
+    return Simulator::Current()->stackLimit();
+  }
+
+  uintptr_t* addressOfStackLimit();
+
+  // Accessors for register state. Reading the pc value adheres to the MIPS
+  // architecture specification and is off by 8 from the currently executing
+  // instruction.
+  void setRegister(int reg, int32_t value);
+  int32_t getRegister(int reg) const;
+  double getDoubleFromRegisterPair(int reg);
+  // Same for FPURegisters.
+  void setFpuRegister(int fpureg, int32_t value);
+  void setFpuRegisterFloat(int fpureg, float value);
+  void setFpuRegisterFloat(int fpureg, int64_t value);
+  void setFpuRegisterDouble(int fpureg, double value);
+  void setFpuRegisterDouble(int fpureg, int64_t value);
+  int32_t getFpuRegister(int fpureg) const;
+  int64_t getFpuRegisterLong(int fpureg) const;
+  float getFpuRegisterFloat(int fpureg) const;
+  double getFpuRegisterDouble(int fpureg) const;
+  void setFCSRBit(uint32_t cc, bool value);
+  bool testFCSRBit(uint32_t cc);
+  bool setFCSRRoundError(double original, double rounded);
+
+  // Special case of set_register and get_register to access the raw PC value.
+  void set_pc(int32_t value);
+  int32_t get_pc() const;
+
+  template <typename T>
+  T get_pc_as() const {
+    return reinterpret_cast<T>(get_pc());
+  }
+
+  void enable_single_stepping(SingleStepCallback cb, void* arg);
+  void disable_single_stepping();
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t stackLimit() const;
+  bool overRecursed(uintptr_t newsp = 0) const;
+  bool overRecursedWithExtra(uint32_t extra) const;
+
+  // Executes MIPS instructions until the PC reaches end_sim_pc.
+  template <bool enableStopSimAt>
+  void execute();
+
+  // Sets up the simulator state and grabs the result on return.
+  int32_t call(uint8_t* entry, int argument_count, ...);
+
+  // Push an address onto the JS stack.
+  uintptr_t pushAddress(uintptr_t address);
+
+  // Pop an address from the JS stack.
+  uintptr_t popAddress();
+
+  // Debugger input.
+  void setLastDebuggerInput(char* input);
+  char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+  // Returns true if pc register contains one of the 'SpecialValues' defined
+  // below (bad_ra, end_sim_pc).
+  bool has_bad_pc() const;
+
+ private:
+  enum SpecialValues {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly setup.
+    bad_ra = -1,
+    // A pc value used to signal the simulator to stop execution. Generally
+    // the ra is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2,
+    // Unpredictable value.
+    Unpredictable = 0xbadbeaf
+  };
+
+  bool init();
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void format(SimInstruction* instr, const char* format);
+
+  // Read and write memory.
+  inline uint32_t readBU(uint32_t addr);
+  inline int32_t readB(uint32_t addr);
+  inline void writeB(uint32_t addr, uint8_t value);
+  inline void writeB(uint32_t addr, int8_t value);
+
+  inline uint16_t readHU(uint32_t addr, SimInstruction* instr);
+  inline int16_t readH(uint32_t addr, SimInstruction* instr);
+  // Note: Overloaded on the sign of the value.
+  inline void writeH(uint32_t addr, uint16_t value, SimInstruction* instr);
+  inline void writeH(uint32_t addr, int16_t value, SimInstruction* instr);
+
+  inline int readW(uint32_t addr, SimInstruction* instr);
+  inline void writeW(uint32_t addr, int value, SimInstruction* instr);
+
+  inline double readD(uint32_t addr, SimInstruction* instr);
+  inline void writeD(uint32_t addr, double value, SimInstruction* instr);
+
+  // ll/sc emulation (see LLBit_/LLAddr_ below).
+  inline int32_t loadLinkedW(uint32_t addr, SimInstruction* instr);
+  inline int32_t storeConditionalW(uint32_t addr, int32_t value,
+                                   SimInstruction* instr);
+
+  // Executing is handled based on the instruction type.
+  void decodeTypeRegister(SimInstruction* instr);
+
+  // Helper function for decodeTypeRegister.
+  void configureTypeRegister(SimInstruction* instr, int32_t& alu_out,
+                             int64_t& i64hilo, uint64_t& u64hilo,
+                             int32_t& next_pc, int32_t& return_addr_reg,
+                             bool& do_interrupt);
+
+  void decodeTypeImmediate(SimInstruction* instr);
+  void decodeTypeJump(SimInstruction* instr);
+
+  // Used for breakpoints and traps.
+  void softwareInterrupt(SimInstruction* instr);
+
+  // Stop helper functions.
+  bool isWatchpoint(uint32_t code);
+  void printWatchpoint(uint32_t code);
+  void handleStop(uint32_t code, SimInstruction* instr);
+  bool isStopInstruction(SimInstruction* instr);
+  bool isEnabledStop(uint32_t code);
+  void enableStop(uint32_t code);
+  void disableStop(uint32_t code);
+  void increaseStopCounter(uint32_t code);
+  void printStopInfo(uint32_t code);
+
+  JS::ProfilingFrameIterator::RegisterState registerState();
+
+  // Handle any wasm faults, returning true if the fault was handled.
+  // This method is rather hot so inline the normal (no-wasm) case.
+  bool MOZ_ALWAYS_INLINE handleWasmSegFault(int32_t addr, unsigned numBytes) {
+    if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+      return false;
+    }
+
+    uint8_t* newPC;
+    if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+                                     &newPC)) {
+      return false;
+    }
+
+    // A trap handler was found: invalidate any pending ll/sc reservation and
+    // resume at the handler's pc.
+    LLBit_ = false;
+    set_pc(int32_t(newPC));
+    return true;
+  }
+
+  // Executes one instruction.
+  void instructionDecode(SimInstruction* instr);
+  // Execute one instruction placed in a branch delay slot.
+  void branchDelayInstructionDecode(SimInstruction* instr);
+
+ public:
+  static int StopSimAt;
+
+  // Runtime call support.
+  static void* RedirectNativeFunction(void* nativeFunction,
+                                      ABIFunctionType type);
+
+ private:
+  enum Exception {
+    kNone,
+    kIntegerOverflow,
+    kIntegerUnderflow,
+    kDivideByZero,
+    kNumExceptions
+  };
+  int16_t exceptions[kNumExceptions];
+
+  // Exceptions.
+  void signalExceptions();
+
+  // Handle arguments and return value for runtime FP functions.
+  void getFpArgs(double* x, double* y, int32_t* z);
+  void getFpFromStack(int32_t* stack, double* x);
+
+  void setCallResultDouble(double result);
+  void setCallResultFloat(float result);
+  void setCallResult(int64_t res);
+
+  void callInternal(uint8_t* entry);
+
+  // Architecture state.
+  // Registers.
+  int32_t registers_[kNumSimuRegisters];
+  // Coprocessor Registers.
+  int32_t FPUregisters_[kNumFPURegisters];
+  // FPU control register.
+  uint32_t FCSR_;
+
+  // Load-linked/store-conditional reservation state (ll/sc emulation).
+  bool LLBit_;
+  uint32_t LLAddr_;
+  int32_t lastLLValue_;
+
+  // Simulator support.
+  char* stack_;
+  uintptr_t stackLimit_;
+  bool pc_modified_;
+  int icount_;
+  int break_count_;
+
+  // Debugger input.
+  char* lastDebuggerInput_;
+
+  // Registered breakpoints.
+  SimInstruction* break_pc_;
+  Instr break_instr_;
+
+  // Single-stepping support
+  bool single_stepping_;
+  SingleStepCallback single_step_callback_;
+  void* single_step_callback_arg_;
+
+  // A stop is watched if its code is less than kNumOfWatchedStops.
+  // Only watched stops support enabling/disabling and the counter feature.
+  static const uint32_t kNumOfWatchedStops = 256;
+
+  // Stop is disabled if bit 31 is set.
+  static const uint32_t kStopDisabledBit = 1U << 31;
+
+  // A stop is enabled, meaning the simulator will stop when meeting the
+  // instruction, if bit 31 of watchedStops_[code].count is unset.
+  // The value watchedStops_[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint was hit or gone through.
+  struct StopCountAndDesc {
+    uint32_t count_;
+    char* desc_;
+  };
+  StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+};
+
+// Process wide simulator state.
+class SimulatorProcess {
+  friend class Redirection;
+  friend class AutoLockSimulatorCache;
+
+ private:
+  // ICache checking: hashes raw code-page pointers used as map keys.
+  struct ICacheHasher {
+    typedef void* Key;
+    typedef void* Lookup;
+    static HashNumber hash(const Lookup& l);
+    static bool match(const Key& k, const Lookup& l);
+  };
+
+ public:
+  typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+  // When non-zero, instructionDecode() skips the i-cache consistency check.
+  static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+      ICacheCheckingDisableCount;
+  static void FlushICache(void* start, size_t size);
+
+  static void checkICacheLocked(SimInstruction* instr);
+
+  // Creates the process-wide singleton; returns false on OOM.
+  static bool initialize() {
+    singleton_ = js_new<SimulatorProcess>();
+    return singleton_;
+  }
+  static void destroy() {
+    js_delete(singleton_);
+    singleton_ = nullptr;
+  }
+
+  SimulatorProcess();
+  ~SimulatorProcess();
+
+ private:
+  static SimulatorProcess* singleton_;
+
+  // This lock creates a critical section around 'redirection_' and
+  // 'icache_', which are referenced both by the execution engine
+  // and by the off-thread compiler (see Redirection::Get in the cpp file).
+  Mutex cacheLock_;
+
+  Redirection* redirection_;
+  ICacheMap icache_;
+
+ public:
+  static ICacheMap& icache() {
+    // Technically we need the lock to access the innards of the
+    // icache, not to take its address, but the latter condition
+    // serves as a useful complement to the former.
+    singleton_->cacheLock_.assertOwnedByCurrentThread();
+    return singleton_->icache_;
+  }
+
+  static Redirection* redirection() {
+    singleton_->cacheLock_.assertOwnedByCurrentThread();
+    return singleton_->redirection_;
+  }
+
+  static void setRedirection(js::jit::Redirection* redirection) {
+    singleton_->cacheLock_.assertOwnedByCurrentThread();
+    singleton_->redirection_ = redirection;
+  }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_MIPS32 */
+
+#endif /* jit_mips32_Simulator_mips32_h */
diff --git a/js/src/jit/mips32/Trampoline-mips32.cpp b/js/src/jit/mips32/Trampoline-mips32.cpp
new file mode 100644
index 0000000000..7506953cd6
--- /dev/null
+++ b/js/src/jit/mips32/Trampoline-mips32.cpp
@@ -0,0 +1,1345 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#include "jit/mips32/Bailouts-mips32.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// mips32 is a 32-bit target: pointer-sized stack slots are 32 bits throughout.
+static_assert(sizeof(uintptr_t) == sizeof(uint32_t), "Not 64-bit clean.");
+
+struct EnterJITRegs {
+  // Non-volatile (callee-saved) double FPU registers.
+  double f30;
+  double f28;
+  double f26;
+  double f24;
+  double f22;
+  double f20;
+
+  // non-volatile registers.
+  uintptr_t ra;
+  uintptr_t fp;
+  uintptr_t s7;
+  uintptr_t s6;
+  uintptr_t s5;
+  uintptr_t s4;
+  uintptr_t s3;
+  uintptr_t s2;
+  uintptr_t s1;
+  uintptr_t s0;
+};
+
+struct EnterJITArgs {
+  // First 4 argument placeholders
+  void* jitcode;  // <- sp points here when function is entered.
+  int maxArgc;
+  Value* maxArgv;
+  InterpreterFrame* fp;
+
+  // Arguments on stack
+  CalleeToken calleeToken;
+  JSObject* scopeChain;
+  size_t numStackValues;
+  Value* vp;
+};
+
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+  // NOTE(review): |returnCode| is currently unused by this helper; the
+  // caller passes ShortJump here.
+  MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+  // Restore non-volatile registers
+  masm.as_lw(s0, StackPointer, offsetof(EnterJITRegs, s0));
+  masm.as_lw(s1, StackPointer, offsetof(EnterJITRegs, s1));
+  masm.as_lw(s2, StackPointer, offsetof(EnterJITRegs, s2));
+  masm.as_lw(s3, StackPointer, offsetof(EnterJITRegs, s3));
+  masm.as_lw(s4, StackPointer, offsetof(EnterJITRegs, s4));
+  masm.as_lw(s5, StackPointer, offsetof(EnterJITRegs, s5));
+  masm.as_lw(s6, StackPointer, offsetof(EnterJITRegs, s6));
+  masm.as_lw(s7, StackPointer, offsetof(EnterJITRegs, s7));
+  masm.as_lw(fp, StackPointer, offsetof(EnterJITRegs, fp));
+  masm.as_lw(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+  // Restore non-volatile floating point registers
+  masm.as_ldc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
+  masm.as_ldc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
+  masm.as_ldc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+  masm.as_ldc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+  masm.as_ldc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+  masm.as_ldc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+
+  masm.freeStack(sizeof(EnterJITRegs));
+
+  masm.branch(ra);
+}
+
+static void GeneratePrologue(MacroAssembler& masm) {
+  // Save non-volatile registers. These must be saved by the trampoline,
+  // rather than the JIT'd code, because they are scanned by the conservative
+  // scanner.
+  // (Keep this store layout in sync with the loads in GenerateReturn.)
+  masm.reserveStack(sizeof(EnterJITRegs));
+  masm.as_sw(s0, StackPointer, offsetof(EnterJITRegs, s0));
+  masm.as_sw(s1, StackPointer, offsetof(EnterJITRegs, s1));
+  masm.as_sw(s2, StackPointer, offsetof(EnterJITRegs, s2));
+  masm.as_sw(s3, StackPointer, offsetof(EnterJITRegs, s3));
+  masm.as_sw(s4, StackPointer, offsetof(EnterJITRegs, s4));
+  masm.as_sw(s5, StackPointer, offsetof(EnterJITRegs, s5));
+  masm.as_sw(s6, StackPointer, offsetof(EnterJITRegs, s6));
+  masm.as_sw(s7, StackPointer, offsetof(EnterJITRegs, s7));
+  masm.as_sw(fp, StackPointer, offsetof(EnterJITRegs, fp));
+  masm.as_sw(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+  masm.as_sdc1(f20, StackPointer, offsetof(EnterJITRegs, f20));
+  masm.as_sdc1(f22, StackPointer, offsetof(EnterJITRegs, f22));
+  masm.as_sdc1(f24, StackPointer, offsetof(EnterJITRegs, f24));
+  masm.as_sdc1(f26, StackPointer, offsetof(EnterJITRegs, f26));
+  masm.as_sdc1(f28, StackPointer, offsetof(EnterJITRegs, f28));
+  masm.as_sdc1(f30, StackPointer, offsetof(EnterJITRegs, f30));
+}
+
+/*
+ * This method generates a trampoline for a c++ function with the following
+ * signature:
+ * void enter(void* code, int argc, Value* argv, InterpreterFrame* fp,
+ * CalleeToken calleeToken, JSObject* scopeChain, Value* vp)
+ * ...using standard EABI calling convention
+ */
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+  enterJITOffset_ = startTrampolineCode(masm);
+
+  const Register reg_code = a0;
+  const Register reg_argc = a1;
+  const Register reg_argv = a2;
+  const mozilla::DebugOnly<Register> reg_frame = a3;
+
+  MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+  GeneratePrologue(masm);
+
+  const Address slotToken(
+      sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, calleeToken));
+  const Address slotVp(sp, sizeof(EnterJITRegs) + offsetof(EnterJITArgs, vp));
+
+  // Save stack pointer into s4
+  masm.movePtr(StackPointer, s4);
+
+  // Load calleeToken into s2.
+  masm.loadPtr(slotToken, s2);
+
+  // Save stack pointer as baseline frame.
+  masm.movePtr(StackPointer, BaselineFrameReg);
+
+  // Load the number of actual arguments into s3.
+  masm.loadPtr(slotVp, s3);
+  masm.unboxInt32(Address(s3, 0), s3);
+
+  /***************************************************************
+  Loop over argv vector, push arguments onto stack in reverse order
+  ***************************************************************/
+
+  // if we are constructing, that also needs to include newTarget
+  {
+    Label noNewTarget;
+    masm.branchTest32(Assembler::Zero, s2,
+                      Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+    masm.add32(Imm32(1), reg_argc);
+
+    masm.bind(&noNewTarget);
+  }
+
+  masm.as_sll(s0, reg_argc, 3);  // s0 = argc * 8
+  masm.addPtr(reg_argv, s0);     // s0 = argv + argc * 8
+
+  // Loop over arguments, copying them from an unknown buffer onto the Ion
+  // stack so they can be accessed from JIT'ed code.
+  Label header, footer;
+  // If there aren't any arguments, don't do anything
+  masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+  {
+    masm.bind(&header);
+
+    masm.subPtr(Imm32(2 * sizeof(uintptr_t)), s0);
+    masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+
+    ValueOperand value = ValueOperand(s6, s7);
+    masm.loadValue(Address(s0, 0), value);
+    masm.storeValue(value, Address(StackPointer, 0));
+
+    masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+  }
+  masm.bind(&footer);
+
+  masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+  masm.storePtr(s3,
+                Address(StackPointer, sizeof(uintptr_t)));  // actual arguments
+  masm.storePtr(s2, Address(StackPointer, 0));              // callee token
+
+  masm.subPtr(StackPointer, s4);
+  masm.makeFrameDescriptor(s4, FrameType::CppToJSJit, JitFrameLayout::Size());
+  masm.push(s4);  // descriptor
+
+  CodeLabel returnLabel;
+  CodeLabel oomReturnLabel;
+  {
+    // Handle Interpreter -> Baseline OSR.
+    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+    regs.take(OsrFrameReg);
+    regs.take(BaselineFrameReg);
+    regs.take(reg_code);
+    regs.take(ReturnReg);
+
+    const Address slotNumStackValues(
+        BaselineFrameReg,
+        sizeof(EnterJITRegs) + offsetof(EnterJITArgs, numStackValues));
+    const Address slotScopeChain(
+        BaselineFrameReg,
+        sizeof(EnterJITRegs) + offsetof(EnterJITArgs, scopeChain));
+
+    Label notOsr;
+    // OsrFrameReg is null on a plain (non-OSR) entry.
+    masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+    Register scratch = regs.takeAny();
+
+    Register numStackValues = regs.takeAny();
+    masm.load32(slotNumStackValues, numStackValues);
+
+    // Push return address.
+    masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+    masm.ma_li(scratch, &returnLabel);
+    masm.storePtr(scratch, Address(StackPointer, 0));
+
+    // Push previous frame pointer.
+    masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+    masm.storePtr(BaselineFrameReg, Address(StackPointer, 0));
+
+    // Reserve frame.
+    Register framePtr = BaselineFrameReg;
+    masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+    masm.movePtr(StackPointer, framePtr);
+
+    // Reserve space for locals and stack values.
+    masm.ma_sll(scratch, numStackValues, Imm32(3));
+    masm.subPtr(scratch, StackPointer);
+
+    // Enter exit frame.
+    masm.addPtr(
+        Imm32(BaselineFrame::Size() + BaselineFrame::FramePointerOffset),
+        scratch);
+    masm.makeFrameDescriptor(scratch, FrameType::BaselineJS,
+                             ExitFrameLayout::Size());
+
+    // Push frame descriptor and fake return address.
+    masm.reserveStack(2 * sizeof(uintptr_t));
+    masm.storePtr(
+        scratch, Address(StackPointer, sizeof(uintptr_t)));  // Frame descriptor
+    masm.storePtr(zero, Address(StackPointer, 0));  // fake return address
+
+    // No GC things to mark, push a bare token.
+    masm.loadJSContext(scratch);
+    masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+    masm.reserveStack(2 * sizeof(uintptr_t));
+    masm.storePtr(framePtr,
+                  Address(StackPointer, sizeof(uintptr_t)));  // BaselineFrame
+    masm.storePtr(reg_code, Address(StackPointer, 0));  // jitcode
+
+    using Fn = bool (*)(BaselineFrame * frame, InterpreterFrame * interpFrame,
+                        uint32_t numStackValues);
+    masm.setupUnalignedABICall(scratch);
+    masm.passABIArg(BaselineFrameReg);  // BaselineFrame
+    masm.passABIArg(OsrFrameReg);       // InterpreterFrame
+    masm.passABIArg(numStackValues);
+    masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+        MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+    regs.add(OsrFrameReg);
+    regs.take(JSReturnOperand);
+    Register jitcode = regs.takeAny();
+    masm.loadPtr(Address(StackPointer, 0), jitcode);
+    masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+    masm.freeStack(2 * sizeof(uintptr_t));
+
+    Label error;
+    masm.freeStack(ExitFrameLayout::SizeWithFooter());
+    masm.addPtr(Imm32(BaselineFrame::Size()), framePtr);
+    masm.branchIfFalseBool(ReturnReg, &error);
+
+    // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+    // if profiler instrumentation is enabled.
+    {
+      Label skipProfilingInstrumentation;
+      Register realFramePtr = numStackValues;
+      AbsoluteAddress addressOfEnabled(
+          cx->runtime()->geckoProfiler().addressOfEnabled());
+      masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+                    &skipProfilingInstrumentation);
+      masm.ma_addu(realFramePtr, framePtr, Imm32(sizeof(void*)));
+      masm.profilerEnterFrame(realFramePtr, scratch);
+      masm.bind(&skipProfilingInstrumentation);
+    }
+
+    masm.jump(jitcode);
+
+    // OOM: load error value, discard return address and previous frame
+    // pointer and return.
+    masm.bind(&error);
+    masm.movePtr(framePtr, StackPointer);
+    masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+    masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+    masm.ma_li(scratch, &oomReturnLabel);
+    masm.jump(scratch);
+
+    masm.bind(&notOsr);
+    // Load the scope chain in R1.
+    MOZ_ASSERT(R1.scratchReg() != reg_code);
+    masm.loadPtr(slotScopeChain, R1.scratchReg());
+  }
+
+  // The call will push the return address on the stack, thus we check that
+  // the stack would be aligned once the call is complete.
+  masm.assertStackAlignment(JitStackAlignment, sizeof(uintptr_t));
+
+  // Call the function with pushing return address to stack.
+  masm.callJitNoProfiler(reg_code);
+
+  {
+    // Interpreter -> Baseline OSR will return here.
+    masm.bind(&returnLabel);
+    masm.addCodeLabel(returnLabel);
+    masm.bind(&oomReturnLabel);
+    masm.addCodeLabel(oomReturnLabel);
+  }
+
+  // Pop arguments off the stack.
+  // s0 <- 8*argc (size of all arguments we pushed on the stack)
+  masm.pop(s0);
+  masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), s0);
+  masm.addPtr(s0, StackPointer);
+
+  // Store the returned value into the slotVp
+  masm.loadPtr(slotVp, s1);
+  masm.storeValue(JSReturnOperand, Address(s1, 0));
+
+  // Restore non-volatile registers and return.
+  GenerateReturn(masm, ShortJump);
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+  invalidatorOffset_ = startTrampolineCode(masm);
+
+  // NOTE: Members ionScript_ and osiPointReturnAddress_ of
+  // InvalidationBailoutStack are already on the stack.
+  static const uint32_t STACK_DATA_SIZE =
+      sizeof(InvalidationBailoutStack) - 2 * sizeof(uintptr_t);
+
+  // Stack has to be aligned here. If not, we will have to fix it.
+  masm.checkStackAlignment();
+
+  // Make room for data on stack.
+  masm.subPtr(Imm32(STACK_DATA_SIZE), StackPointer);
+
+  // Save general purpose registers
+  for (uint32_t i = 0; i < Registers::Total; i++) {
+    Address address =
+        Address(StackPointer, InvalidationBailoutStack::offsetOfRegs() +
+                                  i * sizeof(uintptr_t));
+    masm.storePtr(Register::FromCode(i), address);
+  }
+
+  // Save floating point registers
+  // We can use as_sdc1 because the stack is aligned.
+  for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++) {
+    masm.as_sdc1(
+        FloatRegister::FromIndex(i, FloatRegister::Double), StackPointer,
+        InvalidationBailoutStack::offsetOfFpRegs() + i * sizeof(double));
+  }
+
+  // Pass pointer to InvalidationBailoutStack structure.
+  masm.movePtr(StackPointer, a0);
+
+  // Reserve place for return value and BailoutInfo pointer
+  masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+  // Pass pointer to return value.
+  masm.ma_addu(a1, StackPointer, Imm32(sizeof(uintptr_t)));
+  // Pass pointer to BailoutInfo
+  masm.movePtr(StackPointer, a2);
+
+  using Fn = bool (*)(InvalidationBailoutStack * sp, size_t * frameSizeOut,
+                      BaselineBailoutInfo * *info);
+  masm.setupAlignedABICall();
+  masm.passABIArg(a0);
+  masm.passABIArg(a1);
+  masm.passABIArg(a2);
+  masm.callWithABI<Fn, InvalidationBailout>(
+      MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
+
+  masm.loadPtr(Address(StackPointer, 0), a2);
+  masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), a1);
+  // Remove the return address, the IonScript, the register state
+  // (InvalidationBailoutStack) and the space that was allocated for the
+  // return value.
+  masm.addPtr(Imm32(sizeof(InvalidationBailoutStack) + 2 * sizeof(uintptr_t)),
+              StackPointer);
+  // remove the space that this frame was using before the bailout
+  // (computed by InvalidationBailout)
+  masm.addPtr(a1, StackPointer);
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+  masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+                                            ArgumentsRectifierKind kind) {
+  switch (kind) {
+    case ArgumentsRectifierKind::Normal:
+      argumentsRectifierOffset_ = startTrampolineCode(masm);
+      break;
+    case ArgumentsRectifierKind::TrialInlining:
+      trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+      break;
+  }
+  masm.pushReturnAddress();
+
+  Register numActArgsReg = t6;
+  Register calleeTokenReg = t7;
+  Register numArgsReg = t5;
+
+  // Load the number of actual arguments into numActArgsReg
+  masm.loadPtr(
+      Address(StackPointer, RectifierFrameLayout::offsetOfNumActualArgs()),
+      numActArgsReg);
+
+  // Load the callee token; the number of |undefined|s to push (t1) is
+  // derived from its nargs below.
+  masm.loadPtr(
+      Address(StackPointer, RectifierFrameLayout::offsetOfCalleeToken()),
+      calleeTokenReg);
+
+  // Copy the number of actual arguments into s3.
+  masm.mov(numActArgsReg, s3);
+
+  masm.mov(calleeTokenReg, numArgsReg);
+  masm.andPtr(Imm32(CalleeTokenMask), numArgsReg);
+  masm.load16ZeroExtend(Address(numArgsReg, JSFunction::offsetOfNargs()),
+                        numArgsReg);
+
+  // t1 = number of missing (undefined) arguments to push.
+  masm.as_subu(t1, numArgsReg, s3);
+
+  // Get the topmost argument.
+  masm.ma_sll(t0, s3, Imm32(3));  // t0 <- nargs * 8
+  masm.as_addu(t2, sp, t0);       // t2 <- sp + nargs * 8
+  masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t2);
+
+  {
+    Label notConstructing;
+
+    masm.branchTest32(Assembler::Zero, calleeTokenReg,
+                      Imm32(CalleeToken_FunctionConstructing),
+                      &notConstructing);
+
+    // Add sizeof(Value) to overcome |this|
+    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+    masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET + sizeof(Value)), t0);
+    masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+    masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET + sizeof(Value)), t0);
+    masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+    // Include the newly pushed newTarget value in the frame size
+    // calculated below.
+    masm.add32(Imm32(1), numArgsReg);
+
+    masm.bind(&notConstructing);
+  }
+
+  // Push undefined.
+  masm.moveValue(UndefinedValue(), ValueOperand(t3, t4));
+  {
+    Label undefLoopTop;
+    masm.bind(&undefLoopTop);
+
+    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+    masm.storeValue(ValueOperand(t3, t4), Address(StackPointer, 0));
+    masm.sub32(Imm32(1), t1);
+
+    masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+  }
+
+  // Push arguments, |nargs| + 1 times (to include |this|).
+  {
+    Label copyLoopTop, initialSkip;
+
+    masm.ma_b(&initialSkip, ShortJump);
+
+    masm.bind(&copyLoopTop);
+    masm.subPtr(Imm32(sizeof(Value)), t2);
+    masm.sub32(Imm32(1), s3);
+
+    masm.bind(&initialSkip);
+
+    MOZ_ASSERT(sizeof(Value) == 2 * sizeof(uint32_t));
+    // Read argument and push to stack.
+    masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+    masm.load32(Address(t2, NUNBOX32_TYPE_OFFSET), t0);
+    masm.store32(t0, Address(StackPointer, NUNBOX32_TYPE_OFFSET));
+    masm.load32(Address(t2, NUNBOX32_PAYLOAD_OFFSET), t0);
+    masm.store32(t0, Address(StackPointer, NUNBOX32_PAYLOAD_OFFSET));
+
+    masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+  }
+
+  // translate the framesize from values into bytes
+  masm.ma_addu(t0, numArgsReg, Imm32(1));
+  masm.lshiftPtr(Imm32(3), t0);
+
+  // Construct sizeDescriptor.
+  masm.makeFrameDescriptor(t0, FrameType::Rectifier, JitFrameLayout::Size());
+
+  // Construct JitFrameLayout.
+  masm.subPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+  // Push actual arguments.
+  masm.storePtr(numActArgsReg, Address(StackPointer, 2 * sizeof(uintptr_t)));
+  // Push callee token.
+  masm.storePtr(calleeTokenReg, Address(StackPointer, sizeof(uintptr_t)));
+  // Push frame descriptor.
+  masm.storePtr(t0, Address(StackPointer, 0));
+
+  // Call the target function.
+  masm.andPtr(Imm32(CalleeTokenMask), calleeTokenReg);
+  switch (kind) {
+    case ArgumentsRectifierKind::Normal:
+      masm.loadJitCodeRaw(calleeTokenReg, t1);
+      argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+      break;
+    case ArgumentsRectifierKind::TrialInlining:
+      Label noBaselineScript, done;
+      masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+      masm.callJitNoProfiler(t1);
+      masm.jump(&done);
+
+      // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+      masm.bind(&noBaselineScript);
+      masm.loadJitCodeRaw(calleeTokenReg, t1);
+      masm.callJitNoProfiler(t1);
+      masm.bind(&done);
+      break;
+  }
+
+  // arg1
+  // ...
+  // argN
+  // num actual args
+  // callee token
+  // sizeDescriptor <- sp now
+  // return address
+
+  // Remove the rectifier frame.
+  // t0 <- descriptor with FrameType.
+  masm.loadPtr(Address(StackPointer, 0), t0);
+  masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), t0);  // t0 <- descriptor.
+
+  // Discard descriptor, calleeToken and number of actual arguments.
+  masm.addPtr(Imm32(3 * sizeof(uintptr_t)), StackPointer);
+
+  // arg1
+  // ...
+  // argN <- sp now; t0 <- frame descriptor
+  // num actual args
+  // callee token
+  // sizeDescriptor
+  // return address
+
+  // Discard pushed arguments.
+  masm.addPtr(t0, StackPointer);
+
+  masm.ret();
+}
+
+// NOTE: Members snapshotOffset_ and padding_ of BailoutStack
+// are not stored in PushBailoutFrame().
+static const uint32_t bailoutDataSize =
+    sizeof(BailoutStack) - 2 * sizeof(uintptr_t);
+static const uint32_t bailoutInfoOutParamSize = 2 * sizeof(uintptr_t);
+
+/* There are two different stack layouts when doing bailout. They are
+ * represented via class BailoutStack.
+ *
+ * - First case is when bailout is done through bailout table. In this case
+ *   table offset is stored in $ra (look at JitRuntime::generateBailoutTable())
+ *   and thunk code should save it on stack. In this case frameClassId_ cannot
+ *   be NO_FRAME_SIZE_CLASS_ID. Members snapshotOffset_ and padding_ are not on
+ *   the stack.
+ *
+ * - Other case is when bailout is done via out of line code (lazy bailout).
+ *   In this case frame size is stored in $ra (look at
+ *   CodeGeneratorMIPS::generateOutOfLineCode()) and thunk code should save it
+ *   on stack. Other difference is that members snapshotOffset_ and padding_ are
+ *   pushed to the stack by CodeGeneratorMIPS::visitOutOfLineBailout(). Field
+ *   frameClassId_ is forced to be NO_FRAME_SIZE_CLASS_ID
+ *   (See: JitRuntime::generateBailoutHandler).
+ */
+static void PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass,
+                             Register spArg) {
+  // Make sure that alignment is proper.
+  masm.checkStackAlignment();
+
+  // Make room for data.
+  masm.subPtr(Imm32(bailoutDataSize), StackPointer);
+
+  // Save general purpose registers.
+  for (uint32_t i = 0; i < Registers::Total; i++) {
+    uint32_t off = BailoutStack::offsetOfRegs() + i * sizeof(uintptr_t);
+    masm.storePtr(Register::FromCode(i), Address(StackPointer, off));
+  }
+
+#ifdef ENABLE_WASM_SIMD
+  // What to do for SIMD?
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+  // Save floating point registers
+  // We can use as_sdc1 because stack is aligned.
+  for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++) {
+    masm.as_sdc1(FloatRegister::FromIndex(i, FloatRegister::Double),
+                 StackPointer,
+                 BailoutStack::offsetOfFpRegs() + i * sizeof(double));
+  }
+
+  // Store the frameSize_ or tableOffset_ stored in ra
+  // See: JitRuntime::generateBailoutTable()
+  // See: CodeGeneratorMIPS::generateOutOfLineCode()
+  masm.storePtr(ra, Address(StackPointer, BailoutStack::offsetOfFrameSize()));
+
+  // Put frame class to stack
+  masm.storePtr(ImmWord(frameClass),
+                Address(StackPointer, BailoutStack::offsetOfFrameClass()));
+
+  // Put pointer to BailoutStack as first argument to the Bailout()
+  masm.movePtr(StackPointer, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, uint32_t frameClass,
+                                 Label* bailoutTail) {
+  PushBailoutFrame(masm, frameClass, a0);
+
+  // Put pointer to BailoutInfo
+  masm.subPtr(Imm32(bailoutInfoOutParamSize), StackPointer);
+  masm.storePtr(ImmPtr(nullptr), Address(StackPointer, 0));
+  masm.movePtr(StackPointer, a1);
+
+  using Fn = bool (*)(BailoutStack * sp, BaselineBailoutInfo * *info);
+  masm.setupAlignedABICall();
+  masm.passABIArg(a0);
+  masm.passABIArg(a1);
+  masm.callWithABI<Fn, Bailout>(MoveOp::GENERAL,
+                                CheckUnsafeCallWithABI::DontCheckOther);
+
+  // Get BailoutInfo pointer
+  masm.loadPtr(Address(StackPointer, 0), a2);
+
+  // Remove both the bailout frame and the topmost Ion frame's stack.
+  if (frameClass == NO_FRAME_SIZE_CLASS_ID) {
+    // Load frameSize from stack
+    masm.loadPtr(Address(StackPointer, bailoutInfoOutParamSize +
+                                           BailoutStack::offsetOfFrameSize()),
+                 a1);
+
+    // Remove complete BailoutStack class and data after it
+    masm.addPtr(Imm32(sizeof(BailoutStack) + bailoutInfoOutParamSize),
+                StackPointer);
+    // Remove frame size from stack
+    masm.addPtr(a1, StackPointer);
+  } else {
+    uint32_t frameSize = FrameSizeClass::FromClass(frameClass).frameSize();
+    // Remove the data this function added and frame size.
+    masm.addPtr(Imm32(bailoutDataSize + bailoutInfoOutParamSize + frameSize),
+                StackPointer);
+  }
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+  masm.jump(bailoutTail);
+}
+
+JitRuntime::BailoutTable JitRuntime::generateBailoutTable(MacroAssembler& masm,
+                                                          Label* bailoutTail,
+                                                          uint32_t frameClass) {
+  uint32_t offset = startTrampolineCode(masm);
+
+  Label bailout;
+  for (size_t i = 0; i < BAILOUT_TABLE_SIZE; i++) {
+    // Calculate offset to the end of table (note: this local shadows the
+    // trampoline code |offset| declared above).
+    int32_t offset = (BAILOUT_TABLE_SIZE - i) * BAILOUT_TABLE_ENTRY_SIZE;
+
+    // We use the 'ra' as table offset later in GenerateBailoutThunk
+    masm.as_bal(BOffImm16(offset));
+    masm.nop();
+  }
+  masm.bind(&bailout);
+
+  GenerateBailoutThunk(masm, frameClass, bailoutTail);
+
+  return BailoutTable(offset, masm.currentOffset() - offset);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+                                        Label* bailoutTail) {
+  bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+  // Lazy (out-of-line) bailouts use NO_FRAME_SIZE_CLASS_ID; the frame size
+  // arrives in $ra (see the BailoutStack layout comment above).
+  GenerateBailoutThunk(masm, NO_FRAME_SIZE_CLASS_ID, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+                                   const VMFunctionData& f, DynFn nativeFun,
+                                   uint32_t* wrapperOffset) {
+  *wrapperOffset = startTrampolineCode(masm);
+
+  // Avoid conflicts with argument registers while discarding the result after
+  // the function call.
+  AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+  static_assert(
+      (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+      "Wrapper register set should be a superset of Volatile register set.");
+
+  // The context is the first argument; a0 is the first argument register.
+  Register cxreg = a0;
+  regs.take(cxreg);
+
+  // If it isn't a tail call, then the return address needs to be saved
+  if (f.expectTailCall == NonTailCall) {
+    masm.pushReturnAddress();
+  }
+
+  // We're aligned to an exit frame, so link it up.
+  masm.loadJSContext(cxreg);
+  masm.enterExitFrame(cxreg, regs.getAny(), &f);
+
+  // Save the base of the argument set stored on the stack.
+  Register argsBase = InvalidReg;
+  if (f.explicitArgs) {
+    argsBase = t1;  // Use temporary register.
+    regs.take(argsBase);
+    masm.ma_addu(argsBase, StackPointer,
+                 Imm32(ExitFrameLayout::SizeWithFooter()));
+  }
+  // Remember framePushed before the dynamic alignment below so the stack
+  // pointer state can be restored exactly afterwards.
+  uint32_t framePushedBeforeAlignStack = masm.framePushed();
+  masm.alignStackPointer();
+  masm.setFramePushed(0);
+
+  // Reserve space for the outparameter. Reserve sizeof(Value) for every
+  // case so that stack stays aligned.
+  uint32_t outParamSize = 0;
+  switch (f.outParam) {
+    case Type_Value:
+      outParamSize = sizeof(Value);
+      masm.reserveStack(outParamSize);
+      break;
+
+    case Type_Handle: {
+      uint32_t pushed = masm.framePushed();
+      masm.PushEmptyRooted(f.outParamRootType);
+      outParamSize = masm.framePushed() - pushed;
+    } break;
+
+    case Type_Bool:
+    case Type_Int32:
+      MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+      [[fallthrough]];
+    case Type_Pointer:
+      outParamSize = sizeof(uintptr_t);
+      masm.reserveStack(outParamSize);
+      break;
+
+    case Type_Double:
+      outParamSize = sizeof(double);
+      masm.reserveStack(outParamSize);
+      break;
+    default:
+      MOZ_ASSERT(f.outParam == Type_Void);
+      break;
+  }
+
+  uint32_t outParamOffset = 0;
+  if (f.outParam != Type_Void) {
+    // Make sure that stack is double aligned after outParam.
+    MOZ_ASSERT(outParamSize <= sizeof(double));
+    outParamOffset += sizeof(double) - outParamSize;
+  }
+  // Reserve stack for double sized args that are copied to be aligned.
+  outParamOffset += f.doubleByRefArgs() * sizeof(double);
+
+  Register doubleArgs = t0;
+  masm.reserveStack(outParamOffset);
+  masm.movePtr(StackPointer, doubleArgs);
+
+  if (!generateTLEnterVM(masm, f)) {
+    return false;
+  }
+
+  masm.setupAlignedABICall();
+  masm.passABIArg(cxreg);
+
+  size_t argDisp = 0;
+  size_t doubleArgDisp = 0;
+
+  // Copy any arguments.
+  for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+    switch (f.argProperties(explicitArg)) {
+      case VMFunctionData::WordByValue:
+        masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::GENERAL);
+        argDisp += sizeof(uint32_t);
+        break;
+      case VMFunctionData::DoubleByValue:
+        // Values should be passed by reference, not by value, so we
+        // assert that the argument is a double-precision float.
+        MOZ_ASSERT(f.argPassedInFloatReg(explicitArg));
+        masm.passABIArg(MoveOperand(argsBase, argDisp), MoveOp::DOUBLE);
+        argDisp += sizeof(double);
+        break;
+      case VMFunctionData::WordByRef:
+        masm.passABIArg(
+            MoveOperand(argsBase, argDisp, MoveOperand::EFFECTIVE_ADDRESS),
+            MoveOp::GENERAL);
+        argDisp += sizeof(uint32_t);
+        break;
+      case VMFunctionData::DoubleByRef:
+        // Copy double sized argument to aligned place.
+        masm.ma_ldc1WordAligned(ScratchDoubleReg, argsBase, argDisp);
+        masm.as_sdc1(ScratchDoubleReg, doubleArgs, doubleArgDisp);
+        masm.passABIArg(MoveOperand(doubleArgs, doubleArgDisp,
+                                    MoveOperand::EFFECTIVE_ADDRESS),
+                        MoveOp::GENERAL);
+        doubleArgDisp += sizeof(double);
+        argDisp += sizeof(double);
+        break;
+    }
+  }
+
+  MOZ_ASSERT_IF(f.outParam != Type_Void, doubleArgDisp + sizeof(double) ==
+                                             outParamOffset + outParamSize);
+
+  // Copy the implicit outparam, if any.
+  if (f.outParam != Type_Void) {
+    masm.passABIArg(
+        MoveOperand(doubleArgs, outParamOffset, MoveOperand::EFFECTIVE_ADDRESS),
+        MoveOp::GENERAL);
+  }
+
+  masm.callWithABI(nativeFun, MoveOp::GENERAL,
+                   CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+  if (!generateTLExitVM(masm, f)) {
+    return false;
+  }
+
+  // Test for failure.
+  switch (f.failType()) {
+    case Type_Object:
+      masm.branchTestPtr(Assembler::Zero, v0, v0, masm.failureLabel());
+      break;
+    case Type_Bool:
+      // Called functions return bools, which are 0/false and non-zero/true
+      masm.branchIfFalseBool(v0, masm.failureLabel());
+      break;
+    case Type_Void:
+      break;
+    default:
+      MOZ_CRASH("unknown failure kind");
+  }
+
+  masm.freeStack(outParamOffset);
+
+  // Load the outparam and free any allocated stack.
+  switch (f.outParam) {
+    case Type_Handle:
+      masm.popRooted(f.outParamRootType, ReturnReg, JSReturnOperand);
+      break;
+
+    case Type_Value:
+      masm.loadValue(Address(StackPointer, 0), JSReturnOperand);
+      masm.freeStack(sizeof(Value));
+      break;
+
+    case Type_Int32:
+      MOZ_ASSERT(sizeof(uintptr_t) == sizeof(uint32_t));
+      [[fallthrough]];
+    case Type_Pointer:
+      masm.load32(Address(StackPointer, 0), ReturnReg);
+      masm.freeStack(sizeof(uintptr_t));
+      break;
+
+    case Type_Bool:
+      masm.load8ZeroExtend(Address(StackPointer, 0), ReturnReg);
+      masm.freeStack(sizeof(uintptr_t));
+      break;
+
+    case Type_Double:
+      if (JitOptions.supportsFloatingPoint) {
+        masm.as_ldc1(ReturnDoubleReg, StackPointer, 0);
+      } else {
+        masm.assumeUnreachable(
+            "Unable to load into float reg, with no FP support.");
+      }
+      masm.freeStack(sizeof(double));
+      break;
+
+    default:
+      MOZ_ASSERT(f.outParam == Type_Void);
+      break;
+  }
+
+  masm.restoreStackPointer();
+  masm.setFramePushed(framePushedBeforeAlignStack);
+
+  masm.leaveExitFrame();
+  masm.retn(Imm32(sizeof(ExitFrameLayout) +
+                  f.explicitStackSlots() * sizeof(uintptr_t) +
+                  f.extraValuesToPop * sizeof(Value)));
+
+  return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+                                        MIRType type) {
+  uint32_t offset = startTrampolineCode(masm);
+
+  MOZ_ASSERT(PreBarrierReg == a1);
+  Register temp1 = a0;
+  Register temp2 = a2;
+  Register temp3 = a3;
+  // Preserve the temps across the fast-path check.
+  masm.push(temp1);
+  masm.push(temp2);
+  masm.push(temp3);
+
+  Label noBarrier;
+  masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+                              &noBarrier);
+
+  // Call into C++ to mark this GC thing.
+  masm.pop(temp3);
+  masm.pop(temp2);
+  masm.pop(temp1);
+
+  LiveRegisterSet save;
+  if (JitOptions.supportsFloatingPoint) {
+    save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+                             FloatRegisterSet(FloatRegisters::VolatileMask));
+  } else {
+    save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+                             FloatRegisterSet());
+  }
+  save.add(ra);
+  masm.PushRegsInMask(save);
+
+  masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+  masm.setupUnalignedABICall(a2);
+  masm.passABIArg(a0);
+  masm.passABIArg(a1);
+  masm.callWithABI(JitMarkFunction(type));
+
+  save.take(AnyRegister(ra));
+  masm.PopRegsInMask(save);
+  masm.ret();
+
+  masm.bind(&noBarrier);
+  masm.pop(temp3);
+  masm.pop(temp2);
+  masm.pop(temp1);
+  masm.abiret();
+
+  return offset;
+}
+
+void JitRuntime::generateExceptionTailStub(MacroAssembler& masm,
+                                           Label* profilerExitTail) {
+  exceptionTailOffset_ = startTrampolineCode(masm);
+
+  // Bind the shared failure label so VM wrappers branch here on failure.
+  masm.bind(masm.failureLabel());
+  masm.handleFailureWithHandlerTail(profilerExitTail);
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+                                         Label* bailoutTail) {
+  bailoutTailOffset_ = startTrampolineCode(masm);
+  masm.bind(bailoutTail);
+
+  // a2 carries the BaselineBailoutInfo* left by GenerateBailoutThunk and
+  // generateInvalidator; a1 is a scratch register.
+  masm.generateBailoutTail(a1, a2);
+}
+
+// Generates the profiler exit-frame tail stub: on exit from a JS jit-frame it
+// walks one frame up the jit stack and refreshes the profiler's bookkeeping
+// (lastProfilingFrame / lastProfilingCallSite) before returning to the caller
+// script.  See the detailed walk-through comment below.
+void JitRuntime::generateProfilerExitFrameTailStub(MacroAssembler& masm,
+                                                   Label* profilerExitTail) {
+  profilerExitFrameTailOffset_ = startTrampolineCode(masm);
+  masm.bind(profilerExitTail);
+
+  // Temporaries for the stack walk below (t0-t3 on mips32).
+  Register scratch1 = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
+  Register scratch4 = t3;
+
+  //
+  // The code generated below expects that the current stack pointer points
+  // to an Ion or Baseline frame, at the state it would be immediately
+  // before a ret(). Thus, after this stub's business is done, it executes
+  // a ret() and returns directly to the caller script, on behalf of the
+  // callee script that jumped to this code.
+  //
+  // Thus the expected stack is:
+  //
+  //                                   StackPointer ----+
+  //                                                    v
+  // ..., ActualArgc, CalleeToken, Descriptor, ReturnAddr
+  // MEM-HI                                       MEM-LOW
+  //
+  //
+  // The generated jitcode is responsible for overwriting the
+  // jitActivation->lastProfilingFrame field with a pointer to the previous
+  // Ion or Baseline jit-frame that was pushed before this one. It is also
+  // responsible for overwriting jitActivation->lastProfilingCallSite with
+  // the return address into that frame. The frame could either be an
+  // immediate "caller" frame, or it could be a frame in a previous
+  // JitActivation (if the current frame was entered from C++, and the C++
+  // was entered by some caller jit-frame further down the stack).
+  //
+  // So this jitcode is responsible for "walking up" the jit stack, finding
+  // the previous Ion or Baseline JS frame, and storing its address and the
+  // return address into the appropriate fields on the current jitActivation.
+  //
+  // There are a fixed number of different path types that can lead to the
+  // current frame, which is either a baseline or ion frame:
+  //
+  // <Baseline-Or-Ion>
+  // ^
+  // |
+  // ^--- Ion
+  // |
+  // ^--- Baseline Stub <---- Baseline
+  // |
+  // ^--- Argument Rectifier
+  // |    ^
+  // |    |
+  // |    ^--- Ion
+  // |    |
+  // |    ^--- Baseline Stub <---- Baseline
+  // |
+  // ^--- Entry Frame (From C++)
+  //
+  Register actReg = scratch4;
+  masm.loadJSContext(actReg);
+  masm.loadPtr(Address(actReg, offsetof(JSContext, profilingActivation_)),
+               actReg);
+
+  Address lastProfilingFrame(actReg,
+                             JitActivation::offsetOfLastProfilingFrame());
+  Address lastProfilingCallSite(actReg,
+                                JitActivation::offsetOfLastProfilingCallSite());
+
+#ifdef DEBUG
+  // Ensure that frame we are exiting is current lastProfilingFrame
+  {
+    masm.loadPtr(lastProfilingFrame, scratch1);
+    Label checkOk;
+    masm.branchPtr(Assembler::Equal, scratch1, ImmWord(0), &checkOk);
+    masm.branchPtr(Assembler::Equal, StackPointer, scratch1, &checkOk);
+    masm.assumeUnreachable(
+        "Mismatch between stored lastProfilingFrame and current stack "
+        "pointer.");
+    masm.bind(&checkOk);
+  }
+#endif
+
+  // Load the frame descriptor into |scratch1|, figure out what to do depending
+  // on its type.
+  masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfDescriptor()),
+               scratch1);
+
+  // Going into the conditionals, we will have:
+  //      FrameDescriptor.size in scratch1
+  //      FrameDescriptor.type in scratch2
+  masm.ma_and(scratch2, scratch1, Imm32((1 << FRAMETYPE_BITS) - 1));
+  masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch1);
+
+  // Handling of each case is dependent on FrameDescriptor.type
+  Label handle_IonJS;
+  Label handle_BaselineStub;
+  Label handle_Rectifier;
+  Label handle_IonICCall;
+  Label handle_Entry;
+  Label end;
+
+  // BaselineJS frames share the IonJS handler: both are plain JS frames with
+  // the same JitFrameLayout at the stack pointer.
+  masm.branch32(Assembler::Equal, scratch2, Imm32(FrameType::IonJS),
+                &handle_IonJS);
+  masm.branch32(Assembler::Equal, scratch2, Imm32(FrameType::BaselineJS),
+                &handle_IonJS);
+  masm.branch32(Assembler::Equal, scratch2, Imm32(FrameType::BaselineStub),
+                &handle_BaselineStub);
+  masm.branch32(Assembler::Equal, scratch2, Imm32(FrameType::Rectifier),
+                &handle_Rectifier);
+  masm.branch32(Assembler::Equal, scratch2, Imm32(FrameType::IonICCall),
+                &handle_IonICCall);
+  masm.branch32(Assembler::Equal, scratch2, Imm32(FrameType::CppToJSJit),
+                &handle_Entry);
+
+  // The WasmToJSJit is just another kind of entry.
+  masm.branch32(Assembler::Equal, scratch2, Imm32(FrameType::WasmToJSJit),
+                &handle_Entry);
+
+  masm.assumeUnreachable(
+      "Invalid caller frame type when exiting from Ion frame.");
+
+  //
+  // FrameType::IonJS
+  //
+  // Stack layout:
+  //                  ...
+  //                  Ion-Descriptor
+  //     Prev-FP ---> Ion-ReturnAddr
+  //                  ... previous frame data ... |- Descriptor.Size
+  //                  ... arguments ...           |
+  //                  ActualArgc          |
+  //                  CalleeToken         |- JitFrameLayout::Size()
+  //                  Descriptor          |
+  //     FP -----> ReturnAddr           |
+  //
+  masm.bind(&handle_IonJS);
+  {
+    // |scratch1| contains Descriptor.size
+
+    // returning directly to an IonJS frame. Store return addr to frame
+    // in lastProfilingCallSite.
+    masm.loadPtr(Address(StackPointer, JitFrameLayout::offsetOfReturnAddress()),
+                 scratch2);
+    masm.storePtr(scratch2, lastProfilingCallSite);
+
+    // Store return frame in lastProfilingFrame.
+    // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+    masm.as_addu(scratch2, StackPointer, scratch1);
+    masm.ma_addu(scratch2, scratch2, Imm32(JitFrameLayout::Size()));
+    masm.storePtr(scratch2, lastProfilingFrame);
+    masm.ret();
+  }
+
+  //
+  // FrameType::BaselineStub
+  //
+  // Look past the stub and store the frame pointer to
+  // the baselineJS frame prior to it.
+  //
+  // Stack layout:
+  //              ...
+  //              BL-Descriptor
+  // Prev-FP ---> BL-ReturnAddr
+  //      +-----> BL-PrevFramePointer
+  //      |       ... BL-FrameData ...
+  //      |       BLStub-Descriptor
+  //      |       BLStub-ReturnAddr
+  //      |       BLStub-StubPointer          |
+  //      +------ BLStub-SavedFramePointer    |- Descriptor.Size
+  //              ... arguments ...           |
+  //              ActualArgc          |
+  //              CalleeToken         |- JitFrameLayout::Size()
+  //              Descriptor          |
+  //     FP -----> ReturnAddr           |
+  //
+  // We take advantage of the fact that the stub frame saves the frame
+  // pointer pointing to the baseline frame, so a bunch of calculation can
+  // be avoided.
+  //
+  masm.bind(&handle_BaselineStub);
+  {
+    masm.as_addu(scratch3, StackPointer, scratch1);
+    Address stubFrameReturnAddr(
+        scratch3, JitFrameLayout::Size() +
+                      BaselineStubFrameLayout::offsetOfReturnAddress());
+    masm.loadPtr(stubFrameReturnAddr, scratch2);
+    masm.storePtr(scratch2, lastProfilingCallSite);
+
+    Address stubFrameSavedFramePtr(
+        scratch3, JitFrameLayout::Size() - (2 * sizeof(void*)));
+    masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+    masm.addPtr(Imm32(sizeof(void*)), scratch2);  // Skip past BL-PrevFramePtr
+    masm.storePtr(scratch2, lastProfilingFrame);
+    masm.ret();
+  }
+
+  //
+  // FrameType::Rectifier
+  //
+  // The rectifier frame can be preceded by either an IonJS, a BaselineStub,
+  // or a CppToJSJit/WasmToJSJit frame.
+  //
+  // Stack layout if caller of rectifier was Ion or CppToJSJit/WasmToJSJit:
+  //
+  //              Ion-Descriptor
+  //              Ion-ReturnAddr
+  //              ... ion frame data ... |- Rect-Descriptor.Size
+  //              < COMMON LAYOUT >
+  //
+  // Stack layout if caller of rectifier was Baseline:
+  //
+  //              BL-Descriptor
+  // Prev-FP ---> BL-ReturnAddr
+  //      +-----> BL-SavedFramePointer
+  //      |       ... baseline frame data ...
+  //      |       BLStub-Descriptor
+  //      |       BLStub-ReturnAddr
+  //      |       BLStub-StubPointer          |
+  //      +------ BLStub-SavedFramePointer    |- Rect-Descriptor.Size
+  //              ... args to rectifier ...   |
+  //              < COMMON LAYOUT >
+  //
+  // Common stack layout:
+  //
+  //              ActualArgc          |
+  //              CalleeToken         |- RectifierFrameLayout::Size()
+  //              Rect-Descriptor     |
+  //              Rect-ReturnAddr     |
+  //              ... rectifier data & args ... |- Descriptor.Size
+  //              ActualArgc      |
+  //              CalleeToken     |- JitFrameLayout::Size()
+  //              Descriptor      |
+  //    FP -----> ReturnAddr      |
+  //
+  masm.bind(&handle_Rectifier);
+  {
+    // scratch2 := StackPointer + Descriptor.size*1 + JitFrameLayout::Size();
+    masm.as_addu(scratch2, StackPointer, scratch1);
+    masm.add32(Imm32(JitFrameLayout::Size()), scratch2);
+    masm.loadPtr(Address(scratch2, RectifierFrameLayout::offsetOfDescriptor()),
+                 scratch3);
+    masm.ma_srl(scratch1, scratch3, Imm32(FRAMESIZE_SHIFT));
+    masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch3);
+
+    // Now |scratch1| contains Rect-Descriptor.Size
+    // and |scratch2| points to Rectifier frame
+    // and |scratch3| contains Rect-Descriptor.Type
+
+    masm.assertRectifierFrameParentType(scratch3);
+
+    // Check for either Ion or BaselineStub frame.
+    Label notIonFrame;
+    masm.branch32(Assembler::NotEqual, scratch3, Imm32(FrameType::IonJS),
+                  &notIonFrame);
+
+    // Handle Rectifier <- IonJS
+    // scratch3 := RectFrame[ReturnAddr]
+    masm.loadPtr(
+        Address(scratch2, RectifierFrameLayout::offsetOfReturnAddress()),
+        scratch3);
+    masm.storePtr(scratch3, lastProfilingCallSite);
+
+    // scratch3 := RectFrame + Rect-Descriptor.Size +
+    //             RectifierFrameLayout::Size()
+    masm.as_addu(scratch3, scratch2, scratch1);
+    masm.add32(Imm32(RectifierFrameLayout::Size()), scratch3);
+    masm.storePtr(scratch3, lastProfilingFrame);
+    masm.ret();
+
+    masm.bind(&notIonFrame);
+
+    // Check for either BaselineStub or a CppToJSJit/WasmToJSJit entry
+    // frame.
+    masm.branch32(Assembler::NotEqual, scratch3, Imm32(FrameType::BaselineStub),
+                  &handle_Entry);
+
+    // Handle Rectifier <- BaselineStub <- BaselineJS
+    masm.as_addu(scratch3, scratch2, scratch1);
+    Address stubFrameReturnAddr(
+        scratch3, RectifierFrameLayout::Size() +
+                      BaselineStubFrameLayout::offsetOfReturnAddress());
+    masm.loadPtr(stubFrameReturnAddr, scratch2);
+    masm.storePtr(scratch2, lastProfilingCallSite);
+
+    Address stubFrameSavedFramePtr(
+        scratch3, RectifierFrameLayout::Size() - (2 * sizeof(void*)));
+    masm.loadPtr(stubFrameSavedFramePtr, scratch2);
+    masm.addPtr(Imm32(sizeof(void*)), scratch2);
+    masm.storePtr(scratch2, lastProfilingFrame);
+    masm.ret();
+  }
+
+  // FrameType::IonICCall
+  //
+  // The caller is always an IonJS frame.
+  //
+  //              Ion-Descriptor
+  //              Ion-ReturnAddr
+  //              ... ion frame data ... |- CallFrame-Descriptor.Size
+  //              StubCode               |
+  //              ICCallFrame-Descriptor |- IonICCallFrameLayout::Size()
+  //              ICCallFrame-ReturnAddr |
+  //              ... call frame data & args ... |- Descriptor.Size
+  //              ActualArgc      |
+  //              CalleeToken     |- JitFrameLayout::Size()
+  //              Descriptor      |
+  //    FP -----> ReturnAddr      |
+  masm.bind(&handle_IonICCall);
+  {
+    // scratch2 := StackPointer + Descriptor.size + JitFrameLayout::Size()
+    masm.as_addu(scratch2, StackPointer, scratch1);
+    masm.addPtr(Imm32(JitFrameLayout::Size()), scratch2);
+
+    // scratch3 := ICCallFrame-Descriptor.Size
+    masm.loadPtr(Address(scratch2, IonICCallFrameLayout::offsetOfDescriptor()),
+                 scratch3);
+#ifdef DEBUG
+    // Assert previous frame is an IonJS frame.
+    masm.movePtr(scratch3, scratch1);
+    masm.and32(Imm32((1 << FRAMETYPE_BITS) - 1), scratch1);
+    {
+      Label checkOk;
+      masm.branch32(Assembler::Equal, scratch1, Imm32(FrameType::IonJS),
+                    &checkOk);
+      masm.assumeUnreachable("IonICCall frame must be preceded by IonJS frame");
+      masm.bind(&checkOk);
+    }
+#endif
+    masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), scratch3);
+
+    // lastProfilingCallSite := ICCallFrame-ReturnAddr
+    masm.loadPtr(
+        Address(scratch2, IonICCallFrameLayout::offsetOfReturnAddress()),
+        scratch1);
+    masm.storePtr(scratch1, lastProfilingCallSite);
+
+    // lastProfilingFrame := ICCallFrame + ICCallFrame-Descriptor.Size +
+    //                       IonICCallFrameLayout::Size()
+    masm.as_addu(scratch1, scratch2, scratch3);
+    masm.addPtr(Imm32(IonICCallFrameLayout::Size()), scratch1);
+    masm.storePtr(scratch1, lastProfilingFrame);
+    masm.ret();
+  }
+
+  //
+  // FrameType::CppToJSJit / FrameType::WasmToJSJit
+  //
+  // If at an entry frame, store null into both fields.
+  // A fast-path wasm->jit transition frame is an entry frame from the point
+  // of view of the JIT.
+  //
+  masm.bind(&handle_Entry);
+  {
+    masm.movePtr(ImmPtr(nullptr), scratch1);
+    masm.storePtr(scratch1, lastProfilingCallSite);
+    masm.storePtr(scratch1, lastProfilingFrame);
+    masm.ret();
+  }
+}