summaryrefslogtreecommitdiffstats
path: root/js/src/jit/mips-shared
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /js/src/jit/mips-shared
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/jit/mips-shared')
-rw-r--r--js/src/jit/mips-shared/Architecture-mips-shared.cpp121
-rw-r--r--js/src/jit/mips-shared/Architecture-mips-shared.h341
-rw-r--r--js/src/jit/mips-shared/Assembler-mips-shared.cpp2094
-rw-r--r--js/src/jit/mips-shared/Assembler-mips-shared.h1500
-rw-r--r--js/src/jit/mips-shared/AtomicOperations-mips-shared.h521
-rw-r--r--js/src/jit/mips-shared/BaselineIC-mips-shared.cpp37
-rw-r--r--js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp2453
-rw-r--r--js/src/jit/mips-shared/CodeGenerator-mips-shared.h157
-rw-r--r--js/src/jit/mips-shared/LIR-mips-shared.h380
-rw-r--r--js/src/jit/mips-shared/Lowering-mips-shared.cpp1036
-rw-r--r--js/src/jit/mips-shared/Lowering-mips-shared.h89
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h1338
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp3355
-rw-r--r--js/src/jit/mips-shared/MacroAssembler-mips-shared.h258
-rw-r--r--js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp207
-rw-r--r--js/src/jit/mips-shared/MoveEmitter-mips-shared.h73
-rw-r--r--js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h84
-rw-r--r--js/src/jit/mips-shared/SharedICHelpers-mips-shared.h88
18 files changed, 14132 insertions, 0 deletions
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.cpp b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
new file mode 100644
index 0000000000..fb28b298ea
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.cpp
@@ -0,0 +1,121 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Architecture-mips-shared.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/mips32/Simulator-mips32.h"
+#include "jit/mips64/Simulator-mips64.h"
+#include "jit/RegisterSets.h"
+
+#if defined(__linux__) && !defined(JS_SIMULATOR)
+# include <sys/cachectl.h>
+#endif
+
+#define HWCAP_MIPS (1 << 28)
+#define HWCAP_LOONGSON (1 << 27)
+#define HWCAP_R2 (1 << 26)
+#define HWCAP_FPU (1 << 0)
+
+namespace js {
+namespace jit {
+
+static uint32_t get_mips_flags() {
+ uint32_t flags = HWCAP_MIPS;
+
+#if defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
+ flags |= HWCAP_FPU;
+ flags |= HWCAP_R2;
+#else
+# ifdef __linux__
+ FILE* fp = fopen("/proc/cpuinfo", "r");
+ if (!fp) {
+ return flags;
+ }
+
+ char buf[1024] = {};
+ (void)fread(buf, sizeof(char), sizeof(buf) - 1, fp);
+ fclose(fp);
+ if (strstr(buf, "FPU")) {
+ flags |= HWCAP_FPU;
+ }
+ if (strstr(buf, "Loongson")) {
+ flags |= HWCAP_LOONGSON;
+ }
+ if (strstr(buf, "mips32r2") || strstr(buf, "mips64r2")) {
+ flags |= HWCAP_R2;
+ }
+# endif
+#endif // JS_SIMULATOR_MIPS32 || JS_SIMULATOR_MIPS64
+ return flags;
+}
+
+static bool check_fpu() { return mips_private::Flags & HWCAP_FPU; }
+
+static bool check_loongson() { return mips_private::Flags & HWCAP_LOONGSON; }
+
+static bool check_r2() { return mips_private::Flags & HWCAP_R2; }
+
+namespace mips_private {
+// Cache a local copy so we only have to read /proc/cpuinfo once.
+uint32_t Flags = get_mips_flags();
+bool hasFPU = check_fpu();
+;
+bool isLoongson = check_loongson();
+bool hasR2 = check_r2();
+} // namespace mips_private
+
// CPU flags are eagerly computed during static initialization of the
// mips_private globals in this file, so the answer is always yes.
bool CPUFlagsHaveBeenComputed() { return true; }
+
+Registers::Code Registers::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
// Flush the instruction cache for [code, code + size) after code has been
// written or patched, so the CPU will execute the new instructions.
void FlushICache(void* code, size_t size) {
#if defined(JS_SIMULATOR)
  js::jit::SimulatorProcess::FlushICache(code, size);

#elif defined(_MIPS_ARCH_LOONGSON3A)
  // On Loongson3-CPUs, The cache flushed automatically
  // by hardware. Just need to execute an instruction hazard.
  uintptr_t tmp;
  asm volatile(
      ".set push \n"
      ".set noreorder \n"
      "move %[tmp], $ra \n"
      "bal 1f \n"
      "daddiu $ra, 8 \n"
      "1: \n"
      "jr.hb $ra \n"
      "move $ra, %[tmp] \n"
      ".set pop\n"
      : [tmp] "=&r"(tmp));

#elif defined(__GNUC__)
  // Let the compiler's builtin flush the range.
  intptr_t end = reinterpret_cast<intptr_t>(code) + size;
  __builtin___clear_cache(reinterpret_cast<char*>(code),
                          reinterpret_cast<char*>(end));

#else
  // Fall back to the libc cache-flush wrapper (sys/cachectl.h).
  _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);

#endif
}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips-shared/Architecture-mips-shared.h b/js/src/jit/mips-shared/Architecture-mips-shared.h
new file mode 100644
index 0000000000..1749a2fe6c
--- /dev/null
+++ b/js/src/jit/mips-shared/Architecture-mips-shared.h
@@ -0,0 +1,341 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Architecture_mips_shared_h
+#define jit_mips_shared_Architecture_mips_shared_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <limits.h>
+#include <stdint.h>
+
+#include "jit/shared/Architecture-shared.h"
+
+#include "js/Utility.h"
+
+// gcc appears to use _mips_hard_float to denote
+// that the target is a hard-float target.
+#ifdef _mips_hard_float
+# define JS_CODEGEN_MIPS_HARDFP
+#endif
+
+#if (defined(_MIPS_SIM) && (_MIPS_SIM == _ABIO32)) || \
+ defined(JS_SIMULATOR_MIPS32)
+# define USES_O32_ABI
+#elif (defined(_MIPS_SIM) && (_MIPS_SIM == _ABI64)) || \
+ defined(JS_SIMULATOR_MIPS64)
+# define USES_N64_ABI
+#else
+# error "Unsupported ABI"
+#endif
+
+#if (defined(__mips_isa_rev) && (__mips_isa_rev >= 6))
+# define MIPSR6
+#endif
+
+namespace js {
+namespace jit {
+
+// How far forward/back can a jump go? Provide a generous buffer for thunks.
+static const uint32_t JumpImmediateRange = UINT32_MAX;
+
// General-purpose (integer) registers for MIPS, shared between the O32 and
// N64 ABIs.
class Registers {
 public:
  // Raw register numbers r0..r31 plus their conventional ABI aliases.
  enum RegisterID {
    r0 = 0,
    r1,
    r2,
    r3,
    r4,
    r5,
    r6,
    r7,
    r8,
    r9,
    r10,
    r11,
    r12,
    r13,
    r14,
    r15,
    r16,
    r17,
    r18,
    r19,
    r20,
    r21,
    r22,
    r23,
    r24,
    r25,
    r26,
    r27,
    r28,
    r29,
    r30,
    r31,
    zero = r0,
    at = r1,
    v0 = r2,
    v1 = r3,
    a0 = r4,
    a1 = r5,
    a2 = r6,
    a3 = r7,
    // O32 and N64 assign r8..r15 differently; the taN names below give an
    // ABI-neutral alias for the last four of them.
#if defined(USES_O32_ABI)
    t0 = r8,
    t1 = r9,
    t2 = r10,
    t3 = r11,
    t4 = r12,
    t5 = r13,
    t6 = r14,
    t7 = r15,
    ta0 = t4,
    ta1 = t5,
    ta2 = t6,
    ta3 = t7,
#elif defined(USES_N64_ABI)
    a4 = r8,
    a5 = r9,
    a6 = r10,
    a7 = r11,
    t0 = r12,
    t1 = r13,
    t2 = r14,
    t3 = r15,
    ta0 = a4,
    ta1 = a5,
    ta2 = a6,
    ta3 = a7,
#endif
    s0 = r16,
    s1 = r17,
    s2 = r18,
    s3 = r19,
    s4 = r20,
    s5 = r21,
    s6 = r22,
    s7 = r23,
    t8 = r24,
    t9 = r25,
    k0 = r26,
    k1 = r27,
    gp = r28,
    sp = r29,
    fp = r30,
    ra = r31,
    invalid_reg
  };
  typedef uint8_t Code;
  typedef RegisterID Encoding;

  // Content spilled during bailouts.
  union RegisterContent {
    uintptr_t r;
  };

  // Printable names indexed by register code; defined in the .cpp file.
  static const char* const RegNames[];
  static const char* GetName(Code code) {
    MOZ_ASSERT(code < Total);
    return RegNames[code];
  }
  static const char* GetName(Encoding i) { return GetName(Code(i)); }

  // Reverse lookup of a register code by its printable name.
  static Code FromName(const char* name);

  static const Encoding StackPointer = sp;
  static const Encoding Invalid = invalid_reg;

  static const uint32_t Total = 32;
  static const uint32_t Allocatable;

  // One bit per register; bit i corresponds to register code i.
  typedef uint32_t SetType;
  static const SetType AllMask = 0xffffffff;
  static const SetType SharedArgRegMask =
      (1 << a0) | (1 << a1) | (1 << a2) | (1 << a3);
  static const SetType ArgRegMask;

  // Registers that may be clobbered across calls.
  static const SetType VolatileMask =
      (1 << Registers::v0) | (1 << Registers::v1) | (1 << Registers::a0) |
      (1 << Registers::a1) | (1 << Registers::a2) | (1 << Registers::a3) |
      (1 << Registers::t0) | (1 << Registers::t1) | (1 << Registers::t2) |
      (1 << Registers::t3) | (1 << Registers::ta0) | (1 << Registers::ta1) |
      (1 << Registers::ta2) | (1 << Registers::ta3);

  // We use this constant to save registers when entering functions. This
  // is why $ra is added here even though it is not "Non Volatile".
  static const SetType NonVolatileMask =
      (1 << Registers::s0) | (1 << Registers::s1) | (1 << Registers::s2) |
      (1 << Registers::s3) | (1 << Registers::s4) | (1 << Registers::s5) |
      (1 << Registers::s6) | (1 << Registers::s7) | (1 << Registers::fp) |
      (1 << Registers::ra);

  static const SetType WrapperMask = VolatileMask |          // = arguments
                                     (1 << Registers::t0) |  // = outReg
                                     (1 << Registers::t1);   // = argBase

  // Registers the allocator must never hand out (fixed roles or scratch).
  static const SetType NonAllocatableMask =
      (1 << Registers::zero) | (1 << Registers::at) |  // at = scratch
      (1 << Registers::t8) |                           // t8 = scratch
      (1 << Registers::t9) |                           // t9 = scratch
      (1 << Registers::k0) | (1 << Registers::k1) | (1 << Registers::gp) |
      (1 << Registers::sp) | (1 << Registers::ra) | (1 << Registers::fp);

  // Registers returned from a JS -> JS call.
  static const SetType JSCallMask;

  // Registers returned from a JS -> C call.
  static const SetType SharedCallMask = (1 << Registers::v0);
  static const SetType CallMask;

  static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;

  // Population count and first/last set-bit helpers for register sets.
  static uint32_t SetSize(SetType x) {
    static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
    return mozilla::CountPopulation32(x);
  }
  static uint32_t FirstBit(SetType x) {
    return mozilla::CountTrailingZeroes32(x);
  }
  static uint32_t LastBit(SetType x) {
    return 31 - mozilla::CountLeadingZeroes32(x);
  }
};
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
// FPU registers f0..f31, shared between MIPS32 and MIPS64.
class FloatRegistersMIPSShared {
 public:
  enum FPRegisterID {
    f0 = 0,
    f1,
    f2,
    f3,
    f4,
    f5,
    f6,
    f7,
    f8,
    f9,
    f10,
    f11,
    f12,
    f13,
    f14,
    f15,
    f16,
    f17,
    f18,
    f19,
    f20,
    f21,
    f22,
    f23,
    f24,
    f25,
    f26,
    f27,
    f28,
    f29,
    f30,
    f31,
    invalid_freg
  };
  typedef uint32_t Code;
  typedef FPRegisterID Encoding;

  // Content spilled during bailouts.
  union RegisterContent {
    double d;
  };

  static const char* GetName(Encoding code) {
    // NOTE(review): unlike Registers::GetName, this does not assert that
    // `code` is in range before indexing the table.
    static const char* const Names[] = {
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
        "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
        "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
    return Names[code];
  }

  static const Encoding Invalid = invalid_freg;

  // One bit per register in a set; MIPS64 needs the wider type.
#if defined(JS_CODEGEN_MIPS32)
  typedef uint32_t SetType;
#elif defined(JS_CODEGEN_MIPS64)
  typedef uint64_t SetType;
#endif
};
+
// A spill slot must be able to hold either a GPR payload or an FPU payload.
static const uint32_t SpillSlotSize =
    std::max(sizeof(Registers::RegisterContent),
             sizeof(FloatRegistersMIPSShared::RegisterContent));

template <typename T>
class TypedRegisterSet;
+
// FPU register-set helpers common to MIPS32 and MIPS64; the set width
// follows FloatRegistersMIPSShared::SetType.
class FloatRegisterMIPSShared {
 public:
  bool isSimd128() const { return false; }

  typedef FloatRegistersMIPSShared::SetType SetType;

#if defined(JS_CODEGEN_MIPS32)
  // 32-bit register sets.
  static uint32_t SetSize(SetType x) {
    static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
    return mozilla::CountPopulation32(x);
  }
  static uint32_t FirstBit(SetType x) {
    static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
    return mozilla::CountTrailingZeroes32(x);
  }
  static uint32_t LastBit(SetType x) {
    static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
    return 31 - mozilla::CountLeadingZeroes32(x);
  }
#elif defined(JS_CODEGEN_MIPS64)
  // 64-bit register sets.
  static uint32_t SetSize(SetType x) {
    static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
    return mozilla::CountPopulation64(x);
  }
  static uint32_t FirstBit(SetType x) {
    static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
    return mozilla::CountTrailingZeroes64(x);
  }
  static uint32_t LastBit(SetType x) {
    static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
    return 63 - mozilla::CountLeadingZeroes64(x);
  }
#endif
};
+
namespace mips_private {
// CPU feature flags, computed once during static initialization in
// Architecture-mips-shared.cpp.
extern uint32_t Flags;
extern bool hasFPU;
extern bool isLoongson;
extern bool hasR2;
}  // namespace mips_private

// Accessors for the cached CPU feature flags.
inline uint32_t GetMIPSFlags() { return mips_private::Flags; }
inline bool hasFPU() { return mips_private::hasFPU; }
inline bool isLoongson() { return mips_private::isLoongson; }
inline bool hasR2() { return mips_private::hasR2; }

// MIPS doesn't have double registers that can NOT be treated as float32.
inline bool hasUnaliasedDouble() { return false; }

// MIPS64 doesn't support it and on MIPS32 we don't allocate odd single fp
// registers thus not exposing multi aliasing to the jit.
// See comments in Architecture-mips32.h.
inline bool hasMultiAlias() { return false; }
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Architecture_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.cpp b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
new file mode 100644
index 0000000000..11f834c3c7
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.cpp
@@ -0,0 +1,2094 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Assembler-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "gc/Marking.h"
+#include "jit/ExecutableAllocator.h"
+#include "vm/Realm.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
// Encode a standard register for use in the rs, rt, rd, or rz field of an
// instruction word. These should never be called with an InvalidReg.
uint32_t js::jit::RS(Register r) {
  MOZ_ASSERT((r.code() & ~RegMask) == 0);
  return r.code() << RSShift;
}

uint32_t js::jit::RT(Register r) {
  MOZ_ASSERT((r.code() & ~RegMask) == 0);
  return r.code() << RTShift;
}

uint32_t js::jit::RD(Register r) {
  MOZ_ASSERT((r.code() & ~RegMask) == 0);
  return r.code() << RDShift;
}

uint32_t js::jit::RZ(Register r) {
  MOZ_ASSERT((r.code() & ~RegMask) == 0);
  return r.code() << RZShift;
}

// Encode a 5-bit shift amount into the sa field.
uint32_t js::jit::SA(uint32_t value) {
  MOZ_ASSERT(value < 32);
  return value << SAShift;
}

// Encode a 5-bit FPU register number into the fs field.
uint32_t js::jit::FS(uint32_t value) {
  MOZ_ASSERT(value < 32);
  return value << FSShift;
}
+
// Decode the register held in the corresponding field of an instruction.
Register js::jit::toRS(Instruction& i) {
  return Register::FromCode((i.encode() & RSMask) >> RSShift);
}

Register js::jit::toRT(Instruction& i) {
  return Register::FromCode((i.encode() & RTMask) >> RTShift);
}

Register js::jit::toRD(Instruction& i) {
  return Register::FromCode((i.encode() & RDMask) >> RDShift);
}

Register js::jit::toR(Instruction& i) {
  return Register::FromCode(i.encode() & RegMask);
}

// Extract this instruction's 16-bit branch-offset immediate into *dest.
void InstImm::extractImm16(BOffImm16* dest) { *dest = BOffImm16(*this); }
+
// Mark assembly complete; no further instructions may be emitted after this.
void AssemblerMIPSShared::finish() {
  MOZ_ASSERT(!isFinished);
  isFinished = true;
}

// Append pre-encoded machine code bytes directly to the buffer.
bool AssemblerMIPSShared::appendRawCode(const uint8_t* code, size_t numBytes) {
  return m_buffer.appendRawCode(code, numBytes);
}

bool AssemblerMIPSShared::reserve(size_t size) {
  // This buffer uses fixed-size chunks so there's no point in reserving
  // now vs. on-demand.
  return !oom();
}

bool AssemblerMIPSShared::swapBuffer(wasm::Bytes& bytes) {
  // For now, specialize to the one use case. As long as wasm::Bytes is a
  // Vector, not a linked-list of chunks, there's not much we can do other
  // than copy.
  MOZ_ASSERT(bytes.empty());
  if (!bytes.resize(bytesNeeded())) {
    return false;
  }
  m_buffer.executableCopy(bytes.begin());
  return true;
}

// Copy the jump relocation table into dest (no-op when the table is empty).
void AssemblerMIPSShared::copyJumpRelocationTable(uint8_t* dest) {
  if (jumpRelocations_.length()) {
    memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
  }
}

// Copy the data relocation table into dest (no-op when the table is empty).
void AssemblerMIPSShared::copyDataRelocationTable(uint8_t* dest) {
  if (dataRelocations_.length()) {
    memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
  }
}
+
// Return the condition that holds exactly when `cond` does not.
AssemblerMIPSShared::Condition AssemblerMIPSShared::InvertCondition(
    Condition cond) {
  switch (cond) {
    case Equal:
      return NotEqual;
    case NotEqual:
      return Equal;
    case Zero:
      return NonZero;
    case NonZero:
      return Zero;
    case LessThan:
      return GreaterThanOrEqual;
    case LessThanOrEqual:
      return GreaterThan;
    case GreaterThan:
      return LessThanOrEqual;
    case GreaterThanOrEqual:
      return LessThan;
    case Above:
      return BelowOrEqual;
    case AboveOrEqual:
      return Below;
    case Below:
      return AboveOrEqual;
    case BelowOrEqual:
      return Above;
    case Signed:
      return NotSigned;
    case NotSigned:
      return Signed;
    default:
      MOZ_CRASH("unexpected condition");
  }
}

// Invert a floating-point condition. Inversion must also flip the
// ordered/unordered behavior: e.g. !(a < b) is "a >= b or unordered".
AssemblerMIPSShared::DoubleCondition AssemblerMIPSShared::InvertCondition(
    DoubleCondition cond) {
  switch (cond) {
    case DoubleOrdered:
      return DoubleUnordered;
    case DoubleEqual:
      return DoubleNotEqualOrUnordered;
    case DoubleNotEqual:
      return DoubleEqualOrUnordered;
    case DoubleGreaterThan:
      return DoubleLessThanOrEqualOrUnordered;
    case DoubleGreaterThanOrEqual:
      return DoubleLessThanOrUnordered;
    case DoubleLessThan:
      return DoubleGreaterThanOrEqualOrUnordered;
    case DoubleLessThanOrEqual:
      return DoubleGreaterThanOrUnordered;
    case DoubleUnordered:
      return DoubleOrdered;
    case DoubleEqualOrUnordered:
      return DoubleNotEqual;
    case DoubleNotEqualOrUnordered:
      return DoubleEqual;
    case DoubleGreaterThanOrUnordered:
      return DoubleLessThanOrEqual;
    case DoubleGreaterThanOrEqualOrUnordered:
      return DoubleLessThan;
    case DoubleLessThanOrUnordered:
      return DoubleGreaterThanOrEqual;
    case DoubleLessThanOrEqualOrUnordered:
      return DoubleGreaterThan;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
+
// Extract the low 16 bits (branch offset field) of an immediate instruction.
BOffImm16::BOffImm16(InstImm inst) : data(inst.encode() & Imm16Mask) {}

// Resolve the branch target: sign-extend the 16-bit word offset, then add 1
// because MIPS branch offsets are relative to the instruction after the
// branch.
Instruction* BOffImm16::getDest(Instruction* src) const {
  return &src[(((int32_t)data << 16) >> 16) + 1];
}
+
// True when any of the underlying buffers has failed to allocate.
bool AssemblerMIPSShared::oom() const {
  return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
         dataRelocations_.oom();
}

// Size of the instruction stream, in bytes.
size_t AssemblerMIPSShared::size() const { return m_buffer.size(); }

// Size of the relocation table, in bytes.
size_t AssemblerMIPSShared::jumpRelocationTableBytes() const {
  return jumpRelocations_.length();
}

size_t AssemblerMIPSShared::dataRelocationTableBytes() const {
  return dataRelocations_.length();
}

// Size of the data table, in bytes.
size_t AssemblerMIPSShared::bytesNeeded() const {
  return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
}
+
// write a blob of binary into the instruction stream
BufferOffset AssemblerMIPSShared::writeInst(uint32_t x, uint32_t* dest) {
  MOZ_ASSERT(hasCreator());
  // With no explicit destination, append to the buffer and return the new
  // offset; otherwise patch the given location in place and return an
  // unassigned offset.
  if (dest == nullptr) {
    return m_buffer.putInt(x);
  }

  WriteInstStatic(x, dest);
  return BufferOffset();
}

// Write one instruction word to an arbitrary (non-buffer) location.
void AssemblerMIPSShared::WriteInstStatic(uint32_t x, uint32_t* dest) {
  MOZ_ASSERT(dest != nullptr);
  *dest = x;
}
+
// Align the buffer using instructions that fault if executed.
BufferOffset AssemblerMIPSShared::haltingAlign(int alignment) {
  // TODO: Implement a proper halting align.
  return nopAlign(alignment);
}

// Pad the buffer with nops until it is `alignment`-byte aligned. Returns the
// offset of the first emitted nop, or an unassigned offset when already
// aligned.
BufferOffset AssemblerMIPSShared::nopAlign(int alignment) {
  BufferOffset ret;
  MOZ_ASSERT(m_buffer.isAligned(4));
  // Common case: 8-byte alignment needs at most a single nop.
  if (alignment == 8) {
    if (!m_buffer.isAligned(alignment)) {
      BufferOffset tmp = as_nop();
      if (!ret.assigned()) {
        ret = tmp;
      }
    }
  } else {
    MOZ_ASSERT((alignment & (alignment - 1)) == 0);  // power of two
    while (size() & (alignment - 1)) {
      BufferOffset tmp = as_nop();
      if (!ret.assigned()) {
        ret = tmp;
      }
    }
  }
  return ret;
}
+
// nop encodes as sll $zero, $zero, 0 (an all-zero special-opcode word).
BufferOffset AssemblerMIPSShared::as_nop() {
  spew("nop");
  return writeInst(op_special | ff_sll);
}

// Logical operations.
BufferOffset AssemblerMIPSShared::as_and(Register rd, Register rs,
                                         Register rt) {
  spew("and %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
  return writeInst(InstReg(op_special, rs, rt, rd, ff_and).encode());
}

BufferOffset AssemblerMIPSShared::as_or(Register rd, Register rs, Register rt) {
  spew("or %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
  return writeInst(InstReg(op_special, rs, rt, rd, ff_or).encode());
}

BufferOffset AssemblerMIPSShared::as_xor(Register rd, Register rs,
                                         Register rt) {
  spew("xor %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
  return writeInst(InstReg(op_special, rs, rt, rd, ff_xor).encode());
}

BufferOffset AssemblerMIPSShared::as_nor(Register rd, Register rs,
                                         Register rt) {
  spew("nor %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
  return writeInst(InstReg(op_special, rs, rt, rd, ff_nor).encode());
}

// Immediate forms take an unsigned (zero-extended) 16-bit operand.
BufferOffset AssemblerMIPSShared::as_andi(Register rd, Register rs, int32_t j) {
  MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
  spew("andi %3s,%3s,0x%x", rd.name(), rs.name(), j);
  return writeInst(InstImm(op_andi, rs, rd, Imm16(j)).encode());
}

BufferOffset AssemblerMIPSShared::as_ori(Register rd, Register rs, int32_t j) {
  MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
  spew("ori %3s,%3s,0x%x", rd.name(), rs.name(), j);
  return writeInst(InstImm(op_ori, rs, rd, Imm16(j)).encode());
}

BufferOffset AssemblerMIPSShared::as_xori(Register rd, Register rs, int32_t j) {
  MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
  spew("xori %3s,%3s,0x%x", rd.name(), rs.name(), j);
  return writeInst(InstImm(op_xori, rs, rd, Imm16(j)).encode());
}

// Load the 16-bit immediate into the upper half of rd.
BufferOffset AssemblerMIPSShared::as_lui(Register rd, int32_t j) {
  MOZ_ASSERT(Imm16::IsInUnsignedRange(j));
  spew("lui %3s,0x%x", rd.name(), j);
  return writeInst(InstImm(op_lui, zero, rd, Imm16(j)).encode());
}
+
// Branch and jump instructions
// bal: branch-and-link, encoded as bgezal with rs = $zero (always taken).
BufferOffset AssemblerMIPSShared::as_bal(BOffImm16 off) {
  spew("bal %d", off.decode());
  BufferOffset bo =
      writeInst(InstImm(op_regimm, zero, rt_bgezal, off).encode());
  return bo;
}

// b: unconditional branch, encoded as beq $zero, $zero.
BufferOffset AssemblerMIPSShared::as_b(BOffImm16 off) {
  spew("b %d", off.decode());
  BufferOffset bo = writeInst(InstImm(op_beq, zero, zero, off).encode());
  return bo;
}

// Return a branch-instruction template with a zero offset; the offset is
// patched in later when the target is known.
InstImm AssemblerMIPSShared::getBranchCode(JumpOrCall jumpOrCall) {
  if (jumpOrCall == BranchIsCall) {
    return InstImm(op_regimm, zero, rt_bgezal, BOffImm16(0));
  }

  return InstImm(op_beq, zero, zero, BOffImm16(0));
}
+
// Two-register compare-and-branch template; only (in)equality can be tested
// directly by a MIPS branch.
InstImm AssemblerMIPSShared::getBranchCode(Register s, Register t,
                                           Condition c) {
  MOZ_ASSERT(c == AssemblerMIPSShared::Equal ||
             c == AssemblerMIPSShared::NotEqual);
  return InstImm(c == AssemblerMIPSShared::Equal ? op_beq : op_bne, s, t,
                 BOffImm16(0));
}

// Single-register branch template: the register is compared against zero.
InstImm AssemblerMIPSShared::getBranchCode(Register s, Condition c) {
  switch (c) {
    // Unsigned "<= 0" is the same as "== 0", hence BelowOrEqual uses beq.
    case AssemblerMIPSShared::Equal:
    case AssemblerMIPSShared::Zero:
    case AssemblerMIPSShared::BelowOrEqual:
      return InstImm(op_beq, s, zero, BOffImm16(0));
    // Unsigned "> 0" is the same as "!= 0", hence Above uses bne.
    case AssemblerMIPSShared::NotEqual:
    case AssemblerMIPSShared::NonZero:
    case AssemblerMIPSShared::Above:
      return InstImm(op_bne, s, zero, BOffImm16(0));
    case AssemblerMIPSShared::GreaterThan:
      return InstImm(op_bgtz, s, zero, BOffImm16(0));
    // NotSigned means the sign bit is clear, i.e. value >= 0.
    case AssemblerMIPSShared::GreaterThanOrEqual:
    case AssemblerMIPSShared::NotSigned:
      return InstImm(op_regimm, s, rt_bgez, BOffImm16(0));
    // Signed means the sign bit is set, i.e. value < 0.
    case AssemblerMIPSShared::LessThan:
    case AssemblerMIPSShared::Signed:
      return InstImm(op_regimm, s, rt_bltz, BOffImm16(0));
    case AssemblerMIPSShared::LessThanOrEqual:
      return InstImm(op_blez, s, zero, BOffImm16(0));
    default:
      MOZ_CRASH("Condition not supported.");
  }
}
+
// Floating-point branch template testing the given FP condition bit.
InstImm AssemblerMIPSShared::getBranchCode(FloatTestKind testKind,
                                           FPConditionBit fcc) {
  // NOTE(review): 'fcc && FccMask' is a logical AND, which is nonzero for
  // every fcc other than 0 — so this assertion rejects all condition bits
  // except FCC0 in debug builds. A bitwise range check such as
  // !(fcc & ~FccMask) was almost certainly intended; confirm against the
  // FccMask definition in the header before changing.
  MOZ_ASSERT(!(fcc && FccMask));
#ifdef MIPSR6
  RSField rsField = ((testKind == TestForTrue ? rs_t : rs_f));

  return InstImm(op_cop1, rsField, FloatRegisters::f24 << 16, BOffImm16(0));
#else
  uint32_t rtField = ((testKind == TestForTrue ? 1 : 0) | (fcc << FccShift))
                     << RTShift;

  return InstImm(op_cop1, rs_bc1, rtField, BOffImm16(0));
#endif
}
+
// Region jumps with a 26-bit word-offset target.
BufferOffset AssemblerMIPSShared::as_j(JOffImm26 off) {
  spew("j 0x%x", off.decode());
  BufferOffset bo = writeInst(InstJump(op_j, off).encode());
  return bo;
}
BufferOffset AssemblerMIPSShared::as_jal(JOffImm26 off) {
  spew("jal 0x%x", off.decode());
  BufferOffset bo = writeInst(InstJump(op_jal, off).encode());
  return bo;
}

BufferOffset AssemblerMIPSShared::as_jr(Register rs) {
  spew("jr %3s", rs.name());
#ifdef MIPSR6
  // R6 removed the dedicated jr encoding; jalr with rd = $zero is the
  // equivalent (no link register is written).
  BufferOffset bo =
      writeInst(InstReg(op_special, rs, zero, zero, ff_jalr).encode());
#else
  BufferOffset bo =
      writeInst(InstReg(op_special, rs, zero, zero, ff_jr).encode());
#endif
  return bo;
}
BufferOffset AssemblerMIPSShared::as_jalr(Register rs) {
  spew("jalr %3s", rs.name());
  BufferOffset bo =
      writeInst(InstReg(op_special, rs, zero, ra, ff_jalr).encode());
  return bo;
}
+
+// Arithmetic instructions
+BufferOffset AssemblerMIPSShared::as_addu(Register rd, Register rs,
+ Register rt) {
+ spew("addu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_addu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_addiu(Register rd, Register rs,
+ int32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ spew("addiu %3s,%3s,0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_addiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_daddu(Register rd, Register rs,
+ Register rt) {
+ spew("daddu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_daddu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_daddiu(Register rd, Register rs,
+ int32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ spew("daddiu %3s,%3s,0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_daddiu, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_subu(Register rd, Register rs,
+ Register rt) {
+ spew("subu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_subu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsubu(Register rd, Register rs,
+ Register rt) {
+ spew("dsubu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsubu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mult(Register rs, Register rt) {
+ spew("mult %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_mult).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_multu(Register rs, Register rt) {
+ spew("multu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_multu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmult(Register rs, Register rt) {
+ spew("dmult %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_dmult).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmultu(Register rs, Register rt) {
+ spew("dmultu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_dmultu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_div(Register rs, Register rt) {
+ spew("div %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_div).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_div(Register rd, Register rs,
+ Register rt) {
+ spew("div %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_div).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_divu(Register rs, Register rt) {
+ spew("divu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_divu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_divu(Register rd, Register rs,
+ Register rt) {
+ spew("divu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_divu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mod(Register rd, Register rs,
+ Register rt) {
+ spew("mod %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_mod).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_modu(Register rd, Register rs,
+ Register rt) {
+ spew("modu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_modu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddiv(Register rs, Register rt) {
+ spew("ddiv %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_ddiv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddiv(Register rd, Register rs,
+ Register rt) {
+ spew("ddiv %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_ddiv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddivu(Register rs, Register rt) {
+ spew("ddivu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, ff_ddivu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ddivu(Register rd, Register rs,
+ Register rt) {
+ spew("ddivu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_ddivu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mul(Register rd, Register rs,
+ Register rt) {
+ spew("mul %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_mul).encode());
+#else
+ return writeInst(InstReg(op_special2, rs, rt, rd, ff_mul).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_muh(Register rd, Register rs,
+ Register rt) {
+ spew("muh %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_muh).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mulu(Register rd, Register rs,
+ Register rt) {
+ spew("mulu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_mulu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_muhu(Register rd, Register rs,
+ Register rt) {
+ spew("muhu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_muhu).encode());
+}
+
+// 64-bit (doubleword) R6 multiply/modulo variants. Same SPECIAL-opcode
+// scheme as the 32-bit forms above: field 0x2 selects the low result,
+// 0x3 the high result (or the remainder for dmod/dmodu).
+BufferOffset AssemblerMIPSShared::as_dmul(Register rd, Register rs,
+ Register rt) {
+ spew("dmul %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_dmul).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmuh(Register rd, Register rs,
+ Register rt) {
+ spew("dmuh %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmuh).encode());
+}
+
+// NOTE(review): as_dmulu/as_dmuhu declare their parameters as (rd, rt, rs),
+// the reverse of every sibling's (rd, rs, rt). Multiplication is
+// commutative so the emitted instruction is equivalent, but the swap is
+// confusing; fixing it requires touching the matching header declaration.
+BufferOffset AssemblerMIPSShared::as_dmulu(Register rd, Register rt,
+ Register rs) {
+ spew("dmulu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x2, ff_dmulu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmuhu(Register rd, Register rt,
+ Register rs) {
+ spew("dmuhu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmuhu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmod(Register rd, Register rs,
+ Register rt) {
+ spew("dmod %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmod).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmodu(Register rd, Register rs,
+ Register rt) {
+ spew("dmodu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x3, ff_dmodu).encode());
+}
+
+// Multiply-accumulate into HI/LO (pre-R6 SPECIAL2 encodings; no
+// destination GPR -- results accumulate in the HI/LO register pair).
+BufferOffset AssemblerMIPSShared::as_madd(Register rs, Register rt) {
+ spew("madd %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special2, rs, rt, ff_madd).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_maddu(Register rs, Register rt) {
+ spew("maddu %3s,%3s", rs.name(), rt.name());
+ return writeInst(InstReg(op_special2, rs, rt, ff_maddu).encode());
+}
+
+// Shift instructions
+// Immediate shifts place the shift amount in the sa field (rs must be
+// zero); variable shifts take the amount from rs. The *32 variants encode
+// sa - 32 because the 5-bit sa field cannot hold amounts >= 32.
+BufferOffset AssemblerMIPSShared::as_sll(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("sll %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sll).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsll(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("dsll %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsll).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsll32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("dsll32 %3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ return writeInst(
+ InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsll32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sllv(Register rd, Register rt,
+ Register rs) {
+ spew("sllv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sllv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsllv(Register rd, Register rt,
+ Register rs) {
+ spew("dsllv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsllv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_srl(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("srl %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrl(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("dsrl %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrl32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("dsrl32 %3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ return writeInst(
+ InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_srlv(Register rd, Register rt,
+ Register rs) {
+ spew("srlv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srlv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrlv(Register rd, Register rt,
+ Register rs) {
+ spew("dsrlv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrlv).encode());
+}
+
+// Arithmetic right shifts and rotates. Rotates are R2+ instructions:
+// the immediate forms reuse the srl/dsrl function codes with the rs field
+// set to 1 (rs_one), and the variable forms reuse srlv/dsrlv with the
+// sa field set to 1.
+BufferOffset AssemblerMIPSShared::as_sra(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("sra %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_sra).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsra(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("dsra %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ return writeInst(InstReg(op_special, rs_zero, rt, rd, sa, ff_dsra).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsra32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("dsra32 %3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ return writeInst(
+ InstReg(op_special, rs_zero, rt, rd, sa - 32, ff_dsra32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_srav(Register rd, Register rt,
+ Register rs) {
+ spew("srav %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_srav).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsrav(Register rd, Register rt,
+ Register rs) {
+ spew("dsrav %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_dsrav).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_rotr(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("rotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_srl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_drotr(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(sa < 32);
+ spew("drotr %3s,%3s, 0x%x", rd.name(), rt.name(), sa);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs_one, rt, rd, sa, ff_dsrl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_drotr32(Register rd, Register rt,
+ uint16_t sa) {
+ MOZ_ASSERT(31 < sa && sa < 64);
+ spew("drotr32%3s,%3s, 0x%x", rd.name(), rt.name(), sa - 32);
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_special, rs_one, rt, rd, sa - 32, ff_dsrl32).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_rotrv(Register rd, Register rt,
+ Register rs) {
+ spew("rotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_srlv).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_drotrv(Register rd, Register rt,
+ Register rs) {
+ spew("drotrv %3s,%3s,%3s", rd.name(), rt.name(), rs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special, rs, rt, rd, 1, ff_dsrlv).encode());
+}
+
+// Load and store instructions
+// All take a base register `rs` and a signed 16-bit displacement `off`
+// (caller is responsible for range; no assert here, Imm16 truncates).
+BufferOffset AssemblerMIPSShared::as_lb(Register rd, Register rs, int16_t off) {
+ spew("lb %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lbu(Register rd, Register rs,
+ int16_t off) {
+ spew("lbu %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lbu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lh(Register rd, Register rs, int16_t off) {
+ spew("lh %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lhu(Register rd, Register rs,
+ int16_t off) {
+ spew("lhu %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lhu, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lw(Register rd, Register rs, int16_t off) {
+ spew("lw %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lwu(Register rd, Register rs,
+ int16_t off) {
+ spew("lwu %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lwu, rs, rd, Imm16(off)).encode());
+}
+
+// Unaligned-access halves (left/right); removed in R6.
+BufferOffset AssemblerMIPSShared::as_lwl(Register rd, Register rs,
+ int16_t off) {
+ spew("lwl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lwl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lwr(Register rd, Register rs,
+ int16_t off) {
+ spew("lwr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_lwr, rs, rd, Imm16(off)).encode());
+}
+
+// Load-linked (LL/LLD). On R6 the encoding moved to SPECIAL3.
+// NOTE(review): the R6 path does not encode `off` at all -- presumably
+// all callers pass off == 0 on R6; confirm at call sites.
+BufferOffset AssemblerMIPSShared::as_ll(Register rd, Register rs, int16_t off) {
+ spew("ll %3s, (0x%x)%2s", rd.name(), off, rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special3, rs, rd, ff_ll).encode());
+#else
+ return writeInst(InstImm(op_ll, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_lld(Register rd, Register rs,
+ int16_t off) {
+ spew("lld %3s, (0x%x)%2s", rd.name(), off, rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special3, rs, rd, ff_lld).encode());
+#else
+ return writeInst(InstImm(op_lld, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_ld(Register rd, Register rs, int16_t off) {
+ spew("ld %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_ld, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ldl(Register rd, Register rs,
+ int16_t off) {
+ spew("ldl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_ldl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ldr(Register rd, Register rs,
+ int16_t off) {
+ spew("ldr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_ldr, rs, rd, Imm16(off)).encode());
+}
+
+// Stores. Note the argument convention mirrors the loads: `rd` is the
+// value register, `rs` the base.
+BufferOffset AssemblerMIPSShared::as_sb(Register rd, Register rs, int16_t off) {
+ spew("sb %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sb, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sh(Register rd, Register rs, int16_t off) {
+ spew("sh %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sh, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sw(Register rd, Register rs, int16_t off) {
+ spew("sw %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sw, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_swl(Register rd, Register rs,
+ int16_t off) {
+ spew("swl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_swl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_swr(Register rd, Register rs,
+ int16_t off) {
+ spew("swr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_swr, rs, rd, Imm16(off)).encode());
+}
+
+// Store-conditional. As with as_ll, the R6 SPECIAL3 path does not encode
+// `off` -- presumably callers pass 0 on R6; confirm.
+BufferOffset AssemblerMIPSShared::as_sc(Register rd, Register rs, int16_t off) {
+ spew("sc %3s, (0x%x)%2s", rd.name(), off, rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special3, rs, rd, ff_sc).encode());
+#else
+ return writeInst(InstImm(op_sc, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+// Doubleword store-conditional.
+// Fix: the spew() was previously inside the #else branch only, so MIPSR6
+// builds emitted no spew for scd -- inconsistent with as_sc/as_ll/as_lld.
+// Hoist it before the #ifdef; spew is logging-only, so codegen is
+// unchanged on both paths.
+BufferOffset AssemblerMIPSShared::as_scd(Register rd, Register rs,
+ int16_t off) {
+ spew("scd %3s, (0x%x)%2s", rd.name(), off, rs.name());
+#ifdef MIPSR6
+ // R6 SPECIAL3 encoding; as with as_sc, `off` is not encoded here.
+ return writeInst(InstReg(op_special3, rs, rd, ff_scd).encode());
+#else
+ return writeInst(InstImm(op_scd, rs, rd, Imm16(off)).encode());
+#endif
+}
+
+// Doubleword stores, including the unaligned left/right halves.
+BufferOffset AssemblerMIPSShared::as_sd(Register rd, Register rs, int16_t off) {
+ spew("sd %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sd, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sdl(Register rd, Register rs,
+ int16_t off) {
+ spew("sdl %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sdl, rs, rd, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sdr(Register rd, Register rs,
+ int16_t off) {
+ spew("sdr %3s, (0x%x)%2s", rd.name(), off, rs.name());
+ return writeInst(InstImm(op_sdr, rs, rd, Imm16(off)).encode());
+}
+
+// R6 conditional selects: rd = (rt == 0) ? rs : 0 (seleqz) and
+// rd = (rt != 0) ? rs : 0 (selnez). Emitted without an R6 guard --
+// presumably R6-only call sites; confirm.
+BufferOffset AssemblerMIPSShared::as_seleqz(Register rd, Register rs,
+ Register rt) {
+ spew("seleqz %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x0, ff_seleqz).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_selnez(Register rd, Register rs,
+ Register rt) {
+ spew("selnez %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, 0x0, ff_selnez).encode());
+}
+
+// Loongson (gs) extension: indexed loads/stores (base rs + index ri +
+// signed 8-bit offset) and 16-byte quad load/store (gslq/gssq) with a
+// 13-bit offset. These reuse the COP2 load/store opcodes.
+BufferOffset AssemblerMIPSShared::as_gslbx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslbx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssbx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssbx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxbx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslhx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslhx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsshx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsshx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxhx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslwx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslwx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsswx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsswx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxwx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsldx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_ldc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdx(Register rd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdx %3s,%3s, (0x%x)%2s", rd.name(), rs.name(), off, ri.name());
+ return writeInst(InstGS(op_sdc2, rs, rd, ri, Imm8(off), ff_gsxdx).encode());
+}
+
+// Quad (128-bit) load/store into a high/low register pair.
+BufferOffset AssemblerMIPSShared::as_gslq(Register rh, Register rl, Register rs,
+ int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gslq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssq(Register rh, Register rl, Register rs,
+ int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gssq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxq).encode());
+}
+
+// Move from HI/LO register.
+BufferOffset AssemblerMIPSShared::as_mfhi(Register rd) {
+ spew("mfhi %3s", rd.name());
+ return writeInst(InstReg(op_special, rd, ff_mfhi).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mflo(Register rd) {
+ spew("mflo %3s", rd.name());
+ return writeInst(InstReg(op_special, rd, ff_mflo).encode());
+}
+
+// Set on less than.
+BufferOffset AssemblerMIPSShared::as_slt(Register rd, Register rs,
+ Register rt) {
+ spew("slt %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_slt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sltu(Register rd, Register rs,
+ Register rt) {
+ spew("sltu %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_sltu).encode());
+}
+
+// Immediate forms. Note sltiu still sign-extends its 16-bit immediate
+// before the unsigned compare, hence the signed-range assert on j.
+BufferOffset AssemblerMIPSShared::as_slti(Register rd, Register rs, int32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(j));
+ spew("slti %3s,%3s, 0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_slti, rs, rd, Imm16(j)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sltiu(Register rd, Register rs,
+ uint32_t j) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(int32_t(j)));
+ spew("sltiu %3s,%3s, 0x%x", rd.name(), rs.name(), j);
+ return writeInst(InstImm(op_sltiu, rs, rd, Imm16(j)).encode());
+}
+
+// Conditional move.
+BufferOffset AssemblerMIPSShared::as_movz(Register rd, Register rs,
+ Register rt) {
+ spew("movz %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movz).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movn(Register rd, Register rs,
+ Register rt) {
+ spew("movn %3s,%3s,%3s", rd.name(), rs.name(), rt.name());
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movn).encode());
+}
+
+// movt/movf: conditional move on an FP condition code. The rt field packs
+// the 3-bit condition-code index (bits 4..2) and the true/false bit
+// (bit 0): 1 selects movt, 0 selects movf.
+BufferOffset AssemblerMIPSShared::as_movt(Register rd, Register rs,
+ uint16_t cc) {
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 1);
+ spew("movt %3s,%3s, FCC%d", rd.name(), rs.name(), cc);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movf(Register rd, Register rs,
+ uint16_t cc) {
+ Register rt;
+ rt = Register::FromCode((cc & 0x7) << 2 | 0);
+ spew("movf %3s,%3s, FCC%d", rd.name(), rs.name(), cc);
+ return writeInst(InstReg(op_special, rs, rt, rd, ff_movci).encode());
+}
+
+// Bit twiddling.
+// clz/dclz: count leading zeros. R6 moved these from SPECIAL2 to SPECIAL;
+// the pre-R6 SPECIAL2 form requires rt == rd, hence rd passed twice.
+BufferOffset AssemblerMIPSShared::as_clz(Register rd, Register rs) {
+ spew("clz %3s,%3s", rd.name(), rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special, rs, 0x0, rd, 0x1, ff_clz).encode());
+#else
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_clz).encode());
+#endif
+}
+
+BufferOffset AssemblerMIPSShared::as_dclz(Register rd, Register rs) {
+ spew("dclz %3s,%3s", rd.name(), rs.name());
+#ifdef MIPSR6
+ return writeInst(InstReg(op_special, rs, 0x0, rd, 0x1, ff_dclz).encode());
+#else
+ return writeInst(InstReg(op_special2, rs, rd, rd, ff_dclz).encode());
+#endif
+}
+
+// Byte/halfword swaps (BSHFL/DBSHFL with sa selecting the operation).
+BufferOffset AssemblerMIPSShared::as_wsbh(Register rd, Register rt) {
+ spew("wsbh %3s,%3s", rd.name(), rt.name());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 0x2, ff_bshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dsbh(Register rd, Register rt) {
+ spew("dsbh %3s,%3s", rd.name(), rt.name());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 0x2, ff_dbshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dshd(Register rd, Register rt) {
+ spew("dshd %3s,%3s", rd.name(), rt.name());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 0x5, ff_dbshfl).encode());
+}
+
+// Bit-field insert/extract (R2+). The hardware encodes msb (or msbd) in
+// the rd field and lsb in the sa field, which is why a Register is
+// fabricated from (pos + size - 1). The dins/dinsm/dinsu trio partitions
+// the 64-bit range: dins for fields wholly below bit 32, dinsm for fields
+// straddling bit 32, dinsu for fields wholly above it.
+BufferOffset AssemblerMIPSShared::as_ins(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ spew("ins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ins).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dins(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1);
+ spew("dins %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dins).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dinsm(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size >= 2 && size <= 64 && pos + size > 32 &&
+ pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ spew("dinsm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dinsm).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dinsu(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size >= 1 && size <= 32 &&
+ pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(pos + size - 1 - 32);
+ spew("dinsu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_special3, rs, rt, rd, pos - 32, ff_dinsu).encode());
+}
+
+// ext encodes size - 1 (msbd) in the rd field, pos (lsb) in sa.
+BufferOffset AssemblerMIPSShared::as_ext(Register rt, Register rs, uint16_t pos,
+ uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 32);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ spew("ext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_ext).encode());
+}
+
+// Sign extend
+// seb/seh are BSHFL operations; sa = 16 selects SEB, sa = 24 selects SEH.
+BufferOffset AssemblerMIPSShared::as_seb(Register rd, Register rt) {
+ spew("seb %3s,%3s", rd.name(), rt.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 16, ff_bshfl).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_seh(Register rd, Register rt) {
+ spew("seh %3s,%3s", rd.name(), rt.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, zero, rt, rd, 24, ff_bshfl).encode());
+}
+
+// 64-bit extract trio, mirroring dins/dinsm/dinsu: dext for pos < 32 with
+// size <= 32, dextm for size > 32, dextu for pos >= 32.
+BufferOffset AssemblerMIPSShared::as_dext(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size != 0 && size <= 32 && pos + size != 0 &&
+ pos + size <= 63);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ spew("dext %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dext).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dextm(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos < 32 && size > 32 && size <= 64 && pos + size > 32 &&
+ pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1 - 32);
+ spew("dextm %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_special3, rs, rt, rd, pos, ff_dextm).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dextu(Register rt, Register rs,
+ uint16_t pos, uint16_t size) {
+ MOZ_ASSERT(pos >= 32 && pos < 64 && size != 0 && size <= 32 &&
+ pos + size > 32 && pos + size <= 64);
+ Register rd;
+ rd = Register::FromCode(size - 1);
+ spew("dextu %3s,%3s, %d, %d", rt.name(), rs.name(), pos, size);
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_special3, rs, rt, rd, pos - 32, ff_dextu).encode());
+}
+
+// FP instructions
+// FPU load/store: ldc1/sdc1 move doubles, lwc1/swc1 singles. Unlike the
+// GPR loads above, these assert the offset is in signed 16-bit range.
+BufferOffset AssemblerMIPSShared::as_ldc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("ldc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_ldc1, base, ft, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sdc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("sdc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_sdc1, base, ft, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_lwc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("lwc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_lwc1, base, ft, Imm16(off)).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_swc1(FloatRegister ft, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(off));
+ spew("swc1 %3s, (0x%x)%2s", ft.name(), off, base.name());
+ return writeInst(InstImm(op_swc1, base, ft, Imm16(off)).encode());
+}
+
+// Loongson (gs) extension FP load/store helpers: unaligned left/right
+// halves, indexed forms, and quad (128-bit) pair access. These are the
+// FloatRegister overloads of the integer gs* emitters above.
+BufferOffset AssemblerMIPSShared::as_gsldl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsldr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxdrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslsl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslsl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslsr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslsr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_lwc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsssl(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsssl %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwlc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsssr(FloatRegister fd, Register base,
+ int32_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsssr %3s, (0x%x)%2s", fd.name(), off, base.name());
+ return writeInst(InstGS(op_swc2, base, fd, Imm8(off), ff_gsxwrc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslsx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gslsx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsssx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsssx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxwxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gsldx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gsldx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_ldc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssdx(FloatRegister fd, Register rs,
+ Register ri, int16_t off) {
+ MOZ_ASSERT(Imm8::IsInSignedRange(off));
+ spew("gssdx %3s, (%3s,%3s, 0x%x)", fd.name(), rs.name(), ri.name(), off);
+ return writeInst(InstGS(op_sdc2, rs, fd, ri, Imm8(off), ff_gsxdxc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gslq(FloatRegister rh, FloatRegister rl,
+ Register rs, int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gslq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(
+ InstGS(op_lwc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_gssq(FloatRegister rh, FloatRegister rl,
+ Register rs, int16_t off) {
+ MOZ_ASSERT(GSImm13::IsInRange(off));
+ spew("gssq %3s,%3s, (0x%x)%2s", rh.name(), rl.name(), off, rs.name());
+ return writeInst(
+ InstGS(op_swc2, rs, rl, rh, GSImm13(off), ff_gsxqc1).encode());
+}
+
+// FP register moves and GPR<->FPU transfers. The rs-field constants
+// (rs_mtc1, rs_mfc1, ...) select the COP1 transfer direction; mthc1/mfhc1
+// access the high half of a 64-bit FPR, dmtc1/dmfc1 move full doublewords.
+BufferOffset AssemblerMIPSShared::as_movs(FloatRegister fd, FloatRegister fs) {
+ spew("mov.s %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_movd(FloatRegister fd, FloatRegister fs) {
+ spew("mov.d %3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_mov_fmt).encode());
+}
+
+// FPU control-register access (e.g. FCSR).
+BufferOffset AssemblerMIPSShared::as_ctc1(Register rt, FPControl fc) {
+ spew("ctc1 %3s,%d", rt.name(), fc);
+ return writeInst(InstReg(op_cop1, rs_ctc1, rt, (uint32_t)fc).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cfc1(Register rt, FPControl fc) {
+ spew("cfc1 %3s,%d", rt.name(), fc);
+ return writeInst(InstReg(op_cop1, rs_cfc1, rt, (uint32_t)fc).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mtc1(Register rt, FloatRegister fs) {
+ spew("mtc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mtc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mfc1(Register rt, FloatRegister fs) {
+ spew("mfc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mfc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mthc1(Register rt, FloatRegister fs) {
+ spew("mthc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mthc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_mfhc1(Register rt, FloatRegister fs) {
+ spew("mfhc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_mfhc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmtc1(Register rt, FloatRegister fs) {
+ spew("dmtc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_dmtc1, rt, fs).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_dmfc1(Register rt, FloatRegister fs) {
+ spew("dmfc1 %3s,%3s", rt.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_dmfc1, rt, fs).encode());
+}
+
+// FP convert instructions
+// Rounding conversions to word (ceil/floor/round/trunc) and the cvt.*
+// format conversions. The rs field encodes the *source* format
+// (rs_s/rs_d/rs_w/rs_l); the function field encodes the destination.
+// The 64-bit (l-format) variants require an R2+ FPU, hence hasR2().
+BufferOffset AssemblerMIPSShared::as_ceilws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("ceil.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_floorws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("floor.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_roundws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("round.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncws(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncls(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.l.s%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_cop1, rs_s, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_ceilwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("ceil.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_ceil_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_floorwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("floor.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_floor_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_roundwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("round.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_round_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncwd(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_truncld(FloatRegister fd,
+ FloatRegister fs) {
+ spew("trunc.l.d%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(
+ InstReg(op_cop1, rs_d, zero, fs, fd, ff_trunc_l_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtdl(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.d.l%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtds(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.d.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtdw(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.d.w%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_d_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtsd(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.s.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtsl(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.s.l%3s,%3s", fd.name(), fs.name());
+ MOZ_ASSERT(hasR2());
+ return writeInst(InstReg(op_cop1, rs_l, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtsw(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.s.w%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_w, zero, fs, fd, ff_cvt_s_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtwd(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.w.d%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_cvtws(FloatRegister fd, FloatRegister fs) {
+ spew("cvt.w.s%3s,%3s", fd.name(), fs.name());
+ return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_cvt_w_fmt).encode());
+}
+
+// FP arithmetic instructions
+// Three-operand forms compute fd = fs <op> ft; two-operand forms (abs/neg)
+// compute fd = <op>(fs).
+BufferOffset AssemblerMIPSShared::as_adds(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  spew("add.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_addd(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  spew("add.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_add_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_subs(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  spew("sub.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_subd(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  spew("sub.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_sub_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_abss(FloatRegister fd, FloatRegister fs) {
+  spew("abs.s %3s,%3s", fd.name(), fs.name());
+  return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_absd(FloatRegister fd, FloatRegister fs) {
+  spew("abs.d %3s,%3s", fd.name(), fs.name());
+  return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_negs(FloatRegister fd, FloatRegister fs) {
+  spew("neg.s %3s,%3s", fd.name(), fs.name());
+  return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_negd(FloatRegister fd, FloatRegister fs) {
+  spew("neg.d %3s,%3s", fd.name(), fs.name());
+  return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_neg_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_muls(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  spew("mul.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_muld(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  spew("mul.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_mul_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_divs(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  spew("div.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_div_fmt).encode());
+}
+
+// Computes fd = fs / ft in double format.
+BufferOffset AssemblerMIPSShared::as_divd(FloatRegister fd, FloatRegister fs,
+                                          FloatRegister ft) {
+  // Spew mnemonic fixed: was "divd.d"; the instruction is div.d, matching
+  // the sibling spews (div.s, add.d, sub.d, mul.d).
+  spew("div.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+  return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_div_fmt).encode());
+}
+
+// Square root, single and double format: fd = sqrt(fs).
+// Spew mnemonics fixed: "sqrts"/"sqrtd" are not MIPS mnemonics; the
+// instructions are sqrt.s / sqrt.d.
+BufferOffset AssemblerMIPSShared::as_sqrts(FloatRegister fd, FloatRegister fs) {
+  spew("sqrt.s %3s,%3s", fd.name(), fs.name());
+  return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_sqrtd(FloatRegister fd, FloatRegister fs) {
+  spew("sqrt.d %3s,%3s", fd.name(), fs.name());
+  return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_sqrt_fmt).encode());
+}
+
+// FP compare instructions
+// Pre-R6 encodings write the comparison result into condition bit |fcc|
+// (placed at FccShift within the rd slot).
+// NOTE(review): the MIPSR6 encodings write the compare result into FP
+// register f24 and ignore |fcc| entirely -- confirm callers only rely on
+// FCC0 under R6.
+BufferOffset AssemblerMIPSShared::as_cf(FloatFormat fmt, FloatRegister fs,
+                                        FloatRegister ft, FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.f.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_f_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
+#endif
+  } else {
+    spew("c.f.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_f_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_f_fmt).encode());
+#endif
+  }
+}
+
+// c.un: compare unordered (true iff either operand is NaN).
+BufferOffset AssemblerMIPSShared::as_cun(FloatFormat fmt, FloatRegister fs,
+                                         FloatRegister ft, FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.un.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_un_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
+#endif
+  } else {
+    spew("c.un.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_un_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_un_fmt).encode());
+#endif
+  }
+}
+
+// c.eq (ordered equal) / c.ueq (unordered-or-equal) compares; same
+// fcc-vs-f24 encoding split as as_cf above.
+BufferOffset AssemblerMIPSShared::as_ceq(FloatFormat fmt, FloatRegister fs,
+                                         FloatRegister ft, FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.eq.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_eq_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+#endif
+  } else {
+    spew("c.eq.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_eq_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_eq_fmt).encode());
+#endif
+  }
+}
+
+BufferOffset AssemblerMIPSShared::as_cueq(FloatFormat fmt, FloatRegister fs,
+                                          FloatRegister ft,
+                                          FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.ueq.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ueq_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+#endif
+  } else {
+    spew("c.ueq.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ueq_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ueq_fmt).encode());
+#endif
+  }
+}
+
+// c.olt (ordered less-than) / c.ult (unordered-or-less-than) compares; same
+// fcc-vs-f24 encoding split as as_cf above.
+BufferOffset AssemblerMIPSShared::as_colt(FloatFormat fmt, FloatRegister fs,
+                                          FloatRegister ft,
+                                          FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.olt.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_olt_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+#endif
+  } else {
+    spew("c.olt.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_olt_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_olt_fmt).encode());
+#endif
+  }
+}
+
+BufferOffset AssemblerMIPSShared::as_cult(FloatFormat fmt, FloatRegister fs,
+                                          FloatRegister ft,
+                                          FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.ult.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ult_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+#endif
+  } else {
+    spew("c.ult.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ult_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ult_fmt).encode());
+#endif
+  }
+}
+
+// c.ole (ordered <=) / c.ule (unordered-or-<=) compares; same fcc-vs-f24
+// encoding split as as_cf above.
+BufferOffset AssemblerMIPSShared::as_cole(FloatFormat fmt, FloatRegister fs,
+                                          FloatRegister ft,
+                                          FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.ole.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ole_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+#endif
+  } else {
+    spew("c.ole.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ole_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ole_fmt).encode());
+#endif
+  }
+}
+
+BufferOffset AssemblerMIPSShared::as_cule(FloatFormat fmt, FloatRegister fs,
+                                          FloatRegister ft,
+                                          FPConditionBit fcc) {
+  if (fmt == DoubleFloat) {
+    spew("c.ule.d FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_d_r6, ft, fs, FloatRegisters::f24, ff_c_ule_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_d, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+#endif
+  } else {
+    spew("c.ule.s FCC%d,%3s,%3s", fcc, fs.name(), ft.name());
+#ifdef MIPSR6
+    return writeInst(
+        InstReg(op_cop1, rs_s_r6, ft, fs, FloatRegisters::f24, ff_c_ule_fmt)
+            .encode());
+#else
+    return writeInst(
+        InstReg(op_cop1, rs_s, ft, fs, fcc << FccShift, ff_c_ule_fmt).encode());
+#endif
+  }
+}
+
+// FP conditional move.
+// movt and movf share the ff_movf_fmt function field; the tf bit that
+// selects movt (1) vs movf (0) lives in the low bit of the rt slot,
+// alongside the condition-code index: rt = fcc << 2 | tf.
+BufferOffset AssemblerMIPSShared::as_movt(FloatFormat fmt, FloatRegister fd,
+                                          FloatRegister fs,
+                                          FPConditionBit fcc) {
+  Register rt = Register::FromCode(fcc << 2 | 1);
+  if (fmt == DoubleFloat) {
+    spew("movt.d FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+    return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movf_fmt).encode());
+  } else {
+    spew("movt.s FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+    return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movf_fmt).encode());
+  }
+}
+
+BufferOffset AssemblerMIPSShared::as_movf(FloatFormat fmt, FloatRegister fd,
+                                          FloatRegister fs,
+                                          FPConditionBit fcc) {
+  Register rt = Register::FromCode(fcc << 2 | 0);
+  if (fmt == DoubleFloat) {
+    spew("movf.d FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+    return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movf_fmt).encode());
+  } else {
+    spew("movf.s FCC%d,%3s,%3s", fcc, fd.name(), fs.name());
+    return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movf_fmt).encode());
+  }
+}
+
+// movz/movn condition on a GPR instead of an FP condition bit:
+// fd = fs when rt == 0 (movz) / rt != 0 (movn).
+BufferOffset AssemblerMIPSShared::as_movz(FloatFormat fmt, FloatRegister fd,
+                                          FloatRegister fs, Register rt) {
+  if (fmt == DoubleFloat) {
+    spew("movz.d %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+    return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movz_fmt).encode());
+  } else {
+    spew("movz.s %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+    return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movz_fmt).encode());
+  }
+}
+
+BufferOffset AssemblerMIPSShared::as_movn(FloatFormat fmt, FloatRegister fd,
+                                          FloatRegister fs, Register rt) {
+  if (fmt == DoubleFloat) {
+    spew("movn.d %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+    return writeInst(InstReg(op_cop1, rs_d, rt, fs, fd, ff_movn_fmt).encode());
+  } else {
+    spew("movn.s %3s,%3s,%3s", fd.name(), fs.name(), rt.name());
+    return writeInst(InstReg(op_cop1, rs_s, rt, fs, fd, ff_movn_fmt).encode());
+  }
+}
+
+// FP max/min: fd = max/min(fs, ft).
+// Spew fixed to include the format suffix (.d/.s), which the previous
+// identical strings in both branches omitted.
+BufferOffset AssemblerMIPSShared::as_max(FloatFormat fmt, FloatRegister fd,
+                                         FloatRegister fs, FloatRegister ft) {
+  if (fmt == DoubleFloat) {
+    spew("max.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_max).encode());
+  } else {
+    spew("max.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_max).encode());
+  }
+}
+
+BufferOffset AssemblerMIPSShared::as_min(FloatFormat fmt, FloatRegister fd,
+                                         FloatRegister fs, FloatRegister ft) {
+  if (fmt == DoubleFloat) {
+    spew("min.d %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+    return writeInst(InstReg(op_cop1, rs_d, ft, fs, fd, ff_min).encode());
+  } else {
+    spew("min.s %3s,%3s,%3s", fd.name(), fs.name(), ft.name());
+    return writeInst(InstReg(op_cop1, rs_s, ft, fs, fd, ff_min).encode());
+  }
+}
+
+// Conditional trap instructions: raise a trap when the register comparison
+// holds (tge: rs >= rt signed, tgeu: unsigned, tlt/tltu: less-than,
+// teq/tne: equal / not-equal).  |code| is a software trap code for handlers.
+// NOTE(review): |code| is asserted <= MAX_BREAK_CODE (1023) but is passed
+// through the sa parameter of InstReg -- confirm that overload preserves
+// the full 10-bit trap-code field.
+BufferOffset AssemblerMIPSShared::as_tge(Register rs, Register rt,
+                                         uint32_t code) {
+  MOZ_ASSERT(code <= MAX_BREAK_CODE);
+  spew("tge %3s,%3s,%d", rs.name(), rt.name(), code);
+  return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tge).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tgeu(Register rs, Register rt,
+                                          uint32_t code) {
+  MOZ_ASSERT(code <= MAX_BREAK_CODE);
+  spew("tgeu %3s,%3s,%d", rs.name(), rt.name(), code);
+  return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tgeu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tlt(Register rs, Register rt,
+                                         uint32_t code) {
+  MOZ_ASSERT(code <= MAX_BREAK_CODE);
+  spew("tlt %3s,%3s,%d", rs.name(), rt.name(), code);
+  return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tlt).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tltu(Register rs, Register rt,
+                                          uint32_t code) {
+  MOZ_ASSERT(code <= MAX_BREAK_CODE);
+  spew("tltu %3s,%3s,%d", rs.name(), rt.name(), code);
+  return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tltu).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_teq(Register rs, Register rt,
+                                         uint32_t code) {
+  MOZ_ASSERT(code <= MAX_BREAK_CODE);
+  spew("teq %3s,%3s,%d", rs.name(), rt.name(), code);
+  return writeInst(InstReg(op_special, rs, rt, zero, code, ff_teq).encode());
+}
+
+BufferOffset AssemblerMIPSShared::as_tne(Register rs, Register rt,
+                                         uint32_t code) {
+  MOZ_ASSERT(code <= MAX_BREAK_CODE);
+  spew("tne %3s,%3s,%d", rs.name(), rt.name(), code);
+  return writeInst(InstReg(op_special, rs, rt, zero, code, ff_tne).encode());
+}
+
+// Bind |label| to |boff| (or to the next instruction when |boff| is not
+// assigned), patching every branch recorded on the label's use chain.
+// While unbound, a label's uses form a singly-linked list threaded through
+// the instruction stream itself: the word after each branch holds the
+// buffer offset of the next use, terminated by LabelBase::INVALID_OFFSET.
+void AssemblerMIPSShared::bind(Label* label, BufferOffset boff) {
+  spew(".set Llabel %p", label);
+  // If our caller didn't give us an explicit target to bind to
+  // then we want to bind to the location of the next instruction
+  BufferOffset dest = boff.assigned() ? boff : nextOffset();
+  if (label->used()) {
+    int32_t next;
+
+    // A used label holds a link to branch that uses it.
+    BufferOffset b(label);
+    do {
+      // Even a 0 offset may be invalid if we're out of memory.
+      if (oom()) {
+        return;
+      }
+
+      Instruction* inst = editSrc(b);
+
+      // Second word holds a pointer to the next branch in label's chain.
+      next = inst[1].encode();
+      // Patch this branch site so it targets |dest|.
+      bind(reinterpret_cast<InstImm*>(inst), b.getOffset(), dest.getOffset());
+
+      b = BufferOffset(next);
+    } while (next != LabelBase::INVALID_OFFSET);
+  }
+  label->bind(dest.getOffset());
+}
+
+// Redirect every use of |label| to |target|, then reset |label|.
+// If |target| is already bound this degenerates to bind(); otherwise the
+// two labels' use chains are spliced together.
+void AssemblerMIPSShared::retarget(Label* label, Label* target) {
+  spew("retarget %p -> %p", label, target);
+  if (label->used() && !oom()) {
+    if (target->bound()) {
+      bind(label, BufferOffset(target));
+    } else if (target->used()) {
+      // The target is not bound but used. Prepend label's branch list
+      // onto target's.
+      int32_t next;
+      BufferOffset labelBranchOffset(label);
+
+      // Find the head of the use chain for label.
+      do {
+        Instruction* inst = editSrc(labelBranchOffset);
+
+        // Second word holds a pointer to the next branch in chain.
+        next = inst[1].encode();
+        labelBranchOffset = BufferOffset(next);
+      } while (next != LabelBase::INVALID_OFFSET);
+
+      // Then patch the head of label's use chain to the tail of
+      // target's use chain, prepending the entire use chain of target.
+      Instruction* inst = editSrc(labelBranchOffset);
+      int32_t prev = target->offset();
+      target->use(label->offset());
+      inst[1].setData(prev);
+    } else {
+      // The target is unbound and unused. We can just take the head of
+      // the list hanging off of label, and dump that into target.
+      target->use(label->offset());
+    }
+  }
+  label->reset();
+}
+
+// Deliberately empty; exists so a host debugger can set a breakpoint on it.
+void dbg_break() {}
+
+// Emit a break instruction carrying |code| (max 10 bits) for the handler.
+void AssemblerMIPSShared::as_break(uint32_t code) {
+  MOZ_ASSERT(code <= MAX_BREAK_CODE);
+  spew("break %d", code);
+  writeInst(op_special | code << FunctionBits | ff_break);
+}
+
+// Emit a memory-ordering sync; |stype| (0..31) selects the sync variant.
+void AssemblerMIPSShared::as_sync(uint32_t stype) {
+  MOZ_ASSERT(stype <= 31);
+  spew("sync %d", stype);
+  writeInst(InstReg(op_special, zero, zero, zero, stype, ff_sync).encode());
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. This is
+// only meant to function on code that has been invalidated, so it should
+// be totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void AssemblerMIPSShared::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+  // Raw is going to be the return address.
+  uint32_t* raw = (uint32_t*)label.raw();
+  // Overwrite the 4 bytes before the return address, which will
+  // end up being the call instruction.
+  *(raw - 1) = imm.value;
+}
+
+// Return a pointer to the instruction following |inst_|, incrementing
+// |*count| by the instruction size when a counter is supplied.
+uint8_t* AssemblerMIPSShared::NextInstruction(uint8_t* inst_, uint32_t* count) {
+  Instruction* inst = reinterpret_cast<Instruction*>(inst_);
+  if (count != nullptr) {
+    *count += sizeof(Instruction);
+  }
+  return reinterpret_cast<uint8_t*>(inst->next());
+}
+
+// Since there are no pools in MIPS implementation, this should be simple.
+Instruction* Instruction::next() { return this + 1; }
+
+// Return the logical inverse of |branch| with its offset set to
+// |skipOffset|.  Used when materializing long branches: the inverted
+// short branch skips over the long-jump sequence.
+InstImm AssemblerMIPSShared::invertBranch(InstImm branch,
+                                          BOffImm16 skipOffset) {
+  uint32_t rt = 0;
+  OpcodeField op = (OpcodeField)(branch.extractOpcode() << OpcodeShift);
+  switch (op) {
+    case op_beq:
+      branch.setBOffImm16(skipOffset);
+      branch.setOpcode(op_bne);
+      return branch;
+    case op_bne:
+      branch.setBOffImm16(skipOffset);
+      branch.setOpcode(op_beq);
+      return branch;
+    case op_bgtz:
+      branch.setBOffImm16(skipOffset);
+      branch.setOpcode(op_blez);
+      return branch;
+    case op_blez:
+      branch.setBOffImm16(skipOffset);
+      branch.setOpcode(op_bgtz);
+      return branch;
+    case op_regimm:
+      // bltz <-> bgez: the condition is selected by the rt field.
+      branch.setBOffImm16(skipOffset);
+      rt = branch.extractRT();
+      if (rt == (rt_bltz >> RTShift)) {
+        branch.setRT(rt_bgez);
+        return branch;
+      }
+      if (rt == (rt_bgez >> RTShift)) {
+        branch.setRT(rt_bltz);
+        return branch;
+      }
+
+      MOZ_CRASH("Error creating long branch.");
+
+    case op_cop1:
+      // bc1t <-> bc1f: flip the tf bit held in the low bit of the rt field.
+      MOZ_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
+
+      branch.setBOffImm16(skipOffset);
+      rt = branch.extractRT();
+      if (rt & 0x1) {
+        branch.setRT((RTField)((rt & ~0x1) << RTShift));
+      } else {
+        branch.setRT((RTField)((rt | 0x1) << RTShift));
+      }
+      return branch;
+    default:
+      MOZ_CRASH("Error creating long branch.");
+  }
+}
+
+// Re-enable a toggled jump site: a disabled "beq $zero, $zero, offset" is
+// stored as an effectively-nop "andi"; restore the beq opcode.
+void AssemblerMIPSShared::ToggleToJmp(CodeLocationLabel inst_) {
+  InstImm* inst = (InstImm*)inst_.raw();
+
+  MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_andi >> OpcodeShift));
+  // We converted beq to andi, so now we restore it.
+  inst->setOpcode(op_beq);
+}
+
+void AssemblerMIPSShared::ToggleToCmp(CodeLocationLabel inst_) {
+  InstImm* inst = (InstImm*)inst_.raw();
+
+  // toggledJump is always used for short jumps.
+  MOZ_ASSERT(inst->extractOpcode() == ((uint32_t)op_beq >> OpcodeShift));
+  // Replace "beq $zero, $zero, offset" with "andi $zero, $zero, offset"
+  inst->setOpcode(op_andi);
+}
+
+// Patch a lui/ori instruction pair in place so that it materializes the
+// 32-bit |value| (upper half into the lui, lower half into the ori).
+void AssemblerMIPSShared::UpdateLuiOriValue(Instruction* inst0,
+                                            Instruction* inst1,
+                                            uint32_t value) {
+  MOZ_ASSERT(inst0->extractOpcode() == ((uint32_t)op_lui >> OpcodeShift));
+  MOZ_ASSERT(inst1->extractOpcode() == ((uint32_t)op_ori >> OpcodeShift));
+
+  ((InstImm*)inst0)->setImm16(Imm16::Upper(Imm32(value)));
+  ((InstImm*)inst1)->setImm16(Imm16::Lower(Imm32(value)));
+}
+
+#ifdef JS_JITSPEW
+// Disassemble a branch instruction into the JIT spew log.  The printed
+// target is derived from the 16-bit immediate scaled to a byte offset,
+// plus 4 for the delay-slot-relative base.
+// NOTE(review): (immi << 18) >> 16 discards the top two bits of the 16-bit
+// immediate before sign extension -- confirm spewed offsets always fit in
+// 14 bits, or this debug output misprints large branch displacements.
+void AssemblerMIPSShared::decodeBranchInstAndSpew(InstImm branch) {
+  OpcodeField op = (OpcodeField)(branch.extractOpcode() << OpcodeShift);
+  uint32_t rt_id;
+  uint32_t rs_id;
+  uint32_t immi = branch.extractImm16Value();
+  uint32_t fcc;
+  switch (op) {
+    case op_beq:
+      rt_id = branch.extractRT();
+      rs_id = branch.extractRS();
+      spew("beq %3s,%3s,0x%x", Registers::GetName(rs_id),
+           Registers::GetName(rt_id), (int32_t(immi << 18) >> 16) + 4);
+      break;
+    case op_bne:
+      rt_id = branch.extractRT();
+      rs_id = branch.extractRS();
+      spew("bne %3s,%3s,0x%x", Registers::GetName(rs_id),
+           Registers::GetName(rt_id), (int32_t(immi << 18) >> 16) + 4);
+      break;
+    case op_bgtz:
+      rs_id = branch.extractRS();
+      spew("bgt %3s, 0,0x%x", Registers::GetName(rs_id),
+           (int32_t(immi << 18) >> 16) + 4);
+      break;
+    case op_blez:
+      rs_id = branch.extractRS();
+      spew("ble %3s, 0,0x%x", Registers::GetName(rs_id),
+           (int32_t(immi << 18) >> 16) + 4);
+      break;
+    case op_regimm:
+      // bltz/bgez are distinguished by the rt field.
+      rt_id = branch.extractRT();
+      if (rt_id == (rt_bltz >> RTShift)) {
+        rs_id = branch.extractRS();
+        spew("blt %3s, 0,0x%x", Registers::GetName(rs_id),
+             (int32_t(immi << 18) >> 16) + 4);
+      } else if (rt_id == (rt_bgez >> RTShift)) {
+        rs_id = branch.extractRS();
+        spew("bge %3s, 0,0x%x", Registers::GetName(rs_id),
+             (int32_t(immi << 18) >> 16) + 4);
+      } else {
+        MOZ_CRASH("Error disassemble branch.");
+      }
+      break;
+    case op_cop1:
+      // bc1t/bc1f are distinguished by the low bit of the rt field.
+      MOZ_ASSERT(branch.extractRS() == rs_bc1 >> RSShift);
+      rt_id = branch.extractRT();
+      fcc = branch.extractBitField(FCccShift + FCccBits - 1, FCccShift);
+      if (rt_id & 0x1) {
+        spew("bc1t FCC%d, 0x%x", fcc, (int32_t(immi << 18) >> 16) + 4);
+      } else {
+        spew("bc1f FCC%d, 0x%x", fcc, (int32_t(immi << 18) >> 16) + 4);
+      }
+      break;
+    default:
+      MOZ_CRASH("Error disassemble branch.");
+  }
+}
+#endif
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
new file mode 100644
index 0000000000..32332de5be
--- /dev/null
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -0,0 +1,1500 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Assembler_mips_shared_h
+#define jit_mips_shared_Assembler_mips_shared_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Sprintf.h"
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/JitSpewer.h"
+#include "jit/mips-shared/Architecture-mips-shared.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+// General-purpose register definitions.
+// Note: a4-a7 and t4-t7 deliberately alias the same ta0-ta3 encodings; the
+// a4-a7 names follow the n32/n64 argument-register convention while t4-t7
+// follow the o32 temporary naming -- which set applies depends on the ABI.
+static constexpr Register zero{Registers::zero};
+static constexpr Register at{Registers::at};
+static constexpr Register v0{Registers::v0};
+static constexpr Register v1{Registers::v1};
+static constexpr Register a0{Registers::a0};
+static constexpr Register a1{Registers::a1};
+static constexpr Register a2{Registers::a2};
+static constexpr Register a3{Registers::a3};
+static constexpr Register a4{Registers::ta0};
+static constexpr Register a5{Registers::ta1};
+static constexpr Register a6{Registers::ta2};
+static constexpr Register a7{Registers::ta3};
+static constexpr Register t0{Registers::t0};
+static constexpr Register t1{Registers::t1};
+static constexpr Register t2{Registers::t2};
+static constexpr Register t3{Registers::t3};
+static constexpr Register t4{Registers::ta0};
+static constexpr Register t5{Registers::ta1};
+static constexpr Register t6{Registers::ta2};
+static constexpr Register t7{Registers::ta3};
+static constexpr Register s0{Registers::s0};
+static constexpr Register s1{Registers::s1};
+static constexpr Register s2{Registers::s2};
+static constexpr Register s3{Registers::s3};
+static constexpr Register s4{Registers::s4};
+static constexpr Register s5{Registers::s5};
+static constexpr Register s6{Registers::s6};
+static constexpr Register s7{Registers::s7};
+static constexpr Register t8{Registers::t8};
+static constexpr Register t9{Registers::t9};
+static constexpr Register k0{Registers::k0};
+static constexpr Register k1{Registers::k1};
+static constexpr Register gp{Registers::gp};
+static constexpr Register sp{Registers::sp};
+static constexpr Register fp{Registers::fp};
+static constexpr Register ra{Registers::ra};
+
+// The assembler reserves $at and $t8 as scratch registers.
+static constexpr Register ScratchRegister = at;
+static constexpr Register SecondScratchReg = t8;
+
+// Helper classes for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of each scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope {
+  explicit ScratchRegisterScope(MacroAssembler& masm)
+      : AutoRegisterScope(masm, ScratchRegister) {}
+};
+struct SecondScratchRegisterScope : public AutoRegisterScope {
+  explicit SecondScratchRegisterScope(MacroAssembler& masm)
+      : AutoRegisterScope(masm, SecondScratchReg) {}
+};
+
+// Use arg reg from EnterJIT function as OsrFrameReg.
+static constexpr Register OsrFrameReg = a3;
+static constexpr Register CallTempReg0 = t0;
+static constexpr Register CallTempReg1 = t1;
+static constexpr Register CallTempReg2 = t2;
+static constexpr Register CallTempReg3 = t3;
+
+// Integer argument registers used when passing arguments in registers.
+static constexpr Register IntArgReg0 = a0;
+static constexpr Register IntArgReg1 = a1;
+static constexpr Register IntArgReg2 = a2;
+static constexpr Register IntArgReg3 = a3;
+static constexpr Register IntArgReg4 = a4;
+static constexpr Register IntArgReg5 = a5;
+static constexpr Register IntArgReg6 = a6;
+static constexpr Register IntArgReg7 = a7;
+static constexpr Register GlobalReg = s6;  // used by Odin
+static constexpr Register HeapReg = s7;    // used by Odin
+
+static constexpr Register PreBarrierReg = a1;
+
+static constexpr Register InvalidReg{Registers::invalid_reg};
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = fp;
+static constexpr Register ReturnReg = v0;
+// SIMD is not supported on MIPS, so the SIMD registers are invalid.
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+
+// A bias applied to the GlobalReg to allow the use of instructions with small
+// negative immediate offsets which doubles the range of global data that can be
+// accessed with a single instruction.
+static const int32_t WasmGlobalRegBias = 32768;
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr uint32_t CodeAlignment = 8;
+
+/* clang-format off */
+// MIPS instruction types
+// +---------------------------------------------------------------+
+// | 6 | 5 | 5 | 5 | 5 | 6 |
+// +---------------------------------------------------------------+
+// Register type | Opcode | Rs | Rt | Rd | Sa | Function |
+// +---------------------------------------------------------------+
+// | 6 | 5 | 5 | 16 |
+// +---------------------------------------------------------------+
+// Immediate type | Opcode | Rs | Rt | 2's complement constant |
+// +---------------------------------------------------------------+
+// | 6 | 26 |
+// +---------------------------------------------------------------+
+// Jump type | Opcode | jump_target |
+// +---------------------------------------------------------------+
+// 31 bit bit 0
+/* clang-format on */
+
+// MIPS instruction encoding constants.
+static const uint32_t OpcodeShift = 26;
+static const uint32_t OpcodeBits = 6;
+static const uint32_t RSShift = 21;
+static const uint32_t RSBits = 5;
+static const uint32_t RTShift = 16;
+static const uint32_t RTBits = 5;
+static const uint32_t RDShift = 11;
+static const uint32_t RDBits = 5;
+static const uint32_t RZShift = 0;
+static const uint32_t RZBits = 5;
+static const uint32_t SAShift = 6;
+static const uint32_t SABits = 5;
+static const uint32_t FunctionShift = 0;
+static const uint32_t FunctionBits = 6;
+static const uint32_t Imm16Shift = 0;
+static const uint32_t Imm16Bits = 16;
+static const uint32_t Imm26Shift = 0;
+static const uint32_t Imm26Bits = 26;
+static const uint32_t Imm28Shift = 0;
+static const uint32_t Imm28Bits = 28;
+static const uint32_t ImmFieldShift = 2;
+// FP (cop1) operand fields.
+static const uint32_t FRBits = 5;
+static const uint32_t FRShift = 21;
+static const uint32_t FSShift = 11;
+static const uint32_t FSBits = 5;
+static const uint32_t FTShift = 16;
+static const uint32_t FTBits = 5;
+static const uint32_t FDShift = 6;
+static const uint32_t FDBits = 5;
+static const uint32_t FCccShift = 8;
+static const uint32_t FCccBits = 3;
+static const uint32_t FBccShift = 18;
+static const uint32_t FBccBits = 3;
+static const uint32_t FBtrueShift = 16;
+static const uint32_t FBtrueBits = 1;
+static const uint32_t FccMask = 0x7;
+// Pre-R6 c.cond.fmt encodings place the fcc index at FccShift within the
+// rd slot (see AssemblerMIPSShared::as_cf and friends).
+static const uint32_t FccShift = 2;
+
+// MIPS instruction field bit masks.
+static const uint32_t OpcodeMask = ((1 << OpcodeBits) - 1) << OpcodeShift;
+static const uint32_t Imm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift;
+static const uint32_t Imm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
+static const uint32_t Imm28Mask = ((1 << Imm28Bits) - 1) << Imm28Shift;
+static const uint32_t RSMask = ((1 << RSBits) - 1) << RSShift;
+static const uint32_t RTMask = ((1 << RTBits) - 1) << RTShift;
+static const uint32_t RDMask = ((1 << RDBits) - 1) << RDShift;
+static const uint32_t SAMask = ((1 << SABits) - 1) << SAShift;
+static const uint32_t FunctionMask = ((1 << FunctionBits) - 1) << FunctionShift;
+static const uint32_t RegMask = Registers::Total - 1;
+
+static const uint32_t BREAK_STACK_UNALIGNED = 1;
+static const uint32_t MAX_BREAK_CODE = 1024 - 1;
+static const uint32_t WASM_TRAP = 6;  // BRK_OVERFLOW
+
+class Instruction;
+class InstReg;
+class InstImm;
+class InstJump;
+
+uint32_t RS(Register r);
+uint32_t RT(Register r);
+uint32_t RT(FloatRegister r);
+uint32_t RD(Register r);
+uint32_t RD(FloatRegister r);
+uint32_t RZ(Register r);
+uint32_t RZ(FloatRegister r);
+uint32_t SA(uint32_t value);
+uint32_t SA(FloatRegister r);
+uint32_t FS(uint32_t value);
+
+Register toRS(Instruction& i);
+Register toRT(Instruction& i);
+Register toRD(Instruction& i);
+Register toR(Instruction& i);
+
+// MIPS enums for instruction fields
+// Primary opcode values (bits 31..26), pre-shifted into their field
+// position so they can be OR-ed directly into an instruction word.
+enum OpcodeField {
+  op_special = 0 << OpcodeShift,
+  op_regimm = 1 << OpcodeShift,
+
+  op_j = 2 << OpcodeShift,
+  op_jal = 3 << OpcodeShift,
+  op_beq = 4 << OpcodeShift,
+  op_bne = 5 << OpcodeShift,
+  op_blez = 6 << OpcodeShift,
+  op_bgtz = 7 << OpcodeShift,
+
+  op_addi = 8 << OpcodeShift,
+  op_addiu = 9 << OpcodeShift,
+  op_slti = 10 << OpcodeShift,
+  op_sltiu = 11 << OpcodeShift,
+  op_andi = 12 << OpcodeShift,
+  op_ori = 13 << OpcodeShift,
+  op_xori = 14 << OpcodeShift,
+  op_lui = 15 << OpcodeShift,
+
+  op_cop1 = 17 << OpcodeShift,
+  op_cop1x = 19 << OpcodeShift,
+
+  op_beql = 20 << OpcodeShift,
+  op_bnel = 21 << OpcodeShift,
+  op_blezl = 22 << OpcodeShift,
+  op_bgtzl = 23 << OpcodeShift,
+
+  op_daddi = 24 << OpcodeShift,
+  op_daddiu = 25 << OpcodeShift,
+
+  op_ldl = 26 << OpcodeShift,
+  op_ldr = 27 << OpcodeShift,
+
+  op_special2 = 28 << OpcodeShift,
+  op_special3 = 31 << OpcodeShift,
+
+  op_lb = 32 << OpcodeShift,
+  op_lh = 33 << OpcodeShift,
+  op_lwl = 34 << OpcodeShift,
+  op_lw = 35 << OpcodeShift,
+  op_lbu = 36 << OpcodeShift,
+  op_lhu = 37 << OpcodeShift,
+  op_lwr = 38 << OpcodeShift,
+  op_lwu = 39 << OpcodeShift,
+  op_sb = 40 << OpcodeShift,
+  op_sh = 41 << OpcodeShift,
+  op_swl = 42 << OpcodeShift,
+  op_sw = 43 << OpcodeShift,
+  op_sdl = 44 << OpcodeShift,
+  op_sdr = 45 << OpcodeShift,
+  op_swr = 46 << OpcodeShift,
+
+  op_ll = 48 << OpcodeShift,
+  op_lwc1 = 49 << OpcodeShift,
+  op_lwc2 = 50 << OpcodeShift,
+  op_lld = 52 << OpcodeShift,
+  op_ldc1 = 53 << OpcodeShift,
+  op_ldc2 = 54 << OpcodeShift,
+  op_ld = 55 << OpcodeShift,
+
+  op_sc = 56 << OpcodeShift,
+  op_swc1 = 57 << OpcodeShift,
+  op_swc2 = 58 << OpcodeShift,
+  op_scd = 60 << OpcodeShift,
+  op_sdc1 = 61 << OpcodeShift,
+  op_sdc2 = 62 << OpcodeShift,
+  op_sd = 63 << OpcodeShift,
+};
+
// Values for the RS field (bits 25..21), pre-shifted. For op_cop1
// instructions this field selects the coprocessor sub-operation or the
// floating-point format rather than a source register, which is why
// several enumerators share a numeric value.
enum RSField {
  rs_zero = 0 << RSShift,
  // cop1 encoding of RS field.
  rs_mfc1 = 0 << RSShift,
  rs_one = 1 << RSShift,
  rs_dmfc1 = 1 << RSShift,
  rs_cfc1 = 2 << RSShift,
  rs_mfhc1 = 3 << RSShift,
  rs_mtc1 = 4 << RSShift,
  rs_dmtc1 = 5 << RSShift,
  rs_ctc1 = 6 << RSShift,
  rs_mthc1 = 7 << RSShift,
  rs_bc1 = 8 << RSShift,
  rs_f = 0x9 << RSShift,
  rs_t = 0xd << RSShift,
  // NOTE(review): rs_s_r6/rs_d_r6 overlap rs_w/rs_l numerically; they are
  // presumably disambiguated by the MIPSR6 build configuration — confirm.
  rs_s_r6 = 20 << RSShift,
  rs_d_r6 = 21 << RSShift,
  rs_s = 16 << RSShift,
  rs_d = 17 << RSShift,
  rs_w = 20 << RSShift,
  rs_l = 21 << RSShift,
  rs_ps = 22 << RSShift
};
+
// Values for the RT field (bits 20..16), pre-shifted. For op_regimm
// instructions this field selects the branch variant rather than a
// target register.
enum RTField {
  rt_zero = 0 << RTShift,
  // regimm encoding of RT field.
  rt_bltz = 0 << RTShift,
  rt_bgez = 1 << RTShift,
  rt_bltzal = 16 << RTShift,
  rt_bgezal = 17 << RTShift
};
+
// Values for the function field (bits 5..0). The meaning of a given value
// depends on the major opcode (special/special2/special3/cop1), so many
// enumerators intentionally share numeric values. NOTE(review): pairs like
// ff_mult/ff_muh and ff_div/ff_mod are numerically equal; presumably the
// emitter distinguishes them via another field (e.g. SA) on R6 — confirm
// against the emitting code.
enum FunctionField {
  // special encoding of function field.
  ff_sll = 0,
  ff_movci = 1,
  ff_srl = 2,
  ff_sra = 3,
  ff_sllv = 4,
  ff_srlv = 6,
  ff_srav = 7,

  ff_jr = 8,
  ff_jalr = 9,
  ff_movz = 10,
  ff_movn = 11,
  ff_break = 13,
  ff_sync = 15,

  ff_mfhi = 16,
  ff_mflo = 18,

  ff_dsllv = 20,
  ff_dsrlv = 22,
  ff_dsrav = 23,

  ff_mult = 24,
  ff_multu = 25,

  // R6 multiply variants (alias the pre-R6 values above).
  ff_mulu = 25,
  ff_muh = 24,
  ff_muhu = 25,
  ff_dmul = 28,
  ff_dmulu = 29,
  ff_dmuh = 28,
  ff_dmuhu = 29,

  ff_div = 26,
  ff_mod = 26,
  ff_divu = 27,
  ff_modu = 27,
  ff_dmult = 28,
  ff_dmultu = 29,
  ff_ddiv = 30,
  ff_dmod = 30,
  ff_ddivu = 31,
  ff_dmodu = 31,

  ff_add = 32,
  ff_addu = 33,
  ff_sub = 34,
  ff_subu = 35,
  ff_and = 36,
  ff_or = 37,
  ff_xor = 38,
  ff_nor = 39,

  ff_slt = 42,
  ff_sltu = 43,
  ff_dadd = 44,
  ff_daddu = 45,
  ff_dsub = 46,
  ff_dsubu = 47,

  // Conditional traps.
  ff_tge = 48,
  ff_tgeu = 49,
  ff_tlt = 50,
  ff_tltu = 51,
  ff_teq = 52,
  ff_seleqz = 53,
  ff_tne = 54,
  ff_selnez = 55,
  ff_dsll = 56,
  ff_dsrl = 58,
  ff_dsra = 59,
  ff_dsll32 = 60,
  ff_dsrl32 = 62,
  ff_dsra32 = 63,

  // special2 encoding of function field.
  ff_madd = 0,
  ff_maddu = 1,
#ifdef MIPSR6
  ff_clz = 16,
  ff_dclz = 18,
  ff_mul = 24,
#else
  ff_mul = 2,
  ff_clz = 32,
  ff_dclz = 36,
#endif
  ff_clo = 33,

  // special3 encoding of function field.
  ff_ext = 0,
  ff_dextm = 1,
  ff_dextu = 2,
  ff_dext = 3,
  ff_ins = 4,
  ff_dinsm = 5,
  ff_dinsu = 6,
  ff_dins = 7,
  ff_bshfl = 32,
  ff_dbshfl = 36,
  ff_sc = 38,
  ff_scd = 39,
  ff_ll = 54,
  ff_lld = 55,

  // cop1 encoding of function field.
  ff_add_fmt = 0,
  ff_sub_fmt = 1,
  ff_mul_fmt = 2,
  ff_div_fmt = 3,
  ff_sqrt_fmt = 4,
  ff_abs_fmt = 5,
  ff_mov_fmt = 6,
  ff_neg_fmt = 7,

  // FP -> 64-bit integer conversions with explicit rounding.
  ff_round_l_fmt = 8,
  ff_trunc_l_fmt = 9,
  ff_ceil_l_fmt = 10,
  ff_floor_l_fmt = 11,

  // FP -> 32-bit integer conversions with explicit rounding.
  ff_round_w_fmt = 12,
  ff_trunc_w_fmt = 13,
  ff_ceil_w_fmt = 14,
  ff_floor_w_fmt = 15,

  ff_movf_fmt = 17,
  ff_movz_fmt = 18,
  ff_movn_fmt = 19,

  ff_min = 28,
  ff_max = 30,

  // FP format conversions.
  ff_cvt_s_fmt = 32,
  ff_cvt_d_fmt = 33,
  ff_cvt_w_fmt = 36,
  ff_cvt_l_fmt = 37,
  ff_cvt_ps_s = 38,

  // FP comparisons; R6 uses a different numbering than pre-R6.
#ifdef MIPSR6
  ff_c_f_fmt = 0,
  ff_c_un_fmt = 1,
  ff_c_eq_fmt = 2,
  ff_c_ueq_fmt = 3,
  ff_c_olt_fmt = 4,
  ff_c_ult_fmt = 5,
  ff_c_ole_fmt = 6,
  ff_c_ule_fmt = 7,
#else
  ff_c_f_fmt = 48,
  ff_c_un_fmt = 49,
  ff_c_eq_fmt = 50,
  ff_c_ueq_fmt = 51,
  ff_c_olt_fmt = 52,
  ff_c_ult_fmt = 53,
  ff_c_ole_fmt = 54,
  ff_c_ule_fmt = 55,
#endif

  ff_madd_s = 32,
  ff_madd_d = 33,

  // Loongson encoding of function field.
  ff_gsxbx = 0,
  ff_gsxhx = 1,
  ff_gsxwx = 2,
  ff_gsxdx = 3,
  ff_gsxwlc1 = 4,
  ff_gsxwrc1 = 5,
  ff_gsxdlc1 = 6,
  ff_gsxdrc1 = 7,
  ff_gsxwxc1 = 6,
  ff_gsxdxc1 = 7,
  ff_gsxq = 0x20,
  ff_gsxqc1 = 0x8020,

  ff_null = 0
};
+
class Operand;

// A BOffImm16 is a 16 bit immediate that is used for branches.
// The stored value is the branch offset in words, relative to the
// instruction after the branch: encode() yields ((offset - 4) >> 2)
// masked to 16 bits, and decode() reverses that.
class BOffImm16 {
  uint32_t data;

 public:
  // Raw 16-bit field value, ready to OR into an instruction word.
  uint32_t encode() {
    MOZ_ASSERT(!isInvalid());
    return data;
  }
  // Recover the byte offset: shift left 18 then arithmetic-shift right 16
  // to sign-extend the 16-bit word count and multiply by 4, then re-add
  // the 4-byte pipeline adjustment removed by the constructor.
  int32_t decode() {
    MOZ_ASSERT(!isInvalid());
    return (int32_t(data << 18) >> 16) + 4;
  }

  // |offset| is a byte offset; it must be word-aligned and in range.
  explicit BOffImm16(int offset) : data((offset - 4) >> 2 & Imm16Mask) {
    MOZ_ASSERT((offset & 0x3) == 0);
    MOZ_ASSERT(IsInRange(offset));
  }
  // Is |offset| representable? The shift of INT16_MIN is done in unsigned
  // arithmetic to avoid left-shifting a negative value.
  static bool IsInRange(int offset) {
    if ((offset - 4) < int(unsigned(INT16_MIN) << 2)) {
      return false;
    }
    if ((offset - 4) > (INT16_MAX << 2)) {
      return false;
    }
    return true;
  }
  // Sentinel: bit 17 set, so it can never collide with a valid 16-bit field.
  static const uint32_t INVALID = 0x00020000;
  BOffImm16() : data(INVALID) {}

  bool isInvalid() { return data == INVALID; }
  // The instruction this branch targets, relative to |src|.
  Instruction* getDest(Instruction* src) const;

  // Extract the offset field from an existing branch instruction.
  BOffImm16(InstImm inst);
};
+
// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps.
// Stored as a word count relative to the instruction after the jump,
// mirroring BOffImm16 but with a 26-bit field.
class JOffImm26 {
  uint32_t data;

 public:
  // Raw 26-bit field value, ready to OR into an instruction word.
  uint32_t encode() {
    MOZ_ASSERT(!isInvalid());
    return data;
  }
  // Recover the byte offset: shift left 8 then arithmetic-shift right 6 to
  // sign-extend the 26-bit word count and multiply by 4, then re-add the
  // 4-byte pipeline adjustment removed by the constructor.
  int32_t decode() {
    MOZ_ASSERT(!isInvalid());
    return (int32_t(data << 8) >> 6) + 4;
  }

  // |offset| is a byte offset; it must be word-aligned and in range.
  explicit JOffImm26(int offset) : data((offset - 4) >> 2 & Imm26Mask) {
    MOZ_ASSERT((offset & 0x3) == 0);
    MOZ_ASSERT(IsInRange(offset));
  }
  // Representable range is [-2^29, 2^29 - 4] after the -4 adjustment.
  static bool IsInRange(int offset) {
    if ((offset - 4) < -536870912) {
      return false;
    }
    if ((offset - 4) > 536870908) {
      return false;
    }
    return true;
  }
  // Sentinel: bit 29 set, outside any valid 26-bit field value.
  static const uint32_t INVALID = 0x20000000;
  JOffImm26() : data(INVALID) {}

  bool isInvalid() { return data == INVALID; }
  // The instruction this jump targets, relative to |src|.
  Instruction* getDest(Instruction* src);
};
+
+class Imm16 {
+ uint16_t value;
+
+ public:
+ Imm16();
+ Imm16(uint32_t imm) : value(imm) {}
+ uint32_t encode() { return value; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT16_MIN && imm <= INT16_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT16_MAX; }
+ static Imm16 Lower(Imm32 imm) { return Imm16(imm.value & 0xffff); }
+ static Imm16 Upper(Imm32 imm) { return Imm16((imm.value >> 16) & 0xffff); }
+};
+
+class Imm8 {
+ uint8_t value;
+
+ public:
+ Imm8();
+ Imm8(uint32_t imm) : value(imm) {}
+ uint32_t encode(uint32_t shift) { return value << shift; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT8_MIN && imm <= INT8_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT8_MAX; }
+ static Imm8 Lower(Imm16 imm) { return Imm8(imm.decodeSigned() & 0xff); }
+ static Imm8 Upper(Imm16 imm) {
+ return Imm8((imm.decodeSigned() >> 8) & 0xff);
+ }
+};
+
// A 13-bit immediate used by the Loongson quad-word load/store
// instructions. The low four bits of the offset must be zero; only the
// upper nine (signed) bits are encoded.
class GSImm13 {
  uint16_t value;

 public:
  GSImm13();
  // Drop the low four bits; the field cannot represent them.
  GSImm13(uint32_t imm) : value(imm & ~0xf) {}
  // The 9-bit field (offset / 16) placed at bit position |shift|.
  uint32_t encode(uint32_t shift) { return ((value >> 4) & 0x1ff) << shift; }
  int32_t decodeSigned() { return value; }
  uint32_t decodeUnsigned() { return value; }
  // The representable range is [-256 << 4, 255 << 4], i.e. [-4096, 4080].
  static bool IsInRange(int32_t imm) { return imm >= -4096 && imm <= 4080; }
};
+
+class Operand {
+ public:
+ enum Tag { REG, FREG, MEM };
+
+ private:
+ Tag tag : 3;
+ uint32_t reg : 5;
+ int32_t offset;
+
+ public:
+ Operand(Register reg_) : tag(REG), reg(reg_.code()) {}
+
+ Operand(FloatRegister freg) : tag(FREG), reg(freg.code()) {}
+
+ Operand(Register base, Imm32 off)
+ : tag(MEM), reg(base.code()), offset(off.value) {}
+
+ Operand(Register base, int32_t off)
+ : tag(MEM), reg(base.code()), offset(off) {}
+
+ Operand(const Address& addr)
+ : tag(MEM), reg(addr.base.code()), offset(addr.offset) {}
+
+ Tag getTag() const { return tag; }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag == REG);
+ return Register::FromCode(reg);
+ }
+
+ FloatRegister toFReg() const {
+ MOZ_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(reg);
+ }
+
+ void toAddr(Register* r, Imm32* dest) const {
+ MOZ_ASSERT(tag == MEM);
+ *r = Register::FromCode(reg);
+ *dest = Imm32(offset);
+ }
+ Address toAddress() const {
+ MOZ_ASSERT(tag == MEM);
+ return Address(Register::FromCode(reg), offset);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag == MEM);
+ return offset;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag == MEM);
+ return reg;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag == MEM);
+ return Register::FromCode(reg);
+ }
+};
+
// On this target the "first" half of a 64-bit immediate is its low 32
// bits and the "second" half its high 32 bits.
inline Imm32 Imm64::firstHalf() const { return low(); }

inline Imm32 Imm64::secondHalf() const { return hi(); }

// Size, in bytes, of one slice of the assembler buffer.
static constexpr int32_t SliceSize = 1024;
typedef js::jit::AssemblerBuffer<SliceSize, Instruction> MIPSBuffer;
+
// An assembler buffer that can additionally copy its contents out to a
// destination code buffer and append pre-encoded instruction bytes.
class MIPSBufferWithExecutableCopy : public MIPSBuffer {
 public:
  // Copy every slice, in order, into |buffer|. Silently a no-op on OOM;
  // callers are expected to check oom() themselves.
  void executableCopy(uint8_t* buffer) {
    if (this->oom()) {
      return;
    }

    for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
      memcpy(buffer, &cur->instructions, cur->length());
      buffer += cur->length();
    }
  }

  // Append |numBytes| of already-encoded code, splitting the copy into
  // SliceSize-sized chunks so no single putBytes request exceeds one
  // slice. Returns false if the buffer is (or goes) OOM.
  bool appendRawCode(const uint8_t* code, size_t numBytes) {
    if (this->oom()) {
      return false;
    }
    while (numBytes > SliceSize) {
      this->putBytes(SliceSize, code);
      numBytes -= SliceSize;
      code += SliceSize;
    }
    this->putBytes(numBytes, code);
    return !this->oom();
  }
};
+
// Shared assembler for the MIPS32/MIPS64 back ends. Emits raw instruction
// words into m_buffer and records the jump/relocation metadata needed when
// the generated code is later copied to its final location. Each as_*
// method emits exactly one machine instruction; their bodies live in the
// corresponding .cpp file.
class AssemblerMIPSShared : public AssemblerShared {
 public:
  // Integer condition codes used by the platform-independent JIT code.
  enum Condition {
    Equal,
    NotEqual,
    Above,
    AboveOrEqual,
    Below,
    BelowOrEqual,
    GreaterThan,
    GreaterThanOrEqual,
    LessThan,
    LessThanOrEqual,
    Overflow,
    CarrySet,
    CarryClear,
    Signed,
    NotSigned,
    Zero,
    NonZero,
    Always,
  };

  enum DoubleCondition {
    // These conditions will only evaluate to true if the comparison is ordered
    // - i.e. neither operand is NaN.
    DoubleOrdered,
    DoubleEqual,
    DoubleNotEqual,
    DoubleGreaterThan,
    DoubleGreaterThanOrEqual,
    DoubleLessThan,
    DoubleLessThanOrEqual,
    // If either operand is NaN, these conditions always evaluate to true.
    DoubleUnordered,
    DoubleEqualOrUnordered,
    DoubleNotEqualOrUnordered,
    DoubleGreaterThanOrUnordered,
    DoubleGreaterThanOrEqualOrUnordered,
    DoubleLessThanOrUnordered,
    DoubleLessThanOrEqualOrUnordered
  };

  // The eight FP condition-code bits selectable by c.cond.fmt / bc1.
  enum FPConditionBit { FCC0 = 0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };

  // FPU control register numbers for ctc1/cfc1.
  enum FPControl {
    FIR = 0,
    UFR,
    UNFR = 4,
    FCCR = 25,
    FEXR,
    FENR = 28,
    FCSR = 31
  };

  // Exception-cause bit positions within the FCSR register.
  enum FCSRBit { CauseI = 12, CauseU, CauseO, CauseZ, CauseV };

  enum FloatFormat { SingleFloat, DoubleFloat };

  enum JumpOrCall { BranchIsJump, BranchIsCall };

  enum FloatTestKind { TestForTrue, TestForFalse };

  // :( this should be protected, but since CodeGenerator
  // wants to use it, It needs to go out here :(

  BufferOffset nextOffset() { return m_buffer.nextOffset(); }

 protected:
  Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }

  // Structure for fixing up pc-relative loads/jumps when the machine code
  // gets moved (executable copy, gc, etc.).
  struct RelativePatch {
    // the offset within the code buffer where the value is loaded that
    // we want to fix-up
    BufferOffset offset;
    void* target;
    RelocationKind kind;

    RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
        : offset(offset), target(target), kind(kind) {}
  };

  js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;

  CompactBufferWriter jumpRelocations_;
  CompactBufferWriter dataRelocations_;

  // The instruction stream itself.
  MIPSBufferWithExecutableCopy m_buffer;

#ifdef JS_JITSPEW
  Sprinter* printer;
#endif

 public:
  AssemblerMIPSShared()
      : m_buffer(),
#ifdef JS_JITSPEW
        printer(nullptr),
#endif
        isFinished(false) {
  }

  static Condition InvertCondition(Condition cond);
  static DoubleCondition InvertCondition(DoubleCondition cond);

  // As opposed to x86/x64 version, the data relocation has to be executed
  // before to recover the pointer, and not after.
  void writeDataRelocation(ImmGCPtr ptr) {
    // Raw GC pointer relocations and Value relocations both end up in
    // TraceOneDataRelocation.
    if (ptr.value) {
      if (gc::IsInsideNursery(ptr.value)) {
        embedsNurseryPointers_ = true;
      }
      dataRelocations_.writeUnsigned(nextOffset().getOffset());
    }
  }

  // Debug-only check that no GC-managed pointers have been embedded.
  void assertNoGCThings() const {
#ifdef DEBUG
    MOZ_ASSERT(dataRelocations_.length() == 0);
    for (auto& j : jumps_) {
      MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
    }
#endif
  }

 public:
  void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
  bool oom() const;

  void setPrinter(Sprinter* sp) {
#ifdef JS_JITSPEW
    printer = sp;
#endif
  }

#ifdef JS_JITSPEW
  inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
    if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
      va_list va;
      va_start(va, fmt);
      spew(fmt, va);
      va_end(va);
    }
  }

  void decodeBranchInstAndSpew(InstImm branch);
#else
  MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
#endif

#ifdef JS_JITSPEW
  MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
    // Buffer to hold the formatted string. Note that this may contain
    // '%' characters, so do not pass it directly to printf functions.
    char buf[200];

    int i = VsprintfLiteral(buf, fmt, va);
    if (i > -1) {
      if (printer) {
        printer->printf("%s\n", buf);
      }
      js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
    }
  }
#endif

  Register getStackPointer() const { return StackPointer; }

 protected:
  bool isFinished;

 public:
  void finish();
  bool appendRawCode(const uint8_t* code, size_t numBytes);
  bool reserve(size_t size);
  bool swapBuffer(wasm::Bytes& bytes);
  void executableCopy(void* buffer);
  void copyJumpRelocationTable(uint8_t* dest);
  void copyDataRelocationTable(uint8_t* dest);

  // Size of the instruction stream, in bytes.
  size_t size() const;
  // Size of the jump relocation table, in bytes.
  size_t jumpRelocationTableBytes() const;
  size_t dataRelocationTableBytes() const;

  // Size of the data table, in bytes.
  size_t bytesNeeded() const;

  // Write a blob of binary into the instruction stream *OR*
  // into a destination address. If dest is nullptr (the default), then the
  // instruction gets written into the instruction stream. If dest is not null
  // it is interpreted as a pointer to the location that we want the
  // instruction to be written.
  BufferOffset writeInst(uint32_t x, uint32_t* dest = nullptr);
  // A static variant for the cases where we don't want to have an assembler
  // object at all. Normally, you would use the dummy (nullptr) object.
  static void WriteInstStatic(uint32_t x, uint32_t* dest);

 public:
  BufferOffset haltingAlign(int alignment);
  BufferOffset nopAlign(int alignment);
  BufferOffset as_nop();

  // Branch and jump instructions
  BufferOffset as_bal(BOffImm16 off);
  BufferOffset as_b(BOffImm16 off);

  InstImm getBranchCode(JumpOrCall jumpOrCall);
  InstImm getBranchCode(Register s, Register t, Condition c);
  InstImm getBranchCode(Register s, Condition c);
  InstImm getBranchCode(FloatTestKind testKind, FPConditionBit fcc);

  BufferOffset as_j(JOffImm26 off);
  BufferOffset as_jal(JOffImm26 off);

  BufferOffset as_jr(Register rs);
  BufferOffset as_jalr(Register rs);

  // Arithmetic instructions
  BufferOffset as_addu(Register rd, Register rs, Register rt);
  BufferOffset as_addiu(Register rd, Register rs, int32_t j);
  BufferOffset as_daddu(Register rd, Register rs, Register rt);
  BufferOffset as_daddiu(Register rd, Register rs, int32_t j);
  BufferOffset as_subu(Register rd, Register rs, Register rt);
  BufferOffset as_dsubu(Register rd, Register rs, Register rt);
  BufferOffset as_mult(Register rs, Register rt);
  BufferOffset as_multu(Register rs, Register rt);
  BufferOffset as_dmult(Register rs, Register rt);
  BufferOffset as_dmultu(Register rs, Register rt);
  BufferOffset as_div(Register rs, Register rt);
  BufferOffset as_divu(Register rs, Register rt);
  BufferOffset as_mul(Register rd, Register rs, Register rt);
  BufferOffset as_madd(Register rs, Register rt);
  BufferOffset as_maddu(Register rs, Register rt);
  BufferOffset as_ddiv(Register rs, Register rt);
  BufferOffset as_ddivu(Register rs, Register rt);

  // Three-operand R6-style multiply/divide variants.
  BufferOffset as_muh(Register rd, Register rs, Register rt);
  BufferOffset as_muhu(Register rd, Register rs, Register rt);
  BufferOffset as_mulu(Register rd, Register rs, Register rt);
  BufferOffset as_dmuh(Register rd, Register rs, Register rt);
  BufferOffset as_dmuhu(Register rd, Register rs, Register rt);
  BufferOffset as_dmul(Register rd, Register rs, Register rt);
  BufferOffset as_dmulu(Register rd, Register rs, Register rt);
  BufferOffset as_div(Register rd, Register rs, Register rt);
  BufferOffset as_divu(Register rd, Register rs, Register rt);
  BufferOffset as_mod(Register rd, Register rs, Register rt);
  BufferOffset as_modu(Register rd, Register rs, Register rt);
  BufferOffset as_ddiv(Register rd, Register rs, Register rt);
  BufferOffset as_ddivu(Register rd, Register rs, Register rt);
  BufferOffset as_dmod(Register rd, Register rs, Register rt);
  BufferOffset as_dmodu(Register rd, Register rs, Register rt);

  // Logical instructions
  BufferOffset as_and(Register rd, Register rs, Register rt);
  BufferOffset as_or(Register rd, Register rs, Register rt);
  BufferOffset as_xor(Register rd, Register rs, Register rt);
  BufferOffset as_nor(Register rd, Register rs, Register rt);

  BufferOffset as_andi(Register rd, Register rs, int32_t j);
  BufferOffset as_ori(Register rd, Register rs, int32_t j);
  BufferOffset as_xori(Register rd, Register rs, int32_t j);
  BufferOffset as_lui(Register rd, int32_t j);

  // Shift instructions
  // as_sll(zero, zero, x) instructions are reserved as nop
  BufferOffset as_sll(Register rd, Register rt, uint16_t sa);
  BufferOffset as_dsll(Register rd, Register rt, uint16_t sa);
  BufferOffset as_dsll32(Register rd, Register rt, uint16_t sa);
  BufferOffset as_sllv(Register rd, Register rt, Register rs);
  BufferOffset as_dsllv(Register rd, Register rt, Register rs);
  BufferOffset as_srl(Register rd, Register rt, uint16_t sa);
  BufferOffset as_dsrl(Register rd, Register rt, uint16_t sa);
  BufferOffset as_dsrl32(Register rd, Register rt, uint16_t sa);
  BufferOffset as_srlv(Register rd, Register rt, Register rs);
  BufferOffset as_dsrlv(Register rd, Register rt, Register rs);
  BufferOffset as_sra(Register rd, Register rt, uint16_t sa);
  BufferOffset as_dsra(Register rd, Register rt, uint16_t sa);
  BufferOffset as_dsra32(Register rd, Register rt, uint16_t sa);
  BufferOffset as_srav(Register rd, Register rt, Register rs);
  BufferOffset as_rotr(Register rd, Register rt, uint16_t sa);
  BufferOffset as_rotrv(Register rd, Register rt, Register rs);
  BufferOffset as_dsrav(Register rd, Register rt, Register rs);
  BufferOffset as_drotr(Register rd, Register rt, uint16_t sa);
  BufferOffset as_drotr32(Register rd, Register rt, uint16_t sa);
  BufferOffset as_drotrv(Register rd, Register rt, Register rs);

  // Load and store instructions
  BufferOffset as_lb(Register rd, Register rs, int16_t off);
  BufferOffset as_lbu(Register rd, Register rs, int16_t off);
  BufferOffset as_lh(Register rd, Register rs, int16_t off);
  BufferOffset as_lhu(Register rd, Register rs, int16_t off);
  BufferOffset as_lw(Register rd, Register rs, int16_t off);
  BufferOffset as_lwu(Register rd, Register rs, int16_t off);
  BufferOffset as_lwl(Register rd, Register rs, int16_t off);
  BufferOffset as_lwr(Register rd, Register rs, int16_t off);
  BufferOffset as_ll(Register rd, Register rs, int16_t off);
  BufferOffset as_lld(Register rd, Register rs, int16_t off);
  BufferOffset as_ld(Register rd, Register rs, int16_t off);
  BufferOffset as_ldl(Register rd, Register rs, int16_t off);
  BufferOffset as_ldr(Register rd, Register rs, int16_t off);
  BufferOffset as_sb(Register rd, Register rs, int16_t off);
  BufferOffset as_sh(Register rd, Register rs, int16_t off);
  BufferOffset as_sw(Register rd, Register rs, int16_t off);
  BufferOffset as_swl(Register rd, Register rs, int16_t off);
  BufferOffset as_swr(Register rd, Register rs, int16_t off);
  BufferOffset as_sc(Register rd, Register rs, int16_t off);
  BufferOffset as_scd(Register rd, Register rs, int16_t off);
  BufferOffset as_sd(Register rd, Register rs, int16_t off);
  BufferOffset as_sdl(Register rd, Register rs, int16_t off);
  BufferOffset as_sdr(Register rd, Register rs, int16_t off);

  // Loongson-specific load and store instructions
  BufferOffset as_gslbx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gssbx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gslhx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gsshx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gslwx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gsswx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gsldx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gssdx(Register rd, Register rs, Register ri, int16_t off);
  BufferOffset as_gslq(Register rh, Register rl, Register rs, int16_t off);
  BufferOffset as_gssq(Register rh, Register rl, Register rs, int16_t off);

  // Move from HI/LO register.
  BufferOffset as_mfhi(Register rd);
  BufferOffset as_mflo(Register rd);

  // Set on less than.
  BufferOffset as_slt(Register rd, Register rs, Register rt);
  BufferOffset as_sltu(Register rd, Register rs, Register rt);
  BufferOffset as_slti(Register rd, Register rs, int32_t j);
  BufferOffset as_sltiu(Register rd, Register rs, uint32_t j);

  // Conditional move.
  BufferOffset as_movz(Register rd, Register rs, Register rt);
  BufferOffset as_movn(Register rd, Register rs, Register rt);
  BufferOffset as_movt(Register rd, Register rs, uint16_t cc = 0);
  BufferOffset as_movf(Register rd, Register rs, uint16_t cc = 0);
  BufferOffset as_seleqz(Register rd, Register rs, Register rt);
  BufferOffset as_selnez(Register rd, Register rs, Register rt);

  // Bit twiddling.
  BufferOffset as_clz(Register rd, Register rs);
  BufferOffset as_dclz(Register rd, Register rs);
  BufferOffset as_wsbh(Register rd, Register rt);
  BufferOffset as_dsbh(Register rd, Register rt);
  BufferOffset as_dshd(Register rd, Register rt);
  BufferOffset as_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
  BufferOffset as_dins(Register rt, Register rs, uint16_t pos, uint16_t size);
  BufferOffset as_dinsm(Register rt, Register rs, uint16_t pos, uint16_t size);
  BufferOffset as_dinsu(Register rt, Register rs, uint16_t pos, uint16_t size);
  BufferOffset as_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
  BufferOffset as_dext(Register rt, Register rs, uint16_t pos, uint16_t size);
  BufferOffset as_dextm(Register rt, Register rs, uint16_t pos, uint16_t size);
  BufferOffset as_dextu(Register rt, Register rs, uint16_t pos, uint16_t size);

  // Sign extend
  BufferOffset as_seb(Register rd, Register rt);
  BufferOffset as_seh(Register rd, Register rt);

  // FP instructions

  BufferOffset as_ldc1(FloatRegister ft, Register base, int32_t off);
  BufferOffset as_sdc1(FloatRegister ft, Register base, int32_t off);

  BufferOffset as_lwc1(FloatRegister ft, Register base, int32_t off);
  BufferOffset as_swc1(FloatRegister ft, Register base, int32_t off);

  // Loongson-specific FP load and store instructions
  BufferOffset as_gsldl(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gsldr(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gssdl(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gssdr(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gslsl(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gslsr(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gsssl(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gsssr(FloatRegister fd, Register base, int32_t off);
  BufferOffset as_gslsx(FloatRegister fd, Register rs, Register ri,
                        int16_t off);
  BufferOffset as_gsssx(FloatRegister fd, Register rs, Register ri,
                        int16_t off);
  BufferOffset as_gsldx(FloatRegister fd, Register rs, Register ri,
                        int16_t off);
  BufferOffset as_gssdx(FloatRegister fd, Register rs, Register ri,
                        int16_t off);
  BufferOffset as_gslq(FloatRegister rh, FloatRegister rl, Register rs,
                       int16_t off);
  BufferOffset as_gssq(FloatRegister rh, FloatRegister rl, Register rs,
                       int16_t off);

  BufferOffset as_movs(FloatRegister fd, FloatRegister fs);
  BufferOffset as_movd(FloatRegister fd, FloatRegister fs);

  // Moves between FPU control registers and GPRs.
  BufferOffset as_ctc1(Register rt, FPControl fc);
  BufferOffset as_cfc1(Register rt, FPControl fc);

  BufferOffset as_mtc1(Register rt, FloatRegister fs);
  BufferOffset as_mfc1(Register rt, FloatRegister fs);

  BufferOffset as_mthc1(Register rt, FloatRegister fs);
  BufferOffset as_mfhc1(Register rt, FloatRegister fs);
  BufferOffset as_dmtc1(Register rt, FloatRegister fs);
  BufferOffset as_dmfc1(Register rt, FloatRegister fs);

 public:
  // FP convert instructions
  BufferOffset as_ceilws(FloatRegister fd, FloatRegister fs);
  BufferOffset as_floorws(FloatRegister fd, FloatRegister fs);
  BufferOffset as_roundws(FloatRegister fd, FloatRegister fs);
  BufferOffset as_truncws(FloatRegister fd, FloatRegister fs);
  BufferOffset as_truncls(FloatRegister fd, FloatRegister fs);

  BufferOffset as_ceilwd(FloatRegister fd, FloatRegister fs);
  BufferOffset as_floorwd(FloatRegister fd, FloatRegister fs);
  BufferOffset as_roundwd(FloatRegister fd, FloatRegister fs);
  BufferOffset as_truncwd(FloatRegister fd, FloatRegister fs);
  BufferOffset as_truncld(FloatRegister fd, FloatRegister fs);

  BufferOffset as_cvtdl(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtds(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtdw(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtld(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtls(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtsd(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtsl(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtsw(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtwd(FloatRegister fd, FloatRegister fs);
  BufferOffset as_cvtws(FloatRegister fd, FloatRegister fs);

  // FP arithmetic instructions
  BufferOffset as_adds(FloatRegister fd, FloatRegister fs, FloatRegister ft);
  BufferOffset as_addd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
  BufferOffset as_subs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
  BufferOffset as_subd(FloatRegister fd, FloatRegister fs, FloatRegister ft);

  BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
  BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
  BufferOffset as_negs(FloatRegister fd, FloatRegister fs);
  BufferOffset as_negd(FloatRegister fd, FloatRegister fs);

  BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);
  BufferOffset as_muld(FloatRegister fd, FloatRegister fs, FloatRegister ft);
  BufferOffset as_divs(FloatRegister fd, FloatRegister fs, FloatRegister ft);
  BufferOffset as_divd(FloatRegister fd, FloatRegister fs, FloatRegister ft);
  BufferOffset as_sqrts(FloatRegister fd, FloatRegister fs);
  BufferOffset as_sqrtd(FloatRegister fd, FloatRegister fs);

  BufferOffset as_max(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
                      FloatRegister ft);
  BufferOffset as_min(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
                      FloatRegister ft);

  // FP compare instructions
  BufferOffset as_cf(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                     FPConditionBit fcc = FCC0);
  BufferOffset as_cun(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                      FPConditionBit fcc = FCC0);
  BufferOffset as_ceq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                      FPConditionBit fcc = FCC0);
  BufferOffset as_cueq(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                       FPConditionBit fcc = FCC0);
  BufferOffset as_colt(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                       FPConditionBit fcc = FCC0);
  BufferOffset as_cult(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                       FPConditionBit fcc = FCC0);
  BufferOffset as_cole(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                       FPConditionBit fcc = FCC0);
  BufferOffset as_cule(FloatFormat fmt, FloatRegister fs, FloatRegister ft,
                       FPConditionBit fcc = FCC0);

  // FP conditional move.
  BufferOffset as_movt(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
                       FPConditionBit fcc = FCC0);
  BufferOffset as_movf(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
                       FPConditionBit fcc = FCC0);
  BufferOffset as_movz(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
                       Register rt);
  BufferOffset as_movn(FloatFormat fmt, FloatRegister fd, FloatRegister fs,
                       Register rt);

  // Conditional trap operations
  BufferOffset as_tge(Register rs, Register rt, uint32_t code = 0);
  BufferOffset as_tgeu(Register rs, Register rt, uint32_t code = 0);
  BufferOffset as_tlt(Register rs, Register rt, uint32_t code = 0);
  BufferOffset as_tltu(Register rs, Register rt, uint32_t code = 0);
  BufferOffset as_teq(Register rs, Register rt, uint32_t code = 0);
  BufferOffset as_tne(Register rs, Register rt, uint32_t code = 0);

  // label operations
  void bind(Label* label, BufferOffset boff = BufferOffset());
  virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
  void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
  uint32_t currentOffset() { return nextOffset().getOffset(); }
  void retarget(Label* label, Label* target);

  void call(Label* label);
  void call(void* target);

  void as_break(uint32_t code);
  void as_sync(uint32_t stype = 0);

 public:
  static bool SupportsFloatingPoint() {
#if (defined(__mips_hard_float) && !defined(__mips_single_float)) || \
    defined(JS_SIMULATOR_MIPS32) || defined(JS_SIMULATOR_MIPS64)
    return true;
#else
    return false;
#endif
  }
  static bool SupportsUnalignedAccesses() { return true; }
  static bool SupportsFastUnalignedFPAccesses() { return false; }

  static bool HasRoundInstruction(RoundingMode mode) { return false; }

 protected:
  InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
  // Record a jump whose target must be patched after the code moves; JIT
  // code targets additionally get a jump relocation entry.
  void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
    enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
    if (kind == RelocationKind::JITCODE) {
      jumpRelocations_.writeUnsigned(src.getOffset());
    }
  }

  // Record an intra-buffer long jump from |src| to |dst| as a code label.
  void addLongJump(BufferOffset src, BufferOffset dst) {
    CodeLabel cl;
    cl.patchAt()->bind(src.getOffset());
    cl.target()->bind(dst.getOffset());
    cl.setLinkMode(CodeLabel::JumpImmediate);
    addCodeLabel(std::move(cl));
  }

 public:
  void flushBuffer() {}

  void comment(const char* msg) { spew("; %s", msg); }

  static uint32_t NopSize() { return 4; }

  static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);

  // Round a stack-slot offset up to an even (double-aligned) slot.
  static uint32_t AlignDoubleArg(uint32_t offset) {
    return (offset + 1U) & ~1U;
  }

  static uint8_t* NextInstruction(uint8_t* instruction,
                                  uint32_t* count = nullptr);

  static void ToggleToJmp(CodeLocationLabel inst_);
  static void ToggleToCmp(CodeLocationLabel inst_);

  // Patch the 32-bit value materialized by a lui/ori instruction pair.
  static void UpdateLuiOriValue(Instruction* inst0, Instruction* inst1,
                                uint32_t value);

  void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
                                   const Disassembler::HeapAccess& heapAccess) {
    // Implement this if we implement a disassembler.
  }
};  // AssemblerMIPSShared
+
+// sll zero, zero, 0
+const uint32_t NopInst = 0x00000000;
+
+// An Instruction is a structure for both encoding and decoding any and all
+// MIPS instructions.
+class Instruction {
+ protected:
+ uint32_t data;
+
+ // Standard constructor
+ Instruction(uint32_t data_) : data(data_) {}
+
+ // You should never create an instruction directly. You should create a
+ // more specific instruction which will eventually call one of these
+ // constructors for you.
+ public:
+ uint32_t encode() const { return data; }
+
+ void makeNop() { data = NopInst; }
+
+ void setData(uint32_t data) { this->data = data; }
+
+ const Instruction& operator=(const Instruction& src) {
+ data = src.data;
+ return *this;
+ }
+
+ // Extract the one particular bit.
+ uint32_t extractBit(uint32_t bit) { return (encode() >> bit) & 1; }
+ // Extract a bit field out of the instruction
+ uint32_t extractBitField(uint32_t hi, uint32_t lo) {
+ return (encode() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+ // Since all MIPS instructions have opcode, the opcode
+ // extractor resides in the base class.
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ // Return the fields at their original place in the instruction encoding.
+ OpcodeField OpcodeFieldRaw() const {
+ return static_cast<OpcodeField>(encode() & OpcodeMask);
+ }
+
+ // Get the next instruction in the instruction stream.
+ // This does neat things like ignoreconstant pools and their guards.
+ Instruction* next();
+
+ // Sometimes, an api wants a uint32_t (or a pointer to it) rather than
+ // an instruction. raw() just coerces this into a pointer to a uint32_t
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
+
+// make sure that it is the right size
+static_assert(sizeof(Instruction) == 4,
+ "Size of Instruction class has to be 4 bytes.");
+
+class InstNOP : public Instruction {
+ public:
+ InstNOP() : Instruction(NopInst) {}
+};
+
+// Class for register type instructions.
+class InstReg : public Instruction {
+ public:
+ InstReg(OpcodeField op, Register rd, FunctionField ff)
+ : Instruction(op | RD(rd) | ff) {}
+ InstReg(OpcodeField op, Register rs, Register rt, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | ff) {}
+ InstReg(OpcodeField op, Register rs, Register rt, Register rd,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | ff) {}
+ InstReg(OpcodeField op, Register rs, Register rt, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, Register rs, RTField rt, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | rt | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, Register rs, uint32_t cc, Register rd, uint32_t sa,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | cc | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, uint32_t code, FunctionField ff)
+ : Instruction(op | code | ff) {}
+ // for float point
+ InstReg(OpcodeField op, RSField rs, Register rt, uint32_t fs)
+ : Instruction(op | rs | RT(rt) | FS(fs)) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, FloatRegister rd)
+ : Instruction(op | rs | RT(rt) | RD(rd)) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, FloatRegister rd,
+ uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(rd) | SA(sa) | ff) {}
+ InstReg(OpcodeField op, RSField rs, Register rt, FloatRegister fs,
+ FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(rt) | RD(fs) | SA(fd) | ff) {}
+ InstReg(OpcodeField op, RSField rs, FloatRegister ft, FloatRegister fs,
+ FloatRegister fd, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fs) | SA(fd) | ff) {}
+ InstReg(OpcodeField op, RSField rs, FloatRegister ft, FloatRegister fd,
+ uint32_t sa, FunctionField ff)
+ : Instruction(op | rs | RT(ft) | RD(fd) | SA(sa) | ff) {}
+
+ uint32_t extractRS() {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT() {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ uint32_t extractRD() {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractSA() {
+ return extractBitField(SAShift + SABits - 1, SAShift);
+ }
+ uint32_t extractFunctionField() {
+ return extractBitField(FunctionShift + FunctionBits - 1, FunctionShift);
+ }
+};
+
+// Class for branch, load and store instructions with immediate offset.
+class InstImm : public Instruction {
+ public:
+ void extractImm16(BOffImm16* dest);
+
+ InstImm(OpcodeField op, Register rs, Register rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode()) {}
+ InstImm(OpcodeField op, Register rs, RTField rt, BOffImm16 off)
+ : Instruction(op | RS(rs) | rt | off.encode()) {}
+ InstImm(OpcodeField op, RSField rs, uint32_t cc, BOffImm16 off)
+ : Instruction(op | rs | cc | off.encode()) {}
+ InstImm(OpcodeField op, Register rs, Register rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode()) {}
+ InstImm(uint32_t raw) : Instruction(raw) {}
+ // For floating-point loads and stores.
+ InstImm(OpcodeField op, Register rs, FloatRegister rt, Imm16 off)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode()) {}
+
+ uint32_t extractOpcode() {
+ return extractBitField(OpcodeShift + OpcodeBits - 1, OpcodeShift);
+ }
+ void setOpcode(OpcodeField op) { data = (data & ~OpcodeMask) | op; }
+ uint32_t extractRS() {
+ return extractBitField(RSShift + RSBits - 1, RSShift);
+ }
+ uint32_t extractRT() {
+ return extractBitField(RTShift + RTBits - 1, RTShift);
+ }
+ void setRT(RTField rt) { data = (data & ~RTMask) | rt; }
+ uint32_t extractImm16Value() {
+ return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+ void setBOffImm16(BOffImm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+ void setImm16(Imm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~Imm16Mask) | off.encode();
+ }
+};
+
+// Class for Jump type instructions.
+class InstJump : public Instruction {
+ public:
+ InstJump(OpcodeField op, JOffImm26 off) : Instruction(op | off.encode()) {}
+
+ uint32_t extractImm26Value() {
+ return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+};
+
+// Class for Loongson-specific instructions
+class InstGS : public Instruction {
+ public:
+ // For indexed loads and stores.
+ InstGS(OpcodeField op, Register rs, Register rt, Register rd, Imm8 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff) {}
+ InstGS(OpcodeField op, Register rs, FloatRegister rt, Register rd, Imm8 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RD(rd) | off.encode(3) | ff) {}
+ // For quad-word loads and stores.
+ InstGS(OpcodeField op, Register rs, Register rt, Register rz, GSImm13 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff) {}
+ InstGS(OpcodeField op, Register rs, FloatRegister rt, FloatRegister rz,
+ GSImm13 off, FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | RZ(rz) | off.encode(6) | ff) {}
+ InstGS(uint32_t raw) : Instruction(raw) {}
+ // For floating-point unaligned loads and stores.
+ InstGS(OpcodeField op, Register rs, FloatRegister rt, Imm8 off,
+ FunctionField ff)
+ : Instruction(op | RS(rs) | RT(rt) | off.encode(6) | ff) {}
+};
+
+inline bool IsUnaligned(const wasm::MemoryAccessDesc& access) {
+ if (!access.align()) {
+ return false;
+ }
+
+#ifdef JS_CODEGEN_MIPS32
+ if (access.type() == Scalar::Int64 && access.align() >= 4) {
+ return false;
+ }
+#endif
+
+ return access.align() < access.byteSize();
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Assembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/AtomicOperations-mips-shared.h b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
new file mode 100644
index 0000000000..5ef11fd8c2
--- /dev/null
+++ b/js/src/jit/mips-shared/AtomicOperations-mips-shared.h
@@ -0,0 +1,521 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* For documentation, see jit/AtomicOperations.h */
+
+// NOTE, MIPS32 unlike MIPS64 doesn't provide hardware support for lock-free
+// 64-bit atomics. We lie down below about 8-byte atomics being always lock-
+// free in order to support wasm jit. The 64-bit atomic for MIPS32 do not use
+// __atomic intrinsic and therefore do not relay on -latomic.
+// Access to a aspecific 64-bit variable in memory is protected by an
+// AddressLock whose instance is shared between jit and AtomicOperations.
+
+#ifndef jit_mips_shared_AtomicOperations_mips_shared_h
+#define jit_mips_shared_AtomicOperations_mips_shared_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Types.h"
+
+#include "builtin/AtomicsObject.h"
+#include "vm/Uint8Clamped.h"
+
+#if !defined(__clang__) && !defined(__GNUC__)
+# error "This file only for gcc-compatible compilers"
+#endif
+
+#if defined(JS_SIMULATOR_MIPS32) && !defined(__i386__)
+# error "The MIPS32 simulator atomics assume x86"
+#endif
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+struct AddressLock {
+ public:
+ void acquire();
+ void release();
+
+ private:
+ uint32_t spinlock;
+};
+
+static_assert(sizeof(AddressLock) == sizeof(uint32_t),
+ "AddressLock must be 4 bytes for it to be consumed by jit");
+
+// For now use a single global AddressLock.
+static AddressLock gAtomic64Lock;
+
+struct MOZ_RAII AddressGuard {
+ explicit AddressGuard(void* addr) { gAtomic64Lock.acquire(); }
+
+ ~AddressGuard() { gAtomic64Lock.release(); }
+};
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }
+
+inline bool js::jit::AtomicOperations::isLockfree8() {
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
+#if defined(JS_64BIT)
+ MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
+#endif
+ return true;
+}
+
+inline void js::jit::AtomicOperations::fenceSeqCst() {
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+}
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
+ return v;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::loadSeqCst(int64_t* addr) {
+ AddressGuard guard(addr);
+ return *addr;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::loadSeqCst(uint64_t* addr) {
+ AddressGuard guard(addr);
+ return *addr;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline void js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val) {
+ AddressGuard guard(addr);
+ *addr = val;
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ *addr = val;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
+ T newval) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
+ __ATOMIC_SEQ_CST);
+ return oldval;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::compareExchangeSeqCst(
+ int64_t* addr, int64_t oldval, int64_t newval) {
+ AddressGuard guard(addr);
+ int64_t val = *addr;
+ if (val == oldval) {
+ *addr = newval;
+ }
+ return val;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::compareExchangeSeqCst(
+ uint64_t* addr, uint64_t oldval, uint64_t newval) {
+ AddressGuard guard(addr);
+ uint64_t val = *addr;
+ if (val == oldval) {
+ *addr = newval;
+ }
+ return val;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchAddSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old + val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchAddSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old + val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchSubSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old - val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchSubSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old - val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchAndSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old & val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchAndSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old & val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchOrSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old | val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchOrSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old | val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::fetchXorSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = old ^ val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::fetchXorSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = old ^ val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ T v;
+ __atomic_load(addr, &v, __ATOMIC_RELAXED);
+ return v;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::loadSafeWhenRacy(int64_t* addr) {
+ return *addr;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::loadSafeWhenRacy(uint64_t* addr) {
+ return *addr;
+}
+
+#endif
+
+template <>
+inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
+ uint8_clamped* addr) {
+ uint8_t v;
+ __atomic_load(&addr->val, &v, __ATOMIC_RELAXED);
+ return uint8_clamped(v);
+}
+
+template <>
+inline float js::jit::AtomicOperations::loadSafeWhenRacy(float* addr) {
+ return *addr;
+}
+
+template <>
+inline double js::jit::AtomicOperations::loadSafeWhenRacy(double* addr) {
+ return *addr;
+}
+
+} // namespace jit
+} // namespace js
+
+template <typename T>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ __atomic_store(addr, &val, __ATOMIC_RELAXED);
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(int64_t* addr,
+ int64_t val) {
+ *addr = val;
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint64_t* addr,
+ uint64_t val) {
+ *addr = val;
+}
+
+#endif
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
+ uint8_clamped val) {
+ __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED);
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(float* addr,
+ float val) {
+ *addr = val;
+}
+
+template <>
+inline void js::jit::AtomicOperations::storeSafeWhenRacy(double* addr,
+ double val) {
+ *addr = val;
+}
+
+} // namespace jit
+} // namespace js
+
+inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
+ MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
+ ::memcpy(dest, src, nbytes);
+}
+
+inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
+ const void* src,
+ size_t nbytes) {
+ ::memmove(dest, src, nbytes);
+}
+
+template <typename T>
+inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
+ static_assert(sizeof(T) <= sizeof(void*),
+ "atomics supported up to pointer size only");
+ T v;
+ __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
+ return v;
+}
+
+namespace js {
+namespace jit {
+
+#if !defined(JS_64BIT)
+
+template <>
+inline int64_t js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr,
+ int64_t val) {
+ AddressGuard guard(addr);
+ int64_t old = *addr;
+ *addr = val;
+ return old;
+}
+
+template <>
+inline uint64_t js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr,
+ uint64_t val) {
+ AddressGuard guard(addr);
+ uint64_t old = *addr;
+ *addr = val;
+ return old;
+}
+
+#endif
+
+} // namespace jit
+} // namespace js
+
+#if !defined(JS_64BIT)
+
+inline void js::jit::AddressLock::acquire() {
+ uint32_t zero = 0;
+ uint32_t one = 1;
+ while (!__atomic_compare_exchange(&spinlock, &zero, &one, true,
+ __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+ zero = 0;
+ }
+}
+
+inline void js::jit::AddressLock::release() {
+ uint32_t zero = 0;
+ __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
+}
+
+#endif
+
+#endif // jit_mips_shared_AtomicOperations_mips_shared_h
diff --git a/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
new file mode 100644
index 0000000000..6e21edc0ba
--- /dev/null
+++ b/js/src/jit/mips-shared/BaselineIC-mips-shared.cpp
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/BaselineIC.h"
+#include "jit/SharedICHelpers.h"
+
+using namespace js;
+using namespace js::jit;
+
+namespace js {
+namespace jit {
+
+bool ICCompare_Double::Compiler::generateStubCode(MacroAssembler& masm) {
+ Label failure, isNaN;
+ masm.ensureDouble(R0, FloatReg0, &failure);
+ masm.ensureDouble(R1, FloatReg1, &failure);
+
+ Register dest = R0.scratchReg();
+
+ Assembler::DoubleCondition doubleCond = JSOpToDoubleCondition(op);
+
+ masm.ma_cmp_set_double(dest, FloatReg0, FloatReg1, doubleCond);
+
+ masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
+ EmitReturnFromIC(masm);
+
+ // Failure case - jump to next stub
+ masm.bind(&failure);
+ EmitStubGuardFailure(masm);
+ return true;
+}
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
new file mode 100644
index 0000000000..424ddab061
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -0,0 +1,2453 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/CodeGenerator-mips-shared.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "js/Conversions.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using JS::ToInt32;
+using mozilla::DebugOnly;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+// shared
+CodeGeneratorMIPSShared::CodeGeneratorMIPSShared(MIRGenerator* gen,
+ LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation& a) {
+ if (a.isGeneralReg()) {
+ return Operand(a.toGeneralReg()->reg());
+ }
+ if (a.isFloatReg()) {
+ return Operand(a.toFloatReg()->reg());
+ }
+ return Operand(ToAddress(a));
+}
+
+Operand CodeGeneratorMIPSShared::ToOperand(const LAllocation* a) {
+ return ToOperand(*a);
+}
+
+Operand CodeGeneratorMIPSShared::ToOperand(const LDefinition* def) {
+ return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+Operand CodeGeneratorMIPSShared::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToOperand(input.value());
+}
+#else
+Register64 CodeGeneratorMIPSShared::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToRegister64(input);
+}
+#endif
+
+void CodeGeneratorMIPSShared::branchToBlock(Assembler::FloatFormat fmt,
+ FloatRegister lhs,
+ FloatRegister rhs, MBasicBlock* mir,
+ Assembler::DoubleCondition cond) {
+ // Skip past trivial blocks.
+ Label* label = skipTrivialBlocks(mir)->lir()->label();
+ if (fmt == Assembler::DoubleFloat) {
+ masm.branchDouble(cond, lhs, rhs, label);
+ } else {
+ masm.branchFloat(cond, lhs, rhs, label);
+ }
+}
+
+void OutOfLineBailout::accept(CodeGeneratorMIPSShared* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ MCompare* mir = comp->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object ||
+ mir->compareType() == MCompare::Compare_Symbol ||
+ mir->compareType() == MCompare::Compare_UIntPtr ||
+ mir->compareType() == MCompare::Compare_WasmAnyRef) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+ masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
+ ToRegister(def));
+ } else {
+ masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+ return;
+ }
+#endif
+
+ if (right->isConstant()) {
+ masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ } else {
+ masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ MCompare* mir = comp->cmpMir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+
+#ifdef JS_CODEGEN_MIPS64
+ if (mir->compareType() == MCompare::Compare_Object ||
+ mir->compareType() == MCompare::Compare_Symbol ||
+ mir->compareType() == MCompare::Compare_UIntPtr ||
+ mir->compareType() == MCompare::Compare_WasmAnyRef) {
+ if (comp->right()->isConstant()) {
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+ emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.loadPtr(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond,
+ comp->ifTrue(), comp->ifFalse());
+ }
+ return;
+ }
+#endif
+
+ if (comp->right()->isConstant()) {
+ emitBranch(ToRegister(comp->left()), Imm32(ToInt32(comp->right())), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(ToRegister(comp->left()), ToRegister(comp->right()), cond,
+ comp->ifTrue(), comp->ifFalse());
+ } else {
+ masm.load32(ToAddress(comp->right()), ScratchRegister);
+ emitBranch(ToRegister(comp->left()), ScratchRegister, cond, comp->ifTrue(),
+ comp->ifFalse());
+ }
+}
+
+bool CodeGeneratorMIPSShared::generateOutOfLineCode() {
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+ // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
+ // We have to use 'ra' because generateBailoutTable will implicitly do
+ // the same.
+ masm.move32(Imm32(frameSize()), ra);
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+void CodeGeneratorMIPSShared::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool =
+ new (alloc()) OutOfLineBailout(snapshot, masm.framePushed());
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void CodeGeneratorMIPSShared::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxDouble(second, first, true);
+ } else {
+ masm.minDouble(second, first, true);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxFloat32(second, first, true);
+ } else {
+ masm.minFloat32(second, first, true);
+ }
+}
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_addu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_addu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+ // If there is no snapshot, we don't need to check for overflow
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_subu(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_subu(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+// 32-bit integer multiply. A constant rhs is strength-reduced where
+// possible (negate, move, shift, shift+add); fallible multiplies bail out
+// on overflow, and on a -0 result, which is not representable as an int32.
+void CodeGenerator::visitMulI(LMulI* ins) {
+  const LAllocation* lhs = ins->lhs();
+  const LAllocation* rhs = ins->rhs();
+  Register dest = ToRegister(ins->output());
+  MMul* mul = ins->mir();
+
+  MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+                !mul->canBeNegativeZero() && !mul->canOverflow());
+
+  if (rhs->isConstant()) {
+    int32_t constant = ToInt32(rhs);
+    Register src = ToRegister(lhs);
+
+    // Bailout on -0.0
+    // With constant == 0 any negative lhs gives -0; with constant < 0 only
+    // lhs == 0 does.
+    if (mul->canBeNegativeZero() && constant <= 0) {
+      Assembler::Condition cond =
+          (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+      bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
+    }
+
+    switch (constant) {
+      case -1:
+        if (mul->canOverflow()) {
+          // Negating INT32_MIN overflows int32; bail out first.
+          bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
+                       ins->snapshot());
+        }
+
+        masm.ma_negu(dest, src);
+        break;
+      case 0:
+        masm.move32(Imm32(0), dest);
+        break;
+      case 1:
+        masm.move32(src, dest);
+        break;
+      case 2:
+        // x * 2 is emitted as x + x so the add-overflow helper can be used.
+        if (mul->canOverflow()) {
+          Label mulTwoOverflow;
+          masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);
+
+          bailoutFrom(&mulTwoOverflow, ins->snapshot());
+        } else {
+          masm.as_addu(dest, src, src);
+        }
+        break;
+      default:
+        uint32_t shift = FloorLog2(constant);
+
+        if (!mul->canOverflow() && (constant > 0)) {
+          // If it cannot overflow, we can do lots of optimizations.
+          uint32_t rest = constant - (1 << shift);
+
+          // See if the constant has one bit set, meaning it can be
+          // encoded as a bitshift.
+          if ((1 << shift) == constant) {
+            masm.ma_sll(dest, src, Imm32(shift));
+            return;
+          }
+
+          // If the constant cannot be encoded as (1<<C1), see if it can
+          // be encoded as (1<<C1) | (1<<C2), which can be computed
+          // using an add and a shift.
+          uint32_t shift_rest = FloorLog2(rest);
+          // src != dest is required because src is read after dest is
+          // first written.
+          if (src != dest && (1u << shift_rest) == rest) {
+            masm.ma_sll(dest, src, Imm32(shift - shift_rest));
+            masm.add32(src, dest);
+            if (shift_rest != 0) {
+              masm.ma_sll(dest, dest, Imm32(shift_rest));
+            }
+            return;
+          }
+        }
+
+        if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+          // To stay on the safe side, only optimize things that are a
+          // power of 2.
+
+          if ((1 << shift) == constant) {
+            // dest = lhs * pow(2, shift)
+            masm.ma_sll(dest, src, Imm32(shift));
+            // At runtime, check (lhs == dest >> shift), if this does
+            // not hold, some bits were lost due to overflow, and the
+            // computation should be resumed as a double.
+            masm.ma_sra(ScratchRegister, dest, Imm32(shift));
+            bailoutCmp32(Assembler::NotEqual, src, ScratchRegister,
+                         ins->snapshot());
+            return;
+          }
+        }
+
+        // General constant multiply, with or without an overflow check.
+        if (mul->canOverflow()) {
+          Label mulConstOverflow;
+          masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+                                    &mulConstOverflow);
+
+          bailoutFrom(&mulConstOverflow, ins->snapshot());
+        } else {
+          masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
+        }
+        break;
+    }
+  } else {
+    // Register * register path.
+    Label multRegOverflow;
+
+    if (mul->canOverflow()) {
+      masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
+                                &multRegOverflow);
+      bailoutFrom(&multRegOverflow, ins->snapshot());
+    } else {
+      masm.as_mul(dest, ToRegister(lhs), ToRegister(rhs));
+    }
+
+    if (mul->canBeNegativeZero()) {
+      Label done;
+      masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+      // Result is -0 if lhs or rhs is negative.
+      // In that case result must be double value so bailout
+      Register scratch = SecondScratchReg;
+      masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
+      bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());
+
+      masm.bind(&done);
+    }
+  }
+}
+
+// 64-bit integer multiply, in place in the lhs register. Small and
+// power-of-two-related constants are strength-reduced; everything else goes
+// through the generic mul64 helper.
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+  const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+  const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+  const Register64 output = ToOutRegister64(lir);
+  MOZ_ASSERT(ToRegister64(lhs) == output);
+
+  if (IsConstant(rhs)) {
+    int64_t constant = ToInt64(rhs);
+    switch (constant) {
+      case -1:
+        masm.neg64(ToRegister64(lhs));
+        return;
+      case 0:
+        masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+        return;
+      case 1:
+        // nop
+        return;
+      case 2:
+        masm.add64(ToRegister64(lhs), ToRegister64(lhs));
+        return;
+      default:
+        if (constant > 0) {
+          // constant == 2^k - 1: x * constant == (x << k) - x.
+          if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(constant + 1))) {
+            ScratchRegisterScope scratch(masm);
+            Register64 scratch64(scratch);
+            masm.move64(ToRegister64(lhs), scratch64);
+            masm.lshift64(Imm32(FloorLog2(constant + 1)), output);
+            masm.sub64(scratch64, output);
+            return;
+          } else if (mozilla::IsPowerOfTwo(
+                         static_cast<uint64_t>(constant - 1))) {
+            // constant == 2^k + 1: x * constant == (x << k) + x.
+            ScratchRegisterScope scratch(masm);
+            Register64 scratch64(scratch);
+            masm.move64(ToRegister64(lhs), scratch64);
+            masm.lshift64(Imm32(FloorLog2(constant - 1u)), output);
+            masm.add64(scratch64, output);
+            return;
+          }
+          // Use shift if constant is power of 2.
+          int32_t shift = mozilla::FloorLog2(constant);
+          if (int64_t(1) << shift == constant) {
+            masm.lshift64(Imm32(shift), ToRegister64(lhs));
+            return;
+          }
+        }
+        // Generic constant multiply; |temp| is only needed on MIPS32.
+        Register temp = ToTempRegisterOrInvalid(lir->temp());
+        masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+    }
+  } else {
+    Register temp = ToTempRegisterOrInvalid(lir->temp());
+    masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+  }
+}
+
+// 32-bit signed division. The two hazardous inputs (rhs == 0 and
+// INT32_MIN / -1) are handled before the divide by trapping (wasm),
+// truncating to the JS |x/y|0| result, or bailing out, depending on the
+// MIR flags. A non-truncating divide also bails out on -0 and on a
+// non-zero remainder.
+void CodeGenerator::visitDivI(LDivI* ins) {
+  // Extract the registers from this instruction
+  Register lhs = ToRegister(ins->lhs());
+  Register rhs = ToRegister(ins->rhs());
+  Register dest = ToRegister(ins->output());
+  Register temp = ToRegister(ins->getTemp(0));
+  MDiv* mir = ins->mir();
+
+  Label done;
+
+  // Handle divide by zero.
+  if (mir->canBeDivideByZero()) {
+    if (mir->trapOnError()) {
+      Label nonZero;
+      masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+      masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+      masm.bind(&nonZero);
+    } else if (mir->canTruncateInfinities()) {
+      // Truncated division by zero is zero (Infinity|0 == 0)
+      Label notzero;
+      masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+      masm.move32(Imm32(0), dest);
+      masm.ma_b(&done, ShortJump);
+      masm.bind(&notzero);
+    } else {
+      MOZ_ASSERT(mir->fallible());
+      bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
+    }
+  }
+
+  // Handle an integer overflow exception from -2147483648 / -1.
+  if (mir->canBeNegativeOverflow()) {
+    Label notMinInt;
+    masm.move32(Imm32(INT32_MIN), temp);
+    masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+    masm.move32(Imm32(-1), temp);
+    if (mir->trapOnError()) {
+      Label ok;
+      masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
+      masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+      masm.bind(&ok);
+    } else if (mir->canTruncateOverflow()) {
+      // (-INT32_MIN)|0 == INT32_MIN
+      Label skip;
+      masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+      masm.move32(Imm32(INT32_MIN), dest);
+      masm.ma_b(&done, ShortJump);
+      masm.bind(&skip);
+    } else {
+      MOZ_ASSERT(mir->fallible());
+      bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
+    }
+    masm.bind(&notMinInt);
+  }
+
+  // Handle negative 0. (0/-Y)
+  // 0 / negative is -0, which needs a double, so bail out.
+  if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+    Label nonzero;
+    masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+    bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
+    masm.bind(&nonzero);
+  }
+  // Note: above safety checks could not be verified as Ion seems to be
+  // smarter and requires double arithmetic in such cases.
+
+  // All regular. Lets call div.
+  if (mir->canTruncateRemainder()) {
+#ifdef MIPSR6
+    // R6 has a three-operand div instruction.
+    masm.as_div(dest, lhs, rhs);
+#else
+    // Pre-R6: divide, then read the quotient from the LO register.
+    masm.as_div(lhs, rhs);
+    masm.as_mflo(dest);
+#endif
+  } else {
+    MOZ_ASSERT(mir->fallible());
+
+    // Non-truncating divide must be exact; bail out if there is a
+    // remainder.
+    Label remainderNonZero;
+    masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+    bailoutFrom(&remainderNonZero, ins->snapshot());
+  }
+
+  masm.bind(&done);
+}
+
+// Signed division by a known power of two, implemented with an arithmetic
+// shift plus a rounding adjustment for negative numerators.
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+  Register lhs = ToRegister(ins->numerator());
+  Register dest = ToRegister(ins->output());
+  Register tmp = ToRegister(ins->getTemp(0));
+  int32_t shift = ins->shift();
+
+  if (shift != 0) {
+    MDiv* mir = ins->mir();
+    if (!mir->isTruncated()) {
+      // If the remainder is going to be != 0, bailout since this must
+      // be a double.
+      // Shifting left by (32 - shift) isolates the low |shift| bits,
+      // i.e. the remainder.
+      masm.ma_sll(tmp, lhs, Imm32(32 - shift));
+      bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
+    }
+
+    if (!mir->canBeNegativeDividend()) {
+      // Numerator is unsigned, so needs no adjusting. Do the shift.
+      masm.ma_sra(dest, lhs, Imm32(shift));
+      return;
+    }
+
+    // Adjust the value so that shifting produces a correctly rounded result
+    // when the numerator is negative. See 10-1 "Signed Division by a Known
+    // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
+    if (shift > 1) {
+      // tmp = (lhs >> 31) >>> (32 - shift): (2^shift - 1) if lhs < 0,
+      // else 0.
+      masm.ma_sra(tmp, lhs, Imm32(31));
+      masm.ma_srl(tmp, tmp, Imm32(32 - shift));
+      masm.add32(lhs, tmp);
+    } else {
+      // shift == 1: the adjustment is simply the sign bit.
+      masm.ma_srl(tmp, lhs, Imm32(32 - shift));
+      masm.add32(lhs, tmp);
+    }
+
+    // Do the shift.
+    masm.ma_sra(dest, tmp, Imm32(shift));
+  } else {
+    // Dividing by 1 is a move.
+    masm.move32(lhs, dest);
+  }
+}
+
+// 32-bit signed modulus. Handles INT_MIN % -1, x % 0, and the -0-producing
+// cases (negative-zero results need a double) by trapping, truncating, or
+// bailing out according to the MIR flags, then emits the hardware divide
+// and reads the remainder.
+void CodeGenerator::visitModI(LModI* ins) {
+  // Extract the registers from this instruction
+  Register lhs = ToRegister(ins->lhs());
+  Register rhs = ToRegister(ins->rhs());
+  Register dest = ToRegister(ins->output());
+  Register callTemp = ToRegister(ins->callTemp());
+  MMod* mir = ins->mir();
+  Label done, prevent;
+
+  // Save the original lhs: the -0 check at the end needs its sign even if
+  // lhs and dest alias.
+  masm.move32(lhs, callTemp);
+
+  // Prevent INT_MIN % -1;
+  // The integer division will give INT_MIN, but we want -(double)INT_MIN.
+  if (mir->canBeNegativeDividend()) {
+    masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+    if (mir->isTruncated()) {
+      // (INT_MIN % -1)|0 == 0
+      Label skip;
+      masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+      masm.move32(Imm32(0), dest);
+      masm.ma_b(&done, ShortJump);
+      masm.bind(&skip);
+    } else {
+      MOZ_ASSERT(mir->fallible());
+      bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
+    }
+    masm.bind(&prevent);
+  }
+
+  // 0/X (with X < 0) is bad because both of these values *should* be
+  // doubles, and the result should be -0.0, which cannot be represented in
+  // integers. X/0 is bad because it will give garbage (or abort), when it
+  // should give either \infty, -\infty or NAN.
+
+  // Prevent 0 / X (with X < 0) and X / 0
+  // testing X / Y. Compare Y with 0.
+  // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
+  // If (Y < 0), then we compare X with 0, and bail if X == 0
+  // If (Y == 0), then we simply want to bail.
+  // if (Y > 0), we don't bail.
+
+  if (mir->canBeDivideByZero()) {
+    if (mir->isTruncated()) {
+      if (mir->trapOnError()) {
+        Label nonZero;
+        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+        masm.bind(&nonZero);
+      } else {
+        // Truncated x % 0 is 0 (NaN|0 == 0).
+        Label skip;
+        masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+        masm.move32(Imm32(0), dest);
+        masm.ma_b(&done, ShortJump);
+        masm.bind(&skip);
+      }
+    } else {
+      MOZ_ASSERT(mir->fallible());
+      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+    }
+  }
+
+  if (mir->canBeNegativeDividend()) {
+    Label notNegative;
+    masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+    if (mir->isTruncated()) {
+      // NaN|0 == 0 and (0 % -X)|0 == 0
+      Label skip;
+      masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+      masm.move32(Imm32(0), dest);
+      masm.ma_b(&done, ShortJump);
+      masm.bind(&skip);
+    } else {
+      MOZ_ASSERT(mir->fallible());
+      bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
+    }
+    masm.bind(&notNegative);
+  }
+#ifdef MIPSR6
+  // R6 has a dedicated three-operand mod instruction.
+  masm.as_mod(dest, lhs, rhs);
+#else
+  // Pre-R6: divide, then read the remainder from the HI register.
+  masm.as_div(lhs, rhs);
+  masm.as_mfhi(dest);
+#endif
+
+  // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+  if (mir->canBeNegativeDividend()) {
+    if (mir->isTruncated()) {
+      // -0.0|0 == 0
+    } else {
+      MOZ_ASSERT(mir->fallible());
+      // See if X < 0
+      masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+      // Remainder is 0: check the sign of the original lhs (saved in
+      // callTemp) and bail if negative, since the result would be -0.
+      bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
+    }
+  }
+  masm.bind(&done);
+}
+
+// Modulus by a known power of two: a bitmask for non-negative inputs, and
+// negate/mask/negate for negative ones so the result keeps the sign of the
+// dividend (C-style remainder).
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+  Register in = ToRegister(ins->getOperand(0));
+  Register out = ToRegister(ins->getDef(0));
+  MMod* mir = ins->mir();
+  Label negative, done;
+
+  masm.move32(in, out);
+  // 0 % 2^k == 0; nothing to do.
+  masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+  // Switch based on sign of the lhs.
+  // Positive numbers are just a bitmask
+  masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+  {
+    masm.and32(Imm32((1 << ins->shift()) - 1), out);
+    masm.ma_b(&done, ShortJump);
+  }
+
+  // Negative numbers need a negate, bitmask, negate
+  {
+    masm.bind(&negative);
+    masm.neg32(out);
+    masm.and32(Imm32((1 << ins->shift()) - 1), out);
+    masm.neg32(out);
+  }
+  if (mir->canBeNegativeDividend()) {
+    if (!mir->isTruncated()) {
+      MOZ_ASSERT(mir->fallible());
+      // A zero result from a negative dividend is really -0; compare
+      // against the hardware zero register and bail.
+      bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
+    } else {
+      // -0|0 == 0
+    }
+  }
+  masm.bind(&done);
+}
+
+// Modulus by (2^shift - 1) via the ma_mod_mask helper. The helper takes an
+// optional overflow label; when the result could be -0 (negative, fallible
+// dividend) the helper's failure path triggers a bailout.
+void CodeGenerator::visitModMaskI(LModMaskI* ins) {
+  Register src = ToRegister(ins->getOperand(0));
+  Register dest = ToRegister(ins->getDef(0));
+  Register tmp0 = ToRegister(ins->getTemp(0));
+  Register tmp1 = ToRegister(ins->getTemp(1));
+  MMod* mir = ins->mir();
+
+  if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+    MOZ_ASSERT(mir->fallible());
+
+    Label bail;
+    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
+    bailoutFrom(&bail, ins->snapshot());
+  } else {
+    // Infallible: pass no overflow label.
+    masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
+  }
+}
+
+// Bitwise complement (~x). Constant inputs are folded before lowering, so
+// the operand is always a register.
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+  const LAllocation* input = ins->getOperand(0);
+  MOZ_ASSERT(!input->isConstant());
+
+  masm.ma_not(ToRegister(ins->getDef(0)), ToRegister(input));
+}
+
+// 32-bit bitwise or/xor/and. The rhs is either an immediate or an integer
+// register; dispatch once on the operand kind and once on the opcode.
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+  Register dest = ToRegister(ins->getDef(0));
+  Register lhsReg = ToRegister(ins->getOperand(0));
+  const LAllocation* rhs = ins->getOperand(1);
+
+  if (rhs->isConstant()) {
+    Imm32 imm(ToInt32(rhs));
+    switch (ins->bitop()) {
+      case JSOp::BitOr:
+        masm.ma_or(dest, lhsReg, imm);
+        break;
+      case JSOp::BitXor:
+        masm.ma_xor(dest, lhsReg, imm);
+        break;
+      case JSOp::BitAnd:
+        masm.ma_and(dest, lhsReg, imm);
+        break;
+      default:
+        MOZ_CRASH("unexpected binary opcode");
+    }
+  } else {
+    Register rhsReg = ToRegister(rhs);
+    switch (ins->bitop()) {
+      case JSOp::BitOr:
+        masm.as_or(dest, lhsReg, rhsReg);
+        break;
+      case JSOp::BitXor:
+        masm.as_xor(dest, lhsReg, rhsReg);
+        break;
+      case JSOp::BitAnd:
+        masm.as_and(dest, lhsReg, rhsReg);
+        break;
+      default:
+        MOZ_CRASH("unexpected binary opcode");
+    }
+  }
+}
+
+// 64-bit bitwise or/xor/and, performed in place in the lhs register (pair
+// on MIPS32). Dispatch on operand kind first, then on the opcode.
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+  const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+  const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+  Register64 lhsReg = ToRegister64(lhs);
+  MOZ_ASSERT(ToOutRegister64(lir) == lhsReg);
+
+  if (IsConstant(rhs)) {
+    Imm64 imm(ToInt64(rhs));
+    switch (lir->bitop()) {
+      case JSOp::BitOr:
+        masm.or64(imm, lhsReg);
+        break;
+      case JSOp::BitXor:
+        masm.xor64(imm, lhsReg);
+        break;
+      case JSOp::BitAnd:
+        masm.and64(imm, lhsReg);
+        break;
+      default:
+        MOZ_CRASH("unexpected binary opcode");
+    }
+  } else {
+    switch (lir->bitop()) {
+      case JSOp::BitOr:
+        masm.or64(ToOperandOrRegister64(rhs), lhsReg);
+        break;
+      case JSOp::BitXor:
+        masm.xor64(ToOperandOrRegister64(rhs), lhsReg);
+        break;
+      case JSOp::BitAnd:
+        masm.and64(ToOperandOrRegister64(rhs), lhsReg);
+        break;
+      default:
+        MOZ_CRASH("unexpected binary opcode");
+    }
+  }
+}
+
+// 32-bit shifts (<<, >>, >>>). Shift amounts are masked to 0-31 per the
+// JS semantics. x >>> n with a result above INT32_MAX cannot be
+// represented as an int32, so fallible unsigned shifts bail out.
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+  Register lhs = ToRegister(ins->lhs());
+  const LAllocation* rhs = ins->rhs();
+  Register dest = ToRegister(ins->output());
+
+  if (rhs->isConstant()) {
+    int32_t shift = ToInt32(rhs) & 0x1F;
+    switch (ins->bitop()) {
+      case JSOp::Lsh:
+        if (shift) {
+          masm.ma_sll(dest, lhs, Imm32(shift));
+        } else {
+          masm.move32(lhs, dest);
+        }
+        break;
+      case JSOp::Rsh:
+        if (shift) {
+          masm.ma_sra(dest, lhs, Imm32(shift));
+        } else {
+          masm.move32(lhs, dest);
+        }
+        break;
+      case JSOp::Ursh:
+        if (shift) {
+          masm.ma_srl(dest, lhs, Imm32(shift));
+        } else {
+          // x >>> 0 can overflow.
+          // A negative lhs viewed as unsigned exceeds INT32_MAX.
+          if (ins->mir()->toUrsh()->fallible()) {
+            bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+          }
+          masm.move32(lhs, dest);
+        }
+        break;
+      default:
+        MOZ_CRASH("Unexpected shift op");
+    }
+  } else {
+    // The shift amounts should be AND'ed into the 0-31 range
+    // The masked amount is computed into dest and then used as the shift
+    // count. NOTE(review): this assumes dest does not alias lhs — lowering
+    // appears to allocate them separately; verify.
+    masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+    switch (ins->bitop()) {
+      case JSOp::Lsh:
+        masm.ma_sll(dest, lhs, dest);
+        break;
+      case JSOp::Rsh:
+        masm.ma_sra(dest, lhs, dest);
+        break;
+      case JSOp::Ursh:
+        masm.ma_srl(dest, lhs, dest);
+        if (ins->mir()->toUrsh()->fallible()) {
+          // x >>> 0 can overflow.
+          bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
+        }
+        break;
+      default:
+        MOZ_CRASH("Unexpected shift op");
+    }
+  }
+}
+
+// 64-bit shifts, in place in the lhs register (pair on MIPS32). Shift
+// amounts are masked to 0-63; a constant zero shift emits nothing.
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+  const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+  LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+  MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+  if (rhs->isConstant()) {
+    int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+    switch (lir->bitop()) {
+      case JSOp::Lsh:
+        if (shift) {
+          masm.lshift64(Imm32(shift), ToRegister64(lhs));
+        }
+        break;
+      case JSOp::Rsh:
+        if (shift) {
+          masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+        }
+        break;
+      case JSOp::Ursh:
+        if (shift) {
+          masm.rshift64(Imm32(shift), ToRegister64(lhs));
+        }
+        break;
+      default:
+        MOZ_CRASH("Unexpected shift op");
+    }
+    return;
+  }
+
+  // Variable shift amount: the masking is done by the masm helpers.
+  switch (lir->bitop()) {
+    case JSOp::Lsh:
+      masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+      break;
+    case JSOp::Rsh:
+      masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+      break;
+    case JSOp::Ursh:
+      masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+      break;
+    default:
+      MOZ_CRASH("Unexpected shift op");
+  }
+}
+
+// 64-bit rotate left/right. On MIPS64 the rotate is in place (input is
+// the output register); on MIPS32 input and output may differ, so a
+// zero-amount constant rotate still needs an explicit move.
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+  MRotate* mir = lir->mir();
+  LAllocation* count = lir->count();
+
+  Register64 input = ToRegister64(lir->input());
+  Register64 output = ToOutRegister64(lir);
+  Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+#ifdef JS_CODEGEN_MIPS64
+  MOZ_ASSERT(input == output);
+#endif
+
+  if (count->isConstant()) {
+    // Rotate amounts are taken modulo 64.
+    int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+    if (!c) {
+#ifdef JS_CODEGEN_MIPS32
+      masm.move64(input, output);
+#endif
+      return;
+    }
+    if (mir->isLeftRotate()) {
+      masm.rotateLeft64(Imm32(c), input, output, temp);
+    } else {
+      masm.rotateRight64(Imm32(c), input, output, temp);
+    }
+  } else {
+    if (mir->isLeftRotate()) {
+      masm.rotateLeft64(ToRegister(count), input, output, temp);
+    } else {
+      masm.rotateRight64(ToRegister(count), input, output, temp);
+    }
+  }
+}
+
+// x >>> y with a double result: the shifted value can exceed INT32_MAX,
+// so it is converted to a double with an unsigned conversion.
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+  Register lhs = ToRegister(ins->lhs());
+  Register temp = ToRegister(ins->temp());
+  const LAllocation* rhs = ins->rhs();
+
+  if (rhs->isConstant()) {
+    masm.ma_srl(temp, lhs, Imm32(ToInt32(rhs)));
+  } else {
+    masm.ma_srl(temp, lhs, ToRegister(rhs));
+  }
+
+  masm.convertUInt32ToDouble(temp, ToFloatRegister(ins->output()));
+}
+
+// Count leading zeroes of the 32-bit input using the clz instruction.
+void CodeGenerator::visitClzI(LClzI* ins) {
+  masm.as_clz(ToRegister(ins->output()), ToRegister(ins->input()));
+}
+
+// Count trailing zeroes of the 32-bit input.
+void CodeGenerator::visitCtzI(LCtzI* ins) {
+  masm.ma_ctz(ToRegister(ins->output()), ToRegister(ins->input()));
+}
+
+// 32-bit population count; the helper needs one temporary register.
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+  Register in = ToRegister(ins->input());
+  Register out = ToRegister(ins->output());
+  masm.popcnt32(in, out, ToRegister(ins->temp0()));
+}
+
+// 64-bit population count; the helper needs one temporary register.
+void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
+  Register64 in = ToRegister64(ins->getInt64Operand(0));
+  Register64 out = ToOutRegister64(ins);
+  masm.popcnt64(in, out, ToRegister(ins->getTemp(0)));
+}
+
+// Math.pow(x, 0.5). Unlike sqrt, pow(-Infinity, 0.5) must be +Infinity,
+// and pow(-0, 0.5) must be +0, so both special cases are handled before
+// the sqrt instruction.
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  FloatRegister output = ToFloatRegister(ins->output());
+
+  Label done, skip;
+
+  // Masm.pow(-Infinity, 0.5) == Infinity.
+  masm.loadConstantDouble(NegativeInfinity<double>(), ScratchDoubleReg);
+  masm.ma_bc1d(input, ScratchDoubleReg, &skip,
+               Assembler::DoubleNotEqualOrUnordered, ShortJump);
+  // input == -Infinity: produce +Infinity by negating the scratch value.
+  masm.as_negd(output, ScratchDoubleReg);
+  masm.ma_b(&done, ShortJump);
+
+  masm.bind(&skip);
+  // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+  // Adding 0 converts any -0 to 0.
+  masm.loadConstantDouble(0.0, ScratchDoubleReg);
+  masm.as_addd(output, input, ScratchDoubleReg);
+  masm.as_sqrtd(output, output);
+
+  masm.bind(&done);
+}
+
+// Translate an LIR allocation into a MoveOperand for the move resolver.
+MoveOperand CodeGeneratorMIPSShared::toMoveOperand(LAllocation a) const {
+  // Register allocations map directly onto register operands.
+  if (a.isGeneralReg()) {
+    return MoveOperand(ToRegister(a));
+  }
+  if (a.isFloatReg()) {
+    return MoveOperand(ToFloatRegister(a));
+  }
+  // Everything else lives on the stack: stack areas are moved by effective
+  // address, other slots by their memory contents. Stack offsets must be
+  // word-aligned.
+  Address address = ToAddress(a);
+  MOZ_ASSERT((address.offset & 3) == 0);
+  if (a.isStackArea()) {
+    return MoveOperand(address, MoveOperand::Kind::EffectiveAddress);
+  }
+  return MoveOperand(address, MoveOperand::Kind::Memory);
+}
+
+// Double-precision +, -, *, /: emit the FPU instruction matching the JS
+// arithmetic opcode.
+void CodeGenerator::visitMathD(LMathD* math) {
+  FloatRegister lhs = ToFloatRegister(math->getOperand(0));
+  FloatRegister rhs = ToFloatRegister(math->getOperand(1));
+  FloatRegister out = ToFloatRegister(math->getDef(0));
+
+  switch (math->jsop()) {
+    case JSOp::Add:
+      masm.as_addd(out, lhs, rhs);
+      break;
+    case JSOp::Sub:
+      masm.as_subd(out, lhs, rhs);
+      break;
+    case JSOp::Mul:
+      masm.as_muld(out, lhs, rhs);
+      break;
+    case JSOp::Div:
+      masm.as_divd(out, lhs, rhs);
+      break;
+    default:
+      MOZ_CRASH("unexpected opcode");
+  }
+}
+
+// Single-precision +, -, *, /: emit the FPU instruction matching the JS
+// arithmetic opcode.
+void CodeGenerator::visitMathF(LMathF* math) {
+  FloatRegister lhs = ToFloatRegister(math->getOperand(0));
+  FloatRegister rhs = ToFloatRegister(math->getOperand(1));
+  FloatRegister out = ToFloatRegister(math->getDef(0));
+
+  switch (math->jsop()) {
+    case JSOp::Add:
+      masm.as_adds(out, lhs, rhs);
+      break;
+    case JSOp::Sub:
+      masm.as_subs(out, lhs, rhs);
+      break;
+    case JSOp::Mul:
+      masm.as_muls(out, lhs, rhs);
+      break;
+    case JSOp::Div:
+      masm.as_divs(out, lhs, rhs);
+      break;
+    default:
+      MOZ_CRASH("unexpected opcode");
+  }
+}
+
+// Truncate a double to int32 via the shared helper.
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  Register output = ToRegister(ins->output());
+  emitTruncateDouble(input, output, ins->mir());
+}
+
+// Truncate a float32 to int32 via the shared helper.
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  Register output = ToRegister(ins->output());
+  emitTruncateFloat32(input, output, ins->mir());
+}
+
+// Wasm builtin double->int32 truncation; shares the emitTruncateDouble
+// path.
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+    LWasmBuiltinTruncateDToInt32* lir) {
+  FloatRegister in = ToFloatRegister(lir->getOperand(0));
+  Register out = ToRegister(lir->getDef(0));
+  emitTruncateDouble(in, out, lir->mir());
+}
+
+// Wasm builtin float32->int32 truncation; shares the emitTruncateFloat32
+// path.
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+    LWasmBuiltinTruncateFToInt32* lir) {
+  FloatRegister in = ToFloatRegister(lir->getOperand(0));
+  Register out = ToRegister(lir->getDef(0));
+  emitTruncateFloat32(in, out, lir->mir());
+}
+
+// Wasm float->int32 truncation. Out-of-range/NaN inputs branch to the
+// out-of-line check, which either traps or (for saturating truncation)
+// produces the clamped result and jumps back to rejoin().
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+  auto input = ToFloatRegister(lir->input());
+  auto output = ToRegister(lir->output());
+
+  MWasmTruncateToInt32* mir = lir->mir();
+  MIRType fromType = mir->input()->type();
+
+  MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+  auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+  addOutOfLineCode(ool, mir);
+
+  Label* oolEntry = ool->entry();
+  if (mir->isUnsigned()) {
+    if (fromType == MIRType::Double) {
+      masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
+                                      oolEntry);
+    } else if (fromType == MIRType::Float32) {
+      masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
+                                       oolEntry);
+    } else {
+      MOZ_CRASH("unexpected type");
+    }
+
+    masm.bind(ool->rejoin());
+    return;
+  }
+
+  // Signed truncation.
+  if (fromType == MIRType::Double) {
+    masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
+                                   oolEntry);
+  } else if (fromType == MIRType::Float32) {
+    masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
+                                    oolEntry);
+  } else {
+    MOZ_CRASH("unexpected type");
+  }
+
+  masm.bind(ool->rejoin());
+}
+
+// Out-of-line bailout tail: push the snapshot offset (in a Value-sized
+// slot so the stack stays aligned) and jump to the shared deopt handler.
+void CodeGeneratorMIPSShared::visitOutOfLineBailout(OutOfLineBailout* ool) {
+  // Push snapshotOffset and make sure stack is aligned.
+  masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+  masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
+                Address(StackPointer, 0));
+
+  masm.jump(&deoptLabel_);
+}
+
+// Out-of-line check for wasm truncation: dispatch to the Int32 or Int64
+// macro-assembler helper based on the destination type.
+void CodeGeneratorMIPSShared::visitOutOfLineWasmTruncateCheck(
+    OutOfLineWasmTruncateCheck* ool) {
+  MIRType toType = ool->toType();
+  MOZ_ASSERT(toType == MIRType::Int32 || toType == MIRType::Int64);
+  if (toType == MIRType::Int64) {
+    masm.outOfLineWasmTruncateToInt64Check(
+        ool->input(), ool->output64(), ool->fromType(), ool->flags(),
+        ool->rejoin(), ool->bytecodeOffset());
+  } else {
+    masm.outOfLineWasmTruncateToInt32Check(
+        ool->input(), ool->output(), ool->fromType(), ool->flags(),
+        ool->rejoin(), ool->bytecodeOffset());
+  }
+}
+
+// Float32 copysign: produce lhs's magnitude with rhs's sign by combining
+// the raw bit patterns in integer registers.
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+  FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+  Register lhsi = ToRegister(ins->getTemp(0));
+  Register rhsi = ToRegister(ins->getTemp(1));
+
+  masm.moveFromFloat32(lhs, lhsi);
+  masm.moveFromFloat32(rhs, rhsi);
+
+  // Combine.
+  // Insert the low 31 bits of lhsi (exponent + mantissa) into rhsi,
+  // keeping rhsi's bit 31 — the sign.
+  masm.ma_ins(rhsi, lhsi, 0, 31);
+
+  masm.moveToFloat32(rhsi, output);
+}
+
+// Double copysign: the sign bit lives in the high word, so only the high
+// words of the two doubles are combined; the output keeps lhs's low word.
+// NOTE(review): the low word of |output| is presumably arranged by
+// register allocation to alias lhs — verify against the lowering.
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+  FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+  FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+  FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+  Register lhsi = ToRegister(ins->getTemp(0));
+  Register rhsi = ToRegister(ins->getTemp(1));
+
+  // Manipulate high words of double inputs.
+  masm.moveFromDoubleHi(lhs, lhsi);
+  masm.moveFromDoubleHi(rhs, rhsi);
+
+  // Combine.
+  // Insert the low 31 bits of lhsi's high word into rhsi, keeping rhsi's
+  // bit 31 — the sign.
+  masm.ma_ins(rhsi, lhsi, 0, 31);
+
+  masm.moveToDoubleHi(rhsi, output);
+}
+
+// Materialize a boxed constant Value into the output value operand.
+void CodeGenerator::visitValue(LValue* value) {
+  masm.moveValue(value->value(), ToOutValue(value));
+}
+
+// Materialize a double constant into the output float register.
+void CodeGenerator::visitDouble(LDouble* ins) {
+  masm.loadConstantDouble(ins->value(), ToFloatRegister(ins->getDef(0)));
+}
+
+// Materialize a float32 constant into the output float register.
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+  masm.loadConstantFloat32(ins->value(), ToFloatRegister(ins->getDef(0)));
+}
+
+// Branch on the truthiness of a double: 0.0, -0.0 and NaN are falsy.
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+  FloatRegister input = ToFloatRegister(test->input());
+
+  MBasicBlock* ifTrue = test->ifTrue();
+  MBasicBlock* ifFalse = test->ifFalse();
+
+  masm.loadConstantDouble(0.0, ScratchDoubleReg);
+  // If 0, or NaN, the result is false.
+
+  if (isNextBlock(ifFalse->lir())) {
+    // ifFalse is the fallthrough block: branch to ifTrue when the input
+    // compares ordered-not-equal against zero.
+    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifTrue,
+                  Assembler::DoubleNotEqual);
+  } else {
+    // Otherwise use the inverted condition (equal or unordered == falsy)
+    // and jump to ifTrue explicitly.
+    branchToBlock(Assembler::DoubleFloat, input, ScratchDoubleReg, ifFalse,
+                  Assembler::DoubleEqualOrUnordered);
+    jumpToBlock(ifTrue);
+  }
+}
+
+// Branch on the truthiness of a float32: 0.0, -0.0 and NaN are falsy.
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+  FloatRegister input = ToFloatRegister(test->input());
+
+  MBasicBlock* ifTrue = test->ifTrue();
+  MBasicBlock* ifFalse = test->ifFalse();
+
+  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+  // If 0, or NaN, the result is false.
+
+  if (isNextBlock(ifFalse->lir())) {
+    // ifFalse is the fallthrough block: branch to ifTrue on ordered
+    // not-equal against zero.
+    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifTrue,
+                  Assembler::DoubleNotEqual);
+  } else {
+    // Otherwise branch to ifFalse with the inverted condition and jump to
+    // ifTrue explicitly.
+    branchToBlock(Assembler::SingleFloat, input, ScratchFloat32Reg, ifFalse,
+                  Assembler::DoubleEqualOrUnordered);
+    jumpToBlock(ifTrue);
+  }
+}
+
+// Double comparison with a boolean (0/1) result in a GPR.
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+  masm.ma_cmp_set_double(ToRegister(comp->output()),
+                         ToFloatRegister(comp->left()),
+                         ToFloatRegister(comp->right()), cond);
+}
+
+// Float32 comparison with a boolean (0/1) result in a GPR.
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+  Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+  masm.ma_cmp_set_float32(ToRegister(comp->output()),
+                          ToFloatRegister(comp->left()),
+                          ToFloatRegister(comp->right()), cond);
+}
+
+// Fused double comparison + branch. When ifFalse is the fallthrough block
+// a single conditional branch suffices; otherwise the condition is
+// inverted and an unconditional jump to ifTrue is appended.
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+  FloatRegister lhs = ToFloatRegister(comp->left());
+  FloatRegister rhs = ToFloatRegister(comp->right());
+
+  Assembler::DoubleCondition cond =
+      JSOpToDoubleCondition(comp->cmpMir()->jsop());
+  MBasicBlock* ifTrue = comp->ifTrue();
+  MBasicBlock* ifFalse = comp->ifFalse();
+
+  if (isNextBlock(ifFalse->lir())) {
+    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
+  } else {
+    branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
+                  Assembler::InvertCondition(cond));
+    jumpToBlock(ifTrue);
+  }
+}
+
+// Fused float32 comparison + branch; same fallthrough/inversion logic as
+// the double variant above.
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+  FloatRegister lhs = ToFloatRegister(comp->left());
+  FloatRegister rhs = ToFloatRegister(comp->right());
+
+  Assembler::DoubleCondition cond =
+      JSOpToDoubleCondition(comp->cmpMir()->jsop());
+  MBasicBlock* ifTrue = comp->ifTrue();
+  MBasicBlock* ifFalse = comp->ifFalse();
+
+  if (isNextBlock(ifFalse->lir())) {
+    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
+  } else {
+    branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
+                  Assembler::InvertCondition(cond));
+    jumpToBlock(ifTrue);
+  }
+}
+
+// Fused (lhs & rhs) + branch: AND into the scratch register, then branch
+// on the result.
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
+  Register lhs = ToRegister(lir->left());
+  const LAllocation* rhs = lir->right();
+
+  if (rhs->isConstant()) {
+    masm.ma_and(ScratchRegister, lhs, Imm32(ToInt32(rhs)));
+  } else {
+    masm.as_and(ScratchRegister, lhs, ToRegister(rhs));
+  }
+  emitBranch(ScratchRegister, ScratchRegister, lir->cond(), lir->ifTrue(),
+             lir->ifFalse());
+}
+
+// Unsigned 32-bit integer to double conversion.
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+  Register input = ToRegister(lir->input());
+  FloatRegister output = ToFloatRegister(lir->output());
+  masm.convertUInt32ToDouble(input, output);
+}
+
+// Unsigned 32-bit integer to float32 conversion.
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+  Register input = ToRegister(lir->input());
+  FloatRegister output = ToFloatRegister(lir->output());
+  masm.convertUInt32ToFloat32(input, output);
+}
+
+// Logical not of an int32: the output is 1 exactly when the input is 0.
+void CodeGenerator::visitNotI(LNotI* ins) {
+  Register input = ToRegister(ins->input());
+  Register output = ToRegister(ins->output());
+  masm.cmp32Set(Assembler::Equal, input, Imm32(0), output);
+}
+
+// Logical not of a double. A double is falsy iff it is 0.0, -0.0 or NaN,
+// i.e. iff it compares equal-or-unordered against zero.
+void CodeGenerator::visitNotD(LNotD* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  Register output = ToRegister(ins->output());
+
+  masm.loadConstantDouble(0.0, ScratchDoubleReg);
+  masm.ma_cmp_set_double(output, input, ScratchDoubleReg,
+                         Assembler::DoubleEqualOrUnordered);
+}
+
+// Logical not of a float32. A float is falsy iff it is 0.0, -0.0 or NaN,
+// i.e. iff it compares equal-or-unordered against zero.
+void CodeGenerator::visitNotF(LNotF* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  Register output = ToRegister(ins->output());
+
+  masm.loadConstantFloat32(0.0f, ScratchFloat32Reg);
+  masm.ma_cmp_set_float32(output, input, ScratchFloat32Reg,
+                          Assembler::DoubleEqualOrUnordered);
+}
+
+// Emit the memory fence required by this barrier's kind.
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+  masm.memoryBarrier(ins->type());
+}
+
+// Emit the invalidation epilogue: padding for OsiPoint patching, then the
+// frame-data pushes expected by the invalidation thunk.
+void CodeGeneratorMIPSShared::generateInvalidateEpilogue() {
+  // Ensure that there is enough space in the buffer for the OsiPoint
+  // patching to occur. Otherwise, we could overwrite the invalidation
+  // epilogue.
+  for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+    masm.nop();
+  }
+
+  masm.bind(&invalidate_);
+
+  // Push the return address of the point that we bailed out at to the stack
+  masm.Push(ra);
+
+  // Push the Ion script onto the stack (when we determine what that
+  // pointer is).
+  // The placeholder (-1) is patched once the IonScript pointer is known.
+  invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+  // Jump to the invalidator which will replace the current frame.
+  TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+  masm.jump(thunk);
+}
+
+// Out-of-line code holding the jump table for an MTableSwitch. The table
+// itself is emitted in visitOutOfLineTableSwitch; jumpLabel_ marks its start
+// so the dispatch code can compute absolute case addresses.
+class js::jit::OutOfLineTableSwitch
+    : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
+  MTableSwitch* mir_;
+  CodeLabel jumpLabel_;
+
+  void accept(CodeGeneratorMIPSShared* codegen) {
+    codegen->visitOutOfLineTableSwitch(this);
+  }
+
+ public:
+  // explicit: forbid accidental implicit conversion from MTableSwitch*.
+  explicit OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+  MTableSwitch* mir() const { return mir_; }
+
+  CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+// Emit the jump table for a table switch: one code pointer per case, each
+// bound to the case block's label offset.
+void CodeGeneratorMIPSShared::visitOutOfLineTableSwitch(
+    OutOfLineTableSwitch* ool) {
+  MTableSwitch* mir = ool->mir();
+
+  // Align the table so each entry is a naturally-aligned pointer.
+  masm.haltingAlign(sizeof(void*));
+  masm.bind(ool->jumpLabel());
+  masm.addCodeLabel(*ool->jumpLabel());
+
+  for (size_t i = 0; i < mir->numCases(); i++) {
+    LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+    Label* caseheader = caseblock->label();
+    uint32_t caseoffset = caseheader->offset();
+
+    // The entries of the jump table need to be absolute addresses and thus
+    // must be patched after codegen is finished.
+    CodeLabel cl;
+    masm.writeCodePointer(&cl);
+    cl.target()->bind(caseoffset);
+    masm.addCodeLabel(cl);
+  }
+}
+
+// Emit the dispatch sequence for a table switch: normalize the index, bounds
+// check against the case count, then branch through the out-of-line table.
+void CodeGeneratorMIPSShared::emitTableSwitchDispatch(MTableSwitch* mir,
+                                                      Register index,
+                                                      Register base) {
+  Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+  // Lower value with low value
+  if (mir->low() != 0) {
+    masm.subPtr(Imm32(mir->low()), index);
+  }
+
+  // Jump to default case if input is out of range
+  int32_t cases = mir->numCases();
+  // Unsigned compare also catches indices that went negative above.
+  masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+  // To fill in the CodeLabels for the case entries, we need to first
+  // generate the case entries (we don't yet know their offsets in the
+  // instruction stream).
+  OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+  addOutOfLineCode(ool, mir);
+
+  // Compute the position where a pointer to the right case stands.
+  masm.ma_li(base, ool->jumpLabel());
+
+  BaseIndex pointer(base, index, ScalePointer);
+
+  // Jump to the right case
+  masm.branchToComputedAddress(pointer);
+}
+
+// Shared emitter for aligned and unaligned wasm loads (T is LWasmLoad or
+// LWasmUnalignedLoad). Dispatches on access alignment and result type.
+template <typename T>
+void CodeGeneratorMIPSShared::emitWasmLoad(T* lir) {
+  const MWasmLoad* mir = lir->mir();
+  SecondScratchRegisterScope scratch2(masm);
+
+  Register memoryBase = ToRegister(lir->memoryBase());
+  Register ptr = ToRegister(lir->ptr());
+  Register ptrScratch = InvalidReg;
+  if (!lir->ptrCopy()->isBogusTemp()) {
+    ptrScratch = ToRegister(lir->ptrCopy());
+  }
+
+  // A 32-bit index must be zero-extended to full pointer width before use.
+  if (mir->base()->type() == MIRType::Int32) {
+    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+    ptr = scratch2;
+    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+  }
+
+  if (IsUnaligned(mir->access())) {
+    if (IsFloatingPointType(mir->type())) {
+      masm.wasmUnalignedLoadFP(mir->access(), memoryBase, ptr, ptrScratch,
+                               ToFloatRegister(lir->output()),
+                               ToRegister(lir->getTemp(1)));
+    } else {
+      masm.wasmUnalignedLoad(mir->access(), memoryBase, ptr, ptrScratch,
+                             ToRegister(lir->output()),
+                             ToRegister(lir->getTemp(1)));
+    }
+  } else {
+    masm.wasmLoad(mir->access(), memoryBase, ptr, ptrScratch,
+                  ToAnyRegister(lir->output()));
+  }
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmUnalignedLoad(LWasmUnalignedLoad* lir) {
+  emitWasmLoad(lir);
+}
+
+// Shared emitter for aligned and unaligned wasm stores (T is LWasmStore or
+// LWasmUnalignedStore). Mirrors emitWasmLoad above.
+template <typename T>
+void CodeGeneratorMIPSShared::emitWasmStore(T* lir) {
+  const MWasmStore* mir = lir->mir();
+  SecondScratchRegisterScope scratch2(masm);
+
+  Register memoryBase = ToRegister(lir->memoryBase());
+  Register ptr = ToRegister(lir->ptr());
+  Register ptrScratch = InvalidReg;
+  if (!lir->ptrCopy()->isBogusTemp()) {
+    ptrScratch = ToRegister(lir->ptrCopy());
+  }
+
+  // A 32-bit index must be zero-extended to full pointer width before use.
+  if (mir->base()->type() == MIRType::Int32) {
+    masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+    ptr = scratch2;
+    ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+  }
+
+  if (IsUnaligned(mir->access())) {
+    if (mir->access().type() == Scalar::Float32 ||
+        mir->access().type() == Scalar::Float64) {
+      masm.wasmUnalignedStoreFP(mir->access(), ToFloatRegister(lir->value()),
+                                memoryBase, ptr, ptrScratch,
+                                ToRegister(lir->getTemp(1)));
+    } else {
+      masm.wasmUnalignedStore(mir->access(), ToRegister(lir->value()),
+                              memoryBase, ptr, ptrScratch,
+                              ToRegister(lir->getTemp(1)));
+    }
+  } else {
+    masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), memoryBase, ptr,
+                   ptrScratch);
+  }
+}
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
+
+void CodeGenerator::visitWasmUnalignedStore(LWasmUnalignedStore* lir) {
+  emitWasmStore(lir);
+}
+
+// asm.js heap load. Decodes the scalar type into (size, signedness, float),
+// then emits one of three shapes: constant-address load, unchecked load, or
+// a bounds-checked load that yields NaN/0 when out of range (asm.js
+// semantics: OOB loads do not trap).
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+  const MAsmJSLoadHeap* mir = ins->mir();
+  const LAllocation* ptr = ins->ptr();
+  const LDefinition* out = ins->output();
+  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+  bool isSigned;
+  int size;
+  bool isFloat = false;
+  switch (mir->access().type()) {
+    case Scalar::Int8:
+      isSigned = true;
+      size = 8;
+      break;
+    case Scalar::Uint8:
+      isSigned = false;
+      size = 8;
+      break;
+    case Scalar::Int16:
+      isSigned = true;
+      size = 16;
+      break;
+    case Scalar::Uint16:
+      isSigned = false;
+      size = 16;
+      break;
+    case Scalar::Int32:
+      isSigned = true;
+      size = 32;
+      break;
+    case Scalar::Uint32:
+      isSigned = false;
+      size = 32;
+      break;
+    case Scalar::Float64:
+      isFloat = true;
+      size = 64;
+      break;
+    case Scalar::Float32:
+      isFloat = true;
+      size = 32;
+      break;
+    default:
+      MOZ_CRASH("unexpected array type");
+  }
+
+  // Constant pointer: the offset is known in-range at compile time.
+  if (ptr->isConstant()) {
+    MOZ_ASSERT(!mir->needsBoundsCheck());
+    int32_t ptrImm = ptr->toConstant()->toInt32();
+    MOZ_ASSERT(ptrImm >= 0);
+    if (isFloat) {
+      if (size == 32) {
+        masm.loadFloat32(Address(HeapReg, ptrImm), ToFloatRegister(out));
+      } else {
+        masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
+      }
+    } else {
+      masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
+                   static_cast<LoadStoreSize>(size),
+                   isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  Register ptrReg = ToRegister(ptr);
+
+  // Dynamic pointer, but bounds check proven unnecessary.
+  if (!mir->needsBoundsCheck()) {
+    if (isFloat) {
+      if (size == 32) {
+        masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+                         ToFloatRegister(out));
+      } else {
+        masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+                        ToFloatRegister(out));
+      }
+    } else {
+      masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+                   static_cast<LoadStoreSize>(size),
+                   isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  Label done, outOfRange;
+  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+                         ToRegister(boundsCheckLimit), &outOfRange);
+  // Offset is ok, let's load value.
+  if (isFloat) {
+    if (size == 32) {
+      masm.loadFloat32(BaseIndex(HeapReg, ptrReg, TimesOne),
+                       ToFloatRegister(out));
+    } else {
+      masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne),
+                      ToFloatRegister(out));
+    }
+  } else {
+    masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
+                 static_cast<LoadStoreSize>(size),
+                 isSigned ? SignExtend : ZeroExtend);
+  }
+  masm.ma_b(&done, ShortJump);
+  masm.bind(&outOfRange);
+  // Offset is out of range. Load default values.
+  if (isFloat) {
+    if (size == 32) {
+      masm.loadConstantFloat32(float(GenericNaN()), ToFloatRegister(out));
+    } else {
+      masm.loadConstantDouble(GenericNaN(), ToFloatRegister(out));
+    }
+  } else {
+    masm.move32(Imm32(0), ToRegister(out));
+  }
+  masm.bind(&done);
+}
+
+// asm.js heap store. Mirrors visitAsmJSLoadHeap: constant-address store,
+// unchecked store, or a bounds-checked store that is simply skipped when out
+// of range (asm.js semantics: OOB stores are silently dropped, not trapped).
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+  const MAsmJSStoreHeap* mir = ins->mir();
+  const LAllocation* value = ins->value();
+  const LAllocation* ptr = ins->ptr();
+  const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+  bool isSigned;
+  int size;
+  bool isFloat = false;
+  switch (mir->access().type()) {
+    case Scalar::Int8:
+      isSigned = true;
+      size = 8;
+      break;
+    case Scalar::Uint8:
+      isSigned = false;
+      size = 8;
+      break;
+    case Scalar::Int16:
+      isSigned = true;
+      size = 16;
+      break;
+    case Scalar::Uint16:
+      isSigned = false;
+      size = 16;
+      break;
+    case Scalar::Int32:
+      isSigned = true;
+      size = 32;
+      break;
+    case Scalar::Uint32:
+      isSigned = false;
+      size = 32;
+      break;
+    case Scalar::Float64:
+      isFloat = true;
+      size = 64;
+      break;
+    case Scalar::Float32:
+      isFloat = true;
+      size = 32;
+      break;
+    default:
+      MOZ_CRASH("unexpected array type");
+  }
+
+  // Constant pointer: the offset is known in-range at compile time.
+  if (ptr->isConstant()) {
+    MOZ_ASSERT(!mir->needsBoundsCheck());
+    int32_t ptrImm = ptr->toConstant()->toInt32();
+    MOZ_ASSERT(ptrImm >= 0);
+
+    if (isFloat) {
+      FloatRegister freg = ToFloatRegister(value);
+      Address addr(HeapReg, ptrImm);
+      if (size == 32) {
+        masm.storeFloat32(freg, addr);
+      } else {
+        masm.storeDouble(freg, addr);
+      }
+    } else {
+      masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
+                    static_cast<LoadStoreSize>(size),
+                    isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  Register ptrReg = ToRegister(ptr);
+
+  // Dynamic pointer, but bounds check proven unnecessary.
+  if (!mir->needsBoundsCheck()) {
+    if (isFloat) {
+      FloatRegister freg = ToFloatRegister(value);
+      BaseIndex bi(HeapReg, ptrReg, TimesOne);
+      if (size == 32) {
+        masm.storeFloat32(freg, bi);
+      } else {
+        masm.storeDouble(freg, bi);
+      }
+    } else {
+      masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+                    static_cast<LoadStoreSize>(size),
+                    isSigned ? SignExtend : ZeroExtend);
+    }
+    return;
+  }
+
+  Label outOfRange;
+  masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg,
+                         ToRegister(boundsCheckLimit), &outOfRange);
+
+  // Offset is ok, let's store value.
+  if (isFloat) {
+    if (size == 32) {
+      masm.storeFloat32(ToFloatRegister(value),
+                        BaseIndex(HeapReg, ptrReg, TimesOne));
+    } else {
+      masm.storeDouble(ToFloatRegister(value),
+                       BaseIndex(HeapReg, ptrReg, TimesOne));
+    }
+  } else {
+    masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
+                  static_cast<LoadStoreSize>(size),
+                  isSigned ? SignExtend : ZeroExtend);
+  }
+
+  // OOB path: skip the store entirely.
+  masm.bind(&outOfRange);
+}
+
+// Wasm atomic compare-exchange on linear memory; result is the old value.
+void CodeGenerator::visitWasmCompareExchangeHeap(
+    LWasmCompareExchangeHeap* ins) {
+  MWasmCompareExchangeHeap* mir = ins->mir();
+  Register memoryBase = ToRegister(ins->memoryBase());
+  Register ptrReg = ToRegister(ins->ptr());
+  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  Register oldval = ToRegister(ins->oldValue());
+  Register newval = ToRegister(ins->newValue());
+  // Temps may be InvalidReg; the masm helper only needs them for sub-word
+  // accesses.
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
+                           offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+// Wasm atomic exchange on linear memory; result is the old value.
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+  MWasmAtomicExchangeHeap* mir = ins->mir();
+  Register memoryBase = ToRegister(ins->memoryBase());
+  Register ptrReg = ToRegister(ins->ptr());
+  Register value = ToRegister(ins->value());
+  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
+                          maskTemp, ToRegister(ins->output()));
+}
+
+// Wasm atomic read-modify-write on linear memory whose result is used
+// (fetch-op); the for-effect variant is below.
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+  MOZ_ASSERT(ins->mir()->hasUses());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  MWasmAtomicBinopHeap* mir = ins->mir();
+  Register memoryBase = ToRegister(ins->memoryBase());
+  Register ptrReg = ToRegister(ins->ptr());
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+
+  masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
+                         ToRegister(ins->value()), srcAddr, valueTemp,
+                         offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+// Wasm atomic read-modify-write whose result is discarded; avoids producing
+// an output register.
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+    LWasmAtomicBinopHeapForEffect* ins) {
+  MOZ_ASSERT(!ins->mir()->hasUses());
+  MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+  MWasmAtomicBinopHeap* mir = ins->mir();
+  Register memoryBase = ToRegister(ins->memoryBase());
+  Register ptrReg = ToRegister(ins->ptr());
+  Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+  BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+  masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
+                          ToRegister(ins->value()), srcAddr, valueTemp,
+                          offsetTemp, maskTemp);
+}
+
+// Store a wasm call argument into its outgoing stack slot, dispatching on
+// whether the source is a constant, a GPR, a double, or a float32.
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+  const MWasmStackArg* mir = ins->mir();
+  if (ins->arg()->isConstant()) {
+    masm.storePtr(ImmWord(ToInt32(ins->arg())),
+                  Address(StackPointer, mir->spOffset()));
+  } else {
+    if (ins->arg()->isGeneralReg()) {
+      masm.storePtr(ToRegister(ins->arg()),
+                    Address(StackPointer, mir->spOffset()));
+    } else if (mir->input()->type() == MIRType::Double) {
+      masm.storeDouble(ToFloatRegister(ins->arg()).doubleOverlay(),
+                       Address(StackPointer, mir->spOffset()));
+    } else {
+      masm.storeFloat32(ToFloatRegister(ins->arg()),
+                        Address(StackPointer, mir->spOffset()));
+    }
+  }
+}
+
+// Store a 64-bit wasm call argument (constant or register pair) into its
+// outgoing stack slot.
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+  const MWasmStackArg* mir = ins->mir();
+  Address dst(StackPointer, mir->spOffset());
+  if (IsConstant(ins->arg())) {
+    masm.store64(Imm64(ToInt64(ins->arg())), dst);
+  } else {
+    masm.store64(ToRegister64(ins->arg()), dst);
+  }
+}
+
+// Wasm select: output = cond ? trueExpr : falseExpr. The register allocator
+// guarantees trueExpr is already in the output register, so we only need a
+// conditional move of falseExpr when cond is zero.
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+  MIRType mirType = ins->mir()->type();
+
+  Register cond = ToRegister(ins->condExpr());
+  const LAllocation* falseExpr = ins->falseExpr();
+
+  if (mirType == MIRType::Int32 || mirType == MIRType::WasmAnyRef) {
+    Register out = ToRegister(ins->output());
+    MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+               "true expr input is reused for output");
+    if (falseExpr->isRegister()) {
+      // movz: move falseExpr into out only if cond == 0.
+      masm.as_movz(out, ToRegister(falseExpr), cond);
+    } else {
+      masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
+    }
+    return;
+  }
+
+  FloatRegister out = ToFloatRegister(ins->output());
+  MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+             "true expr input is reused for output");
+
+  if (falseExpr->isFloatReg()) {
+    if (mirType == MIRType::Float32) {
+      masm.as_movz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
+                   cond);
+    } else if (mirType == MIRType::Double) {
+      masm.as_movz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
+                   cond);
+    } else {
+      MOZ_CRASH("unhandled type in visitWasmSelect!");
+    }
+  } else {
+    // falseExpr is in memory: branch over the load when cond is non-zero.
+    Label done;
+    masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+    if (mirType == MIRType::Float32) {
+      masm.loadFloat32(ToAddress(falseExpr), out);
+    } else if (mirType == MIRType::Double) {
+      masm.loadDouble(ToAddress(falseExpr), out);
+    } else {
+      MOZ_CRASH("unhandled type in visitWasmSelect!");
+    }
+
+    masm.bind(&done);
+  }
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32, and the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+  bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+                    ins->compareType() == MCompare::Compare_UInt32;
+  bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+  MOZ_RELEASE_ASSERT(
+      cmpIs32bit && selIs32bit,
+      "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+  Register trueExprAndDest = ToRegister(ins->output());
+  MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+             "true expr input is reused for output");
+
+  // Invert the condition so the cmp32Move32 overwrites the (already present)
+  // true value with the false value when the original condition fails.
+  Assembler::Condition cond = Assembler::InvertCondition(
+      JSOpToCondition(ins->compareType(), ins->jsop()));
+  const LAllocation* rhs = ins->rightExpr();
+  const LAllocation* falseExpr = ins->ifFalseExpr();
+  Register lhs = ToRegister(ins->leftExpr());
+
+  masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+                   trueExprAndDest);
+}
+
+// Bitwise reinterpret between int32 and float32 via FPU move instructions
+// (mfc1/mtc1). 64-bit variants are handled by a different LIR opcode.
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+  MOZ_ASSERT(gen->compilingWasm());
+  MWasmReinterpret* ins = lir->mir();
+
+  MIRType to = ins->type();
+  DebugOnly<MIRType> from = ins->input()->type();
+
+  switch (to) {
+    case MIRType::Int32:
+      MOZ_ASSERT(from == MIRType::Float32);
+      masm.as_mfc1(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+      break;
+    case MIRType::Float32:
+      MOZ_ASSERT(from == MIRType::Int32);
+      masm.as_mtc1(ToRegister(lir->input()), ToFloatRegister(lir->output()));
+      break;
+    case MIRType::Double:
+    case MIRType::Int64:
+      MOZ_CRASH("not handled by this LIR opcode");
+    default:
+      MOZ_CRASH("unexpected WasmReinterpret");
+  }
+}
+
+// Unsigned 32-bit division or modulus. Emits divide-by-zero handling (trap,
+// truncate-to-zero, or bailout depending on MIR flags), then the divu; for
+// div the remainder is checked first, then the quotient is read.
+void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
+  Register lhs = ToRegister(ins->lhs());
+  Register rhs = ToRegister(ins->rhs());
+  Register output = ToRegister(ins->output());
+  Label done;
+
+  // Prevent divide by zero.
+  if (ins->canBeDivideByZero()) {
+    if (ins->mir()->isTruncated()) {
+      if (ins->trapOnError()) {
+        Label nonZero;
+        masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+        masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+        masm.bind(&nonZero);
+      } else {
+        // Infinity|0 == 0
+        Label notzero;
+        masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+        masm.move32(Imm32(0), output);
+        masm.ma_b(&done, ShortJump);
+        masm.bind(&notzero);
+      }
+    } else {
+      bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+    }
+  }
+
+  // Pre-R6 MIPS places the remainder in HI and the quotient in LO; R6 has
+  // dedicated modu/divu instructions with explicit destinations.
+#ifdef MIPSR6
+  masm.as_modu(output, lhs, rhs);
+#else
+  masm.as_divu(lhs, rhs);
+  masm.as_mfhi(output);
+#endif
+
+  // If the remainder is > 0, bailout since this must be a double.
+  if (ins->mir()->isDiv()) {
+    if (!ins->mir()->toDiv()->canTruncateRemainder()) {
+      bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+    }
+    // Get quotient
+#ifdef MIPSR6
+    masm.as_divu(output, lhs, rhs);
+#else
+    masm.as_mflo(output);
+#endif
+  }
+
+  // A result with the sign bit set cannot be represented as int32 if the
+  // operation is not truncated, so bail out.
+  if (!ins->mir()->isTruncated()) {
+    bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+  }
+
+  masm.bind(&done);
+}
+
+// Compute base + index * scale + displacement into the output register.
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+  const MEffectiveAddress* mir = ins->mir();
+  Register base = ToRegister(ins->base());
+  Register index = ToRegister(ins->index());
+  Register output = ToRegister(ins->output());
+
+  BaseIndex address(base, index, mir->scale(), mir->displacement());
+  masm.computeEffectiveAddress(address, output);
+}
+
+// Arithmetic negation visitors: int32, int64, double, and float32.
+void CodeGenerator::visitNegI(LNegI* ins) {
+  Register input = ToRegister(ins->input());
+  Register output = ToRegister(ins->output());
+
+  masm.ma_negu(output, input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+  Register64 input = ToRegister64(ins->getInt64Operand(0));
+  // Negation happens in place: input must be the output register (pair).
+  MOZ_ASSERT(input == ToOutRegister64(ins));
+  masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  FloatRegister output = ToFloatRegister(ins->output());
+
+  masm.as_negd(output, input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+  FloatRegister input = ToFloatRegister(ins->input());
+  FloatRegister output = ToFloatRegister(ins->output());
+
+  masm.as_negs(output, input);
+}
+
+// Add a constant wasm memory offset to a pointer, trapping with OutOfBounds
+// if the addition carries (i.e. wraps past the address space).
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+  MWasmAddOffset* mir = lir->mir();
+  Register base = ToRegister(lir->base());
+  Register out = ToRegister(lir->output());
+
+  Label ok;
+  masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
+                         &ok);
+  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+  masm.bind(&ok);
+}
+
+// 64-bit variant of visitWasmAddOffset.
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+  MWasmAddOffset* mir = lir->mir();
+  Register64 base = ToRegister64(lir->base());
+  Register64 out = ToOutRegister64(lir);
+
+  Label ok;
+  masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
+                          ImmWord(mir->offset()), &ok);
+  masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+  masm.bind(&ok);
+}
+
+// Atomic read-modify-write on a typed-array element whose result is used.
+// Addressing splits on constant vs. register index.
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+    LAtomicTypedArrayElementBinop* lir) {
+  MOZ_ASSERT(!lir->mir()->isForEffect());
+
+  AnyRegister output = ToAnyRegister(lir->output());
+  Register elements = ToRegister(lir->elements());
+  Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+  Register value = ToRegister(lir->value());
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  if (lir->index()->isConstant()) {
+    Address mem = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+                         lir->mir()->operation(), value, mem, valueTemp,
+                         offsetTemp, maskTemp, outTemp, output);
+  } else {
+    BaseIndex mem(elements, ToRegister(lir->index()),
+                  ScaleFromScalarType(arrayType));
+    masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+                         lir->mir()->operation(), value, mem, valueTemp,
+                         offsetTemp, maskTemp, outTemp, output);
+  }
+}
+
+// Same as above but the result is discarded, so no output register is
+// produced.
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+    LAtomicTypedArrayElementBinopForEffect* lir) {
+  MOZ_ASSERT(lir->mir()->isForEffect());
+
+  Register elements = ToRegister(lir->elements());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+  Register value = ToRegister(lir->value());
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  if (lir->index()->isConstant()) {
+    Address mem = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+                          lir->mir()->operation(), value, mem, valueTemp,
+                          offsetTemp, maskTemp);
+  } else {
+    BaseIndex mem(elements, ToRegister(lir->index()),
+                  ScaleFromScalarType(arrayType));
+    masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+                          lir->mir()->operation(), value, mem, valueTemp,
+                          offsetTemp, maskTemp);
+  }
+}
+
+// Atomic compare-exchange on a typed-array element; returns the old value.
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+    LCompareExchangeTypedArrayElement* lir) {
+  Register elements = ToRegister(lir->elements());
+  AnyRegister output = ToAnyRegister(lir->output());
+  Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+  Register oldval = ToRegister(lir->oldval());
+  Register newval = ToRegister(lir->newval());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
+                           output);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+                           newval, valueTemp, offsetTemp, maskTemp, outTemp,
+                           output);
+  }
+}
+
+// Atomic exchange on a typed-array element; returns the old value.
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+    LAtomicExchangeTypedArrayElement* lir) {
+  Register elements = ToRegister(lir->elements());
+  AnyRegister output = ToAnyRegister(lir->output());
+  Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+  Register value = ToRegister(lir->value());
+  Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+  Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+  Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+                          valueTemp, offsetTemp, maskTemp, outTemp, output);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+                          valueTemp, offsetTemp, maskTemp, outTemp, output);
+  }
+}
+
+// 64-bit (BigInt) atomic compare-exchange on a typed-array element. Inputs
+// are BigInt cells that are unboxed into 64-bit temps before the CAS; the
+// old value (in temp2) is re-boxed into a BigInt result.
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+    LCompareExchangeTypedArrayElement64* lir) {
+  Register elements = ToRegister(lir->elements());
+  Register oldval = ToRegister(lir->oldval());
+  Register newval = ToRegister(lir->newval());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = ToRegister64(lir->temp2());
+  Register out = ToRegister(lir->output());
+  // The output register doubles as a 64-bit temp for the new value.
+  Register64 tempOut(out);
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  masm.loadBigInt64(oldval, temp1);
+  masm.loadBigInt64(newval, tempOut);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+                           temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+                           temp2);
+  }
+
+  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+// 64-bit (BigInt) atomic exchange on a typed-array element; old value comes
+// back in temp2 and is re-boxed into a BigInt result.
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+    LAtomicExchangeTypedArrayElement64* lir) {
+  Register elements = ToRegister(lir->elements());
+  Register value = ToRegister(lir->value());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = Register64(ToRegister(lir->temp2()));
+  Register out = ToRegister(lir->output());
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+
+  masm.loadBigInt64(value, temp1);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+  }
+
+  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+// 64-bit (BigInt) atomic read-modify-write on a typed-array element whose
+// result is used; the fetched old value (temp2) is re-boxed as a BigInt.
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+    LAtomicTypedArrayElementBinop64* lir) {
+  MOZ_ASSERT(lir->mir()->hasUses());
+
+  Register elements = ToRegister(lir->elements());
+  Register value = ToRegister(lir->value());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = ToRegister64(lir->temp2());
+  Register out = ToRegister(lir->output());
+  // The output register doubles as a 64-bit temp during the operation.
+  Register64 tempOut = Register64(out);
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+  AtomicOp atomicOp = lir->mir()->operation();
+
+  masm.loadBigInt64(value, temp1);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                         tempOut, temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                         tempOut, temp2);
+  }
+
+  emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+// Same as above but the result is discarded: no BigInt is created.
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+    LAtomicTypedArrayElementBinopForEffect64* lir) {
+  MOZ_ASSERT(!lir->mir()->hasUses());
+
+  Register elements = ToRegister(lir->elements());
+  Register value = ToRegister(lir->value());
+  Register64 temp1 = ToRegister64(lir->temp1());
+  Register64 temp2 = ToRegister64(lir->temp2());
+
+  Scalar::Type arrayType = lir->mir()->arrayType();
+  AtomicOp atomicOp = lir->mir()->operation();
+
+  masm.loadBigInt64(value, temp1);
+
+  if (lir->index()->isConstant()) {
+    Address dest = ToAddress(elements, lir->index(), arrayType);
+    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                          temp2);
+  } else {
+    BaseIndex dest(elements, ToRegister(lir->index()),
+                   ScaleFromScalarType(arrayType));
+    masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+                          temp2);
+  }
+}
+
+// 64-bit wasm atomics on linear memory: compare-exchange, exchange, and
+// read-modify-write. All address as memoryBase + ptr + access offset.
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
+  Register memoryBase = ToRegister(lir->memoryBase());
+  Register ptr = ToRegister(lir->ptr());
+  Register64 oldValue = ToRegister64(lir->oldValue());
+  Register64 newValue = ToRegister64(lir->newValue());
+  Register64 output = ToOutRegister64(lir);
+  uint32_t offset = lir->mir()->access().offset();
+
+  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
+  masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
+                             output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
+  Register memoryBase = ToRegister(lir->memoryBase());
+  Register ptr = ToRegister(lir->ptr());
+  Register64 value = ToRegister64(lir->value());
+  Register64 output = ToOutRegister64(lir);
+  uint32_t offset = lir->mir()->access().offset();
+
+  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
+  masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
+  Register memoryBase = ToRegister(lir->memoryBase());
+  Register ptr = ToRegister(lir->ptr());
+  Register64 value = ToRegister64(lir->value());
+  Register64 output = ToOutRegister64(lir);
+// On MIPS32 a 64-bit temp needs two GPRs; on MIPS64 one suffices.
+#ifdef JS_CODEGEN_MIPS32
+  Register64 temp(ToRegister(lir->getTemp(0)), ToRegister(lir->getTemp(1)));
+#else
+  Register64 temp(ToRegister(lir->getTemp(0)));
+#endif
+  uint32_t offset = lir->mir()->access().offset();
+
+  BaseIndex addr(memoryBase, ptr, TimesOne, offset);
+
+  masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
+                           addr, temp, output);
+}
+
+// Unimplemented visitors: nearbyint is not yet implemented on MIPS, and the
+// SIMD LIR nodes are unreachable because this target reports no SIMD
+// support; each stub crashes deliberately if ever hit.
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+    LWasmBinarySimd128WithConstant* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+    LWasmVariableShiftSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+    LWasmConstantShiftSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+    LWasmSignReplicationSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+    LWasmReplaceInt64LaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+    LWasmReduceAndBranchSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+    LWasmReduceSimd128ToInt64* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+  MOZ_CRASH("No SIMD");
+}
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.h b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
new file mode 100644
index 0000000000..2452a443be
--- /dev/null
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.h
@@ -0,0 +1,157 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_CodeGenerator_mips_shared_h
+#define jit_mips_shared_CodeGenerator_mips_shared_h
+
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorMIPSShared;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorMIPSShared>;
+
+// Code-generation machinery shared by the MIPS32 and MIPS64 backends:
+// bailout helpers, conditional-branch emission, table-switch dispatch,
+// wasm load/store emission, and typed-array atomic helpers.  The
+// word-size-specific code generators derive from this class.
+class CodeGeneratorMIPSShared : public CodeGeneratorShared {
+  friend class MoveResolverMIPS;
+
+ protected:
+  CodeGeneratorMIPSShared(MIRGenerator* gen, LIRGraph* graph,
+                          MacroAssembler* masm);
+
+  // Shared deoptimization label.  NOTE(review): presumably bound by the
+  // out-of-line code path in the .cpp; its binding is not visible here.
+  NonAssertingLabel deoptLabel_;
+
+  Operand ToOperand(const LAllocation& a);
+  Operand ToOperand(const LAllocation* a);
+  Operand ToOperand(const LDefinition* def);
+
+  // On 64-bit (PUNBOX64) an int64 allocation fits a single operand; on
+  // 32-bit it is a register pair.
+#ifdef JS_PUNBOX64
+  Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+  Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+  MoveOperand toMoveOperand(LAllocation a) const;
+
+  // The bailoutXxx helpers emit the given comparison/test and, when it
+  // holds, bail out of the compiled code using |snapshot|.
+  template <typename T1, typename T2>
+  void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+                    LSnapshot* snapshot) {
+    Label bail;
+    masm.branch32(c, lhs, rhs, &bail);
+    bailoutFrom(&bail, snapshot);
+  }
+  template <typename T1, typename T2>
+  void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+                     LSnapshot* snapshot) {
+    Label bail;
+    masm.branchTest32(c, lhs, rhs, &bail);
+    bailoutFrom(&bail, snapshot);
+  }
+  template <typename T1, typename T2>
+  void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+                     LSnapshot* snapshot) {
+    Label bail;
+    masm.branchPtr(c, lhs, rhs, &bail);
+    bailoutFrom(&bail, snapshot);
+  }
+  void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+                      LSnapshot* snapshot) {
+    Label bail;
+    masm.branchTestPtr(c, lhs, rhs, &bail);
+    bailoutFrom(&bail, snapshot);
+  }
+  // Bails out when the low byte of |reg| is zero (i.e. the boolean is
+  // false); only the 0xFF byte is significant.
+  void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+    Label bail;
+    masm.branchTest32(Assembler::Zero, reg, Imm32(0xFF), &bail);
+    bailoutFrom(&bail, snapshot);
+  }
+
+  void bailoutFrom(Label* label, LSnapshot* snapshot);
+  void bailout(LSnapshot* snapshot);
+
+  bool generateOutOfLineCode();
+
+  // Emit a conditional branch on |lhs| vs |rhs| to |mir|'s block, skipping
+  // trivial (empty) intermediate blocks.
+  template <typename T>
+  void branchToBlock(Register lhs, T rhs, MBasicBlock* mir,
+                     Assembler::Condition cond) {
+    masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond);
+  }
+  void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs,
+                     FloatRegister rhs, MBasicBlock* mir,
+                     Assembler::DoubleCondition cond);
+
+  // Emits a branch that directs control flow to the true block if |cond| is
+  // true, and the false block if |cond| is false.  When the false block is
+  // the fall-through successor, only one branch is emitted.
+  template <typename T>
+  void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+                  MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
+    if (isNextBlock(mirFalse->lir())) {
+      branchToBlock(lhs, rhs, mirTrue, cond);
+    } else {
+      branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+      jumpToBlock(mirTrue);
+    }
+  }
+  void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+                          MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+    emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+  }
+
+  void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+                               Register base);
+
+  // Shared emission for wasm memory accesses; T is the LIR node type.
+  template <typename T>
+  void emitWasmLoad(T* ins);
+  template <typename T>
+  void emitWasmStore(T* ins);
+
+  void generateInvalidateEpilogue();
+
+  // Typed-array atomic read-modify-write.  The valueTemp/offsetTemp/maskTemp
+  // registers support sub-word accesses built from word-sized operations.
+  // Generating a result.
+  template <typename S, typename T>
+  void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                  const S& value, const T& mem,
+                                  Register flagTemp, Register outTemp,
+                                  Register valueTemp, Register offsetTemp,
+                                  Register maskTemp, AnyRegister output);
+
+  // Generating no result.
+  template <typename S, typename T>
+  void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                  const S& value, const T& mem,
+                                  Register flagTemp, Register valueTemp,
+                                  Register offsetTemp, Register maskTemp);
+
+ public:
+  // Out of line visitors.
+  void visitOutOfLineBailout(OutOfLineBailout* ool);
+  void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+  void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+};
+
+// An out-of-line bailout thunk.  Records the snapshot describing the state
+// at the failed guard plus the frame size; accept() emits the actual
+// bailout code in the out-of-line section.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorMIPSShared> {
+ protected:
+  LSnapshot* snapshot_;   // Machine state to restore on bailout.
+  uint32_t frameSize_;    // Frame size at the bailout point.
+
+ public:
+  OutOfLineBailout(LSnapshot* snapshot, uint32_t frameSize)
+      : snapshot_(snapshot), frameSize_(frameSize) {}
+
+  void accept(CodeGeneratorMIPSShared* codegen) override;
+
+  LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_CodeGenerator_mips_shared_h */
diff --git a/js/src/jit/mips-shared/LIR-mips-shared.h b/js/src/jit/mips-shared/LIR-mips-shared.h
new file mode 100644
index 0000000000..a302be061e
--- /dev/null
+++ b/js/src/jit/mips-shared/LIR-mips-shared.h
@@ -0,0 +1,380 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_LIR_mips_shared_h
+#define jit_mips_shared_LIR_mips_shared_h
+
+namespace js {
+namespace jit {
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+  LIR_HEADER(WasmUint32ToDouble)
+
+  // |input| is the uint32 value (in a GPR) to convert.
+  LWasmUint32ToDouble(const LAllocation& input)
+      : LInstructionHelper(classOpcode) {
+    setOperand(0, input);
+  }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+  LIR_HEADER(WasmUint32ToFloat32)
+
+  // |input| is the uint32 value (in a GPR) to convert.
+  LWasmUint32ToFloat32(const LAllocation& input)
+      : LInstructionHelper(classOpcode) {
+    setOperand(0, input);
+  }
+};
+
+// Signed 32-bit integer division (lhs / rhs) with one temp register.
+class LDivI : public LBinaryMath<1> {
+ public:
+  LIR_HEADER(DivI);
+
+  LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+      : LBinaryMath(classOpcode) {
+    setOperand(0, lhs);
+    setOperand(1, rhs);
+    setTemp(0, temp);
+  }
+
+  MDiv* mir() const { return mir_->toDiv(); }
+};
+
+// Signed division by a positive power of two; shift_ is log2 of the
+// divisor (see LIRGeneratorMIPSShared::lowerDivI).
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1> {
+  const int32_t shift_;
+
+ public:
+  LIR_HEADER(DivPowTwoI)
+
+  LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+      : LInstructionHelper(classOpcode), shift_(shift) {
+    setOperand(0, lhs);
+    setTemp(0, temp);
+  }
+
+  const LAllocation* numerator() { return getOperand(0); }
+  int32_t shift() const { return shift_; }
+  MDiv* mir() const { return mir_->toDiv(); }
+};
+
+// Signed 32-bit integer modulus (lhs % rhs) with one call temp.
+class LModI : public LBinaryMath<1> {
+ public:
+  LIR_HEADER(ModI);
+
+  LModI(const LAllocation& lhs, const LAllocation& rhs,
+        const LDefinition& callTemp)
+      : LBinaryMath(classOpcode) {
+    setOperand(0, lhs);
+    setOperand(1, rhs);
+    setTemp(0, callTemp);
+  }
+
+  const LDefinition* callTemp() { return getTemp(0); }
+  MMod* mir() const { return mir_->toMod(); }
+};
+
+// Modulus by a positive power of two; shift_ is log2 of the divisor.
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+  const int32_t shift_;
+
+ public:
+  LIR_HEADER(ModPowTwoI);
+
+  LModPowTwoI(const LAllocation& lhs, int32_t shift)
+      : LInstructionHelper(classOpcode), shift_(shift) {
+    setOperand(0, lhs);
+  }
+
+  int32_t shift() const { return shift_; }
+  MMod* mir() const { return mir_->toMod(); }
+};
+
+// Modulus by a divisor of the form (1 << shift) - 1 (see
+// LIRGeneratorMIPSShared::lowerModI); uses two general temps.
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+  const int32_t shift_;
+
+ public:
+  LIR_HEADER(ModMaskI);
+
+  LModMaskI(const LAllocation& lhs, const LDefinition& temp0,
+            const LDefinition& temp1, int32_t shift)
+      : LInstructionHelper(classOpcode), shift_(shift) {
+    setOperand(0, lhs);
+    setTemp(0, temp0);
+    setTemp(1, temp1);
+  }
+
+  int32_t shift() const { return shift_; }
+  MMod* mir() const { return mir_->toMod(); }
+};
+
+// Takes a tableswitch with an integer to decide.  Temps hold a copy of the
+// input and the computed jump-table pointer.
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+  LIR_HEADER(TableSwitch);
+
+  LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+               const LDefinition& jumpTablePointer, MTableSwitch* ins)
+      : LInstructionHelper(classOpcode) {
+    setOperand(0, in);
+    setTemp(0, inputCopy);
+    setTemp(1, jumpTablePointer);
+    setMir(ins);
+  }
+
+  MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+  const LAllocation* index() { return getOperand(0); }
+  const LDefinition* tempInt() { return getTemp(0); }
+  // This is added to share the same CodeGenerator prefixes.
+  const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Takes a tableswitch with a boxed Value input to decide; an extra float
+// temp is needed to unbox a double index.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+  LIR_HEADER(TableSwitchV);
+
+  LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+                const LDefinition& floatCopy,
+                const LDefinition& jumpTablePointer, MTableSwitch* ins)
+      : LInstructionHelper(classOpcode) {
+    setBoxOperand(InputValue, input);
+    setTemp(0, inputCopy);
+    setTemp(1, floatCopy);
+    setTemp(2, jumpTablePointer);
+    setMir(ins);
+  }
+
+  MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+  static const size_t InputValue = 0;
+
+  const LDefinition* tempInt() { return getTemp(0); }
+  const LDefinition* tempFloat() { return getTemp(1); }
+  const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+// Signed 32-bit multiply; operands are filled in by lowerForALU (see
+// LIRGeneratorMIPSShared::lowerMulI).
+class LMulI : public LBinaryMath<0> {
+ public:
+  LIR_HEADER(MulI);
+
+  LMulI() : LBinaryMath(classOpcode) {}
+
+  MMul* mir() { return mir_->toMul(); }
+};
+
+// Unsigned 32-bit division or modulus; a single LIR node serves both, so
+// mir_ may be either MDiv or MMod.
+class LUDivOrMod : public LBinaryMath<0> {
+ public:
+  LIR_HEADER(UDivOrMod);
+
+  LUDivOrMod() : LBinaryMath(classOpcode) {}
+
+  MBinaryArithInstruction* mir() const {
+    MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+    return static_cast<MBinaryArithInstruction*>(mir_);
+  }
+
+  bool canBeDivideByZero() const {
+    if (mir_->isMod()) {
+      return mir_->toMod()->canBeDivideByZero();
+    }
+    return mir_->toDiv()->canBeDivideByZero();
+  }
+
+  bool trapOnError() const {
+    if (mir_->isMod()) {
+      return mir_->toMod()->trapOnError();
+    }
+    return mir_->toDiv()->trapOnError();
+  }
+
+  wasm::BytecodeOffset bytecodeOffset() const {
+    MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+    if (mir_->isMod()) {
+      return mir_->toMod()->bytecodeOffset();
+    }
+    return mir_->toDiv()->bytecodeOffset();
+  }
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+// Temp 0 starts as a bogus temp and is replaced with a copy of the pointer
+// when the access has a nonzero offset (see visitWasmLoad); temp 1 is a
+// scratch register used while assembling the unaligned value.
+template <size_t NumDefs>
+class LWasmUnalignedLoadBase : public details::LWasmLoadBase<NumDefs, 2> {
+ public:
+  typedef LWasmLoadBase<NumDefs, 2> Base;
+
+  explicit LWasmUnalignedLoadBase(LNode::Opcode opcode, const LAllocation& ptr,
+                                  const LAllocation& memoryBase,
+                                  const LDefinition& valueHelper)
+      : Base(opcode, ptr, memoryBase) {
+    Base::setTemp(0, LDefinition::BogusTemp());
+    Base::setTemp(1, valueHelper);
+  }
+
+  const LAllocation* ptr() { return Base::getOperand(0); }
+  const LDefinition* ptrCopy() { return Base::getTemp(0); }
+};
+
+}  // namespace details
+
+// Unaligned wasm load producing a single (non-int64) value.
+class LWasmUnalignedLoad : public details::LWasmUnalignedLoadBase<1> {
+ public:
+  LIR_HEADER(WasmUnalignedLoad);
+
+  explicit LWasmUnalignedLoad(const LAllocation& ptr,
+                              const LAllocation& memoryBase,
+                              const LDefinition& valueHelper)
+      : LWasmUnalignedLoadBase(classOpcode, ptr, memoryBase, valueHelper) {}
+};
+
+// Unaligned wasm load producing an int64 (INT64_PIECES defs on mips32).
+class LWasmUnalignedLoadI64
+    : public details::LWasmUnalignedLoadBase<INT64_PIECES> {
+ public:
+  LIR_HEADER(WasmUnalignedLoadI64);
+
+  explicit LWasmUnalignedLoadI64(const LAllocation& ptr,
+                                 const LAllocation& memoryBase,
+                                 const LDefinition& valueHelper)
+      : LWasmUnalignedLoadBase(classOpcode, ptr, memoryBase, valueHelper) {}
+};
+
+namespace details {
+
+// Base class for the int64 and non-int64 variants.
+// Operand 0 is the pointer; derived classes add the value and memory base.
+// Temp 0 starts bogus and becomes a pointer copy when the access has a
+// nonzero offset (see visitWasmStore); temp 1 is a value-scratch register.
+template <size_t NumOps>
+class LWasmUnalignedStoreBase : public LInstructionHelper<0, NumOps, 2> {
+ public:
+  typedef LInstructionHelper<0, NumOps, 2> Base;
+
+  static const size_t PtrIndex = 0;
+  static const size_t ValueIndex = 1;
+
+  LWasmUnalignedStoreBase(LNode::Opcode opcode, const LAllocation& ptr,
+                          const LDefinition& valueHelper)
+      : Base(opcode) {
+    Base::setOperand(0, ptr);
+    Base::setTemp(0, LDefinition::BogusTemp());
+    Base::setTemp(1, valueHelper);
+  }
+
+  MWasmStore* mir() const { return Base::mir_->toWasmStore(); }
+  const LAllocation* ptr() { return Base::getOperand(PtrIndex); }
+  const LDefinition* ptrCopy() { return Base::getTemp(0); }
+};
+
+}  // namespace details
+
+// Unaligned wasm store of a non-int64 value.
+// Operands: [ptr, value, memoryBase].
+class LWasmUnalignedStore : public details::LWasmUnalignedStoreBase<3> {
+ public:
+  LIR_HEADER(WasmUnalignedStore);
+
+  LWasmUnalignedStore(const LAllocation& ptr, const LAllocation& value,
+                      const LAllocation& memoryBase,
+                      const LDefinition& valueHelper)
+      : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper) {
+    setOperand(1, value);
+    setOperand(2, memoryBase);
+  }
+
+  const LAllocation* value() { return Base::getOperand(ValueIndex); }
+  const LAllocation* memoryBase() { return Base::getOperand(ValueIndex + 1); }
+};
+
+// Unaligned wasm store of an int64 value.
+// Operands: [ptr, value (INT64_PIECES slots), memoryBase].
+class LWasmUnalignedStoreI64
+    : public details::LWasmUnalignedStoreBase<2 + INT64_PIECES> {
+ public:
+  LIR_HEADER(WasmUnalignedStoreI64);
+  LWasmUnalignedStoreI64(const LAllocation& ptr, const LInt64Allocation& value,
+                         const LAllocation& memoryBase,
+                         const LDefinition& valueHelper)
+      : LWasmUnalignedStoreBase(classOpcode, ptr, valueHelper) {
+    setInt64Operand(1, value);
+    setOperand(1 + INT64_PIECES, memoryBase);
+  }
+
+  const LInt64Allocation value() { return getInt64Operand(ValueIndex); }
+  const LAllocation* memoryBase() {
+    return Base::getOperand(ValueIndex + INT64_PIECES);
+  }
+};
+
+// 64-bit wasm compare-and-exchange on the heap.
+// Operands: [ptr, oldValue (INT64_PIECES), newValue (INT64_PIECES),
+// memoryBase].
+class LWasmCompareExchangeI64
+    : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES + INT64_PIECES,
+                                0> {
+ public:
+  LIR_HEADER(WasmCompareExchangeI64);
+
+  LWasmCompareExchangeI64(const LAllocation& ptr,
+                          const LInt64Allocation& oldValue,
+                          const LInt64Allocation& newValue,
+                          const LAllocation& memoryBase)
+      : LInstructionHelper(classOpcode) {
+    setOperand(0, ptr);
+    setInt64Operand(1, oldValue);
+    setInt64Operand(1 + INT64_PIECES, newValue);
+    setOperand(1 + 2 * INT64_PIECES, memoryBase);
+  }
+
+  const LAllocation* ptr() { return getOperand(0); }
+  const LInt64Allocation oldValue() { return getInt64Operand(1); }
+  const LInt64Allocation newValue() {
+    return getInt64Operand(1 + INT64_PIECES);
+  }
+  const LAllocation* memoryBase() { return getOperand(1 + 2 * INT64_PIECES); }
+  const MWasmCompareExchangeHeap* mir() const {
+    return mir_->toWasmCompareExchangeHeap();
+  }
+};
+
+// 64-bit wasm atomic exchange on the heap.
+// Operands: [ptr, value (INT64_PIECES), memoryBase].
+class LWasmAtomicExchangeI64
+    : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 0> {
+ public:
+  LIR_HEADER(WasmAtomicExchangeI64);
+
+  LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value,
+                         const LAllocation& memoryBase)
+      : LInstructionHelper(classOpcode) {
+    setOperand(0, ptr);
+    setInt64Operand(1, value);
+    setOperand(1 + INT64_PIECES, memoryBase);
+  }
+
+  const LAllocation* ptr() { return getOperand(0); }
+  const LInt64Allocation value() { return getInt64Operand(1); }
+  const LAllocation* memoryBase() { return getOperand(1 + INT64_PIECES); }
+  const MWasmAtomicExchangeHeap* mir() const {
+    return mir_->toWasmAtomicExchangeHeap();
+  }
+};
+
+// 64-bit wasm atomic read-modify-write (add/and/or/xor/...) on the heap,
+// with two temps.  Operands: [ptr, value (INT64_PIECES), memoryBase].
+class LWasmAtomicBinopI64
+    : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 2> {
+ public:
+  LIR_HEADER(WasmAtomicBinopI64);
+
+  LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value,
+                      const LAllocation& memoryBase)
+      : LInstructionHelper(classOpcode) {
+    setOperand(0, ptr);
+    setInt64Operand(1, value);
+    setOperand(1 + INT64_PIECES, memoryBase);
+  }
+
+  const LAllocation* ptr() { return getOperand(0); }
+  const LInt64Allocation value() { return getInt64Operand(1); }
+  const LAllocation* memoryBase() { return getOperand(1 + INT64_PIECES); }
+  const MWasmAtomicBinopHeap* mir() const {
+    return mir_->toWasmAtomicBinopHeap();
+  }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_LIR_mips_shared_h */
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.cpp b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
new file mode 100644
index 0000000000..db0f1749bf
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -0,0 +1,1036 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/Lowering-mips-shared.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+// Unlike x86, MIPS has no special byte-register class: any general register
+// can be used for byte operations, so these helpers simply defer to the
+// generic register-allocation helpers.
+LAllocation LIRGeneratorMIPSShared::useByteOpRegister(MDefinition* mir) {
+  return useRegister(mir);
+}
+
+LAllocation LIRGeneratorMIPSShared::useByteOpRegisterAtStart(MDefinition* mir) {
+  return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorMIPSShared::useByteOpRegisterOrNonDoubleConstant(
+    MDefinition* mir) {
+  return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorMIPSShared::tempByteOpRegister() { return temp(); }
+
+// x = !y
+// Unary ALU lowering: the operand goes in a register and the result is
+// defined in a fresh register of the MIR type.
+void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+                                         MDefinition* mir, MDefinition* input) {
+  ins->setOperand(0, useRegister(input));
+  define(
+      ins, mir,
+      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x+y
+// Binary ALU lowering: lhs in a register, rhs may fold into an immediate.
+void LIRGeneratorMIPSShared::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+                                         MDefinition* mir, MDefinition* lhs,
+                                         MDefinition* rhs) {
+  ins->setOperand(0, useRegister(lhs));
+  ins->setOperand(1, useRegisterOrConstant(rhs));
+  define(
+      ins, mir,
+      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// Unary int64 ALU lowering: the result reuses the input's register(s).
+void LIRGeneratorMIPSShared::lowerForALUInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+    MDefinition* input) {
+  ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+  defineInt64ReuseInput(ins, mir, 0);
+}
+
+// Binary int64 ALU lowering.  The output reuses lhs; rhs may only be used
+// at-start when lhs and rhs are guaranteed distinct LIR nodes, otherwise
+// the reused lhs register could clobber rhs.
+void LIRGeneratorMIPSShared::lowerForALUInt64(
+    LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+  ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+                                         ? useInt64OrConstant(rhs)
+                                         : useInt64OrConstantAtStart(rhs));
+  defineInt64ReuseInput(ins, mir, 0);
+}
+
+// Lowering for 64-bit multiply.  On mips32 a temp is normally needed and
+// rhs must not alias the reused lhs; note the MOZ_CRASH below deliberately
+// disables the mips32 path because the cannotAliasRhs constraint cannot be
+// expressed through willHaveDifferentLIRNodes.
+void LIRGeneratorMIPSShared::lowerForMulInt64(LMulI64* ins, MMul* mir,
+                                              MDefinition* lhs,
+                                              MDefinition* rhs) {
+  bool needsTemp = false;
+  bool cannotAliasRhs = false;
+  bool reuseInput = true;
+
+#ifdef JS_CODEGEN_MIPS32
+  needsTemp = true;
+  cannotAliasRhs = true;
+  // See the documentation on willHaveDifferentLIRNodes; that test does not
+  // allow additional constraints.
+  MOZ_CRASH(
+      "cannotAliasRhs cannot be used the way it is used in the guard below");
+  if (rhs->isConstant()) {
+    int64_t constant = rhs->toConstant()->toInt64();
+    int32_t shift = mozilla::FloorLog2(constant);
+    // See special cases in CodeGeneratorMIPSShared::visitMulI64
+    if (constant >= -1 && constant <= 2) {
+      needsTemp = false;
+    }
+    if (int64_t(1) << shift == constant) {
+      needsTemp = false;
+    }
+    if (mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant + 1)) ||
+        mozilla::IsPowerOfTwo(static_cast<uint32_t>(constant - 1)))
+      reuseInput = false;
+  }
+#endif
+  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+  ins->setInt64Operand(INT64_PIECES,
+                       (willHaveDifferentLIRNodes(lhs, rhs) || cannotAliasRhs)
+                           ? useInt64OrConstant(rhs)
+                           : useInt64OrConstantAtStart(rhs));
+
+  if (needsTemp) {
+    ins->setTemp(0, temp());
+  }
+  if (reuseInput) {
+    defineInt64ReuseInput(ins, mir, 0);
+  } else {
+    defineInt64(ins, mir);
+  }
+}
+
+// Lowering for 64-bit shifts and rotates.  On mips32, a variable-count
+// rotate needs a temp and cannot reuse its input; all other cases reuse
+// the lhs register(s) for the output.
+template <size_t Temps>
+void LIRGeneratorMIPSShared::lowerForShiftInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+    MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+#ifdef JS_CODEGEN_MIPS32
+  if (mir->isRotate()) {
+    if (!rhs->isConstant()) {
+      ins->setTemp(0, temp());
+    }
+    ins->setInt64Operand(0, useInt64Register(lhs));
+  } else {
+    ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+  }
+#else
+  ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+#endif
+
+  static_assert(LShiftI64::Rhs == INT64_PIECES,
+                "Assume Rhs is located at INT64_PIECES.");
+  static_assert(LRotateI64::Count == INT64_PIECES,
+                "Assume Count is located at INT64_PIECES.");
+
+  ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
+#ifdef JS_CODEGEN_MIPS32
+  if (mir->isRotate()) {
+    defineInt64(ins, mir);
+  } else {
+    defineInt64ReuseInput(ins, mir, 0);
+  }
+#else
+  defineInt64ReuseInput(ins, mir, 0);
+#endif
+}
+
+// Explicit instantiations for the 0-temp (shift) and 1-temp (rotate) forms.
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForShiftInt64(
+    LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+    MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+// Fused 64-bit compare-and-branch: builds the LIR node directly from the
+// MCompare operands and the two branch targets.
+void LIRGeneratorMIPSShared::lowerForCompareI64AndBranch(
+    MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
+    MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+  LCompareI64AndBranch* lir = new (alloc())
+      LCompareI64AndBranch(comp, op, useInt64Register(left),
+                           useInt64OrConstant(right), ifTrue, ifFalse);
+  add(lir, mir);
+}
+
+// Unary FPU lowering: operand and result each get a register.
+void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+                                         MDefinition* mir, MDefinition* input) {
+  ins->setOperand(0, useRegister(input));
+  define(
+      ins, mir,
+      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// Binary FPU lowering: both operands in registers (no FP immediates).
+template <size_t Temps>
+void LIRGeneratorMIPSShared::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+                                         MDefinition* mir, MDefinition* lhs,
+                                         MDefinition* rhs) {
+  ins->setOperand(0, useRegister(lhs));
+  ins->setOperand(1, useRegister(rhs));
+  define(
+      ins, mir,
+      LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// Explicit instantiations for the 0-temp and 1-temp binary forms.
+template void LIRGeneratorMIPSShared::lowerForFPU(
+    LInstructionHelper<1, 2, 0>* ins, MDefinition* mir, MDefinition* lhs,
+    MDefinition* rhs);
+template void LIRGeneratorMIPSShared::lowerForFPU(
+    LInstructionHelper<1, 2, 1>* ins, MDefinition* mir, MDefinition* lhs,
+    MDefinition* rhs);
+
+// Fused (lhs & rhs) test-and-branch; rhs may fold into an immediate.
+void LIRGeneratorMIPSShared::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+                                                     MInstruction* mir,
+                                                     MDefinition* lhs,
+                                                     MDefinition* rhs) {
+  baab->setOperand(0, useRegisterAtStart(lhs));
+  baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+  add(baab, mir);
+}
+
+// Lowers a wasm builtin-call truncation (double/float32 -> int32).  The
+// instance is pinned in InstanceReg for the builtin call; no temp needed.
+void LIRGeneratorMIPSShared::lowerWasmBuiltinTruncateToInt32(
+    MWasmBuiltinTruncateToInt32* ins) {
+  MDefinition* opd = ins->input();
+  MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+  if (opd->type() == MIRType::Double) {
+    define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+               useRegister(opd), useFixed(ins->instance(), InstanceReg),
+               LDefinition::BogusTemp()),
+           ins);
+    return;
+  }
+
+  define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+             useRegister(opd), useFixed(ins->instance(), InstanceReg),
+             LDefinition::BogusTemp()),
+         ins);
+}
+
+// 32-bit shift lowering: lhs in a register, shift count may be immediate.
+void LIRGeneratorMIPSShared::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+                                           MDefinition* mir, MDefinition* lhs,
+                                           MDefinition* rhs) {
+  ins->setOperand(0, useRegister(lhs));
+  ins->setOperand(1, useRegisterOrConstant(rhs));
+  define(ins, mir);
+}
+
+// Lowers signed 32-bit division.  Unsigned divisions are routed to
+// lowerUDiv; divisions by a positive power of two become LDivPowTwoI; the
+// general case becomes LDivI.  A snapshot is attached when the division
+// can bail out (e.g. divide-by-zero or -0 results).
+void LIRGeneratorMIPSShared::lowerDivI(MDiv* div) {
+  if (div->isUnsigned()) {
+    lowerUDiv(div);
+    return;
+  }
+
+  // Division instructions are slow. Division by constant denominators can be
+  // rewritten to use other instructions.
+  if (div->rhs()->isConstant()) {
+    int32_t rhs = div->rhs()->toConstant()->toInt32();
+    // Check for division by a positive power of two, which is an easy and
+    // important case to optimize. Note that other optimizations are also
+    // possible; division by negative powers of two can be optimized in a
+    // similar manner as positive powers of two, and division by other
+    // constants can be optimized by a reciprocal multiplication technique.
+    int32_t shift = FloorLog2(rhs);
+    if (rhs > 0 && 1 << shift == rhs) {
+      LDivPowTwoI* lir =
+          new (alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+      if (div->fallible()) {
+        assignSnapshot(lir, div->bailoutKind());
+      }
+      define(lir, div);
+      return;
+    }
+  }
+
+  LDivI* lir = new (alloc())
+      LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+  if (div->fallible()) {
+    assignSnapshot(lir, div->bailoutKind());
+  }
+  define(lir, div);
+}
+
+// Integer negation (32-bit): simple one-register lowering.
+void LIRGeneratorMIPSShared::lowerNegI(MInstruction* ins, MDefinition* input) {
+  define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+
+// Integer negation (64-bit): the output reuses the input register(s).
+void LIRGeneratorMIPSShared::lowerNegI64(MInstruction* ins,
+                                         MDefinition* input) {
+  defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+                        ins, 0);
+}
+
+// Absolute value: allocateAbs picks the LIR node for the input type.
+void LIRGenerator::visitAbs(MAbs* ins) {
+  define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+// Lowers 32-bit multiply via the generic binary-ALU path, attaching a
+// snapshot when overflow or -0 can require a bailout.
+void LIRGeneratorMIPSShared::lowerMulI(MMul* mul, MDefinition* lhs,
+                                       MDefinition* rhs) {
+  LMulI* lir = new (alloc()) LMulI;
+  if (mul->fallible()) {
+    assignSnapshot(lir, mul->bailoutKind());
+  }
+
+  lowerForALU(lir, mul, lhs, rhs);
+}
+
+// Lowers signed 32-bit modulus.  Unsigned goes to lowerUMod; a divisor of
+// 2^shift becomes LModPowTwoI; a divisor of (2^k)-1 becomes LModMaskI; the
+// general case becomes LModI.  Snapshots are attached for fallible cases.
+void LIRGeneratorMIPSShared::lowerModI(MMod* mod) {
+  if (mod->isUnsigned()) {
+    lowerUMod(mod);
+    return;
+  }
+
+  if (mod->rhs()->isConstant()) {
+    int32_t rhs = mod->rhs()->toConstant()->toInt32();
+    int32_t shift = FloorLog2(rhs);
+    if (rhs > 0 && 1 << shift == rhs) {
+      LModPowTwoI* lir =
+          new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+      if (mod->fallible()) {
+        assignSnapshot(lir, mod->bailoutKind());
+      }
+      define(lir, mod);
+      return;
+    } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
+      LModMaskI* lir = new (alloc())
+          LModMaskI(useRegister(mod->lhs()), temp(LDefinition::GENERAL),
+                    temp(LDefinition::GENERAL), shift + 1);
+      if (mod->fallible()) {
+        assignSnapshot(lir, mod->bailoutKind());
+      }
+      define(lir, mod);
+      return;
+    }
+  }
+  LModI* lir =
+      new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+                          temp(LDefinition::GENERAL));
+
+  if (mod->fallible()) {
+    assignSnapshot(lir, mod->bailoutKind());
+  }
+  define(lir, mod);
+}
+
+// Math.pow(x, 0.5): double-only; the result reuses the input register.
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+  MDefinition* input = ins->input();
+  MOZ_ASSERT(input->type() == MIRType::Double);
+  LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+  defineReuseInput(lir, ins, 0);
+}
+
+// Wasm select (non-int64): the result reuses trueExpr's register; the
+// false arm may live anywhere.
+void LIRGeneratorMIPSShared::lowerWasmSelectI(MWasmSelect* select) {
+  auto* lir = new (alloc())
+      LWasmSelect(useRegisterAtStart(select->trueExpr()),
+                  useAny(select->falseExpr()), useRegister(select->condExpr()));
+  defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+// Wasm select (int64): same scheme with int64 register pairs.
+void LIRGeneratorMIPSShared::lowerWasmSelectI64(MWasmSelect* select) {
+  auto* lir = new (alloc()) LWasmSelectI64(
+      useInt64RegisterAtStart(select->trueExpr()),
+      useInt64(select->falseExpr()), useRegister(select->condExpr()));
+  defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+// Factory for the integer-input table switch; supplies the jump-table temp.
+LTableSwitch* LIRGeneratorMIPSShared::newLTableSwitch(
+    const LAllocation& in, const LDefinition& inputCopy,
+    MTableSwitch* tableswitch) {
+  return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+// Factory for the boxed-Value table switch; adds a float temp for unboxing.
+LTableSwitchV* LIRGeneratorMIPSShared::newLTableSwitchV(
+    MTableSwitch* tableswitch) {
+  return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+                                     tempDouble(), temp(), tableswitch);
+}
+
+// Unsigned right shift producing a double (int32 >>> count -> double).
+void LIRGeneratorMIPSShared::lowerUrshD(MUrsh* mir) {
+  MDefinition* lhs = mir->lhs();
+  MDefinition* rhs = mir->rhs();
+
+  MOZ_ASSERT(lhs->type() == MIRType::Int32);
+  MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+  LUrshD* lir = new (alloc())
+      LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+  define(lir, mir);
+}
+
+// pow with a constant power-of-two base; always carries a snapshot since
+// the result can overflow int32.
+void LIRGeneratorMIPSShared::lowerPowOfTwoI(MPow* mir) {
+  int32_t base = mir->input()->toConstant()->toInt32();
+  MDefinition* power = mir->power();
+
+  auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+  assignSnapshot(lir, mir->bailoutKind());
+  define(lir, mir);
+}
+
+// BigInt left shift; three temps, and a safepoint because it can allocate.
+void LIRGeneratorMIPSShared::lowerBigIntLsh(MBigIntLsh* ins) {
+  auto* lir = new (alloc()) LBigIntLsh(
+      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+  define(lir, ins);
+  assignSafepoint(lir, ins);
+}
+
+// BigInt right shift; same register scheme as lowerBigIntLsh.
+void LIRGeneratorMIPSShared::lowerBigIntRsh(MBigIntRsh* ins) {
+  auto* lir = new (alloc()) LBigIntRsh(
+      useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+  define(lir, ins);
+  assignSafepoint(lir, ins);
+}
+
+// Wasm negation: dispatch on result type (int32 / float32 / double).
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+  if (ins->type() == MIRType::Int32) {
+    define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+  } else if (ins->type() == MIRType::Float32) {
+    define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+  } else {
+    MOZ_ASSERT(ins->type() == MIRType::Double);
+    define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+  }
+}
+
+// Lowers a wasm heap load.  Picks among the unaligned, atomic-i64 (mips32),
+// i64, and plain variants; when the access has a nonzero offset, temp 0 is
+// replaced with a copy of the base pointer for address computation.
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+  MDefinition* base = ins->base();
+  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
+  // sign-extended on mips64 platform and we should explicitly promote it to
+  // 64-bit by zero-extension when use it as an index register in memory
+  // accesses.
+  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+  // Use the explicit memory base when present, else the pinned HeapReg.
+  LAllocation memoryBase =
+      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+                           : LGeneralReg(HeapReg);
+
+  LAllocation ptr;
+#ifdef JS_CODEGEN_MIPS32
+  // On mips32 an int64 result spans two registers, so the pointer cannot be
+  // an at-start use.
+  if (ins->type() == MIRType::Int64) {
+    ptr = useRegister(base);
+  } else {
+    ptr = useRegisterAtStart(base);
+  }
+#else
+  ptr = useRegisterAtStart(base);
+#endif
+
+  if (IsUnaligned(ins->access())) {
+    if (ins->type() == MIRType::Int64) {
+      auto* lir = new (alloc()) LWasmUnalignedLoadI64(ptr, memoryBase, temp());
+      if (ins->access().offset()) {
+        lir->setTemp(0, tempCopy(base, 0));
+      }
+
+      defineInt64(lir, ins);
+      return;
+    }
+
+    auto* lir = new (alloc()) LWasmUnalignedLoad(ptr, memoryBase, temp());
+    if (ins->access().offset()) {
+      lir->setTemp(0, tempCopy(base, 0));
+    }
+
+    define(lir, ins);
+    return;
+  }
+
+  if (ins->type() == MIRType::Int64) {
+#ifdef JS_CODEGEN_MIPS32
+    // mips32 needs a dedicated node for atomic 64-bit loads.
+    if (ins->access().isAtomic()) {
+      auto* lir = new (alloc()) LWasmAtomicLoadI64(ptr);
+      defineInt64(lir, ins);
+      return;
+    }
+#endif
+    auto* lir = new (alloc()) LWasmLoadI64(ptr, memoryBase);
+    if (ins->access().offset()) {
+      lir->setTemp(0, tempCopy(base, 0));
+    }
+
+    defineInt64(lir, ins);
+    return;
+  }
+
+  auto* lir = new (alloc()) LWasmLoad(ptr, memoryBase);
+  if (ins->access().offset()) {
+    lir->setTemp(0, tempCopy(base, 0));
+  }
+
+  define(lir, ins);
+}
+
+// Lowers a wasm heap store.  Mirrors visitWasmLoad: unaligned, atomic-i64
+// (mips32), i64, and plain variants; a nonzero access offset replaces
+// temp 0 with a copy of the base pointer.
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+  MDefinition* base = ins->base();
+  // See comment in visitWasmLoad re the type of 'base'.
+  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+  MDefinition* value = ins->value();
+  // Use the explicit memory base when present, else the pinned HeapReg.
+  LAllocation memoryBase =
+      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+                           : LGeneralReg(HeapReg);
+
+  if (IsUnaligned(ins->access())) {
+    LAllocation baseAlloc = useRegisterAtStart(base);
+    if (ins->access().type() == Scalar::Int64) {
+      LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+      auto* lir = new (alloc())
+          LWasmUnalignedStoreI64(baseAlloc, valueAlloc, memoryBase, temp());
+      if (ins->access().offset()) {
+        lir->setTemp(0, tempCopy(base, 0));
+      }
+
+      add(lir, ins);
+      return;
+    }
+
+    LAllocation valueAlloc = useRegisterAtStart(value);
+    auto* lir = new (alloc())
+        LWasmUnalignedStore(baseAlloc, valueAlloc, memoryBase, temp());
+    if (ins->access().offset()) {
+      lir->setTemp(0, tempCopy(base, 0));
+    }
+
+    add(lir, ins);
+    return;
+  }
+
+  if (ins->access().type() == Scalar::Int64) {
+#ifdef JS_CODEGEN_MIPS32
+    // mips32 needs a dedicated node for atomic 64-bit stores.
+    if (ins->access().isAtomic()) {
+      auto* lir = new (alloc()) LWasmAtomicStoreI64(
+          useRegister(base), useInt64Register(value), temp());
+      add(lir, ins);
+      return;
+    }
+#endif
+
+    LAllocation baseAlloc = useRegisterAtStart(base);
+    LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+    auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc, memoryBase);
+    if (ins->access().offset()) {
+      lir->setTemp(0, tempCopy(base, 0));
+    }
+
+    add(lir, ins);
+    return;
+  }
+
+  LAllocation baseAlloc = useRegisterAtStart(base);
+  LAllocation valueAlloc = useRegisterAtStart(value);
+  auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc, memoryBase);
+  if (ins->access().offset()) {
+    lir->setTemp(0, tempCopy(base, 0));
+  }
+
+  add(lir, ins);
+}
+
+// Lower an unsigned 32-bit division.  Both operands live in registers;
+// the shared LUDivOrMod node covers both udiv and umod.
+void LIRGeneratorMIPSShared::lowerUDiv(MDiv* div) {
+  MDefinition* lhs = div->getOperand(0);
+  MDefinition* rhs = div->getOperand(1);
+
+  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+  lir->setOperand(0, useRegister(lhs));
+  lir->setOperand(1, useRegister(rhs));
+  // A fallible division needs a snapshot so it can bail out.
+  if (div->fallible()) {
+    assignSnapshot(lir, div->bailoutKind());
+  }
+
+  define(lir, div);
+}
+
+// Lower an unsigned 32-bit modulus; mirrors lowerUDiv via LUDivOrMod.
+void LIRGeneratorMIPSShared::lowerUMod(MMod* mod) {
+  MDefinition* lhs = mod->getOperand(0);
+  MDefinition* rhs = mod->getOperand(1);
+
+  LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+  lir->setOperand(0, useRegister(lhs));
+  lir->setOperand(1, useRegister(rhs));
+  // A fallible modulus needs a snapshot so it can bail out.
+  if (mod->fallible()) {
+    assignSnapshot(lir, mod->bailoutKind());
+  }
+
+  define(lir, mod);
+}
+
+// Lower uint32 -> double conversion; the int32 input stays in a register
+// for the whole instruction (at-start use).
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+  LWasmUint32ToDouble* lir =
+      new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+  define(lir, ins);
+}
+
+// Lower uint32 -> float32 conversion; same operand policy as the double
+// variant above.
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+  MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+  LWasmUint32ToFloat32* lir =
+      new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+  define(lir, ins);
+}
+
+// Lower an asm.js heap load.  asm.js accesses never carry an offset; a
+// constant, bounds-check-free base may be folded into the LIR directly.
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+  MOZ_ASSERT(ins->access().offset() == 0);
+
+  MDefinition* base = ins->base();
+  MOZ_ASSERT(base->type() == MIRType::Int32);
+  LAllocation baseAlloc;
+  LAllocation limitAlloc;
+  // For MIPS it is best to keep the 'base' in a register if a bounds check
+  // is needed.
+  if (base->isConstant() && !ins->needsBoundsCheck()) {
+    // A bounds check is only skipped for a positive index.
+    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+    baseAlloc = LAllocation(base->toConstant());
+  } else {
+    baseAlloc = useRegisterAtStart(base);
+    if (ins->needsBoundsCheck()) {
+      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
+      limitAlloc = useRegisterAtStart(boundsCheckLimit);
+    }
+  }
+
+  define(new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation()),
+         ins);
+}
+
+// Lower an asm.js heap store; operand policy mirrors visitAsmJSLoadHeap.
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+  MOZ_ASSERT(ins->access().offset() == 0);
+
+  MDefinition* base = ins->base();
+  MOZ_ASSERT(base->type() == MIRType::Int32);
+  LAllocation baseAlloc;
+  LAllocation limitAlloc;
+  // A constant base with no bounds check can be encoded directly.
+  if (base->isConstant() && !ins->needsBoundsCheck()) {
+    MOZ_ASSERT(base->toConstant()->toInt32() >= 0);
+    baseAlloc = LAllocation(base->toConstant());
+  } else {
+    baseAlloc = useRegisterAtStart(base);
+    if (ins->needsBoundsCheck()) {
+      MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+      MOZ_ASSERT(boundsCheckLimit->type() == MIRType::Int32);
+      limitAlloc = useRegisterAtStart(boundsCheckLimit);
+    }
+  }
+
+  add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+                                    limitAlloc, LAllocation()),
+      ins);
+}
+
+// Lower String.prototype.substring.  Needs three temps (one of which is
+// a byte-op register) and a safepoint, since the result string may be
+// allocated at runtime.
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+  LSubstr* lir = new (alloc())
+      LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+              useRegister(ins->length()), temp(), temp(), tempByteOpRegister());
+  define(lir, ins);
+  assignSafepoint(lir, ins);
+}
+
+// Lower Atomics.compareExchange on a typed array element.  BigInt arrays
+// use a dedicated 64-bit node with two int64 temps and a safepoint;
+// sub-word (8/16-bit) element types need value/offset/mask temps for the
+// word-sized access sequence emitted by the code generator.
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+    MCompareExchangeTypedArrayElement* ins) {
+  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+  const LUse elements = useRegister(ins->elements());
+  const LAllocation index =
+      useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+  const LAllocation newval = useRegister(ins->newval());
+  const LAllocation oldval = useRegister(ins->oldval());
+
+  if (Scalar::isBigIntType(ins->arrayType())) {
+    LInt64Definition temp1 = tempInt64();
+    LInt64Definition temp2 = tempInt64();
+
+    auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+        elements, index, oldval, newval, temp1, temp2);
+    define(lir, ins);
+    assignSafepoint(lir, ins);
+    return;
+  }
+
+  // If the target is a floating register then we need a temp at the
+  // CodeGenerator level for creating the result.
+
+  LDefinition outTemp = LDefinition::BogusTemp();
+  LDefinition valueTemp = LDefinition::BogusTemp();
+  LDefinition offsetTemp = LDefinition::BogusTemp();
+  LDefinition maskTemp = LDefinition::BogusTemp();
+
+  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+    outTemp = temp();
+  }
+
+  // Sub-word accesses need the value/offset/mask scratch registers.
+  if (Scalar::byteSize(ins->arrayType()) < 4) {
+    valueTemp = temp();
+    offsetTemp = temp();
+    maskTemp = temp();
+  }
+
+  LCompareExchangeTypedArrayElement* lir = new (alloc())
+      LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
+                                        outTemp, valueTemp, offsetTemp,
+                                        maskTemp);
+
+  define(lir, ins);
+}
+
+// Lower Atomics.exchange on a typed array element.  Structure parallels
+// visitCompareExchangeTypedArrayElement: BigInt arrays get a 64-bit node
+// plus safepoint; sub-word element types get value/offset/mask temps.
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+    MAtomicExchangeTypedArrayElement* ins) {
+  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+  const LUse elements = useRegister(ins->elements());
+  const LAllocation index =
+      useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+  const LAllocation value = useRegister(ins->value());
+
+  if (Scalar::isBigIntType(ins->arrayType())) {
+    LInt64Definition temp1 = tempInt64();
+    LDefinition temp2 = temp();
+
+    auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+        elements, index, value, temp1, temp2);
+    define(lir, ins);
+    assignSafepoint(lir, ins);
+    return;
+  }
+
+  // If the target is a floating register then we need a temp at the
+  // CodeGenerator level for creating the result.
+
+  MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+  LDefinition outTemp = LDefinition::BogusTemp();
+  LDefinition valueTemp = LDefinition::BogusTemp();
+  LDefinition offsetTemp = LDefinition::BogusTemp();
+  LDefinition maskTemp = LDefinition::BogusTemp();
+
+  if (ins->arrayType() == Scalar::Uint32) {
+    MOZ_ASSERT(ins->type() == MIRType::Double);
+    outTemp = temp();
+  }
+
+  // Sub-word accesses need the value/offset/mask scratch registers.
+  if (Scalar::byteSize(ins->arrayType()) < 4) {
+    valueTemp = temp();
+    offsetTemp = temp();
+    maskTemp = temp();
+  }
+
+  LAtomicExchangeTypedArrayElement* lir =
+      new (alloc()) LAtomicExchangeTypedArrayElement(
+          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+
+  define(lir, ins);
+}
+
+// Lower a wasm atomic compareExchange.  Int64 accesses use a dedicated
+// 64-bit node; accesses narrower than a word need value/offset/mask
+// temps for the word-sized access sequence.
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+  MDefinition* base = ins->base();
+  // See comment in visitWasmLoad re the type of 'base'.
+  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+  LAllocation memoryBase =
+      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+                           : LGeneralReg(HeapReg);
+
+  if (ins->access().type() == Scalar::Int64) {
+    auto* lir = new (alloc()) LWasmCompareExchangeI64(
+        useRegister(base), useInt64Register(ins->oldValue()),
+        useInt64Register(ins->newValue()), memoryBase);
+    defineInt64(lir, ins);
+    return;
+  }
+
+  LDefinition valueTemp = LDefinition::BogusTemp();
+  LDefinition offsetTemp = LDefinition::BogusTemp();
+  LDefinition maskTemp = LDefinition::BogusTemp();
+
+  if (ins->access().byteSize() < 4) {
+    valueTemp = temp();
+    offsetTemp = temp();
+    maskTemp = temp();
+  }
+
+  LWasmCompareExchangeHeap* lir = new (alloc())
+      LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
+                               useRegister(ins->newValue()), valueTemp,
+                               offsetTemp, maskTemp, memoryBase);
+
+  define(lir, ins);
+}
+
+// Lower a wasm atomic exchange; same shape as visitWasmCompareExchangeHeap
+// but with a single value operand.
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+  MDefinition* base = ins->base();
+  // See comment in visitWasmLoad re the type of 'base'.
+  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+  LAllocation memoryBase =
+      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+                           : LGeneralReg(HeapReg);
+
+  if (ins->access().type() == Scalar::Int64) {
+    auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+        useRegister(base), useInt64Register(ins->value()), memoryBase);
+    defineInt64(lir, ins);
+    return;
+  }
+
+  LDefinition valueTemp = LDefinition::BogusTemp();
+  LDefinition offsetTemp = LDefinition::BogusTemp();
+  LDefinition maskTemp = LDefinition::BogusTemp();
+
+  if (ins->access().byteSize() < 4) {
+    valueTemp = temp();
+    offsetTemp = temp();
+    maskTemp = temp();
+  }
+
+  LWasmAtomicExchangeHeap* lir = new (alloc())
+      LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
+                              valueTemp, offsetTemp, maskTemp, memoryBase);
+  define(lir, ins);
+}
+
+// Lower a wasm atomic read-modify-write (add/and/or/xor/...).  Int64 uses
+// a dedicated node (with an extra temp on MIPS32); otherwise a for-effect
+// variant is selected when the old value is unused.
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+  MDefinition* base = ins->base();
+  // See comment in visitWasmLoad re the type of 'base'.
+  MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+  LAllocation memoryBase =
+      ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+                           : LGeneralReg(HeapReg);
+
+  if (ins->access().type() == Scalar::Int64) {
+    auto* lir = new (alloc()) LWasmAtomicBinopI64(
+        useRegister(base), useInt64Register(ins->value()), memoryBase);
+    lir->setTemp(0, temp());
+#ifdef JS_CODEGEN_MIPS32
+    lir->setTemp(1, temp());
+#endif
+    defineInt64(lir, ins);
+    return;
+  }
+
+  LDefinition valueTemp = LDefinition::BogusTemp();
+  LDefinition offsetTemp = LDefinition::BogusTemp();
+  LDefinition maskTemp = LDefinition::BogusTemp();
+
+  if (ins->access().byteSize() < 4) {
+    valueTemp = temp();
+    offsetTemp = temp();
+    maskTemp = temp();
+  }
+
+  // When the result is unused, emit the cheaper for-effect variant.
+  if (!ins->hasUses()) {
+    LWasmAtomicBinopHeapForEffect* lir = new (alloc())
+        LWasmAtomicBinopHeapForEffect(useRegister(base),
+                                      useRegister(ins->value()), valueTemp,
+                                      offsetTemp, maskTemp, memoryBase);
+    add(lir, ins);
+    return;
+  }
+
+  LWasmAtomicBinopHeap* lir = new (alloc())
+      LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+                           valueTemp, offsetTemp, maskTemp, memoryBase);
+
+  define(lir, ins);
+}
+
+// Lower Atomics.{add,sub,and,or,xor} on a typed array element.  BigInt
+// arrays branch to 64-bit nodes (with a for-effect variant); other types
+// need value/offset/mask temps for sub-word element sizes, plus an
+// output temp for a Uint32 element producing a double result.
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+    MAtomicTypedArrayElementBinop* ins) {
+  MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+  MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+  MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+  MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+  MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+  const LUse elements = useRegister(ins->elements());
+  const LAllocation index =
+      useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+  const LAllocation value = useRegister(ins->value());
+
+  if (Scalar::isBigIntType(ins->arrayType())) {
+    LInt64Definition temp1 = tempInt64();
+    LInt64Definition temp2 = tempInt64();
+
+    // Case 1: the result of the operation is not used.
+    //
+    // We can omit allocating the result BigInt.
+
+    if (ins->isForEffect()) {
+      auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+          elements, index, value, temp1, temp2);
+      add(lir, ins);
+      return;
+    }
+
+    // Case 2: the result of the operation is used.
+
+    auto* lir = new (alloc())
+        LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+    define(lir, ins);
+    assignSafepoint(lir, ins);
+    return;
+  }
+
+  LDefinition valueTemp = LDefinition::BogusTemp();
+  LDefinition offsetTemp = LDefinition::BogusTemp();
+  LDefinition maskTemp = LDefinition::BogusTemp();
+
+  // Sub-word accesses need the value/offset/mask scratch registers.
+  if (Scalar::byteSize(ins->arrayType()) < 4) {
+    valueTemp = temp();
+    offsetTemp = temp();
+    maskTemp = temp();
+  }
+
+  if (ins->isForEffect()) {
+    LAtomicTypedArrayElementBinopForEffect* lir =
+        new (alloc()) LAtomicTypedArrayElementBinopForEffect(
+            elements, index, value, valueTemp, offsetTemp, maskTemp);
+    add(lir, ins);
+    return;
+  }
+
+  // For a Uint32Array with a known double result we need a temp for
+  // the intermediate output.
+
+  LDefinition outTemp = LDefinition::BogusTemp();
+
+  if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+    outTemp = temp();
+  }
+
+  LAtomicTypedArrayElementBinop* lir =
+      new (alloc()) LAtomicTypedArrayElementBinop(
+          elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+  define(lir, ins);
+}
+
+// Lower floating-point copysign.  The lhs is reused as the output
+// register; two integer temps are allocated for the code generator.
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+  MDefinition* lhs = ins->lhs();
+  MDefinition* rhs = ins->rhs();
+
+  MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+  MOZ_ASSERT(lhs->type() == rhs->type());
+  MOZ_ASSERT(lhs->type() == ins->type());
+
+  // Pick the double or float32 node; both share the 1-def/2-op/2-temp shape.
+  LInstructionHelper<1, 2, 2>* lir;
+  if (lhs->type() == MIRType::Double) {
+    lir = new (alloc()) LCopySignD();
+  } else {
+    lir = new (alloc()) LCopySignF();
+  }
+
+  lir->setTemp(0, temp());
+  lir->setTemp(1, temp());
+
+  // lhs is used at-start because the output reuses its register.
+  lir->setOperand(0, useRegisterAtStart(lhs));
+  lir->setOperand(1, useRegister(rhs));
+  defineReuseInput(lir, ins, 0);
+}
+
+// Lower int32 -> int64 widening; the input is used at-start.
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+  defineInt64(
+      new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
+
+// Lower sign-extension of the low 8/16/32 bits of an int64.
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+  defineInt64(new (alloc())
+                  LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+              ins);
+}
+
+// On mips we specialize the only cases where compare is {U,}Int32 and select
+// is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+    MCompare::CompareType compTy, MIRType insTy) {
+  return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+                                     compTy == MCompare::Compare_UInt32);
+}
+
+// Fuse a compare into a wasm select.  The true-expr is used at-start and
+// its register is reused as the output; the false-expr only needs a
+// plain register use.
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+                                                   MDefinition* lhs,
+                                                   MDefinition* rhs,
+                                                   MCompare::CompareType compTy,
+                                                   JSOp jsop) {
+  MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+  auto* lir = new (alloc()) LWasmCompareAndSelect(
+      useRegister(lhs), useRegister(rhs), compTy, jsop,
+      useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
+  defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
+
+// SIMD is not implemented for MIPS: every SIMD lowering below crashes
+// with an NYI message, and the MWasm*Simd128 predicates conservatively
+// decline every specialization so these paths are never selected.
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+  MOZ_CRASH("ternary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+  MOZ_CRASH("binary SIMD NYI");
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+    int8_t shuffle[16]) {
+  return false;
+}
+bool MWasmTernarySimd128::canRelaxBitselect() { return false; }
+
+bool MWasmBinarySimd128::canPmaddubsw() { return false; }
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // Many constant-rhs shapes could be specialized here once MIPS SIMD
+  // lands; none are for now.
+  return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+    MWasmBinarySimd128WithConstant* ins) {
+  MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+  MOZ_CRASH("shift SIMD NYI");
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+  MOZ_CRASH("shuffle SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+  MOZ_CRASH("replace-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+  MOZ_CRASH("scalar-to-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+  MOZ_CRASH("unary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+  MOZ_CRASH("reduce-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+  MOZ_CRASH("load-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+  MOZ_CRASH("store-lane SIMD NYI");
+}
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.h b/js/src/jit/mips-shared/Lowering-mips-shared.h
new file mode 100644
index 0000000000..ca74a7aaf5
--- /dev/null
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_Lowering_mips_shared_h
+#define jit_mips_shared_Lowering_mips_shared_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+// Lowering policy shared by the MIPS32 and MIPS64 backends.  Declares
+// the architecture-specific operand/temp choices used by LIRGenerator.
+class LIRGeneratorMIPSShared : public LIRGeneratorShared {
+ protected:
+  LIRGeneratorMIPSShared(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+      : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+  // x86 restricts which registers can be used for 1-byte stores and
+  // loads; on MIPS all general registers are okay, so these are trivial.
+  LAllocation useByteOpRegister(MDefinition* mir);
+  LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+  LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+  LDefinition tempByteOpRegister();
+
+  bool needTempForPostBarrier() { return false; }
+
+  void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+                     MDefinition* lhs, MDefinition* rhs);
+  void lowerUrshD(MUrsh* mir);
+
+  void lowerPowOfTwoI(MPow* mir);
+
+  void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+                   MDefinition* input);
+  void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+                   MDefinition* lhs, MDefinition* rhs);
+
+  void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+                        MDefinition* mir, MDefinition* input);
+  void lowerForALUInt64(
+      LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+      MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+  void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+                        MDefinition* rhs);
+  template <size_t Temps>
+  void lowerForShiftInt64(
+      LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+      MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+  void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+                                   MDefinition* left, MDefinition* right,
+                                   MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+
+  void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+                   MDefinition* src);
+  template <size_t Temps>
+  void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+                   MDefinition* lhs, MDefinition* rhs);
+
+  void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+                               MDefinition* lhs, MDefinition* rhs);
+  void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+  void lowerDivI(MDiv* div);
+  void lowerModI(MMod* mod);
+  void lowerNegI(MInstruction* ins, MDefinition* input);
+  void lowerNegI64(MInstruction* ins, MDefinition* input);
+  void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+  void lowerUDiv(MDiv* div);
+  void lowerUMod(MMod* mod);
+  void lowerWasmSelectI(MWasmSelect* select);
+  void lowerWasmSelectI64(MWasmSelect* select);
+
+  void lowerBigIntLsh(MBigIntLsh* ins);
+  void lowerBigIntRsh(MBigIntRsh* ins);
+
+  LTableSwitch* newLTableSwitch(const LAllocation& in,
+                                const LDefinition& inputCopy,
+                                MTableSwitch* ins);
+  LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+  void lowerPhi(MPhi* phi);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_Lowering_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
new file mode 100644
index 0000000000..debd3b67ff
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared-inl.h
@@ -0,0 +1,1338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_inl_h
+#define jit_mips_shared_MacroAssembler_mips_shared_inl_h
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+// Bit-for-bit moves between FPU and GPR, and 8/16-bit sign extension;
+// each is a thin wrapper over a single ma_/moveFrom/moveTo primitive.
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+  moveFromFloat32(src, dest);
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+  moveToFloat32(src, dest);
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+  ma_seb(dest, src);
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+  ma_seh(dest, src);
+}
+
+// The ABI return address lives in ra on MIPS.
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(ra, dest); }
+
+// ===============================================================
+// Logical instructions
+//
+// Memory-operand forms go through SecondScratchReg: load, operate, and
+// (for read-modify-write forms) store the result back.
+
+void MacroAssembler::not32(Register reg) { ma_not(reg, reg); }
+
+void MacroAssembler::and32(Register src, Register dest) {
+  as_and(dest, dest, src);
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) { ma_and(dest, imm); }
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+  load32(dest, SecondScratchReg);
+  ma_and(SecondScratchReg, imm);
+  store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+  load32(src, SecondScratchReg);
+  ma_and(dest, SecondScratchReg);
+}
+
+void MacroAssembler::or32(Register src, Register dest) { ma_or(dest, src); }
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { ma_or(dest, imm); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+  load32(dest, SecondScratchReg);
+  ma_or(SecondScratchReg, imm);
+  store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::xor32(Register src, Register dest) { ma_xor(dest, src); }
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) { ma_xor(dest, imm); }
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+  load32(dest, SecondScratchReg);
+  ma_xor(SecondScratchReg, imm);
+  store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+  load32(src, SecondScratchReg);
+  ma_xor(dest, SecondScratchReg);
+}
+
+// ===============================================================
+// Swap instructions
+
+// wsbh swaps bytes within each halfword; combined with seh / and / rotr
+// this yields the three byte-swap flavors below.
+void MacroAssembler::byteSwap16SignExtend(Register reg) {
+  ma_wsbh(reg, reg);
+  ma_seh(reg, reg);
+}
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
+  ma_wsbh(reg, reg);
+  ma_and(reg, Imm32(0xFFFF));
+}
+
+void MacroAssembler::byteSwap32(Register reg) {
+  ma_wsbh(reg, reg);
+  as_rotr(reg, reg, 16);
+}
+
+// ===============================================================
+// Arithmetic instructions
+//
+// Register/immediate forms map directly onto as_/ma_ primitives; the
+// Address forms round-trip through a scratch register.
+
+void MacroAssembler::add32(Register src, Register dest) {
+  as_addu(dest, dest, src);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+  ma_addu(dest, dest, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register src, Register dest) {
+  ma_addu(dest, src, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+  load32(dest, SecondScratchReg);
+  ma_addu(SecondScratchReg, imm);
+  store32(SecondScratchReg, dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+  loadPtr(dest, ScratchRegister);
+  addPtr(imm, ScratchRegister);
+  storePtr(ScratchRegister, dest);
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+  loadPtr(src, ScratchRegister);
+  addPtr(ScratchRegister, dest);
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+  as_addd(dest, dest, src);
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+  as_adds(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Register src, Register dest) {
+  as_subu(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+  ma_subu(dest, dest, imm);
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+  load32(src, SecondScratchReg);
+  as_subu(dest, dest, SecondScratchReg);
+}
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+  loadPtr(dest, SecondScratchReg);
+  subPtr(src, SecondScratchReg);
+  storePtr(SecondScratchReg, dest);
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+  loadPtr(addr, SecondScratchReg);
+  subPtr(SecondScratchReg, dest);
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+  as_subd(dest, dest, src);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+  as_subs(dest, dest, src);
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+  as_mul(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+  move32(imm, SecondScratchReg);
+  mul32(SecondScratchReg, srcDest);
+}
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+  as_muls(dest, dest, src);
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+  as_muld(dest, dest, src);
+}
+
+// Multiply dest by the double stored at the absolute address 'imm'.
+// Note: 'temp' is unused here; ScratchRegister/ScratchDoubleReg suffice.
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+                                  FloatRegister dest) {
+  movePtr(imm, ScratchRegister);
+  loadDouble(Address(ScratchRegister, 0), ScratchDoubleReg);
+  mulDouble(ScratchDoubleReg, dest);
+}
+
+// 32-bit division in place (srcDest /= rhs).  MIPSR6 has three-operand
+// div/divu that write the quotient directly; pre-R6 uses the HI/LO pair
+// and reads the quotient from LO via mflo.
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+                                bool isUnsigned) {
+  if (isUnsigned) {
+#ifdef MIPSR6
+    as_divu(srcDest, srcDest, rhs);
+#else
+    as_divu(srcDest, rhs);
+#endif
+  } else {
+#ifdef MIPSR6
+    as_div(srcDest, srcDest, rhs);
+#else
+    as_div(srcDest, rhs);
+#endif
+  }
+#ifndef MIPSR6
+  as_mflo(srcDest);
+#endif
+}
+
+// 32-bit remainder in place (srcDest %= rhs).  MIPSR6 has dedicated
+// mod/modu instructions; pre-R6 issues div/divu and reads the remainder
+// from HI via mfhi.
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+                                 bool isUnsigned) {
+  if (isUnsigned) {
+#ifdef MIPSR6
+    as_modu(srcDest, srcDest, rhs);
+#else
+    as_divu(srcDest, rhs);
+#endif
+  } else {
+#ifdef MIPSR6
+    as_mod(srcDest, srcDest, rhs);
+#else
+    as_div(srcDest, rhs);
+#endif
+  }
+#ifndef MIPSR6
+  as_mfhi(srcDest);
+#endif
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+  as_divs(dest, dest, src);
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+  as_divd(dest, dest, src);
+}
+
+void MacroAssembler::neg32(Register reg) { ma_negu(reg, reg); }
+
+void MacroAssembler::negateDouble(FloatRegister reg) { as_negd(reg, reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { as_negs(reg, reg); }
+
+// Integer absolute value: negate only when the sign bit is set.
+void MacroAssembler::abs32(Register src, Register dest) {
+  // TODO: There's probably a branchless way to do this.
+  if (src != dest) {
+    move32(src, dest);
+  }
+  Label positive;
+  branchTest32(Assembler::NotSigned, dest, dest, &positive);
+  neg32(dest);
+  bind(&positive);
+}
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+  as_abss(dest, src);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+  as_absd(dest, src);
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+  as_sqrts(dest, src);
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+  as_sqrtd(dest, src);
+}
+
+// min/max delegate to the shared minMax helpers; the final bool selects
+// max (true) vs min (false).
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+                                bool handleNaN) {
+  minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+                               bool handleNaN) {
+  minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+                                bool handleNaN) {
+  minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+                               bool handleNaN) {
+  minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+//
+// The flexible* variants exist because some architectures constrain the
+// shift-count register; on MIPS any register works, so they just forward.
+
+void MacroAssembler::lshift32(Register src, Register dest) {
+  ma_sll(dest, dest, src);
+}
+
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+  lshift32(src, dest);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+  ma_sll(dest, dest, imm);
+}
+
+void MacroAssembler::rshift32(Register src, Register dest) {
+  ma_srl(dest, dest, src);
+}
+
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+  rshift32(src, dest);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+  ma_srl(dest, dest, imm);
+}
+
+void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
+  ma_sra(dest, dest, src);
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+  rshift32Arithmetic(src, dest);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+  ma_sra(dest, dest, imm);
+}
+
+// ===============================================================
+// Rotation functions
+//
+// A zero immediate count degenerates to a plain move.
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+  if (count.value) {
+    ma_rol(dest, input, count);
+  } else {
+    ma_move(dest, input);
+  }
+}
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
+  ma_rol(dest, input, count);
+}
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+  if (count.value) {
+    ma_ror(dest, input, count);
+  } else {
+    ma_move(dest, input);
+  }
+}
+void MacroAssembler::rotateRight(Register count, Register input,
+                                 Register dest) {
+  ma_ror(dest, input, count);
+}
+
+// ===============================================================
+// Bit counting functions
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+  as_clz(dest, src);
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+  ma_ctz(dest, src);
+}
+
+// Parallel (SWAR) population count: sum bits in 2-, 4-, then 8-bit
+// groups using the 0x55/0x33/0x0F masks, then fold the byte counts into
+// the top byte and shift them down.
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+  // Equivalent to GCC output of mozilla::CountPopulation32()
+  ma_move(output, input);
+  ma_sra(tmp, input, Imm32(1));
+  ma_and(tmp, Imm32(0x55555555));
+  ma_subu(output, tmp);
+  ma_sra(tmp, output, Imm32(2));
+  ma_and(output, Imm32(0x33333333));
+  ma_and(tmp, Imm32(0x33333333));
+  ma_addu(output, tmp);
+  ma_srl(tmp, output, Imm32(4));
+  ma_addu(output, tmp);
+  ma_and(output, Imm32(0xF0F0F0F));
+  ma_sll(tmp, output, Imm32(8));
+  ma_addu(output, tmp);
+  ma_sll(tmp, output, Imm32(16));
+  ma_addu(output, tmp);
+  ma_sra(output, output, Imm32(24));
+}
+
+// ===============================================================
+// Condition functions
+
+// Compare a byte in memory against an immediate and set 'dest' to the
+// boolean result.  Unsigned conditions zero-extend both the loaded byte
+// and the immediate; signed conditions sign-extend both.
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+                             Register dest) {
+  SecondScratchRegisterScope scratch2(*this);
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load8ZeroExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(uint8_t(rhs.value)), cond);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load8SignExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(int8_t(rhs.value)), cond);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// Halfword analogue of cmp8Set, with the same extension rules.
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+                              Register dest) {
+  SecondScratchRegisterScope scratch2(*this);
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load16ZeroExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(uint16_t(rhs.value)), cond);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load16SignExtend(lhs, scratch2);
+      ma_cmp_set(dest, scratch2, Imm32(int16_t(rhs.value)), cond);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// ===============================================================
+// Branch functions
+
+// Branch on a comparison of the byte at |lhs| with immediate |rhs|. As with
+// cmp8Set, the extension (zero vs. sign) of both operands follows the
+// condition's signedness.
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+                             Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load8ZeroExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(uint8_t(rhs.value)), label);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load8SignExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(int8_t(rhs.value)), label);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// BaseIndex form: the scaled address is computed into scratch2 first, and
+// scratch2 is then reused as the destination of the byte load.
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+                             Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  computeScaledAddress(lhs, scratch2);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load8ZeroExtend(Address(scratch2, lhs.offset), scratch2);
+      branch32(cond, scratch2, rhs, label);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load8SignExtend(Address(scratch2, lhs.offset), scratch2);
+      branch32(cond, scratch2, rhs, label);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// 16-bit variant of branch8: branch on a comparison of the half-word at
+// |lhs| with immediate |rhs|.
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+                              Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  MOZ_ASSERT(scratch2 != lhs.base);
+
+  switch (cond) {
+    case Assembler::Equal:
+    case Assembler::NotEqual:
+    case Assembler::Above:
+    case Assembler::AboveOrEqual:
+    case Assembler::Below:
+    case Assembler::BelowOrEqual:
+      load16ZeroExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(uint16_t(rhs.value)), label);
+      break;
+
+    case Assembler::GreaterThan:
+    case Assembler::GreaterThanOrEqual:
+    case Assembler::LessThan:
+    case Assembler::LessThanOrEqual:
+      load16SignExtend(lhs, scratch2);
+      branch32(cond, scratch2, Imm32(int16_t(rhs.value)), label);
+      break;
+
+    default:
+      MOZ_CRASH("unexpected condition");
+  }
+}
+
+// Compare-and-branch on 32-bit values. Register/immediate forms delegate
+// straight to ma_b; the memory-operand forms first load the left-hand side
+// into SecondScratchReg.
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+                              L label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
+                              L label) {
+  ma_b(lhs, imm, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+                              Label* label) {
+  load32(lhs, SecondScratchReg);
+  ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+                              Label* label) {
+  load32(lhs, SecondScratchReg);
+  ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+                              Register rhs, Label* label) {
+  load32(lhs, SecondScratchReg);
+  ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+                              Imm32 rhs, Label* label) {
+  load32(lhs, SecondScratchReg);
+  ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+                              Label* label) {
+  load32(lhs, SecondScratchReg);
+  ma_b(SecondScratchReg, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr,
+                              Imm32 imm, Label* label) {
+  load32(addr, SecondScratchReg);
+  ma_b(SecondScratchReg, imm, label, cond);
+}
+
+// Compare-and-branch on pointer-sized values. Register/immediate forms
+// delegate to ma_b; memory forms load the left-hand side into
+// SecondScratchReg and recurse into the register form.
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+                               L label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+                               Label* label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+                               Label* label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+                               Label* label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+                               Label* label) {
+  ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+                               L label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+                               Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+                               Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+                               Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+                               Register rhs, Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+                               ImmWord rhs, Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+                               Register rhs, Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+                               ImmWord rhs, Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchPtr(cond, SecondScratchReg, rhs, label);
+}
+
+// NOTE(review): unlike the overloads above, this one guards SecondScratchReg
+// with an RAII scope; behavior is the same register either way.
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+                               Register rhs, Label* label) {
+  SecondScratchRegisterScope scratch(*this);
+  loadPtr(lhs, scratch);
+  branchPtr(cond, scratch, rhs, label);
+}
+
+// Branch on a single-precision floating-point comparison.
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+                                 FloatRegister rhs, Label* label) {
+  ma_bc1s(lhs, rhs, label, cond);
+}
+
+// Convert float32 to int32, branching to |fail| when the conversion is not
+// exact. The trailing |false| presumably disables the negative-zero check —
+// TODO confirm against convertFloat32ToInt32's signature.
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+                                                  Register dest, Label* fail) {
+  convertFloat32ToInt32(src, dest, fail, false);
+}
+
+// Branch on a double-precision floating-point comparison.
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+                                  FloatRegister rhs, Label* label) {
+  ma_bc1d(lhs, rhs, label, cond);
+}
+
+// dest += src, branching to |overflow| on signed overflow or on carry,
+// depending on |cond|.
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+                                 Label* overflow) {
+  switch (cond) {
+    case Overflow:
+      ma_add32TestOverflow(dest, dest, src, overflow);
+      break;
+    case CarryClear:
+    case CarrySet:
+      ma_add32TestCarry(cond, dest, dest, src, overflow);
+      break;
+    default:
+      MOZ_CRASH("NYI");
+  }
+}
+
+// dest -= src, then branch on the requested condition. For the non-Overflow
+// conditions the subtraction is performed and the result register is tested
+// against itself (the |overflow| label then just names the branch target).
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+                                 Label* overflow) {
+  switch (cond) {
+    case Overflow:
+      ma_sub32TestOverflow(dest, dest, src, overflow);
+      break;
+    case NonZero:
+    case Zero:
+    case Signed:
+    case NotSigned:
+      ma_subu(dest, src);
+      ma_b(dest, dest, overflow, cond);
+      break;
+    default:
+      MOZ_CRASH("NYI");
+  }
+}
+
+// dest *= src, branching to |overflow| on signed overflow (the only
+// supported condition).
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+                                 Label* overflow) {
+  MOZ_ASSERT(cond == Assembler::Overflow);
+  ma_mul32TestOverflow(dest, dest, src, overflow);
+}
+
+// dest >>= src (arithmetic), then branch if the result is zero/non-zero.
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+                                    Label* label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero);
+  rshift32(src, dest);
+  branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+
+// reg = -reg, branching to |label| on overflow. Negating INT32_MIN wraps
+// back to INT32_MIN, which is the only overflowing input, hence the
+// equality test after the negation.
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+  MOZ_ASSERT(cond == Overflow);
+  neg32(reg);
+  branch32(Assembler::Equal, reg, Imm32(INT32_MIN), label);
+}
+
+// Pointer-width counterpart of branchAdd32: dest += src with an overflow or
+// carry branch.
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+                                  Label* label) {
+  switch (cond) {
+    case Overflow:
+      ma_addPtrTestOverflow(dest, dest, src, label);
+      break;
+    case CarryClear:
+    case CarrySet:
+      ma_addPtrTestCarry(cond, dest, dest, src, label);
+      break;
+    default:
+      MOZ_CRASH("NYI");
+  }
+}
+
+// Pointer-width counterpart of branchSub32: dest -= src, then branch; the
+// non-Overflow conditions test the subtraction result directly.
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+                                  Label* label) {
+  switch (cond) {
+    case Overflow:
+      ma_subPtrTestOverflow(dest, dest, src, label);
+      break;
+    case NonZero:
+    case Zero:
+    case Signed:
+    case NotSigned:
+      subPtr(src, dest);
+      ma_b(dest, dest, label, cond);
+      break;
+    default:
+      MOZ_CRASH("NYI");
+  }
+}
+
+// dest *= src (pointer width), branching on signed overflow only.
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+                                  Label* label) {
+  MOZ_ASSERT(cond == Assembler::Overflow);
+  ma_mulPtrTestOverflow(dest, dest, src, label);
+}
+
+// lhs -= rhs, then branch on a comparison of the decremented value with 0.
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+                                  Label* label) {
+  subPtr(rhs, lhs);
+  branchPtr(cond, lhs, Imm32(0), label);
+}
+
+// Branch on (lhs & rhs) against zero/sign. When lhs == rhs the AND would be
+// a no-op, so the register is tested directly without using the scratch.
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+                                  L label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  if (lhs == rhs) {
+    ma_b(lhs, rhs, label, cond);
+  } else {
+    as_and(ScratchRegister, lhs, rhs);
+    ma_b(ScratchRegister, ScratchRegister, label, cond);
+  }
+}
+
+// Branch on (lhs & rhs-immediate); the masked value is built in
+// ScratchRegister.
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+                                  L label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  ma_and(ScratchRegister, lhs, rhs);
+  ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+// Memory forms: load the 32-bit operand into SecondScratchReg first.
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+                                  Label* label) {
+  load32(lhs, SecondScratchReg);
+  branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+                                  Imm32 rhs, Label* label) {
+  load32(lhs, SecondScratchReg);
+  branchTest32(cond, SecondScratchReg, rhs, label);
+}
+
+// Pointer-width counterparts of branchTest32.
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+                                   L label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  if (lhs == rhs) {
+    ma_b(lhs, rhs, label, cond);
+  } else {
+    as_and(ScratchRegister, lhs, rhs);
+    ma_b(ScratchRegister, ScratchRegister, label, cond);
+  }
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+                                   Label* label) {
+  MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+             cond == NotSigned);
+  ma_and(ScratchRegister, lhs, rhs);
+  ma_b(ScratchRegister, ScratchRegister, label, cond);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+                                   Imm32 rhs, Label* label) {
+  loadPtr(lhs, SecondScratchReg);
+  branchTestPtr(cond, SecondScratchReg, rhs, label);
+}
+
+// JS::Value tag tests. The register forms compare an already-extracted tag
+// against a JSVAL tag constant; the memory forms extract the tag into
+// scratch2 via extractTag first.
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+                                         Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+                                         Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+                                         const BaseIndex& address,
+                                         Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+                                     Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+                                     Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+                                     Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestInt32(cond, tag, label);
+}
+
+// The register form of branchTestDouble is defined elsewhere (per-platform);
+// these memory forms extract the tag and delegate to it.
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestDouble(cond, tag, label);
+}
+
+// Branch on the truthiness of a double: compares against +0.0. For b=true
+// an ordered not-equal is used (NaN does not branch, since NaN is falsy);
+// for b=false equal-or-unordered catches both +/-0 and NaN.
+void MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value,
+                                            Label* label) {
+  ma_lid(ScratchDoubleReg, 0.0);
+  DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+  ma_bc1d(value, ScratchDoubleReg, label, cond);
+}
+
+// A value is a number iff its tag is <= ValueUpperInclNumberTag, so the
+// equality request is rewritten into an unsigned range check.
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  Condition actual = cond == Equal ? BelowOrEqual : Above;
+  ma_b(tag, ImmTag(JS::detail::ValueUpperInclNumberTag), label, actual);
+}
+
+// Exact tag tests for boolean, string, and symbol; memory forms extract the
+// tag into scratch2 and delegate to the register form.
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+                                       Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+                                       Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+                                       Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestSymbol(cond, tag, label);
+}
+
+// Exact tag tests for BigInt, null, and object; memory forms extract the
+// tag into scratch2 and delegate to the register form.
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_BIGINT), label, cond);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+                                    Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+                                    Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+                                    Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+                                      Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+                                      Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestObject(cond, tag, label);
+}
+
+// GC-thing tests: all three address forms funnel into the shared template
+// implementation below.
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+                                       Label* label) {
+  branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+                                       Label* label) {
+  branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+                                       const ValueOperand& address,
+                                       Label* label) {
+  branchTestGCThingImpl(cond, address, label);
+}
+
+// A value is a GC thing iff its tag is >= ValueLowerInclGCThingTag, so the
+// equality request becomes an unsigned range check on the extracted tag.
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& address,
+                                           Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  ma_b(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag), label,
+       (cond == Equal) ? AboveOrEqual : Below);
+}
+
+// A value is a primitive iff its tag is < ValueUpperExclPrimitiveTag.
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+                                         Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag), label,
+       (cond == Equal) ? Below : AboveOrEqual);
+}
+
+// Exact tag test for magic values; memory forms extract the tag first.
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+                                     Label* label) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+                                     Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestMagic(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+                                     Label* label) {
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(address, scratch2);
+  branchTestMagic(cond, tag, label);
+}
+
+// Non-branching tag tests: extract the tag from |src| and materialize the
+// boolean comparison result in |dest| via ma_cmp_set. testNumberSet uses
+// the same <= ValueUpperInclNumberTag range check as branchTestNumber.
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JS::detail::ValueUpperInclNumberTag),
+             cond == Equal ? BelowOrEqual : Above);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+                                    Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BOOLEAN), cond);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_STRING), cond);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_SYMBOL), cond);
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+                                   Register dest) {
+  MOZ_ASSERT(cond == Equal || cond == NotEqual);
+  SecondScratchRegisterScope scratch2(*this);
+  Register tag = extractTag(src, scratch2);
+  ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BIGINT), cond);
+}
+
+// Indirect jump: load the code pointer stored at |addr| (e.g. a jump-table
+// slot) into ScratchRegister and branch to it.
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+  loadPtr(addr, ScratchRegister);
+  branch(ScratchRegister);
+}
+
+// Conditional move: dest = src if (lhs cond rhs). The comparison result is
+// materialized as 0/1 in ScratchRegister; on R6 the result is rebuilt from
+// seleqz/selnez (NOTE: this path also clobbers |src|), pre-R6 uses movn
+// (dest = src when the scratch is non-zero).
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Imm32 rhs,
+                                 Register src, Register dest) {
+  Register scratch = ScratchRegister;
+  MOZ_ASSERT(src != scratch && dest != scratch);
+  cmp32Set(cond, lhs, rhs, scratch);
+#ifdef MIPSR6
+  as_selnez(src, src, scratch);
+  as_seleqz(dest, dest, scratch);
+  as_or(dest, dest, src);
+#else
+  as_movn(dest, src, scratch);
+#endif
+}
+
+// Register-rhs form of the conditional move above.
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+                                 Register src, Register dest) {
+  Register scratch = ScratchRegister;
+  MOZ_ASSERT(src != scratch && dest != scratch);
+  cmp32Set(cond, lhs, rhs, scratch);
+#ifdef MIPSR6
+  as_selnez(src, src, scratch);
+  as_seleqz(dest, dest, scratch);
+  as_or(dest, dest, src);
+#else
+  as_movn(dest, src, scratch);
+#endif
+}
+
+// Pointer-width conditional move keyed on a 32-bit comparison; same
+// select/movn pattern as cmp32Move32.
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+                                  Register src, Register dest) {
+  Register scratch = ScratchRegister;
+  MOZ_ASSERT(src != scratch && dest != scratch);
+  cmp32Set(cond, lhs, rhs, scratch);
+#ifdef MIPSR6
+  as_selnez(src, src, scratch);
+  as_seleqz(dest, dest, scratch);
+  as_or(dest, dest, src);
+#else
+  as_movn(dest, src, scratch);
+#endif
+}
+
+// Memory-rhs form: load the comparand, then delegate to the register form.
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+                                 const Address& rhs, Register src,
+                                 Register dest) {
+  SecondScratchRegisterScope scratch2(*this);
+  MOZ_ASSERT(lhs != scratch2 && src != scratch2 && dest != scratch2);
+  load32(rhs, scratch2);
+  cmp32Move32(cond, lhs, scratch2, src, dest);
+}
+
+// Conditional load with a memory comparand: load rhs, then delegate.
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+                                 const Address& rhs, const Address& src,
+                                 Register dest) {
+  ScratchRegisterScope scratch(*this);
+  MOZ_ASSERT(lhs != scratch && dest != scratch);
+  load32(rhs, scratch);
+  cmp32Load32(cond, lhs, scratch, src, dest);
+}
+
+// Conditional loads implemented as a branch-around: branch past the load on
+// the inverted condition, so |dest| is only written when the condition
+// holds.
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+                                 const Address& src, Register dest) {
+  Label skip;
+  branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+  load32(src, dest);
+  bind(&skip);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Imm32 rhs,
+                                 const Address& src, Register dest) {
+  Label skip;
+  branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+  load32(src, dest);
+  bind(&skip);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+                                  const Address& src, Register dest) {
+  Label skip;
+  branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+  loadPtr(src, dest);
+  bind(&skip);
+}
+
+// Same branch-around pattern keyed on a bit test; only valid when Spectre
+// string mitigations are disabled (asserted), since the load is not masked.
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+                                   Imm32 mask, const Address& src,
+                                   Register dest) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreStringMitigations);
+  Label skip;
+  branchTest32(Assembler::InvertCondition(cond), addr, mask, &skip);
+  loadPtr(src, dest);
+  bind(&skip);
+}
+
+// Not implemented on MIPS.
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+                                   Imm32 mask, Register src, Register dest) {
+  MOZ_CRASH();
+}
+
+// Bounds checks: Spectre index masking is not implemented on MIPS (the
+// release asserts require it to be disabled), so these are plain unsigned
+// checks that branch to |failure| when length <= index, i.e. when the index
+// is out of bounds. |maybeScratch| is unused here.
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+                                          Register maybeScratch,
+                                          Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+                                          Register maybeScratch,
+                                          Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+                                           Register maybeScratch,
+                                           Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+                                           const Address& length,
+                                           Register maybeScratch,
+                                           Label* failure) {
+  MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+  branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+// Spectre conditional move/zeroing are not implemented on MIPS.
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+                                    Register dest) {
+  MOZ_CRASH();
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+                                         Register dest) {
+  MOZ_CRASH();
+}
+
+// ========================================================================
+// Memory access primitives.
+
+// Store a double/float32 without canonicalizing NaNs. The original bodies
+// returned the placeholder FaultingCodeOffset *before* the ma_sd/ma_ss
+// call, leaving the store unreachable so the value was never written; the
+// store is now emitted first and the placeholder offset returned afterward.
+// FIXME -- see https://bugzilla.mozilla.org/show_bug.cgi?id=1855960:
+// a real faulting code offset is still not produced for these stores.
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedDouble(
+    FloatRegister src, const Address& addr) {
+  ma_sd(src, addr);
+  return FaultingCodeOffset();  // FIXME: placeholder, not a real offset.
+}
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedDouble(
+    FloatRegister src, const BaseIndex& addr) {
+  ma_sd(src, addr);
+  return FaultingCodeOffset();  // FIXME: placeholder, not a real offset.
+}
+
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedFloat32(
+    FloatRegister src, const Address& addr) {
+  ma_ss(src, addr);
+  return FaultingCodeOffset();  // FIXME: placeholder, not a real offset.
+}
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedFloat32(
+    FloatRegister src, const BaseIndex& addr) {
+  ma_ss(src, addr);
+  return FaultingCodeOffset();  // FIXME: placeholder, not a real offset.
+}
+
+// Emit a SYNC instruction for any non-empty barrier requirement.
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+  if (barrier) {
+    as_sync();
+  }
+}
+
+// ===============================================================
+// Clamping functions.
+
+// Clamp the int32 in |reg| to the uint8 range [0, 255] (Uint8Clamped typed
+// array semantics). Clobbers ScratchRegister and SecondScratchReg.
+void MacroAssembler::clampIntToUint8(Register reg) {
+  // If reg is < 0, then we want to clamp to 0.
+  as_slti(ScratchRegister, reg, 0);
+#ifdef MIPSR6
+  as_seleqz(reg, reg, ScratchRegister);
+#else
+  as_movn(reg, zero, ScratchRegister);
+#endif
+  // If reg is >= 255, then we want to clamp to 255.
+  // ScratchRegister = (reg < 255); when it is 0 the value is replaced by
+  // the 255 held in SecondScratchReg.
+  ma_li(SecondScratchReg, Imm32(255));
+  as_slti(ScratchRegister, reg, 255);
+#ifdef MIPSR6
+  as_seleqz(SecondScratchReg, SecondScratchReg, ScratchRegister);
+  as_selnez(reg, reg, ScratchRegister);
+  as_or(reg, reg, SecondScratchReg);
+#else
+  as_movz(reg, SecondScratchReg, ScratchRegister);
+#endif
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_inl_h */
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
new file mode 100644
index 0000000000..052c76ba0f
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.cpp
@@ -0,0 +1,3355 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MacroAssembler-mips-shared.h"
+
+#include "mozilla/EndianUtils.h"
+
+#include "jsmath.h"
+
+#include "jit/MacroAssembler.h"
+
+using namespace js;
+using namespace jit;
+
+// Register-to-register move, expressed as OR with $zero.
+void MacroAssemblerMIPSShared::ma_move(Register rd, Register rs) {
+  as_or(rd, rs, zero);
+}
+
+// Load a GC-pointer immediate. The value is emitted as a patchable
+// instruction pair and registered for data relocation so the GC can
+// update it if the pointee moves.
+void MacroAssemblerMIPSShared::ma_li(Register dest, ImmGCPtr ptr) {
+  writeDataRelocation(ptr);
+  asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+
+// Load a 32-bit immediate using the shortest sequence: a single addiu/ori
+// when the value fits in 16 bits, a single lui when the low half is zero,
+// otherwise the full lui/ori pair.
+void MacroAssemblerMIPSShared::ma_li(Register dest, Imm32 imm) {
+  if (Imm16::IsInSignedRange(imm.value)) {
+    as_addiu(dest, zero, imm.value);
+  } else if (Imm16::IsInUnsignedRange(imm.value)) {
+    as_ori(dest, zero, Imm16::Lower(imm).encode());
+  } else if (Imm16::Lower(imm).encode() == 0) {
+    as_lui(dest, Imm16::Upper(imm).encode());
+  } else {
+    as_lui(dest, Imm16::Upper(imm).encode());
+    as_ori(dest, dest, Imm16::Lower(imm).encode());
+  }
+}
+
+// This method generates lui and ori instruction pair that can be modified by
+// UpdateLuiOriValue, either during compilation (eg. Assembler::bind), or
+// during execution (eg. jit::PatchJump). The ensureSpace call guarantees the
+// two instructions are emitted contiguously (no buffer pool is inserted
+// between them), which patching relies on.
+void MacroAssemblerMIPSShared::ma_liPatchable(Register dest, Imm32 imm) {
+  m_buffer.ensureSpace(2 * sizeof(uint32_t));
+  as_lui(dest, Imm16::Upper(imm).encode());
+  as_ori(dest, dest, Imm16::Lower(imm).encode());
+}
+
+// Shifts
+// Shift left logical by a constant; the amount is taken modulo 32.
+void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt, Imm32 shift) {
+  as_sll(rd, rt, shift.value % 32);
+}
+// Shift right logical by a constant; the amount is taken modulo 32.
+void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt, Imm32 shift) {
+  as_srl(rd, rt, shift.value % 32);
+}
+
+// Shift right arithmetic by a constant; the amount is taken modulo 32.
+void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt, Imm32 shift) {
+  as_sra(rd, rt, shift.value % 32);
+}
+
+// Rotate right by a constant. Uses the R2 rotr instruction when available,
+// otherwise synthesizes the rotate as (rt >> n) | (rt << (32 - n)).
+void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt, Imm32 shift) {
+  if (hasR2()) {
+    as_rotr(rd, rt, shift.value % 32);
+  } else {
+    ScratchRegisterScope scratch(asMasm());
+    as_srl(scratch, rt, shift.value % 32);
+    as_sll(rd, rt, (32 - (shift.value % 32)) % 32);
+    as_or(rd, rd, scratch);
+  }
+}
+
+// Rotate left by a constant, implemented as a rotate right by (32 - n).
+void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt, Imm32 shift) {
+  if (hasR2()) {
+    as_rotr(rd, rt, (32 - (shift.value % 32)) % 32);
+  } else {
+    ScratchRegisterScope scratch(asMasm());
+    as_srl(scratch, rt, (32 - (shift.value % 32)) % 32);
+    as_sll(rd, rt, shift.value % 32);
+    as_or(rd, rd, scratch);
+  }
+}
+
+// Variable-amount shifts: the hardware uses only the low 5 bits of |shift|.
+void MacroAssemblerMIPSShared::ma_sll(Register rd, Register rt,
+                                      Register shift) {
+  as_sllv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPSShared::ma_srl(Register rd, Register rt,
+                                      Register shift) {
+  as_srlv(rd, rt, shift);
+}
+
+void MacroAssemblerMIPSShared::ma_sra(Register rd, Register rt,
+                                      Register shift) {
+  as_srav(rd, rt, shift);
+}
+
+// Variable rotate right. Without R2's rotrv, computes
+// (rt << -shift) | (rt >> shift); the negated amount is reduced mod 32 by
+// the shift instruction itself.
+void MacroAssemblerMIPSShared::ma_ror(Register rd, Register rt,
+                                      Register shift) {
+  if (hasR2()) {
+    as_rotrv(rd, rt, shift);
+  } else {
+    ScratchRegisterScope scratch(asMasm());
+    ma_negu(scratch, shift);
+    as_sllv(scratch, rt, scratch);
+    as_srlv(rd, rt, shift);
+    as_or(rd, rd, scratch);
+  }
+}
+
+// Variable rotate left, implemented as a rotate right by the negated amount.
+void MacroAssemblerMIPSShared::ma_rol(Register rd, Register rt,
+                                      Register shift) {
+  ScratchRegisterScope scratch(asMasm());
+  ma_negu(scratch, shift);
+  if (hasR2()) {
+    as_rotrv(rd, rt, scratch);
+  } else {
+    as_srlv(rd, rt, scratch);
+    as_sllv(scratch, rt, shift);
+    as_or(rd, rd, scratch);
+  }
+}
+
+// rd = 0 - rs (two's-complement negation, no overflow trap).
+void MacroAssemblerMIPSShared::ma_negu(Register rd, Register rs) {
+  as_subu(rd, zero, rs);
+}
+
+// rd = ~rs, expressed as NOR with $zero.
+void MacroAssemblerMIPSShared::ma_not(Register rd, Register rs) {
+  as_nor(rd, rs, zero);
+}
+
+// Bit extract/insert
+// Extract |size| bits of rs starting at bit |pos| into rt, zero-extended.
+// Without R2's ext instruction, the field is isolated by shifting it to the
+// top of the word and then logically shifting it back down.
+void MacroAssemblerMIPSShared::ma_ext(Register rt, Register rs, uint16_t pos,
+                                      uint16_t size) {
+  MOZ_ASSERT(pos < 32);
+  MOZ_ASSERT(pos + size < 33);
+
+  if (hasR2()) {
+    as_ext(rt, rs, pos, size);
+  } else {
+    int shift_left = 32 - (pos + size);
+    as_sll(rt, rs, shift_left);
+    int shift_right = 32 - size;
+    // shift_right == 0 means size == 32; the srl would be a no-op.
+    if (shift_right > 0) {
+      as_srl(rt, rt, shift_right);
+    }
+  }
+}
+
+// Insert the low |size| bits of rs into rt at bit position |pos|, leaving
+// the other bits of rt unchanged. Without R2's ins instruction, a mask of
+// |size| ones is built, the field from rs is positioned under it, the hole
+// in rt is cleared with the inverted mask, and the two are OR-ed together.
+void MacroAssemblerMIPSShared::ma_ins(Register rt, Register rs, uint16_t pos,
+                                      uint16_t size) {
+  MOZ_ASSERT(pos < 32);
+  MOZ_ASSERT(pos + size <= 32);
+  MOZ_ASSERT(size != 0);
+
+  if (hasR2()) {
+    as_ins(rt, rs, pos, size);
+  } else {
+    ScratchRegisterScope scratch(asMasm());
+    SecondScratchRegisterScope scratch2(asMasm());
+    // scratch = (1 << size) - 1, built as 0xffffffff >> (32 - size).
+    ma_subu(scratch, zero, Imm32(1));
+    as_srl(scratch, scratch, 32 - size);
+    as_and(scratch2, rs, scratch);
+    as_sll(scratch2, scratch2, pos);
+    as_sll(scratch, scratch, pos);
+    as_nor(scratch, scratch, zero);
+    as_and(scratch, rt, scratch);
+    as_or(rt, scratch2, scratch);
+  }
+}
+
+// Sign extend
+// Sign-extend the low byte of rt into rd. Pre-R2 fallback uses a
+// shift-left/arithmetic-shift-right pair.
+void MacroAssemblerMIPSShared::ma_seb(Register rd, Register rt) {
+  if (hasR2()) {
+    as_seb(rd, rt);
+  } else {
+    as_sll(rd, rt, 24);
+    as_sra(rd, rd, 24);
+  }
+}
+
+// Sign-extend the low halfword of rt into rd.
+void MacroAssemblerMIPSShared::ma_seh(Register rd, Register rt) {
+  if (hasR2()) {
+    as_seh(rd, rt);
+  } else {
+    as_sll(rd, rt, 16);
+    as_sra(rd, rd, 16);
+  }
+}
+
+// And.
+// And. Immediates that fit in andi's zero-extended 16-bit field are encoded
+// directly; larger ones are first materialized in ScratchRegister.
+void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs) {
+  as_and(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_and(Register rd, Imm32 imm) {
+  ma_and(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_and(Register rd, Register rs, Imm32 imm) {
+  if (Imm16::IsInUnsignedRange(imm.value)) {
+    as_andi(rd, rs, imm.value);
+  } else {
+    ma_li(ScratchRegister, imm);
+    as_and(rd, rs, ScratchRegister);
+  }
+}
+
+// Or.
+// Or. Same immediate-encoding strategy as ma_and: ori takes a zero-extended
+// 16-bit immediate, anything larger goes through ScratchRegister.
+void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs) {
+  as_or(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_or(Register rd, Imm32 imm) {
+  ma_or(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_or(Register rd, Register rs, Imm32 imm) {
+  if (Imm16::IsInUnsignedRange(imm.value)) {
+    as_ori(rd, rs, imm.value);
+  } else {
+    ma_li(ScratchRegister, imm);
+    as_or(rd, rs, ScratchRegister);
+  }
+}
+
+// xor
+// Xor. Same immediate-encoding strategy as ma_and/ma_or.
+void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs) {
+  as_xor(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_xor(Register rd, Imm32 imm) {
+  ma_xor(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_xor(Register rd, Register rs, Imm32 imm) {
+  if (Imm16::IsInUnsignedRange(imm.value)) {
+    as_xori(rd, rs, imm.value);
+  } else {
+    ma_li(ScratchRegister, imm);
+    as_xor(rd, rs, ScratchRegister);
+  }
+}
+
+// word swap bytes within halfwords
+// Swap the bytes within each halfword of rt (the wsbh instruction).
+void MacroAssemblerMIPSShared::ma_wsbh(Register rd, Register rt) {
+  as_wsbh(rd, rt);
+}
+
+// Count trailing zeros of rs into rd. (rs - 1) ^ rs sets exactly the bits at
+// and below the lowest set bit; AND-ing with (rs - 1) keeps only the bits
+// below it, so clz of that mask is 32 - ctz, and the final subtraction from
+// 32 recovers ctz. For rs == 0 the mask is all-ones, giving the expected 32.
+void MacroAssemblerMIPSShared::ma_ctz(Register rd, Register rs) {
+  as_addiu(ScratchRegister, rs, -1);
+  as_xor(rd, ScratchRegister, rs);
+  as_and(rd, rd, ScratchRegister);
+  as_clz(rd, rd);
+  ma_li(ScratchRegister, Imm32(0x20));
+  as_subu(rd, ScratchRegister, rd);
+}
+
+// Arithmetic-based ops.
+
+// Add.
+// Add (no overflow trap). Immediates in addiu's signed 16-bit range are
+// encoded directly; larger ones are materialized in ScratchRegister.
+void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs, Imm32 imm) {
+  if (Imm16::IsInSignedRange(imm.value)) {
+    as_addiu(rd, rs, imm.value);
+  } else {
+    ma_li(ScratchRegister, imm);
+    as_addu(rd, rs, ScratchRegister);
+  }
+}
+
+void MacroAssemblerMIPSShared::ma_addu(Register rd, Register rs) {
+  as_addu(rd, rd, rs);
+}
+
+void MacroAssemblerMIPSShared::ma_addu(Register rd, Imm32 imm) {
+  ma_addu(rd, rd, imm);
+}
+
+// rd = rs + rt, branching to |overflow| on unsigned carry (or its absence).
+// Unsigned overflow occurred iff the result is smaller than an operand; the
+// surviving (unclobbered) operand is compared, hence the aliasing assert.
+void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
+                                                 Register rs, Register rt,
+                                                 Label* overflow) {
+  MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
+  MOZ_ASSERT_IF(rd == rs, rt != rd);
+  as_addu(rd, rs, rt);
+  as_sltu(SecondScratchReg, rd, rd == rs ? rt : rs);
+  ma_b(SecondScratchReg, SecondScratchReg, overflow,
+       cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+// Immediate variant: materializes |imm| and defers to the register form.
+void MacroAssemblerMIPSShared::ma_add32TestCarry(Condition cond, Register rd,
+                                                 Register rs, Imm32 imm,
+                                                 Label* overflow) {
+  ma_li(ScratchRegister, imm);
+  ma_add32TestCarry(cond, rd, rs, ScratchRegister, overflow);
+}
+
+// Subtract.
+// Subtract (no overflow trap). A subtraction by an immediate is encoded as
+// addiu with the negated value when that fits in 16 signed bits.
+void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs, Imm32 imm) {
+  if (Imm16::IsInSignedRange(-imm.value)) {
+    as_addiu(rd, rs, -imm.value);
+  } else {
+    ma_li(ScratchRegister, imm);
+    as_subu(rd, rs, ScratchRegister);
+  }
+}
+
+void MacroAssemblerMIPSShared::ma_subu(Register rd, Imm32 imm) {
+  ma_subu(rd, rd, imm);
+}
+
+void MacroAssemblerMIPSShared::ma_subu(Register rd, Register rs) {
+  as_subu(rd, rd, rs);
+}
+
+// rd = rs - imm with a branch to |overflow| on signed overflow. Normally
+// rewritten as an addition of -imm; INT32_MIN cannot be negated in 32 bits,
+// so that case falls back to the register form.
+void MacroAssemblerMIPSShared::ma_sub32TestOverflow(Register rd, Register rs,
+                                                    Imm32 imm,
+                                                    Label* overflow) {
+  if (imm.value != INT32_MIN) {
+    asMasm().ma_add32TestOverflow(rd, rs, Imm32(-imm.value), overflow);
+  } else {
+    ma_li(ScratchRegister, Imm32(imm.value));
+    asMasm().ma_sub32TestOverflow(rd, rs, ScratchRegister, overflow);
+  }
+}
+
+// rd = rs * imm (low 32 bits); the immediate is materialized first.
+void MacroAssemblerMIPSShared::ma_mul(Register rd, Register rs, Imm32 imm) {
+  ma_li(ScratchRegister, imm);
+  as_mul(rd, rs, ScratchRegister);
+}
+
+// rd = rs * rt, branching to |overflow| on signed 32-bit overflow. The full
+// 64-bit product's high word must equal the sign-extension of the low word;
+// otherwise the product did not fit in 32 bits.
+void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
+                                                    Register rt,
+                                                    Label* overflow) {
+#ifdef MIPSR6
+  // R6's mul writes rd directly, so preserve rs first if it aliases rd.
+  if (rd == rs) {
+    ma_move(SecondScratchReg, rs);
+    rs = SecondScratchReg;
+  }
+  as_mul(rd, rs, rt);
+  as_muh(SecondScratchReg, rs, rt);
+#else
+  as_mult(rs, rt);
+  as_mflo(rd);
+  as_mfhi(SecondScratchReg);
+#endif
+  as_sra(ScratchRegister, rd, 31);
+  ma_b(ScratchRegister, SecondScratchReg, overflow, Assembler::NotEqual);
+}
+
+// Immediate variant: materializes |imm| and defers to the register form.
+void MacroAssemblerMIPSShared::ma_mul32TestOverflow(Register rd, Register rs,
+                                                    Imm32 imm,
+                                                    Label* overflow) {
+  ma_li(ScratchRegister, imm);
+  ma_mul32TestOverflow(rd, rs, ScratchRegister, overflow);
+}
+
+// rd = rs / rt, branching to |overflow| when the division is not exact
+// (i.e. the remainder is non-zero). Pre-R6 reads quotient/remainder from
+// LO/HI; R6 uses separate div/mod instructions.
+void MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs,
+                                                      Register rt,
+                                                      Label* overflow) {
+#ifdef MIPSR6
+  // R6's div writes rd directly, so preserve rs first if it aliases rd.
+  if (rd == rs) {
+    ma_move(SecondScratchReg, rs);
+    rs = SecondScratchReg;
+  }
+  as_mod(ScratchRegister, rs, rt);
+#else
+  as_div(rs, rt);
+  as_mfhi(ScratchRegister);
+#endif
+  ma_b(ScratchRegister, ScratchRegister, overflow, Assembler::NonZero);
+#ifdef MIPSR6
+  as_div(rd, rs, rt);
+#else
+  as_mflo(rd);
+#endif
+}
+
+// Immediate variant: materializes |imm| and defers to the register form.
+void MacroAssemblerMIPSShared::ma_div_branch_overflow(Register rd, Register rs,
+                                                      Imm32 imm,
+                                                      Label* overflow) {
+  ma_li(ScratchRegister, imm);
+  ma_div_branch_overflow(rd, rs, ScratchRegister, overflow);
+}
+
+// Compute dest = src % ((1 << shift) - 1) without a divide, preserving the
+// sign of src as JS requires. If |negZero| is non-null, it is taken when the
+// result would be negative zero. Clobbers hold, remain and SecondScratchReg.
+void MacroAssemblerMIPSShared::ma_mod_mask(Register src, Register dest,
+                                           Register hold, Register remain,
+                                           int32_t shift, Label* negZero) {
+  // MATH:
+  // We wish to compute x % (1<<y) - 1 for a known constant, y.
+  // First, let b = (1<<y) and C = (1<<y)-1, then think of the 32 bit
+  // dividend as a number in base b, namely
+  // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n
+  // now, since both addition and multiplication commute with modulus,
+  // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+  // (c_0 % C) + (c_1%C) * (b % C) + (c_2 % C) * (b^2 % C)...
+  // now, since b == C + 1, b % C == 1, and b^n % C == 1
+  // this means that the whole thing simplifies to:
+  // c_0 + c_1 + c_2 ... c_n % C
+  // each c_n can easily be computed by a shift/bitextract, and the modulus
+  // can be maintained by simply subtracting by C whenever the number gets
+  // over C.
+  int32_t mask = (1 << shift) - 1;
+  Label head, negative, sumSigned, done;
+
+  // hold holds -1 if the value was negative, 1 otherwise.
+  // remain holds the remaining bits that have not been processed
+  // SecondScratchReg serves as a temporary location to store extracted bits
+  // into as well as holding the trial subtraction as a temp value dest is
+  // the accumulator (and holds the final result)
+
+  // move the whole value into the remain.
+  ma_move(remain, src);
+  // Zero out the dest.
+  ma_li(dest, Imm32(0));
+  // Set the hold appropriately.
+  ma_b(remain, remain, &negative, Signed, ShortJump);
+  ma_li(hold, Imm32(1));
+  ma_b(&head, ShortJump);
+
+  bind(&negative);
+  ma_li(hold, Imm32(-1));
+  // Work on the absolute value so the digit loop below sees a non-negative
+  // number; the sign is reapplied at the end using |hold|.
+  ma_negu(remain, remain);
+
+  // Begin the main loop.
+  bind(&head);
+
+  // Extract the bottom bits into SecondScratchReg.
+  ma_and(SecondScratchReg, remain, Imm32(mask));
+  // Add those bits to the accumulator.
+  as_addu(dest, dest, SecondScratchReg);
+  // Do a trial subtraction
+  ma_subu(SecondScratchReg, dest, Imm32(mask));
+  // If (sum - C) > 0, store sum - C back into sum, thus performing a
+  // modulus.
+  ma_b(SecondScratchReg, SecondScratchReg, &sumSigned, Signed, ShortJump);
+  ma_move(dest, SecondScratchReg);
+  bind(&sumSigned);
+  // Get rid of the bits that we extracted before.
+  as_srl(remain, remain, shift);
+  // If the shift produced zero, finish, otherwise, continue in the loop.
+  ma_b(remain, remain, &head, NonZero, ShortJump);
+  // Check the hold to see if we need to negate the result.
+  ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+  // If the hold was non-zero, negate the result to be in line with
+  // what JS wants
+  if (negZero != nullptr) {
+    // Jump out in case of negative zero.
+    ma_b(hold, hold, negZero, Zero);
+    ma_negu(dest, dest);
+  } else {
+    ma_negu(dest, dest);
+  }
+
+  bind(&done);
+}
+
+// Memory.
+
+// Load from a base+index*scale+offset address. On Loongson cores the gs*x
+// indexed loads handle base+index+offset8 in one instruction (sign-extending
+// loads only); otherwise the scaled address is computed into SecondScratchReg
+// and the Address form of ma_load finishes the job.
+void MacroAssemblerMIPSShared::ma_load(Register dest, const BaseIndex& src,
+                                       LoadStoreSize size,
+                                       LoadStoreExtension extension) {
+  if (isLoongson() && ZeroExtend != extension &&
+      Imm8::IsInSignedRange(src.offset)) {
+    Register index = src.index;
+
+    if (src.scale != TimesOne) {
+      int32_t shift = Imm32::ShiftOf(src.scale).value;
+
+      MOZ_ASSERT(SecondScratchReg != src.base);
+      index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+      asMasm().ma_dsll(index, src.index, Imm32(shift));
+#else
+      asMasm().ma_sll(index, src.index, Imm32(shift));
+#endif
+    }
+
+    switch (size) {
+      case SizeByte:
+        as_gslbx(dest, src.base, index, src.offset);
+        break;
+      case SizeHalfWord:
+        as_gslhx(dest, src.base, index, src.offset);
+        break;
+      case SizeWord:
+        as_gslwx(dest, src.base, index, src.offset);
+        break;
+      case SizeDouble:
+        as_gsldx(dest, src.base, index, src.offset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_load");
+    }
+    return;
+  }
+
+  asMasm().computeScaledAddress(src, SecondScratchReg);
+  asMasm().ma_load(dest, Address(SecondScratchReg, src.offset), size,
+                   extension);
+}
+
+// Load from a possibly-unaligned BaseIndex address. Words use the lwl/lwr
+// (ldl/ldr on MIPS64) pair; halfwords are assembled from two byte loads plus
+// an insert. Offsets that do not fit the 16-bit displacement are folded into
+// the base register first.
+void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
+                                                 const BaseIndex& src,
+                                                 LoadStoreSize size,
+                                                 LoadStoreExtension extension) {
+  int16_t lowOffset, hiOffset;
+  SecondScratchRegisterScope base(asMasm());
+  asMasm().computeScaledAddress(src, base);
+  ScratchRegisterScope scratch(asMasm());
+
+  if (Imm16::IsInSignedRange(src.offset) &&
+      Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
+    lowOffset = Imm16(src.offset).encode();
+    hiOffset = Imm16(src.offset + size / 8 - 1).encode();
+  } else {
+    ma_li(scratch, Imm32(src.offset));
+    asMasm().addPtr(scratch, base);
+    lowOffset = Imm16(0).encode();
+    hiOffset = Imm16(size / 8 - 1).encode();
+  }
+
+  switch (size) {
+    case SizeHalfWord:
+      MOZ_ASSERT(dest != scratch);
+      if (extension == ZeroExtend) {
+        as_lbu(scratch, base, hiOffset);
+      } else {
+        as_lb(scratch, base, hiOffset);
+      }
+      as_lbu(dest, base, lowOffset);
+      // Combine: low byte already in dest, place the high byte above it.
+      ma_ins(dest, scratch, 8, 24);
+      break;
+    case SizeWord:
+      MOZ_ASSERT(dest != base);
+      as_lwl(dest, base, hiOffset);
+      as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+      if (extension == ZeroExtend) {
+        as_dext(dest, dest, 0, 32);
+      }
+#endif
+      break;
+#ifdef JS_CODEGEN_MIPS64
+    case SizeDouble:
+      MOZ_ASSERT(dest != base);
+      as_ldl(dest, base, hiOffset);
+      as_ldr(dest, base, lowOffset);
+      break;
+#endif
+    default:
+      MOZ_CRASH("Invalid argument for ma_load_unaligned");
+  }
+}
+
+// Load from a possibly-unaligned Address. Same strategy as the BaseIndex
+// overload: lwl/lwr (ldl/ldr) pairs for words, two byte loads plus an insert
+// for halfwords, with out-of-range offsets folded into a scratch base.
+void MacroAssemblerMIPSShared::ma_load_unaligned(Register dest,
+                                                 const Address& address,
+                                                 LoadStoreSize size,
+                                                 LoadStoreExtension extension) {
+  int16_t lowOffset, hiOffset;
+  ScratchRegisterScope scratch1(asMasm());
+  SecondScratchRegisterScope scratch2(asMasm());
+  Register base;
+
+  if (Imm16::IsInSignedRange(address.offset) &&
+      Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
+    base = address.base;
+    lowOffset = Imm16(address.offset).encode();
+    hiOffset = Imm16(address.offset + size / 8 - 1).encode();
+  } else {
+    ma_li(scratch1, Imm32(address.offset));
+    asMasm().addPtr(address.base, scratch1);
+    base = scratch1;
+    lowOffset = Imm16(0).encode();
+    hiOffset = Imm16(size / 8 - 1).encode();
+  }
+
+  switch (size) {
+    case SizeHalfWord:
+      MOZ_ASSERT(base != scratch2 && dest != scratch2);
+      if (extension == ZeroExtend) {
+        as_lbu(scratch2, base, hiOffset);
+      } else {
+        as_lb(scratch2, base, hiOffset);
+      }
+      as_lbu(dest, base, lowOffset);
+      // Combine: low byte already in dest, place the high byte above it.
+      ma_ins(dest, scratch2, 8, 24);
+      break;
+    case SizeWord:
+      MOZ_ASSERT(dest != base);
+      as_lwl(dest, base, hiOffset);
+      as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+      if (extension == ZeroExtend) {
+        as_dext(dest, dest, 0, 32);
+      }
+#endif
+      break;
+#ifdef JS_CODEGEN_MIPS64
+    case SizeDouble:
+      MOZ_ASSERT(dest != base);
+      as_ldl(dest, base, hiOffset);
+      as_ldr(dest, base, lowOffset);
+      break;
+#endif
+    default:
+      MOZ_CRASH("Invalid argument for ma_load_unaligned");
+  }
+}
+
+// Wasm variant of the unaligned load: identical addressing strategy, but the
+// buffer offset of the first emitted load is recorded via append() so the
+// wasm machinery can associate a fault at that instruction with |access|.
+void MacroAssemblerMIPSShared::ma_load_unaligned(
+    const wasm::MemoryAccessDesc& access, Register dest, const BaseIndex& src,
+    Register temp, LoadStoreSize size, LoadStoreExtension extension) {
+  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+  int16_t lowOffset, hiOffset;
+  Register base;
+
+  asMasm().computeScaledAddress(src, SecondScratchReg);
+
+  if (Imm16::IsInSignedRange(src.offset) &&
+      Imm16::IsInSignedRange(src.offset + size / 8 - 1)) {
+    base = SecondScratchReg;
+    lowOffset = Imm16(src.offset).encode();
+    hiOffset = Imm16(src.offset + size / 8 - 1).encode();
+  } else {
+    ma_li(ScratchRegister, Imm32(src.offset));
+    asMasm().addPtr(SecondScratchReg, ScratchRegister);
+    base = ScratchRegister;
+    lowOffset = Imm16(0).encode();
+    hiOffset = Imm16(size / 8 - 1).encode();
+  }
+
+  BufferOffset load;
+  switch (size) {
+    case SizeHalfWord:
+      if (extension == ZeroExtend) {
+        load = as_lbu(temp, base, hiOffset);
+      } else {
+        load = as_lb(temp, base, hiOffset);
+      }
+      as_lbu(dest, base, lowOffset);
+      ma_ins(dest, temp, 8, 24);
+      break;
+    case SizeWord:
+      load = as_lwl(dest, base, hiOffset);
+      as_lwr(dest, base, lowOffset);
+#ifdef JS_CODEGEN_MIPS64
+      if (extension == ZeroExtend) {
+        as_dext(dest, dest, 0, 32);
+      }
+#endif
+      break;
+#ifdef JS_CODEGEN_MIPS64
+    case SizeDouble:
+      load = as_ldl(dest, base, hiOffset);
+      as_ldr(dest, base, lowOffset);
+      break;
+#endif
+    default:
+      MOZ_CRASH("Invalid argument for ma_load");
+  }
+
+  append(access, load.getOffset());
+}
+
+// Store to a base+index*scale+offset address. Loongson cores get a single
+// gs*x indexed store when the offset fits 8 signed bits; otherwise the
+// scaled address is computed into SecondScratchReg and the Address form of
+// ma_store finishes the job.
+void MacroAssemblerMIPSShared::ma_store(Register data, const BaseIndex& dest,
+                                        LoadStoreSize size,
+                                        LoadStoreExtension extension) {
+  if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+    Register index = dest.index;
+
+    if (dest.scale != TimesOne) {
+      int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+      MOZ_ASSERT(SecondScratchReg != dest.base);
+      index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+      asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+      asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+    }
+
+    switch (size) {
+      case SizeByte:
+        as_gssbx(data, dest.base, index, dest.offset);
+        break;
+      case SizeHalfWord:
+        as_gsshx(data, dest.base, index, dest.offset);
+        break;
+      case SizeWord:
+        as_gsswx(data, dest.base, index, dest.offset);
+        break;
+      case SizeDouble:
+        as_gssdx(data, dest.base, index, dest.offset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_store");
+    }
+    return;
+  }
+
+  asMasm().computeScaledAddress(dest, SecondScratchReg);
+  asMasm().ma_store(data, Address(SecondScratchReg, dest.offset), size,
+                    extension);
+}
+
+// Store an immediate to a BaseIndex address. A zero immediate is stored
+// straight from $zero; any other value is materialized in ScratchRegister.
+void MacroAssemblerMIPSShared::ma_store(Imm32 imm, const BaseIndex& dest,
+                                        LoadStoreSize size,
+                                        LoadStoreExtension extension) {
+  if (isLoongson() && Imm8::IsInSignedRange(dest.offset)) {
+    Register data = zero;
+    Register index = dest.index;
+
+    if (imm.value) {
+      MOZ_ASSERT(ScratchRegister != dest.base);
+      MOZ_ASSERT(ScratchRegister != dest.index);
+      data = ScratchRegister;
+      ma_li(data, imm);
+    }
+
+    if (dest.scale != TimesOne) {
+      int32_t shift = Imm32::ShiftOf(dest.scale).value;
+
+      MOZ_ASSERT(SecondScratchReg != dest.base);
+      index = SecondScratchReg;
+#ifdef JS_CODEGEN_MIPS64
+      asMasm().ma_dsll(index, dest.index, Imm32(shift));
+#else
+      asMasm().ma_sll(index, dest.index, Imm32(shift));
+#endif
+    }
+
+    switch (size) {
+      case SizeByte:
+        as_gssbx(data, dest.base, index, dest.offset);
+        break;
+      case SizeHalfWord:
+        as_gsshx(data, dest.base, index, dest.offset);
+        break;
+      case SizeWord:
+        as_gsswx(data, dest.base, index, dest.offset);
+        break;
+      case SizeDouble:
+        as_gssdx(data, dest.base, index, dest.offset);
+        break;
+      default:
+        MOZ_CRASH("Invalid argument for ma_store");
+    }
+    return;
+  }
+
+  // Make sure that SecondScratchReg contains absolute address so that
+  // offset is 0.
+  asMasm().computeEffectiveAddress(dest, SecondScratchReg);
+
+  // Scratch register is free now, use it for loading imm value
+  ma_li(ScratchRegister, imm);
+
+  // with offset=0 ScratchRegister will not be used in ma_store()
+  // so we can use it as a parameter here
+  asMasm().ma_store(ScratchRegister, Address(SecondScratchReg, 0), size,
+                    extension);
+}
+
+// Store to a possibly-unaligned Address. Words use the swl/swr (sdl/sdr on
+// MIPS64) pair; halfwords are split into two byte stores, extracting the
+// high byte with ma_ext. Out-of-range offsets are folded into a scratch
+// base register first.
+void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
+                                                  const Address& address,
+                                                  LoadStoreSize size) {
+  int16_t lowOffset, hiOffset;
+  ScratchRegisterScope scratch(asMasm());
+  Register base;
+
+  if (Imm16::IsInSignedRange(address.offset) &&
+      Imm16::IsInSignedRange(address.offset + size / 8 - 1)) {
+    base = address.base;
+    lowOffset = Imm16(address.offset).encode();
+    hiOffset = Imm16(address.offset + size / 8 - 1).encode();
+  } else {
+    ma_li(scratch, Imm32(address.offset));
+    asMasm().addPtr(address.base, scratch);
+    base = scratch;
+    lowOffset = Imm16(0).encode();
+    hiOffset = Imm16(size / 8 - 1).encode();
+  }
+
+  switch (size) {
+    case SizeHalfWord: {
+      SecondScratchRegisterScope scratch2(asMasm());
+      MOZ_ASSERT(base != scratch2);
+      as_sb(data, base, lowOffset);
+      ma_ext(scratch2, data, 8, 8);
+      as_sb(scratch2, base, hiOffset);
+      break;
+    }
+    case SizeWord:
+      as_swl(data, base, hiOffset);
+      as_swr(data, base, lowOffset);
+      break;
+#ifdef JS_CODEGEN_MIPS64
+    case SizeDouble:
+      as_sdl(data, base, hiOffset);
+      as_sdr(data, base, lowOffset);
+      break;
+#endif
+    default:
+      MOZ_CRASH("Invalid argument for ma_store_unaligned");
+  }
+}
+
+// Store to a possibly-unaligned BaseIndex address; same strategy as the
+// Address overload, after computing the scaled base into SecondScratchReg.
+void MacroAssemblerMIPSShared::ma_store_unaligned(Register data,
+                                                  const BaseIndex& dest,
+                                                  LoadStoreSize size) {
+  int16_t lowOffset, hiOffset;
+  SecondScratchRegisterScope base(asMasm());
+  asMasm().computeScaledAddress(dest, base);
+  ScratchRegisterScope scratch(asMasm());
+
+  if (Imm16::IsInSignedRange(dest.offset) &&
+      Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
+    lowOffset = Imm16(dest.offset).encode();
+    hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
+  } else {
+    ma_li(scratch, Imm32(dest.offset));
+    asMasm().addPtr(scratch, base);
+    lowOffset = Imm16(0).encode();
+    hiOffset = Imm16(size / 8 - 1).encode();
+  }
+
+  switch (size) {
+    case SizeHalfWord:
+      MOZ_ASSERT(base != scratch);
+      as_sb(data, base, lowOffset);
+      ma_ext(scratch, data, 8, 8);
+      as_sb(scratch, base, hiOffset);
+      break;
+    case SizeWord:
+      as_swl(data, base, hiOffset);
+      as_swr(data, base, lowOffset);
+      break;
+#ifdef JS_CODEGEN_MIPS64
+    case SizeDouble:
+      as_sdl(data, base, hiOffset);
+      as_sdr(data, base, lowOffset);
+      break;
+#endif
+    default:
+      MOZ_CRASH("Invalid argument for ma_store_unaligned");
+  }
+}
+
+// Wasm variant of the unaligned store: identical addressing strategy, but
+// the buffer offset of the first emitted store is recorded via append() so a
+// fault at that instruction can be attributed to |access|.
+void MacroAssemblerMIPSShared::ma_store_unaligned(
+    const wasm::MemoryAccessDesc& access, Register data, const BaseIndex& dest,
+    Register temp, LoadStoreSize size, LoadStoreExtension extension) {
+  MOZ_ASSERT(MOZ_LITTLE_ENDIAN(), "Wasm-only; wasm is disabled on big-endian.");
+  int16_t lowOffset, hiOffset;
+  Register base;
+
+  asMasm().computeScaledAddress(dest, SecondScratchReg);
+
+  if (Imm16::IsInSignedRange(dest.offset) &&
+      Imm16::IsInSignedRange(dest.offset + size / 8 - 1)) {
+    base = SecondScratchReg;
+    lowOffset = Imm16(dest.offset).encode();
+    hiOffset = Imm16(dest.offset + size / 8 - 1).encode();
+  } else {
+    ma_li(ScratchRegister, Imm32(dest.offset));
+    asMasm().addPtr(SecondScratchReg, ScratchRegister);
+    base = ScratchRegister;
+    lowOffset = Imm16(0).encode();
+    hiOffset = Imm16(size / 8 - 1).encode();
+  }
+
+  BufferOffset store;
+  switch (size) {
+    case SizeHalfWord:
+      ma_ext(temp, data, 8, 8);
+      store = as_sb(temp, base, hiOffset);
+      as_sb(data, base, lowOffset);
+      break;
+    case SizeWord:
+      store = as_swl(data, base, hiOffset);
+      as_swr(data, base, lowOffset);
+      break;
+#ifdef JS_CODEGEN_MIPS64
+    case SizeDouble:
+      store = as_sdl(data, base, hiOffset);
+      as_sdr(data, base, lowOffset);
+      break;
+#endif
+    default:
+      MOZ_CRASH("Invalid argument for ma_store");
+  }
+  append(access, store.getOffset());
+}
+
+// Branches when done from within mips-specific code.
+// Conditional branch on two registers. Equality and zero-tests map directly
+// onto branch encodings; other conditions are first materialized into
+// ScratchRegister by ma_cmp, which reports whether to branch on the result
+// being equal or not equal to zero.
+void MacroAssemblerMIPSShared::ma_b(Register lhs, Register rhs, Label* label,
+                                    Condition c, JumpKind jumpKind) {
+  switch (c) {
+    case Equal:
+    case NotEqual:
+      asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind);
+      break;
+    case Always:
+      ma_b(label, jumpKind);
+      break;
+    case Zero:
+    case NonZero:
+    case Signed:
+    case NotSigned:
+      // Single-operand tests: callers pass the same register twice.
+      MOZ_ASSERT(lhs == rhs);
+      asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+      break;
+    default:
+      Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+      asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+                              jumpKind);
+      break;
+  }
+}
+
+// Conditional branch against an immediate. Comparisons against zero get
+// special treatment: unsigned >= 0 is always true, unsigned < 0 is always
+// false, and the remaining conditions use the single-register branch forms.
+void MacroAssemblerMIPSShared::ma_b(Register lhs, Imm32 imm, Label* label,
+                                    Condition c, JumpKind jumpKind) {
+  MOZ_ASSERT(c != Overflow);
+  if (imm.value == 0) {
+    if (c == Always || c == AboveOrEqual) {
+      ma_b(label, jumpKind);
+    } else if (c == Below) {
+      ;  // This condition is always false. No branch required.
+    } else {
+      asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+    }
+  } else {
+    switch (c) {
+      case Equal:
+      case NotEqual:
+        MOZ_ASSERT(lhs != ScratchRegister);
+        ma_li(ScratchRegister, imm);
+        ma_b(lhs, ScratchRegister, label, c, jumpKind);
+        break;
+      default:
+        // Relational conditions: materialize the test into ScratchRegister
+        // and branch on it being zero / non-zero as ma_cmp directs.
+        Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
+        asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+                                jumpKind);
+    }
+  }
+}
+
+// Pointer-immediate branch: forwards to the word-sized overload.
+void MacroAssemblerMIPSShared::ma_b(Register lhs, ImmPtr imm, Label* l,
+                                    Condition c, JumpKind jumpKind) {
+  asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+// Unconditional branch to a label.
+void MacroAssemblerMIPSShared::ma_b(Label* label, JumpKind jumpKind) {
+  asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+// Materialize a relational comparison of two registers into |dest| using
+// slt/sltu, and return the condition (Equal or NotEqual versus zero) the
+// caller should branch on. Operands are swapped where needed so only the
+// "set on less than" instructions are required.
+Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
+                                                      Register lhs,
+                                                      Register rhs,
+                                                      Condition c) {
+  switch (c) {
+    case Above:
+      // bgtu s,t,label =>
+      //   sltu at,t,s
+      //   bne at,$zero,offs
+      as_sltu(dest, rhs, lhs);
+      return NotEqual;
+    case AboveOrEqual:
+      // bgeu s,t,label =>
+      //   sltu at,s,t
+      //   beq at,$zero,offs
+      as_sltu(dest, lhs, rhs);
+      return Equal;
+    case Below:
+      // bltu s,t,label =>
+      //   sltu at,s,t
+      //   bne at,$zero,offs
+      as_sltu(dest, lhs, rhs);
+      return NotEqual;
+    case BelowOrEqual:
+      // bleu s,t,label =>
+      //   sltu at,t,s
+      //   beq at,$zero,offs
+      as_sltu(dest, rhs, lhs);
+      return Equal;
+    case GreaterThan:
+      // bgt s,t,label =>
+      //   slt at,t,s
+      //   bne at,$zero,offs
+      as_slt(dest, rhs, lhs);
+      return NotEqual;
+    case GreaterThanOrEqual:
+      // bge s,t,label =>
+      //   slt at,s,t
+      //   beq at,$zero,offs
+      as_slt(dest, lhs, rhs);
+      return Equal;
+    case LessThan:
+      // blt s,t,label =>
+      //   slt at,s,t
+      //   bne at,$zero,offs
+      as_slt(dest, lhs, rhs);
+      return NotEqual;
+    case LessThanOrEqual:
+      // ble s,t,label =>
+      //   slt at,t,s
+      //   beq at,$zero,offs
+      as_slt(dest, rhs, lhs);
+      return Equal;
+    default:
+      MOZ_CRASH("Invalid condition.");
+  }
+  return Always;
+}
+
+// Immediate form of ma_cmp: prefers the slti/sltiu encodings when the
+// immediate (or imm+1, for the <= / > rewrites) fits in 16 signed bits,
+// falling back to materializing the constant in the scratch register.
+Assembler::Condition MacroAssemblerMIPSShared::ma_cmp(Register dest,
+                                                      Register lhs, Imm32 imm,
+                                                      Condition c) {
+  ScratchRegisterScope scratch(asMasm());
+  MOZ_ASSERT(lhs != scratch);
+
+  switch (c) {
+    case Above:
+    case BelowOrEqual:
+      if (Imm16::IsInSignedRange(imm.value + 1) && imm.value != -1) {
+        // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
+        as_sltiu(dest, lhs, imm.value + 1);
+
+        return (c == BelowOrEqual ? NotEqual : Equal);
+      } else {
+        ma_li(scratch, imm);
+        as_sltu(dest, scratch, lhs);
+        return (c == BelowOrEqual ? Equal : NotEqual);
+      }
+    case AboveOrEqual:
+    case Below:
+      if (Imm16::IsInSignedRange(imm.value)) {
+        as_sltiu(dest, lhs, imm.value);
+      } else {
+        ma_li(scratch, imm);
+        as_sltu(dest, lhs, scratch);
+      }
+      return (c == AboveOrEqual ? Equal : NotEqual);
+    case GreaterThan:
+    case LessThanOrEqual:
+      if (Imm16::IsInSignedRange(imm.value + 1)) {
+        // lhs <= rhs via lhs < rhs + 1.
+        as_slti(dest, lhs, imm.value + 1);
+        return (c == LessThanOrEqual ? NotEqual : Equal);
+      } else {
+        ma_li(scratch, imm);
+        as_slt(dest, scratch, lhs);
+        return (c == LessThanOrEqual ? Equal : NotEqual);
+      }
+    case GreaterThanOrEqual:
+    case LessThan:
+      if (Imm16::IsInSignedRange(imm.value)) {
+        as_slti(dest, lhs, imm.value);
+      } else {
+        ma_li(scratch, imm);
+        as_slt(dest, lhs, scratch);
+      }
+      return (c == GreaterThanOrEqual ? Equal : NotEqual);
+    default:
+      MOZ_CRASH("Invalid condition.");
+  }
+  return Always;
+}
+
+// Set rd to 1 if the comparison (rs c rt) holds, otherwise 0, using only
+// slt/sltu plus xor/xori to invert or test for (in)equality. Single-operand
+// conditions (Zero, NonZero, Signed, NotSigned) require rs == rt.
+void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Register rt,
+                                          Condition c) {
+  switch (c) {
+    case Equal:
+      // seq d,s,t =>
+      //   xor d,s,t
+      //   sltiu d,d,1
+      as_xor(rd, rs, rt);
+      as_sltiu(rd, rd, 1);
+      break;
+    case NotEqual:
+      // sne d,s,t =>
+      //   xor d,s,t
+      //   sltu d,$zero,d
+      as_xor(rd, rs, rt);
+      as_sltu(rd, zero, rd);
+      break;
+    case Above:
+      // sgtu d,s,t =>
+      //   sltu d,t,s
+      as_sltu(rd, rt, rs);
+      break;
+    case AboveOrEqual:
+      // sgeu d,s,t =>
+      //   sltu d,s,t
+      //   xori d,d,1
+      as_sltu(rd, rs, rt);
+      as_xori(rd, rd, 1);
+      break;
+    case Below:
+      // sltu d,s,t
+      as_sltu(rd, rs, rt);
+      break;
+    case BelowOrEqual:
+      // sleu d,s,t =>
+      //   sltu d,t,s
+      //   xori d,d,1
+      as_sltu(rd, rt, rs);
+      as_xori(rd, rd, 1);
+      break;
+    case GreaterThan:
+      // sgt d,s,t =>
+      //   slt d,t,s
+      as_slt(rd, rt, rs);
+      break;
+    case GreaterThanOrEqual:
+      // sge d,s,t =>
+      //   slt d,s,t
+      //   xori d,d,1
+      as_slt(rd, rs, rt);
+      as_xori(rd, rd, 1);
+      break;
+    case LessThan:
+      // slt d,s,t
+      as_slt(rd, rs, rt);
+      break;
+    case LessThanOrEqual:
+      // sle d,s,t =>
+      //   slt d,t,s
+      //   xori d,d,1
+      as_slt(rd, rt, rs);
+      as_xori(rd, rd, 1);
+      break;
+    case Zero:
+      MOZ_ASSERT(rs == rt);
+      // seq d,s,$zero =>
+      //   sltiu d,s,1
+      as_sltiu(rd, rs, 1);
+      break;
+    case NonZero:
+      MOZ_ASSERT(rs == rt);
+      // sne d,s,$zero =>
+      //   sltu d,$zero,s
+      as_sltu(rd, zero, rs);
+      break;
+    case Signed:
+      MOZ_ASSERT(rs == rt);
+      as_slt(rd, rs, zero);
+      break;
+    case NotSigned:
+      MOZ_ASSERT(rs == rt);
+      // sge d,s,$zero =>
+      //   slt d,s,$zero
+      //   xori d,d,1
+      as_slt(rd, rs, zero);
+      as_xori(rd, rd, 1);
+      break;
+    default:
+      MOZ_CRASH("Invalid condition.");
+  }
+}
+
+// Emit a floating-point comparison of lhs and rhs for condition |c| into the
+// FPU condition bit |fcc|. Only the "less than"/"less or equal"/"equal"/
+// "unordered" compares exist, so greater-than conditions swap the operands
+// and some conditions test the complemented compare; *testKind tells the
+// caller whether a set condition bit means the condition is true
+// (TestForTrue) or false (TestForFalse).
+void MacroAssemblerMIPSShared::compareFloatingPoint(
+    FloatFormat fmt, FloatRegister lhs, FloatRegister rhs, DoubleCondition c,
+    FloatTestKind* testKind, FPConditionBit fcc) {
+  switch (c) {
+    case DoubleOrdered:
+      as_cun(fmt, lhs, rhs, fcc);
+      *testKind = TestForFalse;
+      break;
+    case DoubleEqual:
+      as_ceq(fmt, lhs, rhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleNotEqual:
+      as_cueq(fmt, lhs, rhs, fcc);
+      *testKind = TestForFalse;
+      break;
+    case DoubleGreaterThan:
+      as_colt(fmt, rhs, lhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleGreaterThanOrEqual:
+      as_cole(fmt, rhs, lhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleLessThan:
+      as_colt(fmt, lhs, rhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleLessThanOrEqual:
+      as_cole(fmt, lhs, rhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleUnordered:
+      as_cun(fmt, lhs, rhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleEqualOrUnordered:
+      as_cueq(fmt, lhs, rhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleNotEqualOrUnordered:
+      as_ceq(fmt, lhs, rhs, fcc);
+      *testKind = TestForFalse;
+      break;
+    case DoubleGreaterThanOrUnordered:
+      as_cult(fmt, rhs, lhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleGreaterThanOrEqualOrUnordered:
+      as_cule(fmt, rhs, lhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleLessThanOrUnordered:
+      as_cult(fmt, lhs, rhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    case DoubleLessThanOrEqualOrUnordered:
+      as_cule(fmt, lhs, rhs, fcc);
+      *testKind = TestForTrue;
+      break;
+    default:
+      MOZ_CRASH("Invalid DoubleCondition.");
+  }
+}
+
// Materialize the boolean result (0 or 1) of |lhs| <c> |rhs| (doubles) into
// the GPR |dest|.
void MacroAssemblerMIPSShared::ma_cmp_set_double(Register dest,
                                                 FloatRegister lhs,
                                                 FloatRegister rhs,
                                                 DoubleCondition c) {
  FloatTestKind moveCondition;
  compareFloatingPoint(DoubleFloat, lhs, rhs, c, &moveCondition);

#ifdef MIPSR6
  // On R6 the compare result lands in f24 as all-ones (true) / all-zeros
  // (false); move it to the GPR and reduce to 0/1.
  as_mfc1(dest, FloatRegisters::f24);
  if (moveCondition == TestForTrue) {
    // 0xffffffff -> 1, 0 -> 0.
    as_andi(dest, dest, 0x1);
  } else {
    // Inverted sense: 0xffffffff + 1 -> 0, 0 + 1 -> 1.
    as_addiu(dest, dest, 0x1);
  }
#else
  // Pre-R6: start with 1 and conditionally clear it on the opposite sense of
  // the FP condition bit.
  ma_li(dest, Imm32(1));

  if (moveCondition == TestForTrue) {
    as_movf(dest, zero);  // Clear when condition bit is false.
  } else {
    as_movt(dest, zero);  // Clear when condition bit is true.
  }
#endif
}
+
// Materialize the boolean result (0 or 1) of |lhs| <c> |rhs| (singles) into
// the GPR |dest|.  Mirrors ma_cmp_set_double but compares in single format.
void MacroAssemblerMIPSShared::ma_cmp_set_float32(Register dest,
                                                  FloatRegister lhs,
                                                  FloatRegister rhs,
                                                  DoubleCondition c) {
  FloatTestKind moveCondition;
  compareFloatingPoint(SingleFloat, lhs, rhs, c, &moveCondition);

#ifdef MIPSR6
  // R6 compare result is all-ones/all-zeros in f24; reduce to 0/1.
  as_mfc1(dest, FloatRegisters::f24);
  if (moveCondition == TestForTrue) {
    // 0xffffffff -> 1, 0 -> 0.
    as_andi(dest, dest, 0x1);
  } else {
    // Inverted sense: 0xffffffff + 1 -> 0, 0 + 1 -> 1.
    as_addiu(dest, dest, 0x1);
  }
#else
  // Pre-R6: start with 1 and conditionally clear on the opposite sense.
  ma_li(dest, Imm32(1));

  if (moveCondition == TestForTrue) {
    as_movf(dest, zero);  // Clear when condition bit is false.
  } else {
    as_movt(dest, zero);  // Clear when condition bit is true.
  }
#endif
}
+
// Materialize the boolean result (0 or 1) of |rs| <c> |imm| into |rd|.
// Comparisons against zero get dedicated short sequences; otherwise the
// immediate comparison falls through to ma_cmp, whose returned condition says
// whether the slt result must be kept as-is (NotEqual) or inverted (Equal).
void MacroAssemblerMIPSShared::ma_cmp_set(Register rd, Register rs, Imm32 imm,
                                          Condition c) {
  if (imm.value == 0) {
    switch (c) {
      case Equal:
      case BelowOrEqual:
        // rs == 0  <=>  rs <u 1.
        as_sltiu(rd, rs, 1);
        break;
      case NotEqual:
      case Above:
        // rs != 0  <=>  0 <u rs.
        as_sltu(rd, zero, rs);
        break;
      case AboveOrEqual:
      case Below:
        // Unsigned rs >= 0 is always true; rs < 0 is always false.
        as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
        break;
      case GreaterThan:
      case LessThanOrEqual:
        // Signed 0 < rs, inverted for <=.
        as_slt(rd, zero, rs);
        if (c == LessThanOrEqual) {
          as_xori(rd, rd, 1);
        }
        break;
      case LessThan:
      case GreaterThanOrEqual:
        // Signed rs < 0, inverted for >=.
        as_slt(rd, rs, zero);
        if (c == GreaterThanOrEqual) {
          as_xori(rd, rd, 1);
        }
        break;
      case Zero:
        as_sltiu(rd, rs, 1);
        break;
      case NonZero:
        as_sltu(rd, zero, rs);
        break;
      case Signed:
        // Sign bit set: rs < 0.
        as_slt(rd, rs, zero);
        break;
      case NotSigned:
        as_slt(rd, rs, zero);
        as_xori(rd, rd, 1);
        break;
      default:
        MOZ_CRASH("Invalid condition.");
    }
    return;
  }

  switch (c) {
    case Equal:
    case NotEqual:
      // XOR leaves zero exactly when rs == imm; then test against zero.
      MOZ_ASSERT(rs != ScratchRegister);
      ma_xor(rd, rs, imm);
      if (c == Equal) {
        as_sltiu(rd, rd, 1);
      } else {
        as_sltu(rd, zero, rd);
      }
      break;
    case Zero:
    case NonZero:
    case Signed:
    case NotSigned:
      // These only make sense against zero, handled above.
      MOZ_CRASH("Invalid condition.");
    default:
      Condition cond = ma_cmp(rd, rs, imm, c);
      MOZ_ASSERT(cond == Equal || cond == NotEqual);

      // Equal means the slt result has the inverted sense.
      if (cond == Equal) as_xori(rd, rd, 1);
  }
}
+
+// fp instructions
+void MacroAssemblerMIPSShared::ma_lis(FloatRegister dest, float value) {
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ if (imm.value != 0) {
+ ma_li(ScratchRegister, imm);
+ moveToFloat32(ScratchRegister, dest);
+ } else {
+ moveToFloat32(zero, dest);
+ }
+}
+
// Store the double in |ft| to the base+index*scale+offset address.  Loongson
// cores with a small offset use the fused gssdx store; everyone else folds
// base+scaled-index into SecondScratchReg first.
void MacroAssemblerMIPSShared::ma_sd(FloatRegister ft, BaseIndex address) {
  if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
    Register index = address.index;

    if (address.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(address.scale).value;

      // Pre-scale the index into SecondScratchReg.
      MOZ_ASSERT(SecondScratchReg != address.base);
      index = SecondScratchReg;
#ifdef JS_CODEGEN_MIPS64
      asMasm().ma_dsll(index, address.index, Imm32(shift));
#else
      asMasm().ma_sll(index, address.index, Imm32(shift));
#endif
    }

    as_gssdx(ft, address.base, index, address.offset);
    return;
  }

  asMasm().computeScaledAddress(address, SecondScratchReg);
  asMasm().ma_sd(ft, Address(SecondScratchReg, address.offset));
}
+
// Store the float32 in |ft| to the base+index*scale+offset address.  Same
// structure as ma_sd, but uses the single-precision gsssx on Loongson.
void MacroAssemblerMIPSShared::ma_ss(FloatRegister ft, BaseIndex address) {
  if (isLoongson() && Imm8::IsInSignedRange(address.offset)) {
    Register index = address.index;

    if (address.scale != TimesOne) {
      int32_t shift = Imm32::ShiftOf(address.scale).value;

      // Pre-scale the index into SecondScratchReg.
      MOZ_ASSERT(SecondScratchReg != address.base);
      index = SecondScratchReg;
#ifdef JS_CODEGEN_MIPS64
      asMasm().ma_dsll(index, address.index, Imm32(shift));
#else
      asMasm().ma_sll(index, address.index, Imm32(shift));
#endif
    }

    as_gsssx(ft, address.base, index, address.offset);
    return;
  }

  asMasm().computeScaledAddress(address, SecondScratchReg);
  asMasm().ma_ss(ft, Address(SecondScratchReg, address.offset));
}
+
+void MacroAssemblerMIPSShared::ma_ld(FloatRegister ft, const BaseIndex& src) {
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+ asMasm().ma_ld(ft, Address(SecondScratchReg, src.offset));
+}
+
+void MacroAssemblerMIPSShared::ma_ls(FloatRegister ft, const BaseIndex& src) {
+ asMasm().computeScaledAddress(src, SecondScratchReg);
+ asMasm().ma_ls(ft, Address(SecondScratchReg, src.offset));
+}
+
// Compare singles |lhs| and |rhs| under |c| and branch to |label| when the
// condition holds; the compare tells us whether to branch on the FP condition
// bit being set or clear.
void MacroAssemblerMIPSShared::ma_bc1s(FloatRegister lhs, FloatRegister rhs,
                                       Label* label, DoubleCondition c,
                                       JumpKind jumpKind, FPConditionBit fcc) {
  FloatTestKind testKind;
  compareFloatingPoint(SingleFloat, lhs, rhs, c, &testKind, fcc);
  asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
+
// Compare doubles |lhs| and |rhs| under |c| and branch to |label| when the
// condition holds.  Double-precision counterpart of ma_bc1s.
void MacroAssemblerMIPSShared::ma_bc1d(FloatRegister lhs, FloatRegister rhs,
                                       Label* label, DoubleCondition c,
                                       JumpKind jumpKind, FPConditionBit fcc) {
  FloatTestKind testKind;
  compareFloatingPoint(DoubleFloat, lhs, rhs, c, &testKind, fcc);
  asMasm().branchWithCode(getBranchCode(testKind, fcc), label, jumpKind);
}
+
// Compute min/max of |srcDest| and |second| (doubles) into |srcDest| with
// JS semantics: any NaN operand yields the canonical NaN, and -0/+0 are
// distinguished (min(-0,0) == -0, max(-0,0) == +0).  R6 has native min/max;
// pre-R6 emulates with compares and conditional moves.
void MacroAssemblerMIPSShared::minMaxDouble(FloatRegister srcDest,
                                            FloatRegister second,
                                            bool handleNaN, bool isMax) {
  FloatRegister first = srcDest;

  Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
                                          : Assembler::DoubleGreaterThanOrEqual;
  Label nan, equal, done;
  FloatTestKind moveCondition;

  // First or second is NaN, result is NaN.
  ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
#ifdef MIPSR6
  if (isMax) {
    as_max(DoubleFloat, srcDest, first, second);
  } else {
    as_min(DoubleFloat, srcDest, first, second);
  }
#else
  // Make sure we handle -0 and 0 right.
  ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
  compareFloatingPoint(DoubleFloat, first, second, cond, &moveCondition);
  MOZ_ASSERT(TestForTrue == moveCondition);
  // Replace first with second when the (inverted) ordering condition holds.
  as_movt(DoubleFloat, first, second);
  ma_b(&done, ShortJump);

  // Check for zero.
  bind(&equal);
  asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
  compareFloatingPoint(DoubleFloat, first, ScratchDoubleReg,
                       Assembler::DoubleEqual, &moveCondition);

  // So now both operands are either -0 or 0.
  if (isMax) {
    // -0 + -0 = -0 and -0 + 0 = 0.
    as_addd(ScratchDoubleReg, first, second);
  } else {
    // -(-first - second): picks -0 if either operand is -0.
    as_negd(ScratchDoubleReg, first);
    as_subd(ScratchDoubleReg, ScratchDoubleReg, second);
    as_negd(ScratchDoubleReg, ScratchDoubleReg);
  }
  MOZ_ASSERT(TestForTrue == moveCondition);
  // First is 0 or -0, move max/min to it, else just return it.
  as_movt(DoubleFloat, first, ScratchDoubleReg);
#endif
  ma_b(&done, ShortJump);

  bind(&nan);
  asMasm().loadConstantDouble(JS::GenericNaN(), srcDest);

  bind(&done);
}
+
// Compute min/max of |srcDest| and |second| (singles) into |srcDest| with
// JS semantics (NaN propagation, -0/+0 distinction).  Single-precision
// mirror of minMaxDouble.
void MacroAssemblerMIPSShared::minMaxFloat32(FloatRegister srcDest,
                                             FloatRegister second,
                                             bool handleNaN, bool isMax) {
  FloatRegister first = srcDest;

  Assembler::DoubleCondition cond = isMax ? Assembler::DoubleLessThanOrEqual
                                          : Assembler::DoubleGreaterThanOrEqual;
  Label nan, equal, done;
  FloatTestKind moveCondition;

  // First or second is NaN, result is NaN.
  ma_bc1s(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
#ifdef MIPSR6
  if (isMax) {
    as_max(SingleFloat, srcDest, first, second);
  } else {
    as_min(SingleFloat, srcDest, first, second);
  }
#else
  // Make sure we handle -0 and 0 right.
  ma_bc1s(first, second, &equal, Assembler::DoubleEqual, ShortJump);
  compareFloatingPoint(SingleFloat, first, second, cond, &moveCondition);
  MOZ_ASSERT(TestForTrue == moveCondition);
  // Replace first with second when the (inverted) ordering condition holds.
  as_movt(SingleFloat, first, second);
  ma_b(&done, ShortJump);

  // Check for zero.
  bind(&equal);
  asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
  compareFloatingPoint(SingleFloat, first, ScratchFloat32Reg,
                       Assembler::DoubleEqual, &moveCondition);

  // So now both operands are either -0 or 0.
  if (isMax) {
    // -0 + -0 = -0 and -0 + 0 = 0.
    as_adds(ScratchFloat32Reg, first, second);
  } else {
    // -(-first - second): picks -0 if either operand is -0.
    as_negs(ScratchFloat32Reg, first);
    as_subs(ScratchFloat32Reg, ScratchFloat32Reg, second);
    as_negs(ScratchFloat32Reg, ScratchFloat32Reg);
  }
  MOZ_ASSERT(TestForTrue == moveCondition);
  // First is 0 or -0, move max/min to it, else just return it.
  as_movt(SingleFloat, first, ScratchFloat32Reg);
#endif
  ma_b(&done, ShortJump);

  bind(&nan);
  asMasm().loadConstantFloat32(JS::GenericNaN(), srcDest);

  bind(&done);
}
+
// Load a double from |address| into |dest|.
void MacroAssemblerMIPSShared::loadDouble(const Address& address,
                                          FloatRegister dest) {
  asMasm().ma_ld(dest, address);
}
+
// Load a double from a base+index*scale+offset address into |dest|.
void MacroAssemblerMIPSShared::loadDouble(const BaseIndex& src,
                                          FloatRegister dest) {
  asMasm().ma_ld(dest, src);
}
+
// Load a float32 from |address| and widen it to a double in |dest|.
void MacroAssemblerMIPSShared::loadFloatAsDouble(const Address& address,
                                                 FloatRegister dest) {
  asMasm().ma_ls(dest, address);
  as_cvtds(dest, dest);  // single -> double, in place
}
+
// Load a float32 from a base+index address and widen it to a double in
// |dest|.
void MacroAssemblerMIPSShared::loadFloatAsDouble(const BaseIndex& src,
                                                 FloatRegister dest) {
  asMasm().loadFloat32(src, dest);
  as_cvtds(dest, dest);  // single -> double, in place
}
+
// Load a float32 from |address| into |dest|.
void MacroAssemblerMIPSShared::loadFloat32(const Address& address,
                                           FloatRegister dest) {
  asMasm().ma_ls(dest, address);
}
+
// Load a float32 from a base+index*scale+offset address into |dest|.
void MacroAssemblerMIPSShared::loadFloat32(const BaseIndex& src,
                                           FloatRegister dest) {
  asMasm().ma_ls(dest, src);
}
+
// Call the absolute address |dest| via a patchable load into CallReg
// followed by jalr; the delay slot is filled with a nop.
void MacroAssemblerMIPSShared::ma_call(ImmPtr dest) {
  asMasm().ma_liPatchable(CallReg, dest);
  as_jalr(CallReg);
  as_nop();  // branch delay slot
}
+
// Jump (no link) to the absolute address |dest| via a patchable load into
// ScratchRegister followed by jr.
void MacroAssemblerMIPSShared::ma_jump(ImmPtr dest) {
  asMasm().ma_liPatchable(ScratchRegister, dest);
  as_jr(ScratchRegister);
  as_nop();  // branch delay slot
}
+
// Downcast this shared-base object to the concrete MacroAssembler.
MacroAssembler& MacroAssemblerMIPSShared::asMasm() {
  return *static_cast<MacroAssembler*>(this);
}
+
// Const overload of the downcast above.
const MacroAssembler& MacroAssemblerMIPSShared::asMasm() const {
  return *static_cast<const MacroAssembler*>(this);
}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
// Intentionally a no-op on this port.
void MacroAssembler::flush() {}
+
+// ===============================================================
+// Stack manipulation functions.
+
// Push a GPR and keep the tracked frame size in sync.
void MacroAssembler::Push(Register reg) {
  ma_push(reg);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
+
// Push a 32-bit immediate, staged through ScratchRegister.
void MacroAssembler::Push(const Imm32 imm) {
  ma_li(ScratchRegister, imm);
  ma_push(ScratchRegister);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
+
// Push a word-sized immediate, staged through ScratchRegister.
void MacroAssembler::Push(const ImmWord imm) {
  ma_li(ScratchRegister, imm);
  ma_push(ScratchRegister);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
+
// Push a pointer immediate by reusing the ImmWord overload.
void MacroAssembler::Push(const ImmPtr imm) {
  Push(ImmWord(uintptr_t(imm.value)));
}
+
// Push a GC-pointer immediate (kept patchable for GC), staged through
// ScratchRegister.
void MacroAssembler::Push(const ImmGCPtr ptr) {
  ma_li(ScratchRegister, ptr);
  ma_push(ScratchRegister);
  adjustFrame(int32_t(sizeof(intptr_t)));
}
+
// Push a float register; its pushSize() determines the frame adjustment.
void MacroAssembler::Push(FloatRegister f) {
  ma_push(f);
  adjustFrame(int32_t(f.pushSize()));
}
+
// Pop into a GPR and keep the tracked frame size in sync.
void MacroAssembler::Pop(Register reg) {
  ma_pop(reg);
  adjustFrame(-int32_t(sizeof(intptr_t)));
}
+
// Pop into a float register; its pushSize() determines the frame adjustment.
void MacroAssembler::Pop(FloatRegister f) {
  ma_pop(f);
  adjustFrame(-int32_t(f.pushSize()));
}
+
// Pop a boxed Value into |val|.
void MacroAssembler::Pop(const ValueOperand& val) {
  popValue(val);
  adjustFrame(-int32_t(sizeof(Value)));
}
+
// Restore the stack pointer from the word currently on top of the stack.
void MacroAssembler::PopStackPtr() {
  loadPtr(Address(StackPointer, 0), StackPointer);
  adjustFrame(-int32_t(sizeof(intptr_t)));
}
+
+// ===============================================================
+// Simple call functions.
+
// Indirect call through |reg|; delay slot filled with a nop.  Returns the
// offset just past the call sequence (the return address).
CodeOffset MacroAssembler::call(Register reg) {
  as_jalr(reg);
  as_nop();  // branch delay slot
  return CodeOffset(currentOffset());
}
+
// PC-relative call to |label|.  Returns the offset of the return address.
CodeOffset MacroAssembler::call(Label* label) {
  ma_bal(label);
  return CodeOffset(currentOffset());
}
+
// Emit a patchable far call: a bal skips over a 32-bit literal word that
// patchCall() later fills with the callee displacement; the tail reloads
// that word relative to ra, adds it to ra and jumps.  The returned offset
// identifies this call site for patchCall().
CodeOffset MacroAssembler::callWithPatch() {
  as_bal(BOffImm16(3 * sizeof(uint32_t)));
  // Delay slot: point ra at the instruction after the whole sequence.
  addPtr(Imm32(5 * sizeof(uint32_t)), ra);
  // Allocate space which will be patched by patchCall().
  spew(".space 32bit initValue 0xffff ffff");
  writeInst(UINT32_MAX);
  as_lw(ScratchRegister, ra, -(int32_t)(5 * sizeof(uint32_t)));
  addPtr(ra, ScratchRegister);
  as_jr(ScratchRegister);
  as_nop();  // branch delay slot
  return CodeOffset(currentOffset());
}
+
// Patch a callWithPatch() site.  The bal lives 7 instructions before
// |callerOffset|; if the callee is in 16-bit branch range, retarget the bal
// directly, otherwise write the caller-relative displacement into the
// literal word 5 instructions back.
void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
  BufferOffset call(callerOffset - 7 * sizeof(uint32_t));

  BOffImm16 offset = BufferOffset(calleeOffset).diffB<BOffImm16>(call);
  if (!offset.isInvalid()) {
    InstImm* bal = (InstImm*)editSrc(call);
    bal->setBOffImm16(offset);
  } else {
    uint32_t u32Offset = callerOffset - 5 * sizeof(uint32_t);
    uint32_t* u32 =
        reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
    *u32 = calleeOffset - callerOffset;
  }
}
+
// Emit a patchable far jump.  bal is used purely to capture the PC in ra;
// the delay-slot lw reads the 32-bit displacement word that
// patchFarJump() later fills in, and ra is restored from SecondScratchReg
// in the jr delay slot.  Returns the offset of the patchable word.
CodeOffset MacroAssembler::farJumpWithPatch() {
  ma_move(SecondScratchReg, ra);  // preserve the real return address
  as_bal(BOffImm16(3 * sizeof(uint32_t)));
  as_lw(ScratchRegister, ra, 0);  // delay slot: load the literal below
  // Allocate space which will be patched by patchFarJump().
  CodeOffset farJump(currentOffset());
  spew(".space 32bit initValue 0xffff ffff");
  writeInst(UINT32_MAX);
  addPtr(ra, ScratchRegister);
  as_jr(ScratchRegister);
  ma_move(ra, SecondScratchReg);  // delay slot: restore ra
  return farJump;
}
+
// Fill in the displacement word of a farJumpWithPatch() site with the
// offset from the word itself to |targetOffset|.
void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
  uint32_t* u32 =
      reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
  MOZ_ASSERT(*u32 == UINT32_MAX);  // must still hold the placeholder
  *u32 = targetOffset - farJump.offset();
}
+
// Call a wasm symbolic address: materialize it in CallReg, then call.
CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
  movePtr(target, CallReg);
  return call(CallReg);
}
+
// Call through a function pointer loaded from memory.
void MacroAssembler::call(const Address& addr) {
  loadPtr(addr, CallReg);
  call(CallReg);
}
+
// Call an absolute address given as a word immediate.
void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+
// Call an absolute address, recording it as a hardcoded pending jump so the
// relocation machinery can track it.
void MacroAssembler::call(ImmPtr target) {
  BufferOffset bo = m_buffer.nextOffset();
  addPendingJump(bo, target, RelocationKind::HARDCODED);
  ma_call(target);
}
+
// Call into JIT code, recording a JITCODE relocation for the target.
void MacroAssembler::call(JitCode* c) {
  BufferOffset bo = m_buffer.nextOffset();
  addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
  ma_liPatchable(ScratchRegister, ImmPtr(c->raw()));
  callJitNoProfiler(ScratchRegister);
}
+
// Emit a run of nops large enough to later be patched (by patchNopToCall)
// into a load-address + jalr sequence: 4 instructions on MIPS32, 6 on
// MIPS64.  Returns the offset just past the nops.
CodeOffset MacroAssembler::nopPatchableToCall() {
  // MIPS32   //MIPS64
  as_nop();  // lui      // lui
  as_nop();  // ori      // ori
  as_nop();  // jalr     // drotr32
  as_nop();  //          // ori
#ifdef JS_CODEGEN_MIPS64
  as_nop();  //          // jalr
  as_nop();
#endif
  return CodeOffset(currentOffset());
}
+
// Rewrite a nopPatchableToCall() site so it loads |target| into
// ScratchRegister and jalr's to it.  |call| points just past the nops.
void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
#ifdef JS_CODEGEN_MIPS64
  Instruction* inst = (Instruction*)call - 6 /* six nops */;
  Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
  inst[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#else
  Instruction* inst = (Instruction*)call - 4 /* four nops */;
  Assembler::WriteLuiOriInstructions(inst, &inst[1], ScratchRegister,
                                     (uint32_t)target);
  inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#endif
}
+
// Undo patchNopToCall(): restore the full nop run at the call site.
void MacroAssembler::patchCallToNop(uint8_t* call) {
#ifdef JS_CODEGEN_MIPS64
  Instruction* inst = (Instruction*)call - 6 /* six nops */;
#else
  Instruction* inst = (Instruction*)call - 4 /* four nops */;
#endif

  inst[0].makeNop();
  inst[1].makeNop();
  inst[2].makeNop();
  inst[3].makeNop();
#ifdef JS_CODEGEN_MIPS64
  inst[4].makeNop();
  inst[5].makeNop();
#endif
}
+
// The return address lives in $ra on MIPS; spill it to the stack.
void MacroAssembler::pushReturnAddress() { push(ra); }
+
// Restore the return address from the stack into $ra.
void MacroAssembler::popReturnAddress() { pop(ra); }
+
+// ===============================================================
+// Jit Frames.
+
// Push the address of the instruction following this sequence as a fake
// return address (via a CodeLabel bound right after the push) and return
// that offset.
uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
  CodeLabel cl;

  ma_li(scratch, &cl);
  Push(scratch);
  bind(&cl);
  uint32_t retAddr = currentOffset();

  addCodeLabel(cl);
  return retAddr;
}
+
// Load the store buffer pointer of the GC chunk containing |ptr| into
// |buffer|: mask off the chunk-offset bits, then read the chunk header slot.
void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
  ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
  loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
}
+
// Branch to |label| if |ptr| is (Equal) / is not (NotEqual) in a nursery
// chunk, detected by a non-null store buffer pointer in its chunk header.
void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
                                             Register temp, Label* label) {
  MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
  MOZ_ASSERT(ptr != temp);
  MOZ_ASSERT(ptr != SecondScratchReg);

  // Round down to the chunk base, then compare the store buffer slot against
  // null with the inverted condition.
  ma_and(SecondScratchReg, ptr, Imm32(int32_t(~gc::ChunkMask)));
  branchPtr(InvertCondition(cond),
            Address(SecondScratchReg, gc::ChunkStoreBufferOffset), ImmWord(0),
            label);
}
+
// Forward spew comments to the assembler.
void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
+// ===============================================================
+// WebAssembly
+
// Emit an unconditional trap (teq $zero,$zero with the WASM_TRAP code) and
// return its code offset for the wasm fault machinery.
FaultingCodeOffset MacroAssembler::wasmTrapInstruction() {
  FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
  as_teq(zero, zero, WASM_TRAP);
  return fco;
}
+
// Truncate the double |input| to int32 in |output|.  The FCSR CauseV
// (invalid-operation) bit is checked afterwards; if the truncation was
// invalid (NaN or out of range) we branch to |oolEntry|.
void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
                                               Register output,
                                               bool isSaturating,
                                               Label* oolEntry) {
  as_truncwd(ScratchFloat32Reg, input);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  // The 32-bit integer result is read out of the FP register.
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
}
+
// Truncate the float32 |input| to int32 in |output|, branching to |oolEntry|
// if FCSR CauseV reports an invalid operation.  Single-precision mirror of
// wasmTruncateDoubleToInt32.
void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
                                                Register output,
                                                bool isSaturating,
                                                Label* oolEntry) {
  as_truncws(ScratchFloat32Reg, input);
  as_cfc1(ScratchRegister, Assembler::FCSR);
  moveFromFloat32(ScratchFloat32Reg, output);
  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
  ma_b(ScratchRegister, Imm32(0), oolEntry, Assembler::NotEqual);
}
+
// Out-of-line slow path for float32 -> int32 wasm truncation.
void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
                                                  Register output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt32Check(input, output, MIRType::Float32, flags,
                                    rejoin, off);
}
+
// Out-of-line slow path for double -> int32 wasm truncation.
void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
                                                  Register output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt32Check(input, output, MIRType::Double, flags,
                                    rejoin, off);
}
+
// Out-of-line slow path for float32 -> int64 wasm truncation.
void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
                                                  Register64 output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt64Check(input, output, MIRType::Float32, flags,
                                    rejoin, off);
}
+
// Out-of-line slow path for double -> int64 wasm truncation.
void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
                                                  Register64 output,
                                                  TruncFlags flags,
                                                  wasm::BytecodeOffset off,
                                                  Label* rejoin) {
  outOfLineWasmTruncateToInt64Check(input, output, MIRType::Double, flags,
                                    rejoin, off);
}
+
// Out-of-line check for a wasm float -> int32 truncation that took the slow
// path.  Saturating mode writes the saturated value into |output| and jumps
// back to |rejoin|; trapping mode raises IntegerOverflow or, for NaN input,
// InvalidConversionToInteger at |trapOffset|.
void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt32Check(
    FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
    Label* rejoin, wasm::BytecodeOffset trapOffset) {
  bool isUnsigned = flags & TRUNC_UNSIGNED;
  bool isSaturating = flags & TRUNC_SATURATING;

  if (isSaturating) {
    // Zero constant used below for the sign test.
    if (fromType == MIRType::Double) {
      asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
    } else {
      asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
    }

    if (isUnsigned) {
      // Assume positive overflow: UINT32_MAX; clear to 0 when input < 0 or
      // is NaN.
      ma_li(output, Imm32(UINT32_MAX));

      FloatTestKind moveCondition;
      compareFloatingPoint(
          fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
          fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
          Assembler::DoubleLessThanOrUnordered, &moveCondition);
      MOZ_ASSERT(moveCondition == TestForTrue);

      as_movt(output, zero);
    } else {
      // Positive overflow is already saturated to INT32_MAX, so we only have
      // to handle NaN and negative overflow here.

      FloatTestKind moveCondition;
      compareFloatingPoint(
          fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
          Assembler::DoubleUnordered, &moveCondition);
      MOZ_ASSERT(moveCondition == TestForTrue);

      // NaN -> 0.
      as_movt(output, zero);

      compareFloatingPoint(
          fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
          fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
          Assembler::DoubleLessThan, &moveCondition);
      MOZ_ASSERT(moveCondition == TestForTrue);

      // Negative overflow -> INT32_MIN.
      ma_li(ScratchRegister, Imm32(INT32_MIN));
      as_movt(output, ScratchRegister);
    }

    MOZ_ASSERT(rejoin->bound());
    asMasm().jump(rejoin);
    return;
  }

  Label inputIsNaN;

  if (fromType == MIRType::Double) {
    asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
                          &inputIsNaN);
  } else if (fromType == MIRType::Float32) {
    asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
  }

  asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
  asMasm().bind(&inputIsNaN);
  asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
}
+
+void MacroAssemblerMIPSShared::outOfLineWasmTruncateToInt64Check(
+ FloatRegister input, Register64 output_, MIRType fromType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+#if defined(JS_CODEGEN_MIPS32)
+ // Saturating callouts don't use ool path.
+ return;
+#else
+ Register output = output_.reg;
+
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(0.0, ScratchDoubleReg);
+ } else {
+ asMasm().loadConstantFloat32(0.0f, ScratchFloat32Reg);
+ }
+
+ if (isUnsigned) {
+ asMasm().ma_li(output, ImmWord(UINT64_MAX));
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
+ Assembler::DoubleLessThanOrUnordered, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ as_movt(output, zero);
+
+ } else {
+ // Positive overflow is already saturated to INT64_MAX, so we only have
+ // to handle NaN and negative overflow here.
+
+ FloatTestKind moveCondition;
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
+ Assembler::DoubleUnordered, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ as_movt(output, zero);
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fromType == MIRType::Double ? ScratchDoubleReg : ScratchFloat32Reg,
+ Assembler::DoubleLessThan, &moveCondition);
+ MOZ_ASSERT(moveCondition == TestForTrue);
+
+ asMasm().ma_li(ScratchRegister, ImmWord(INT64_MIN));
+ as_movt(output, ScratchRegister);
+ }
+
+ MOZ_ASSERT(rejoin->bound());
+ asMasm().jump(rejoin);
+ return;
+#endif
+ }
+
+ Label inputIsNaN;
+
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ }
+
+#if defined(JS_CODEGEN_MIPS32)
+
+ // Only possible valid input that produces INT64_MIN result.
+ double validInput =
+ isUnsigned ? double(uint64_t(INT64_MIN)) : double(int64_t(INT64_MIN));
+
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(validInput, ScratchDoubleReg);
+ asMasm().branchDouble(Assembler::DoubleEqual, input, ScratchDoubleReg,
+ rejoin);
+ } else {
+ asMasm().loadConstantFloat32(float(validInput), ScratchFloat32Reg);
+ asMasm().branchFloat(Assembler::DoubleEqual, input, ScratchDoubleReg,
+ rejoin);
+ }
+
+#endif
+
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+ asMasm().bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
// Aligned wasm load; no unaligned-access temp register is needed.
void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
                              Register memoryBase, Register ptr,
                              Register ptrScratch, AnyRegister output) {
  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
}
+
// Unaligned wasm load into a GPR; |tmp| assists the byte-wise access.
void MacroAssembler::wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
                                       Register memoryBase, Register ptr,
                                       Register ptrScratch, Register output,
                                       Register tmp) {
  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp);
}
+
// Unaligned wasm load into an FPR; |tmp1| assists the byte-wise access.
void MacroAssembler::wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
                                         Register memoryBase, Register ptr,
                                         Register ptrScratch,
                                         FloatRegister output, Register tmp1) {
  wasmLoadImpl(access, memoryBase, ptr, ptrScratch, AnyRegister(output), tmp1);
}
+
// Aligned wasm store; no unaligned-access temp register is needed.
void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
                               AnyRegister value, Register memoryBase,
                               Register ptr, Register ptrScratch) {
  wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
}
+
// Unaligned wasm store from a GPR; |tmp| assists the byte-wise access.
void MacroAssembler::wasmUnalignedStore(const wasm::MemoryAccessDesc& access,
                                        Register value, Register memoryBase,
                                        Register ptr, Register ptrScratch,
                                        Register tmp) {
  wasmStoreImpl(access, AnyRegister(value), memoryBase, ptr, ptrScratch, tmp);
}
+
// Unaligned wasm store from an FPR; |tmp| assists the byte-wise access.
void MacroAssembler::wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
                                          FloatRegister floatValue,
                                          Register memoryBase, Register ptr,
                                          Register ptrScratch, Register tmp) {
  wasmStoreImpl(access, AnyRegister(floatValue), memoryBase, ptr, ptrScratch,
                tmp);
}
+
// Shared implementation for all wasm loads.  Folds the access offset into
// |ptrScratch|, picks sign/zero extension and int/float path from the access
// type, dispatches unaligned accesses to the byte-wise helpers, and for the
// aligned path wraps the access in memory barriers and records its code
// offset for fault handling.
void MacroAssemblerMIPSShared::wasmLoadImpl(
    const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
    Register ptrScratch, AnyRegister output, Register tmp) {
  access.assertOffsetInGuardPages();
  uint32_t offset = access.offset();
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned;
  bool isFloat = false;

  // SIMD loads are not supported on this port.
  MOZ_ASSERT(!access.isZeroExtendSimd128Load());
  MOZ_ASSERT(!access.isSplatSimd128Load());
  MOZ_ASSERT(!access.isWidenSimd128Load());
  switch (access.type()) {
    case Scalar::Int8:
      isSigned = true;
      break;
    case Scalar::Uint8:
      isSigned = false;
      break;
    case Scalar::Int16:
      isSigned = true;
      break;
    case Scalar::Uint16:
      isSigned = false;
      break;
    case Scalar::Int32:
      isSigned = true;
      break;
    case Scalar::Uint32:
      isSigned = false;
      break;
    case Scalar::Float64:
      isFloat = true;
      break;
    case Scalar::Float32:
      isFloat = true;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  BaseIndex address(memoryBase, ptr, TimesOne);
  if (IsUnaligned(access)) {
    MOZ_ASSERT(tmp != InvalidReg);
    if (isFloat) {
      if (byteSize == 4) {
        asMasm().loadUnalignedFloat32(access, address, tmp, output.fpu());
      } else {
        asMasm().loadUnalignedDouble(access, address, tmp, output.fpu());
      }
    } else {
      asMasm().ma_load_unaligned(access, output.gpr(), address, tmp,
                                 static_cast<LoadStoreSize>(8 * byteSize),
                                 isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  if (isFloat) {
    if (byteSize == 4) {
      asMasm().ma_ls(output.fpu(), address);
    } else {
      asMasm().ma_ld(output.fpu(), address);
    }
  } else {
    asMasm().ma_load(output.gpr(), address,
                     static_cast<LoadStoreSize>(8 * byteSize),
                     isSigned ? SignExtend : ZeroExtend);
  }
  // Record the last emitted instruction (the memory access) for faults.
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
+
// Shared implementation for all wasm stores.  Mirrors wasmLoadImpl: fold
// the offset, classify the access type, dispatch unaligned accesses, and
// wrap the aligned access in barriers while recording its code offset.
void MacroAssemblerMIPSShared::wasmStoreImpl(
    const wasm::MemoryAccessDesc& access, AnyRegister value,
    Register memoryBase, Register ptr, Register ptrScratch, Register tmp) {
  access.assertOffsetInGuardPages();
  uint32_t offset = access.offset();
  MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);

  // Maybe add the offset.
  if (offset) {
    asMasm().addPtr(ImmWord(offset), ptrScratch);
    ptr = ptrScratch;
  }

  unsigned byteSize = access.byteSize();
  bool isSigned;
  bool isFloat = false;

  switch (access.type()) {
    case Scalar::Int8:
      isSigned = true;
      break;
    case Scalar::Uint8:
      isSigned = false;
      break;
    case Scalar::Int16:
      isSigned = true;
      break;
    case Scalar::Uint16:
      isSigned = false;
      break;
    case Scalar::Int32:
      isSigned = true;
      break;
    case Scalar::Uint32:
      isSigned = false;
      break;
    case Scalar::Int64:
      isSigned = true;
      break;
    case Scalar::Float64:
      isFloat = true;
      break;
    case Scalar::Float32:
      isFloat = true;
      break;
    default:
      MOZ_CRASH("unexpected array type");
  }

  BaseIndex address(memoryBase, ptr, TimesOne);
  if (IsUnaligned(access)) {
    MOZ_ASSERT(tmp != InvalidReg);
    if (isFloat) {
      if (byteSize == 4) {
        asMasm().storeUnalignedFloat32(access, value.fpu(), tmp, address);
      } else {
        asMasm().storeUnalignedDouble(access, value.fpu(), tmp, address);
      }
    } else {
      asMasm().ma_store_unaligned(access, value.gpr(), address, tmp,
                                  static_cast<LoadStoreSize>(8 * byteSize),
                                  isSigned ? SignExtend : ZeroExtend);
    }
    return;
  }

  asMasm().memoryBarrierBefore(access.sync());
  if (isFloat) {
    if (byteSize == 4) {
      asMasm().ma_ss(value.fpu(), address);
    } else {
      asMasm().ma_sd(value.fpu(), address);
    }
  } else {
    asMasm().ma_store(value.gpr(), address,
                      static_cast<LoadStoreSize>(8 * byteSize),
                      isSigned ? SignExtend : ZeroExtend);
  }
  // Only the last emitted instruction is a memory access.
  asMasm().append(access, asMasm().size() - 4);
  asMasm().memoryBarrierAfter(access.sync());
}
+
// Wasm uses the generic fake exit frame; no extra port-specific setup.
void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
                                               ExitFrameType type) {
  enterFakeExitFrame(cxreg, scratch, type);
}
+
+// ========================================================================
+// Primitive atomic operations.
+
// Emit an atomic compare-exchange on |mem| via an LL/SC loop.  32-bit
// accesses operate on the word directly; 8/16-bit accesses load the
// containing aligned word and splice the subword in/out with
// valueTemp/offsetTemp/maskTemp.  |output| receives the old value
// (sign- or zero-extended per |type|).  When |access| is non-null the LL is
// recorded as a wasm faulting access.
template <typename T>
static void CompareExchange(MacroAssembler& masm,
                            const wasm::MemoryAccessDesc* access,
                            Scalar::Type type, const Synchronization& sync,
                            const T& mem, Register oldval, Register newval,
                            Register valueTemp, Register offsetTemp,
                            Register maskTemp, Register output) {
  bool signExtend = Scalar::isSignedIntType(type);
  unsigned nbytes = Scalar::byteSize(type);

  switch (nbytes) {
    case 1:
    case 2:
      break;
    case 4:
      // Word-sized: the subword temps must not be supplied.
      MOZ_ASSERT(valueTemp == InvalidReg);
      MOZ_ASSERT(offsetTemp == InvalidReg);
      MOZ_ASSERT(maskTemp == InvalidReg);
      break;
    default:
      MOZ_CRASH();
  }

  Label again, end;

  masm.computeEffectiveAddress(mem, SecondScratchReg);

  if (nbytes == 4) {
    masm.memoryBarrierBefore(sync);
    masm.bind(&again);

    if (access) {
      masm.append(*access, masm.size());
    }

    // LL/SC loop: reload, bail if mismatch, retry until the store succeeds.
    masm.as_ll(output, SecondScratchReg, 0);
    masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
    masm.ma_move(ScratchRegister, newval);
    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
              ShortJump);

    masm.memoryBarrierAfter(sync);
    masm.bind(&end);

    return;
  }

  // Subword case: split the address into an aligned word address and a bit
  // shift, and build a mask covering the target subword.
  masm.as_andi(offsetTemp, SecondScratchReg, 3);
  masm.subPtr(offsetTemp, SecondScratchReg);
#if !MOZ_LITTLE_ENDIAN()
  masm.as_xori(offsetTemp, offsetTemp, 3);
#endif
  masm.as_sll(offsetTemp, offsetTemp, 3);
  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
  // maskTemp now holds the complement: ones everywhere but the subword.
  masm.as_nor(maskTemp, zero, maskTemp);

  masm.memoryBarrierBefore(sync);

  masm.bind(&again);

  if (access) {
    masm.append(*access, masm.size());
  }

  masm.as_ll(ScratchRegister, SecondScratchReg, 0);

  // Extract the current subword value into |output|.
  masm.as_srlv(output, ScratchRegister, offsetTemp);

  switch (nbytes) {
    case 1:
      // Normalize both the expected and loaded bytes the same way before
      // comparing.
      if (signExtend) {
        masm.ma_seb(valueTemp, oldval);
        masm.ma_seb(output, output);
      } else {
        masm.as_andi(valueTemp, oldval, 0xff);
        masm.as_andi(output, output, 0xff);
      }
      break;
    case 2:
      if (signExtend) {
        masm.ma_seh(valueTemp, oldval);
        masm.ma_seh(output, output);
      } else {
        masm.as_andi(valueTemp, oldval, 0xffff);
        masm.as_andi(output, output, 0xffff);
      }
      break;
  }

  masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);

  // Splice the new subword into the loaded word and try to store it back.
  masm.as_sllv(valueTemp, newval, offsetTemp);
  masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);

  masm.as_sc(ScratchRegister, SecondScratchReg, 0);

  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
            ShortJump);

  masm.memoryBarrierAfter(sync);

  masm.bind(&end);
}
+
+// Public entry points for 8/16/32-bit compare-exchange.  All four forward to
+// the CompareExchange() emitter above; the wasm variants additionally pass
+// the MemoryAccessDesc so the linked load can be recorded for trap handling.
+void MacroAssembler::compareExchange(Scalar::Type type,
+                                     const Synchronization& sync,
+                                     const Address& mem, Register oldval,
+                                     Register newval, Register valueTemp,
+                                     Register offsetTemp, Register maskTemp,
+                                     Register output) {
+  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+                  offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+                                     const Synchronization& sync,
+                                     const BaseIndex& mem, Register oldval,
+                                     Register newval, Register valueTemp,
+                                     Register offsetTemp, Register maskTemp,
+                                     Register output) {
+  CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+                  offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+                                         const Address& mem, Register oldval,
+                                         Register newval, Register valueTemp,
+                                         Register offsetTemp, Register maskTemp,
+                                         Register output) {
+  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+                  newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+                                         const BaseIndex& mem, Register oldval,
+                                         Register newval, Register valueTemp,
+                                         Register offsetTemp, Register maskTemp,
+                                         Register output) {
+  CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+                  newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+// Emit an atomic exchange on an 8-, 16- or 32-bit memory cell, leaving the
+// old value in `output`, extended per `type`.
+//
+// A 32-bit cell uses a plain ll/sc retry loop.  Narrower cells are emulated
+// on the enclosing aligned 32-bit word: the low two address bits are cleared
+// to form the word address (SecondScratchReg), the cell's bit offset goes to
+// offsetTemp, and maskTemp receives a mask covering the *other* bytes of the
+// word so the new value can be spliced in.  On big-endian targets the byte
+// offset is flipped (xori 3) to address the cell's actual bits.  When
+// `access` is non-null the ll is appended as a wasm memory access.
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+                           const wasm::MemoryAccessDesc* access,
+                           Scalar::Type type, const Synchronization& sync,
+                           const T& mem, Register value, Register valueTemp,
+                           Register offsetTemp, Register maskTemp,
+                           Register output) {
+  bool signExtend = Scalar::isSignedIntType(type);
+  unsigned nbytes = Scalar::byteSize(type);
+
+  // The subword temps are only meaningful for 1- and 2-byte cells.
+  switch (nbytes) {
+    case 1:
+    case 2:
+      break;
+    case 4:
+      MOZ_ASSERT(valueTemp == InvalidReg);
+      MOZ_ASSERT(offsetTemp == InvalidReg);
+      MOZ_ASSERT(maskTemp == InvalidReg);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  Label again;
+
+  masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+  // Fast path: word-sized exchange is a direct ll/sc loop.
+  if (nbytes == 4) {
+    masm.memoryBarrierBefore(sync);
+    masm.bind(&again);
+
+    if (access) {
+      masm.append(*access, masm.size());
+    }
+
+    masm.as_ll(output, SecondScratchReg, 0);
+    masm.ma_move(ScratchRegister, value);
+    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+    // sc writes 0 on failure; retry until the store succeeds.
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+              ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+
+    return;
+  }
+
+  // Subword path: derive the aligned word address, bit offset and inverse
+  // cell mask.
+  masm.as_andi(offsetTemp, SecondScratchReg, 3);
+  masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+  masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+  masm.as_sll(offsetTemp, offsetTemp, 3);
+  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+  masm.as_nor(maskTemp, zero, maskTemp);
+  // Pre-shift the truncated new value into the cell's position; this is
+  // loop-invariant so it is done once, before the ll/sc loop.
+  switch (nbytes) {
+    case 1:
+      masm.as_andi(valueTemp, value, 0xff);
+      break;
+    case 2:
+      masm.as_andi(valueTemp, value, 0xffff);
+      break;
+  }
+  masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+  masm.memoryBarrierBefore(sync);
+
+  masm.bind(&again);
+
+  if (access) {
+    masm.append(*access, masm.size());
+  }
+
+  // Splice the new bytes into the loaded word and retry until sc succeeds.
+  masm.as_ll(output, SecondScratchReg, 0);
+  masm.as_and(ScratchRegister, output, maskTemp);
+  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+  masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+            ShortJump);
+
+  // Extract the old cell from the word loaded by ll and extend it.
+  masm.as_srlv(output, output, offsetTemp);
+
+  switch (nbytes) {
+    case 1:
+      if (signExtend) {
+        masm.ma_seb(output, output);
+      } else {
+        masm.as_andi(output, output, 0xff);
+      }
+      break;
+    case 2:
+      if (signExtend) {
+        masm.ma_seh(output, output);
+      } else {
+        masm.as_andi(output, output, 0xffff);
+      }
+      break;
+  }
+
+  masm.memoryBarrierAfter(sync);
+}
+
+// Public entry points for 8/16/32-bit atomic exchange; thin forwarders to
+// the AtomicExchange() emitter above (wasm variants carry the access
+// descriptor for trap registration).
+void MacroAssembler::atomicExchange(Scalar::Type type,
+                                    const Synchronization& sync,
+                                    const Address& mem, Register value,
+                                    Register valueTemp, Register offsetTemp,
+                                    Register maskTemp, Register output) {
+  AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+                 maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+                                    const Synchronization& sync,
+                                    const BaseIndex& mem, Register value,
+                                    Register valueTemp, Register offsetTemp,
+                                    Register maskTemp, Register output) {
+  AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+                 maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+                                        const Address& mem, Register value,
+                                        Register valueTemp, Register offsetTemp,
+                                        Register maskTemp, Register output) {
+  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+                 valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+                                        const BaseIndex& mem, Register value,
+                                        Register valueTemp, Register offsetTemp,
+                                        Register maskTemp, Register output) {
+  AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+                 valueTemp, offsetTemp, maskTemp, output);
+}
+
+// Emit an atomic read-modify-write (add/sub/and/or/xor) on an 8-, 16- or
+// 32-bit cell, leaving the *old* value in `output`, extended per `type`.
+//
+// Layout mirrors AtomicExchange above: 32-bit cells use a direct ll/sc
+// retry loop; narrower cells operate on the enclosing aligned word using
+// offsetTemp (bit offset), maskTemp (inverse cell mask) and valueTemp (the
+// updated cell).  The arithmetic op must run inside the loop because it
+// depends on the freshly loaded value.
+template <typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+                          const wasm::MemoryAccessDesc* access,
+                          Scalar::Type type, const Synchronization& sync,
+                          AtomicOp op, const T& mem, Register value,
+                          Register valueTemp, Register offsetTemp,
+                          Register maskTemp, Register output) {
+  bool signExtend = Scalar::isSignedIntType(type);
+  unsigned nbytes = Scalar::byteSize(type);
+
+  // Subword temps are only used for 1- and 2-byte cells.
+  switch (nbytes) {
+    case 1:
+    case 2:
+      break;
+    case 4:
+      MOZ_ASSERT(valueTemp == InvalidReg);
+      MOZ_ASSERT(offsetTemp == InvalidReg);
+      MOZ_ASSERT(maskTemp == InvalidReg);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  Label again;
+
+  masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+  // Fast path: word-sized fetch-op.
+  if (nbytes == 4) {
+    masm.memoryBarrierBefore(sync);
+    masm.bind(&again);
+
+    if (access) {
+      masm.append(*access, masm.size());
+    }
+
+    // Old value lands in output; the updated value is built in
+    // ScratchRegister and conditionally stored.
+    masm.as_ll(output, SecondScratchReg, 0);
+
+    switch (op) {
+      case AtomicFetchAddOp:
+        masm.as_addu(ScratchRegister, output, value);
+        break;
+      case AtomicFetchSubOp:
+        masm.as_subu(ScratchRegister, output, value);
+        break;
+      case AtomicFetchAndOp:
+        masm.as_and(ScratchRegister, output, value);
+        break;
+      case AtomicFetchOrOp:
+        masm.as_or(ScratchRegister, output, value);
+        break;
+      case AtomicFetchXorOp:
+        masm.as_xor(ScratchRegister, output, value);
+        break;
+      default:
+        MOZ_CRASH();
+    }
+
+    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+    // sc writes 0 on failure; retry until it succeeds.
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+              ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+
+    return;
+  }
+
+  // Subword path: aligned word address, bit offset, inverse cell mask.
+  masm.as_andi(offsetTemp, SecondScratchReg, 3);
+  masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+  masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+  masm.as_sll(offsetTemp, offsetTemp, 3);
+  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+  masm.as_nor(maskTemp, zero, maskTemp);
+
+  masm.memoryBarrierBefore(sync);
+
+  masm.bind(&again);
+
+  if (access) {
+    masm.append(*access, masm.size());
+  }
+
+  // Load the word, bring the cell down to bit 0, apply the operation.
+  masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+  masm.as_srlv(output, ScratchRegister, offsetTemp);
+
+  switch (op) {
+    case AtomicFetchAddOp:
+      masm.as_addu(valueTemp, output, value);
+      break;
+    case AtomicFetchSubOp:
+      masm.as_subu(valueTemp, output, value);
+      break;
+    case AtomicFetchAndOp:
+      masm.as_and(valueTemp, output, value);
+      break;
+    case AtomicFetchOrOp:
+      masm.as_or(valueTemp, output, value);
+      break;
+    case AtomicFetchXorOp:
+      masm.as_xor(valueTemp, output, value);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  // Truncate the result back to the cell width before splicing it in.
+  switch (nbytes) {
+    case 1:
+      masm.as_andi(valueTemp, valueTemp, 0xff);
+      break;
+    case 2:
+      masm.as_andi(valueTemp, valueTemp, 0xffff);
+      break;
+  }
+
+  masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+  masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+  masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+            ShortJump);
+
+  // Extend the old cell value (already at bit 0 in output) per the type.
+  switch (nbytes) {
+    case 1:
+      if (signExtend) {
+        masm.ma_seb(output, output);
+      } else {
+        masm.as_andi(output, output, 0xff);
+      }
+      break;
+    case 2:
+      if (signExtend) {
+        masm.ma_seh(output, output);
+      } else {
+        masm.as_andi(output, output, 0xffff);
+      }
+      break;
+  }
+
+  masm.memoryBarrierAfter(sync);
+}
+
+// Public entry points for 8/16/32-bit atomic fetch-op; thin forwarders to
+// the AtomicFetchOp() emitter above.
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+                                   const Synchronization& sync, AtomicOp op,
+                                   Register value, const Address& mem,
+                                   Register valueTemp, Register offsetTemp,
+                                   Register maskTemp, Register output) {
+  AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+                offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+                                   const Synchronization& sync, AtomicOp op,
+                                   Register value, const BaseIndex& mem,
+                                   Register valueTemp, Register offsetTemp,
+                                   Register maskTemp, Register output) {
+  AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+                offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+                                       AtomicOp op, Register value,
+                                       const Address& mem, Register valueTemp,
+                                       Register offsetTemp, Register maskTemp,
+                                       Register output) {
+  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+                                       AtomicOp op, Register value,
+                                       const BaseIndex& mem, Register valueTemp,
+                                       Register offsetTemp, Register maskTemp,
+                                       Register output) {
+  AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                valueTemp, offsetTemp, maskTemp, output);
+}
+
+// Emit an atomic read-modify-write whose old value is discarded (no output
+// register).  Same structure as AtomicFetchOp above, minus the final
+// extension of the old value; used for the *EffectOp entry points where the
+// caller only wants the memory side effect.
+template <typename T>
+static void AtomicEffectOp(MacroAssembler& masm,
+                           const wasm::MemoryAccessDesc* access,
+                           Scalar::Type type, const Synchronization& sync,
+                           AtomicOp op, const T& mem, Register value,
+                           Register valueTemp, Register offsetTemp,
+                           Register maskTemp) {
+  unsigned nbytes = Scalar::byteSize(type);
+
+  // Subword temps are only used for 1- and 2-byte cells.
+  switch (nbytes) {
+    case 1:
+    case 2:
+      break;
+    case 4:
+      MOZ_ASSERT(valueTemp == InvalidReg);
+      MOZ_ASSERT(offsetTemp == InvalidReg);
+      MOZ_ASSERT(maskTemp == InvalidReg);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  Label again;
+
+  masm.computeEffectiveAddress(mem, SecondScratchReg);
+
+  // Fast path: word-sized ll/op/sc retry loop.
+  if (nbytes == 4) {
+    masm.memoryBarrierBefore(sync);
+    masm.bind(&again);
+
+    if (access) {
+      masm.append(*access, masm.size());
+    }
+
+    masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+
+    switch (op) {
+      case AtomicFetchAddOp:
+        masm.as_addu(ScratchRegister, ScratchRegister, value);
+        break;
+      case AtomicFetchSubOp:
+        masm.as_subu(ScratchRegister, ScratchRegister, value);
+        break;
+      case AtomicFetchAndOp:
+        masm.as_and(ScratchRegister, ScratchRegister, value);
+        break;
+      case AtomicFetchOrOp:
+        masm.as_or(ScratchRegister, ScratchRegister, value);
+        break;
+      case AtomicFetchXorOp:
+        masm.as_xor(ScratchRegister, ScratchRegister, value);
+        break;
+      default:
+        MOZ_CRASH();
+    }
+
+    masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+    // sc writes 0 on failure; retry until it succeeds.
+    masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+              ShortJump);
+
+    masm.memoryBarrierAfter(sync);
+
+    return;
+  }
+
+  // Subword path: aligned word address, bit offset, inverse cell mask
+  // (see AtomicExchange for the layout).
+  masm.as_andi(offsetTemp, SecondScratchReg, 3);
+  masm.subPtr(offsetTemp, SecondScratchReg);
+#if !MOZ_LITTLE_ENDIAN()
+  masm.as_xori(offsetTemp, offsetTemp, 3);
+#endif
+  masm.as_sll(offsetTemp, offsetTemp, 3);
+  masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+  masm.as_sllv(maskTemp, maskTemp, offsetTemp);
+  masm.as_nor(maskTemp, zero, maskTemp);
+
+  masm.memoryBarrierBefore(sync);
+
+  masm.bind(&again);
+
+  if (access) {
+    masm.append(*access, masm.size());
+  }
+
+  masm.as_ll(ScratchRegister, SecondScratchReg, 0);
+  masm.as_srlv(valueTemp, ScratchRegister, offsetTemp);
+
+  switch (op) {
+    case AtomicFetchAddOp:
+      masm.as_addu(valueTemp, valueTemp, value);
+      break;
+    case AtomicFetchSubOp:
+      masm.as_subu(valueTemp, valueTemp, value);
+      break;
+    case AtomicFetchAndOp:
+      masm.as_and(valueTemp, valueTemp, value);
+      break;
+    case AtomicFetchOrOp:
+      masm.as_or(valueTemp, valueTemp, value);
+      break;
+    case AtomicFetchXorOp:
+      masm.as_xor(valueTemp, valueTemp, value);
+      break;
+    default:
+      MOZ_CRASH();
+  }
+
+  // Truncate to the cell width and splice back into the word.
+  switch (nbytes) {
+    case 1:
+      masm.as_andi(valueTemp, valueTemp, 0xff);
+      break;
+    case 2:
+      masm.as_andi(valueTemp, valueTemp, 0xffff);
+      break;
+  }
+
+  masm.as_sllv(valueTemp, valueTemp, offsetTemp);
+
+  masm.as_and(ScratchRegister, ScratchRegister, maskTemp);
+  masm.as_or(ScratchRegister, ScratchRegister, valueTemp);
+
+  masm.as_sc(ScratchRegister, SecondScratchReg, 0);
+
+  masm.ma_b(ScratchRegister, ScratchRegister, &again, Assembler::Zero,
+            ShortJump);
+
+  masm.memoryBarrierAfter(sync);
+}
+
+// Wasm entry points for value-discarding atomic ops; forward to
+// AtomicEffectOp() with the access descriptor for trap registration.
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+                                        AtomicOp op, Register value,
+                                        const Address& mem, Register valueTemp,
+                                        Register offsetTemp,
+                                        Register maskTemp) {
+  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                 valueTemp, offsetTemp, maskTemp);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+                                        AtomicOp op, Register value,
+                                        const BaseIndex& mem,
+                                        Register valueTemp, Register offsetTemp,
+                                        Register maskTemp) {
+  AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+                 valueTemp, offsetTemp, maskTemp);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+// JS typed-array compare-exchange.  Uint32 results may exceed int32 range,
+// so for Scalar::Uint32 the old value is produced in the GPR `temp` and
+// converted to a double in the FP output register; all other element types
+// return directly in output.gpr().
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+                              const Synchronization& sync, const T& mem,
+                              Register oldval, Register newval,
+                              Register valueTemp, Register offsetTemp,
+                              Register maskTemp, Register temp,
+                              AnyRegister output) {
+  if (arrayType == Scalar::Uint32) {
+    masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+                         offsetTemp, maskTemp, temp);
+    masm.convertUInt32ToDouble(temp, output.fpu());
+  } else {
+    masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+                         offsetTemp, maskTemp, output.gpr());
+  }
+}
+
+// Address/BaseIndex entry points forwarding to CompareExchangeJS() above.
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+                                       const Synchronization& sync,
+                                       const Address& mem, Register oldval,
+                                       Register newval, Register valueTemp,
+                                       Register offsetTemp, Register maskTemp,
+                                       Register temp, AnyRegister output) {
+  CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+                    offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+                                       const Synchronization& sync,
+                                       const BaseIndex& mem, Register oldval,
+                                       Register newval, Register valueTemp,
+                                       Register offsetTemp, Register maskTemp,
+                                       Register temp, AnyRegister output) {
+  CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+                    offsetTemp, maskTemp, temp, output);
+}
+
+// JS typed-array atomic exchange.  As with CompareExchangeJS, Uint32 old
+// values are routed through `temp` and converted to a double output.
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+                             const Synchronization& sync, const T& mem,
+                             Register value, Register valueTemp,
+                             Register offsetTemp, Register maskTemp,
+                             Register temp, AnyRegister output) {
+  if (arrayType == Scalar::Uint32) {
+    masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+                        maskTemp, temp);
+    masm.convertUInt32ToDouble(temp, output.fpu());
+  } else {
+    masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+                        maskTemp, output.gpr());
+  }
+}
+
+// Address/BaseIndex entry points forwarding to AtomicExchangeJS() above.
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+                                      const Synchronization& sync,
+                                      const Address& mem, Register value,
+                                      Register valueTemp, Register offsetTemp,
+                                      Register maskTemp, Register temp,
+                                      AnyRegister output) {
+  AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+                   maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+                                      const Synchronization& sync,
+                                      const BaseIndex& mem, Register value,
+                                      Register valueTemp, Register offsetTemp,
+                                      Register maskTemp, Register temp,
+                                      AnyRegister output) {
+  AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+                   maskTemp, temp, output);
+}
+
+// JS typed-array atomic fetch-op.  Uint32 old values go through `temp` and
+// are converted to a double output; other types return in output.gpr().
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+                            const Synchronization& sync, AtomicOp op,
+                            Register value, const T& mem, Register valueTemp,
+                            Register offsetTemp, Register maskTemp,
+                            Register temp, AnyRegister output) {
+  if (arrayType == Scalar::Uint32) {
+    masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+                       maskTemp, temp);
+    masm.convertUInt32ToDouble(temp, output.fpu());
+  } else {
+    masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+                       maskTemp, output.gpr());
+  }
+}
+
+// Address/BaseIndex entry points forwarding to AtomicFetchOpJS() above.
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+                                     const Synchronization& sync, AtomicOp op,
+                                     Register value, const Address& mem,
+                                     Register valueTemp, Register offsetTemp,
+                                     Register maskTemp, Register temp,
+                                     AnyRegister output) {
+  AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+                  maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+                                     const Synchronization& sync, AtomicOp op,
+                                     Register value, const BaseIndex& mem,
+                                     Register valueTemp, Register offsetTemp,
+                                     Register maskTemp, Register temp,
+                                     AnyRegister output) {
+  AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+                  maskTemp, temp, output);
+}
+
+// JS typed-array atomic ops whose result is discarded; no Uint32 special
+// case is needed since nothing is returned.  Forward to AtomicEffectOp()
+// with a null access descriptor.
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+                                      const Synchronization& sync, AtomicOp op,
+                                      Register value, const BaseIndex& mem,
+                                      Register valueTemp, Register offsetTemp,
+                                      Register maskTemp) {
+  AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+                 offsetTemp, maskTemp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+                                      const Synchronization& sync, AtomicOp op,
+                                      Register value, const Address& mem,
+                                      Register valueTemp, Register offsetTemp,
+                                      Register maskTemp) {
+  AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+                 offsetTemp, maskTemp);
+}
+
+// On MIPS integer division needs no fixed-register spills, so the
+// LiveRegisterSet of volatile registers is unused and these simply forward
+// to the plain quotient/remainder operations.
+void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
+                                        bool isUnsigned,
+                                        const LiveRegisterSet&) {
+  quotient32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
+                                         bool isUnsigned,
+                                         const LiveRegisterSet&) {
+  remainder32(rhs, srcDest, isUnsigned);
+}
+
+// Compute srcDest / rhs into srcDest and srcDest % rhs into remOutput.
+// MIPS R6 has three-operand div/mod instructions (quotient is staged in
+// ScratchRegister so the mod still sees the original srcDest); pre-R6 uses
+// the classic divide-into-HI/LO form and reads the results back with
+// mfhi/mflo.
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+                                      Register remOutput, bool isUnsigned,
+                                      const LiveRegisterSet&) {
+  if (isUnsigned) {
+#ifdef MIPSR6
+    as_divu(ScratchRegister, srcDest, rhs);
+    as_modu(remOutput, srcDest, rhs);
+    ma_move(srcDest, ScratchRegister);
+#else
+    as_divu(srcDest, rhs);
+#endif
+  } else {
+#ifdef MIPSR6
+    as_div(ScratchRegister, srcDest, rhs);
+    as_mod(remOutput, srcDest, rhs);
+    ma_move(srcDest, ScratchRegister);
+#else
+    as_div(srcDest, rhs);
+#endif
+  }
+#ifndef MIPSR6
+  // Pre-R6: remainder is in HI, quotient in LO.
+  as_mfhi(remOutput);
+  as_mflo(srcDest);
+#endif
+}
+
+// Emit a patchable load of a code address (filled in later by
+// patchNearAddressMove).  On MIPS this is just a patchable full-width
+// immediate move of a placeholder null pointer.
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+  return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+                                          CodeLocationLabel target) {
+  // Expected-value check: the placeholder written above must still be null.
+  PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+// No speculation barrier is implemented for MIPS; callers must not request
+// one on this platform.
+void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
+
+// floor(src) as an int32 in dest; jumps to fail on NaN, -0, or results that
+// hit floor.w.s's INT_MIN/INT_MAX saturation values.
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+                                         Label* fail) {
+  ScratchFloat32Scope scratch(*this);
+
+  Label skipCheck, done;
+
+  // If Nan, 0 or -0 check for bailout
+  loadConstantFloat32(0.0f, scratch);
+  ma_bc1s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+  // If binary value is not zero, it is NaN or -0, so we bail.
+  // NOTE(review): the sibling float32 helpers (ceilFloat32ToInt32,
+  // roundFloat32ToInt32) use moveFromFloat32 for this; moveFromDoubleLo
+  // appears equivalent for a single-precision register, but confirm before
+  // unifying.
+  moveFromDoubleLo(src, SecondScratchReg);
+  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+  // Input was zero, so return zero.
+  move32(Imm32(0), dest);
+  ma_b(&done, ShortJump);
+
+  bind(&skipCheck);
+  as_floorws(scratch, src);
+  moveFromDoubleLo(scratch, dest);
+
+  // Bail on the out-of-range sentinels produced by floor.w.s.
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+  bind(&done);
+}
+
+// floor(src) as an int32 in dest; jumps to fail on NaN, -0, or results that
+// hit floor.w.d's INT_MIN/INT_MAX saturation values.
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+                                        Label* fail) {
+  ScratchDoubleScope scratch(*this);
+
+  Label skipCheck, done;
+
+  // If Nan, 0 or -0 check for bailout
+  loadConstantDouble(0.0, scratch);
+  ma_bc1d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+  // If high part is not zero, it is NaN or -0, so we bail.
+  // (The high word of a double holds the sign and exponent bits.)
+  moveFromDoubleHi(src, SecondScratchReg);
+  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+  // Input was zero, so return zero.
+  move32(Imm32(0), dest);
+  ma_b(&done, ShortJump);
+
+  bind(&skipCheck);
+  as_floorwd(scratch, src);
+  moveFromDoubleLo(scratch, dest);
+
+  // Bail on the out-of-range sentinels produced by floor.w.d.
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+  bind(&done);
+}
+
+// ceil(src) as an int32 in dest; jumps to fail on NaN, -0, or results that
+// hit ceil.w.s's INT_MIN/INT_MAX saturation values.  Inputs in ]-1, 0] other
+// than +0 would ceil to -0, hence the explicit range test up front.
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+                                        Label* fail) {
+  ScratchFloat32Scope scratch(*this);
+
+  Label performCeil, done;
+
+  // If x < -1 or x > 0 then perform ceil.
+  loadConstantFloat32(0.0f, scratch);
+  branchFloat(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+  loadConstantFloat32(-1.0f, scratch);
+  branchFloat(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+  // If binary value is not zero, the input was not 0, so we bail.
+  moveFromFloat32(src, SecondScratchReg);
+  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+  // Input was zero, so return zero.
+  move32(Imm32(0), dest);
+  ma_b(&done, ShortJump);
+
+  bind(&performCeil);
+  as_ceilws(scratch, src);
+  moveFromFloat32(scratch, dest);
+
+  // Bail on the out-of-range sentinels produced by ceil.w.s.
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+  bind(&done);
+}
+
+// ceil(src) as an int32 in dest; double-precision twin of
+// ceilFloat32ToInt32 above.
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+                                       Label* fail) {
+  ScratchDoubleScope scratch(*this);
+
+  Label performCeil, done;
+
+  // If x < -1 or x > 0 then perform ceil.
+  loadConstantDouble(0, scratch);
+  branchDouble(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+  loadConstantDouble(-1, scratch);
+  branchDouble(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+  // If high part is not zero, the input was not 0, so we bail.
+  moveFromDoubleHi(src, SecondScratchReg);
+  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+  // Input was zero, so return zero.
+  move32(Imm32(0), dest);
+  ma_b(&done, ShortJump);
+
+  bind(&performCeil);
+  as_ceilwd(scratch, src);
+  moveFromDoubleLo(scratch, dest);
+
+  // Bail on the out-of-range sentinels produced by ceil.w.d.
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+  bind(&done);
+}
+
+// round-half-up(src) as an int32 in dest; jumps to fail on NaN, -0, results
+// that would be -0, and results hitting floor.w.s's INT_MIN/INT_MAX
+// saturation values.  `temp` is clobbered with the rounding bias.
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+                                         FloatRegister temp, Label* fail) {
+  ScratchFloat32Scope scratch(*this);
+
+  Label negative, end, skipCheck;
+
+  // Load biggest number less than 0.5 in the temp register.
+  loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+
+  // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+  loadConstantFloat32(0.0f, scratch);
+  ma_bc1s(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+  // If Nan, 0 or -0 check for bailout
+  ma_bc1s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+  // If binary value is not zero, it is NaN or -0, so we bail.
+  moveFromFloat32(src, SecondScratchReg);
+  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+  // Input was zero, so return zero.
+  move32(Imm32(0), dest);
+  ma_b(&end, ShortJump);
+
+  bind(&skipCheck);
+  as_adds(scratch, src, temp);
+  as_floorws(scratch, scratch);
+
+  moveFromFloat32(scratch, dest);
+
+  // Bail on the out-of-range sentinels produced by floor.w.s.  These must
+  // be value compares (branch32), matching the other floor/ceil/round
+  // helpers: branchTest32 would and() the operands and take the branch for
+  // nearly every valid result.
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+  jump(&end);
+
+  // Input is negative, but isn't -0.
+  bind(&negative);
+
+  // Inputs in ]-0.5; 0] need to be added 0.5, other negative inputs need to
+  // be added the biggest double less than 0.5.
+  Label loadJoin;
+  loadConstantFloat32(-0.5f, scratch);
+  branchFloat(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+  loadConstantFloat32(0.5f, temp);
+  bind(&loadJoin);
+
+  as_adds(temp, src, temp);
+
+  // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+  // result is -0.
+  branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+  // Truncate and round toward zero.
+  // This is off-by-one for everything but integer-valued inputs.
+  as_floorws(scratch, temp);
+  moveFromFloat32(scratch, dest);
+
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+  bind(&end);
+}
+
+// round-half-up(src) as an int32 in dest; double-precision twin of
+// roundFloat32ToInt32 above.  `temp` is clobbered with the rounding bias.
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+                                        FloatRegister temp, Label* fail) {
+  ScratchDoubleScope scratch(*this);
+
+  Label negative, end, skipCheck;
+
+  // Load biggest number less than 0.5 in the temp register.
+  loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+
+  // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+  loadConstantDouble(0.0, scratch);
+  ma_bc1d(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+  // If Nan, 0 or -0 check for bailout
+  ma_bc1d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+  // If high part is not zero, it is NaN or -0, so we bail.
+  moveFromDoubleHi(src, SecondScratchReg);
+  branch32(Assembler::NotEqual, SecondScratchReg, Imm32(0), fail);
+
+  // Input was zero, so return zero.
+  move32(Imm32(0), dest);
+  ma_b(&end, ShortJump);
+
+  bind(&skipCheck);
+  as_addd(scratch, src, temp);
+  as_floorwd(scratch, scratch);
+
+  moveFromDoubleLo(scratch, dest);
+
+  // Bail on the out-of-range sentinels produced by floor.w.d.
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+  branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+  jump(&end);
+
+  // Input is negative, but isn't -0.
+  bind(&negative);
+
+  // Inputs in ]-0.5; 0] need to be added 0.5, other negative inputs need to
+  // be added the biggest double less than 0.5.
+  Label loadJoin;
+  loadConstantDouble(-0.5, scratch);
+  branchDouble(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+  loadConstantDouble(0.5, temp);
+  bind(&loadJoin);
+
+  addDouble(src, temp);
+
+  // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+  // result is -0.
+  branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+  // Truncate and round toward zero.
+  // This is off-by-one for everything but integer-valued inputs.
+  as_floorwd(scratch, temp);
+  moveFromDoubleLo(scratch, dest);
+
+  branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+  bind(&end);
+}
+
+// trunc(src) as an int32 in dest; jumps to fail if trunc.w.s raised the
+// Invalid-Operation cause bit (CauseV in FCSR), or if the result is 0 and
+// the input's sign bit is set (i.e. the input was in ]-1, -0]).
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+                                         Label* fail) {
+  Label notZero;
+  as_truncws(ScratchFloat32Reg, src);
+  as_cfc1(ScratchRegister, Assembler::FCSR);
+  moveFromFloat32(ScratchFloat32Reg, dest);
+  // Extract the CauseV (invalid operation) bit from FCSR.
+  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+
+  ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+  moveFromFloat32(src, ScratchRegister);
+  // Check if src is in ]-1; -0] range by checking the sign bit.
+  as_slt(ScratchRegister, ScratchRegister, zero);
+  bind(&notZero);
+
+  branch32(Assembler::NotEqual, ScratchRegister, Imm32(0), fail);
+}
+
+// trunc(src) as an int32 in dest; double-precision twin of
+// truncFloat32ToInt32.  The sign check for the -0 case reads the high word
+// of the double, which carries the sign bit.
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+                                        Label* fail) {
+  Label notZero;
+  as_truncwd(ScratchFloat32Reg, src);
+  as_cfc1(ScratchRegister, Assembler::FCSR);
+  // trunc.w.d produces a single 32-bit word, so a float32 move suffices.
+  moveFromFloat32(ScratchFloat32Reg, dest);
+  // Extract the CauseV (invalid operation) bit from FCSR.
+  ma_ext(ScratchRegister, ScratchRegister, Assembler::CauseV, 1);
+
+  ma_b(dest, Imm32(0), &notZero, Assembler::NotEqual, ShortJump);
+  moveFromDoubleHi(src, ScratchRegister);
+  // Check if src is in ]-1; -0] range by checking the sign bit.
+  as_slt(ScratchRegister, ScratchRegister, zero);
+  bind(&notZero);
+
+  branch32(Assembler::NotEqual, ScratchRegister, Imm32(0), fail);
+}
+
+// Rounding-mode-parameterized nearbyint and copysign are not implemented on
+// MIPS; feature detection elsewhere must keep these paths unreachable.
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+                                     FloatRegister dest) {
+  MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+                                      FloatRegister dest) {
+  MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+                                    FloatRegister output) {
+  MOZ_CRASH("not supported on this platform");
+}
+
+// pointer += indexTemp32 << shift.  Uses a scaled BaseIndex address when the
+// shift fits an addressing-mode scale; otherwise shifts indexTemp32 in
+// place (clobbering it) and adds.
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+                                        Register pointer) {
+  if (IsShiftInScaleRange(shift)) {
+    computeEffectiveAddress(
+        BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+    return;
+  }
+  lshift32(Imm32(shift), indexTemp32);
+  addPtr(indexTemp32, pointer);
+}
+
+//}}} check_macroassembler_style
diff --git a/js/src/jit/mips-shared/MacroAssembler-mips-shared.h b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
new file mode 100644
index 0000000000..88238accbb
--- /dev/null
+++ b/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
@@ -0,0 +1,258 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MacroAssembler_mips_shared_h
+#define jit_mips_shared_MacroAssembler_mips_shared_h
+
+#if defined(JS_CODEGEN_MIPS32)
+# include "jit/mips32/Assembler-mips32.h"
+#elif defined(JS_CODEGEN_MIPS64)
+# include "jit/mips64/Assembler-mips64.h"
+#endif
+
+#include "jit/AtomicOp.h"
+
+namespace js {
+namespace jit {
+
+enum LoadStoreSize {
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 };
+
+enum JumpKind { LongJump = 0, ShortJump = 1 };
+
+enum DelaySlotFill { DontFillDelaySlot = 0, FillDelaySlot = 1 };
+
+static Register CallReg = t9;
+
+class MacroAssemblerMIPSShared : public Assembler {
+ protected:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+ Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
+
+ void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, DoubleCondition c,
+ FloatTestKind* testKind, FPConditionBit fcc = FCC0);
+
+ public:
+ void ma_move(Register rd, Register rs);
+
+ void ma_li(Register dest, ImmGCPtr ptr);
+
+ void ma_li(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, Imm32 imm);
+
+ // Shift operations
+ void ma_sll(Register rd, Register rt, Imm32 shift);
+ void ma_srl(Register rd, Register rt, Imm32 shift);
+ void ma_sra(Register rd, Register rt, Imm32 shift);
+ void ma_ror(Register rd, Register rt, Imm32 shift);
+ void ma_rol(Register rd, Register rt, Imm32 shift);
+
+ void ma_sll(Register rd, Register rt, Register shift);
+ void ma_srl(Register rd, Register rt, Register shift);
+ void ma_sra(Register rd, Register rt, Register shift);
+ void ma_ror(Register rd, Register rt, Register shift);
+ void ma_rol(Register rd, Register rt, Register shift);
+
+ // Negate
+ void ma_negu(Register rd, Register rs);
+
+ void ma_not(Register rd, Register rs);
+
+ // Bit extract/insert
+ void ma_ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void ma_ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Sign extend
+ void ma_seb(Register rd, Register rt);
+ void ma_seh(Register rd, Register rt);
+
+ // and
+ void ma_and(Register rd, Register rs);
+ void ma_and(Register rd, Imm32 imm);
+ void ma_and(Register rd, Register rs, Imm32 imm);
+
+ // or
+ void ma_or(Register rd, Register rs);
+ void ma_or(Register rd, Imm32 imm);
+ void ma_or(Register rd, Register rs, Imm32 imm);
+
+ // xor
+ void ma_xor(Register rd, Register rs);
+ void ma_xor(Register rd, Imm32 imm);
+ void ma_xor(Register rd, Register rs, Imm32 imm);
+
+ // word swap byte within halfwords
+ void ma_wsbh(Register rd, Register rt);
+
+ void ma_ctz(Register rd, Register rs);
+
+ // load
+ void ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(Register dest, const Address& address,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_load_unaligned(const wasm::MemoryAccessDesc& access, Register dest,
+ const BaseIndex& src, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // store
+ void ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store_unaligned(Register data, const Address& dest,
+ LoadStoreSize size = SizeWord);
+ void ma_store_unaligned(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord);
+ void ma_store_unaligned(const wasm::MemoryAccessDesc& access, Register data,
+ const BaseIndex& dest, Register temp,
+ LoadStoreSize size, LoadStoreExtension extension);
+
+ // arithmetic based ops
+ // add
+ void ma_addu(Register rd, Register rs, Imm32 imm);
+ void ma_addu(Register rd, Register rs);
+ void ma_addu(Register rd, Imm32 imm);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+ // subtract
+ void ma_subu(Register rd, Register rs, Imm32 imm);
+ void ma_subu(Register rd, Register rs);
+ void ma_subu(Register rd, Imm32 imm);
+ void ma_sub32TestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+  // multiplies. For now, there are only a few that we care about.
+ void ma_mul(Register rd, Register rs, Imm32 imm);
+ void ma_mul32TestOverflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_mul32TestOverflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rs, Register rt,
+ Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm,
+ Label* overflow);
+
+  // fast mod; uses scratch registers, and thus needs to be in the assembler.
+  // It implicitly assumes that we can overwrite dest at the beginning of the
+  // sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+ // branches when done from within mips-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, l, c, jumpKind);
+ }
+
+ void ma_b(Label* l, JumpKind jumpKind = LongJump);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+
+ void ma_sd(FloatRegister src, BaseIndex address);
+ void ma_ss(FloatRegister src, BaseIndex address);
+
+ void ma_ld(FloatRegister dest, const BaseIndex& src);
+ void ma_ls(FloatRegister dest, const BaseIndex& src);
+
+ // FP branches
+ void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+ void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+ // void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+ void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+ void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+
+ void moveToDoubleLo(Register src, FloatRegister dest) { as_mtc1(src, dest); }
+ void moveFromDoubleLo(FloatRegister src, Register dest) {
+ as_mfc1(dest, src);
+ }
+
+ void moveToFloat32(Register src, FloatRegister dest) { as_mtc1(src, dest); }
+ void moveFromFloat32(FloatRegister src, Register dest) { as_mfc1(dest, src); }
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Handle NaN specially if handleNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+
+ void loadDouble(const Address& addr, FloatRegister dest);
+ void loadDouble(const BaseIndex& src, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ void loadFloat32(const Address& addr, FloatRegister dest);
+ void loadFloat32(const BaseIndex& src, FloatRegister dest);
+
+ void outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+ void outOfLineWasmTruncateToInt64Check(FloatRegister input, Register64 output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+
+ protected:
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister output,
+ Register tmp);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MacroAssembler_mips_shared_h */
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
new file mode 100644
index 0000000000..5ea8f0b8de
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.cpp
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/mips-shared/MoveEmitter-mips-shared.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterMIPSShared::emit(const MoveResolver& moves) {
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ static_assert(SpillSlotSize == 8);
+ masm.reserveStack(moves.numCycles() * SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+ emit(moves.getMove(i));
+ }
+}
+
+Address MoveEmitterMIPSShared::cycleSlot(uint32_t slot,
+ uint32_t subslot) const {
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(Imm16::IsInSignedRange(offset));
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t MoveEmitterMIPSShared::getAdjustedOffset(const MoveOperand& operand) {
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer) {
+ return operand.disp();
+ }
+
+ // Adjust offset if stack pointer has been moved.
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address MoveEmitterMIPSShared::getAdjustedAddress(const MoveOperand& operand) {
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+Register MoveEmitterMIPSShared::tempReg() {
+ spilledReg_ = SecondScratchReg;
+ return SecondScratchReg;
+}
+
+void MoveEmitterMIPSShared::emitMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg()) {
+ masm.movePtr(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.loadPtr(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.storePtr(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+}
+
+void MoveEmitterMIPSShared::emitInt32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ // Second scratch register should not be moved by MoveEmitter.
+ MOZ_ASSERT(from.reg() != spilledReg_);
+
+ if (to.isGeneralReg()) {
+ masm.move32(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.store32(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.load32(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.load32(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), tempReg());
+ masm.store32(tempReg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+}
+
+void MoveEmitterMIPSShared::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ // Ensure that we can use ScratchFloat32Reg in memory move.
+ MOZ_ASSERT_IF(from.isFloatReg(), from.floatReg() != ScratchFloat32Reg);
+ MOZ_ASSERT_IF(to.isFloatReg(), to.floatReg() != ScratchFloat32Reg);
+
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+      // This should only be used when passing a float parameter in a1, a2, a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.moveFromFloat32(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+    // This should only be used when passing a float parameter in a1, a2, a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), ScratchFloat32Reg);
+ masm.storeFloat32(ScratchFloat32Reg, getAdjustedAddress(to));
+ }
+}
+
+void MoveEmitterMIPSShared::emit(const MoveOp& move) {
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+ // A fun consequence of aliased registers is you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterMIPSShared::assertDone() { MOZ_ASSERT(inCycle_ == 0); }
+
+void MoveEmitterMIPSShared::finish() {
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/mips-shared/MoveEmitter-mips-shared.h b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
new file mode 100644
index 0000000000..81dbaddc45
--- /dev/null
+++ b/js/src/jit/mips-shared/MoveEmitter-mips-shared.h
@@ -0,0 +1,73 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_MoveEmitter_mips_shared_h
+#define jit_mips_shared_MoveEmitter_mips_shared_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterMIPSShared {
+ protected:
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+ // Original stack push value.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
+ int32_t getAdjustedOffset(const MoveOperand& operand);
+ Address getAdjustedAddress(const MoveOperand& operand);
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ virtual void emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) = 0;
+ virtual void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ virtual void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot) = 0;
+ void emit(const MoveOp& move);
+
+ public:
+ MoveEmitterMIPSShared(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg) {}
+ ~MoveEmitterMIPSShared() { assertDone(); }
+ void emit(const MoveResolver& moves);
+ void finish();
+
+ void setScratchRegister(Register reg) {}
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_MoveEmitter_mips_shared_h */
diff --git a/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h b/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h
new file mode 100644
index 0000000000..8ae49aef5e
--- /dev/null
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared-inl.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_SharedICHelpers_mips_shared_inl_h
+#define jit_mips_shared_SharedICHelpers_mips_shared_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.subPtr(Imm32(argSize), scratch);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+ masm.addPtr(Imm32(argSize), scratch);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+
+ // The return address will be pushed by the VM wrapper, for compatibility
+ // with direct calls. Refer to the top of generateVMWrapper().
+ // ICTailCallReg (lr) already contains the return address (as we keep
+ // it there through the stub calls).
+ MOZ_ASSERT(ICTailCallReg == ra);
+
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.movePtr(StackPointer, FramePointer);
+ masm.Push(ICStubReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_SharedICHelpers_mips_shared_inl_h */
diff --git a/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
new file mode 100644
index 0000000000..979e4b0a42
--- /dev/null
+++ b/js/src/jit/mips-shared/SharedICHelpers-mips-shared.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_mips_shared_SharedICHelpers_mips_shared_h
+#define jit_mips_shared_SharedICHelpers_mips_shared_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on MIPS).
+static const size_t ICStackValueOffset = 0;
+
+struct BaselineStubFrame {
+ uintptr_t savedFrame;
+ uintptr_t savedStub;
+ uintptr_t returnAddress;
+ uintptr_t descriptor;
+};
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+ // No-op on MIPS because ra register is always holding the return address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+ // No-op on MIPS because ra register is always holding the return address.
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.branch(ra); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ masm.loadPtr(
+ Address(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP),
+ ICStubReg);
+ masm.movePtr(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.Pop(scratch2);
+ }
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On MIPS, $ra is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_mips_shared_SharedICHelpers_mips_shared_h */