author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit     26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree       f435a8308119effd964b339f76abb83a57c29483  /js/src/jit/loong64
parent     Initial commit. (diff)
download   firefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
           firefox-26a029d407be480d791972afb5975cf62c9360a6.zip

Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/jit/loong64')
-rw-r--r--  js/src/jit/loong64/Architecture-loong64.cpp      |   87
-rw-r--r--  js/src/jit/loong64/Architecture-loong64.h        |  522
-rw-r--r--  js/src/jit/loong64/Assembler-loong64.cpp         | 2478
-rw-r--r--  js/src/jit/loong64/Assembler-loong64.h           | 1889
-rw-r--r--  js/src/jit/loong64/CodeGenerator-loong64.cpp     | 2796
-rw-r--r--  js/src/jit/loong64/CodeGenerator-loong64.h       |  209
-rw-r--r--  js/src/jit/loong64/LIR-loong64.h                 |  408
-rw-r--r--  js/src/jit/loong64/Lowering-loong64.cpp          | 1100
-rw-r--r--  js/src/jit/loong64/Lowering-loong64.h            |  110
-rw-r--r--  js/src/jit/loong64/MacroAssembler-loong64-inl.h  | 2154
-rw-r--r--  js/src/jit/loong64/MacroAssembler-loong64.cpp    | 5555
-rw-r--r--  js/src/jit/loong64/MacroAssembler-loong64.h      | 1058
-rw-r--r--  js/src/jit/loong64/MoveEmitter-loong64.cpp       |  326
-rw-r--r--  js/src/jit/loong64/MoveEmitter-loong64.h         |   76
-rw-r--r--  js/src/jit/loong64/SharedICHelpers-loong64-inl.h |   84
-rw-r--r--  js/src/jit/loong64/SharedICHelpers-loong64.h     |   89
-rw-r--r--  js/src/jit/loong64/SharedICRegisters-loong64.h   |   42
-rw-r--r--  js/src/jit/loong64/Simulator-loong64.cpp         | 4721
-rw-r--r--  js/src/jit/loong64/Simulator-loong64.h           |  650
-rw-r--r--  js/src/jit/loong64/Trampoline-loong64.cpp        |  760
20 files changed, 25114 insertions, 0 deletions
diff --git a/js/src/jit/loong64/Architecture-loong64.cpp b/js/src/jit/loong64/Architecture-loong64.cpp
new file mode 100644
index 0000000000..6b1069a592
--- /dev/null
+++ b/js/src/jit/loong64/Architecture-loong64.cpp
@@ -0,0 +1,87 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/Architecture-loong64.h"
+
+#include "jit/FlushICache.h" // js::jit::FlushICache
+#include "jit/loong64/Simulator-loong64.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+Registers::Code Registers::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisters::Code FloatRegisters::FromName(const char* name) {
+ for (size_t i = 0; i < Total; i++) {
+ if (strcmp(GetName(i), name) == 0) {
+ return Code(i);
+ }
+ }
+
+ return Invalid;
+}
+
+FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ LiveFloatRegisterSet ret;
+ for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
+ ret.addUnchecked(FromCode((*iter).encoding()));
+ }
+ return ret.set();
+}
+
+uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return s.size() * sizeof(double);
+}
+
+uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ return encoding() * sizeof(double);
+}
+
+bool CPUFlagsHaveBeenComputed() {
+ // TODO(loong64): Add CPU flags support.
+ return true;
+}
+
+uint32_t GetLOONG64Flags() { return 0; }
+
+void FlushICache(void* code, size_t size) {
+#if defined(JS_SIMULATOR)
+ js::jit::SimulatorProcess::FlushICache(code, size);
+
+#elif defined(__GNUC__)
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code),
+ reinterpret_cast<char*>(end));
+
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+
+#endif
+}
+
+} // namespace jit
+} // namespace js
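
The FlushICache implementation above picks one of three strategies: the simulator's software icache, GCC's __builtin___clear_cache, and a cacheflush-style fallback. A minimal usage sketch (the patch helper below is hypothetical, not part of this commit): callers must flush after rewriting executable code so stale instructions are not fetched.

// Hypothetical sketch: patch one 4-byte instruction, then flush.
void patchOneInstruction(uint32_t* inst, uint32_t newEncoding) {
  *inst = newEncoding;                           // Rewrite the code bytes.
  js::jit::FlushICache(inst, sizeof(uint32_t));  // Drop stale icache lines.
}
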
diff --git a/js/src/jit/loong64/Architecture-loong64.h b/js/src/jit/loong64/Architecture-loong64.h
new file mode 100644
index 0000000000..48745ee37a
--- /dev/null
+++ b/js/src/jit/loong64/Architecture-loong64.h
@@ -0,0 +1,522 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_Architecture_loong64_h
+#define jit_loong64_Architecture_loong64_h
+
+#include "mozilla/MathAlgorithms.h"
+
+#include <algorithm>
+#include <iterator>
+
+#include "jit/shared/Architecture-shared.h"
+
+#include "js/Utility.h"
+
+namespace js {
+namespace jit {
+
+// LoongArch64 has 32 64-bit integer registers, r0 through r31.
+// The program counter is not accessible as a register.
+//
+// SIMD and scalar floating-point registers share a register bank.
+// Floating-point registers are f0 through f31.
+// 128-bit SIMD registers are vr0 through vr31.
+// For example, f0 is the bottom 64 bits of vr0.
+
+// LoongArch64 INT Register Convention:
+// Name Alias Usage
+// $r0 $zero Constant zero
+// $r1 $ra Return address
+// $r2 $tp TLS
+// $r3 $sp Stack pointer
+// $r4-$r11 $a0-$a7 Argument registers
+// $r4-$r5 $v0-$v1 Return values
+// $r12-$r20 $t0-$t8 Temporary registers
+// $r21 $x Reserved
+// $r22 $fp Frame pointer
+// $r23-$r31 $s0-$s8 Callee-saved registers
+
+// LoongArch64 FP Register Convention:
+// Name Alias Usage
+// $f0-$f7 $fa0-$fa7 Argument registers
+// $f0-$f1 $fv0-$fv1 Return values
+// $f8-$f23 $ft0-$ft15 Temporary registers
+// $f24-$f31 $fs0-$fs7 Callee-saved registers
+
+class Registers {
+ public:
+ enum RegisterID {
+ r0 = 0,
+ r1,
+ r2,
+ r3,
+ r4,
+ r5,
+ r6,
+ r7,
+ r8,
+ r9,
+ r10,
+ r11,
+ r12,
+ r13,
+ r14,
+ r15,
+ r16,
+ r17,
+ r18,
+ r19,
+ r20,
+ r21,
+ r22,
+ r23,
+ r24,
+ r25,
+ r26,
+ r27,
+ r28,
+ r29,
+ r30,
+ r31,
+ zero = r0,
+ ra = r1,
+ tp = r2,
+ sp = r3,
+ a0 = r4,
+ a1 = r5,
+ a2 = r6,
+ a3 = r7,
+ a4 = r8,
+ a5 = r9,
+ a6 = r10,
+ a7 = r11,
+ t0 = r12,
+ t1 = r13,
+ t2 = r14,
+ t3 = r15,
+ t4 = r16,
+ t5 = r17,
+ t6 = r18,
+ t7 = r19,
+ t8 = r20,
+ rx = r21,
+ fp = r22,
+ s0 = r23,
+ s1 = r24,
+ s2 = r25,
+ s3 = r26,
+ s4 = r27,
+ s5 = r28,
+ s6 = r29,
+ s7 = r30,
+ s8 = r31,
+ invalid_reg,
+ };
+ typedef uint8_t Code;
+ typedef RegisterID Encoding;
+ typedef uint32_t SetType;
+
+ static const Encoding StackPointer = sp;
+ static const Encoding Invalid = invalid_reg;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ uintptr_t r;
+ };
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 4, "SetType must be 32 bits");
+ return mozilla::CountPopulation32(x);
+ }
+ static uint32_t FirstBit(SetType x) {
+ return mozilla::CountTrailingZeroes32(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ return 31 - mozilla::CountLeadingZeroes32(x);
+ }
+
+ static const char* GetName(uint32_t code) {
+ static const char* const Names[] = {
+ "zero", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "rx",
+ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8"};
+ static_assert(Total == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t Total = 32;
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Allocatable =
+ 23; // No named special-function registers.
+
+ static const SetType AllMask = 0xFFFFFFFF;
+ static const SetType NoneMask = 0x0;
+
+ static const SetType ArgRegMask =
+ (1 << Registers::a0) | (1 << Registers::a1) | (1 << Registers::a2) |
+ (1 << Registers::a3) | (1 << Registers::a4) | (1 << Registers::a5) |
+ (1 << Registers::a6) | (1 << Registers::a7);
+
+ static const SetType VolatileMask =
+ (1 << Registers::a0) | (1 << Registers::a1) | (1 << Registers::a2) |
+ (1 << Registers::a3) | (1 << Registers::a4) | (1 << Registers::a5) |
+ (1 << Registers::a6) | (1 << Registers::a7) | (1 << Registers::t0) |
+ (1 << Registers::t1) | (1 << Registers::t2) | (1 << Registers::t3) |
+ (1 << Registers::t4) | (1 << Registers::t5) | (1 << Registers::t6);
+
+ // We use this constant to save registers when entering functions. This
+ // is why $ra is added here even though it is not "Non Volatile".
+ static const SetType NonVolatileMask =
+ (1 << Registers::ra) | (1 << Registers::fp) | (1 << Registers::s0) |
+ (1 << Registers::s1) | (1 << Registers::s2) | (1 << Registers::s3) |
+ (1 << Registers::s4) | (1 << Registers::s5) | (1 << Registers::s6) |
+ (1 << Registers::s7) | (1 << Registers::s8);
+
+ static const SetType NonAllocatableMask =
+      (1 << Registers::zero) |  // Always zero.
+      (1 << Registers::t7) |    // First scratch register.
+      (1 << Registers::t8) |    // Second scratch register.
+      (1 << Registers::rx) |    // Reserved register.
+ (1 << Registers::ra) | (1 << Registers::tp) | (1 << Registers::sp) |
+ (1 << Registers::fp);
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // Registers returned from a JS -> JS call.
+ static const SetType JSCallMask = (1 << Registers::a2);
+
+ // Registers returned from a JS -> C call.
+ static const SetType CallMask = (1 << Registers::a0);
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+};
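
Since a register set is a single 32-bit SetType mask, membership tests and iteration reduce to bit arithmetic. A small sketch using only the constants and helpers defined above (the comment values follow directly from the definitions in this class):

Registers::SetType alloc = Registers::AllocatableMask;  // AllMask & ~NonAllocatableMask
uint32_t count = Registers::SetSize(alloc);    // population count of the mask
uint32_t lowest = Registers::FirstBit(alloc);  // code of the lowest allocatable register
bool spAllocatable = (alloc >> Registers::sp) & 1;  // false: sp is non-allocatable
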
+
+// Smallest integer type that can hold a register bitmask.
+typedef uint32_t PackedRegisterMask;
+
+template <typename T>
+class TypedRegisterSet;
+
+class FloatRegisters {
+ public:
+ enum FPRegisterID {
+ f0 = 0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23, // Scratch register.
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ };
+
+ // Eight bits: (invalid << 7) | (kind << 5) | encoding
+ typedef uint8_t Code;
+ typedef FPRegisterID Encoding;
+ typedef uint64_t SetType;
+
+ enum Kind : uint8_t { Double, Single, NumTypes };
+
+ static constexpr Code Invalid = 0x80;
+
+ static const char* GetName(uint32_t code) {
+ static const char* const Names[] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+ static_assert(TotalPhys == std::size(Names), "Table is the correct size");
+ if (code >= Total) {
+ return "invalid";
+ }
+ return Names[code];
+ }
+
+ static Code FromName(const char* name);
+
+ static const uint32_t TotalPhys = 32;
+ static const uint32_t Total = TotalPhys * NumTypes;
+ static const uint32_t Allocatable = 31; // Without f23, the scratch register.
+
+ static_assert(sizeof(SetType) * 8 >= Total,
+ "SetType should be large enough to enumerate all registers.");
+
+  // Magic values used to duplicate a mask of physical registers for a
+  // specific kind of register. A multiplication copies and shifts the bits
+  // of the physical register mask.
+ static const SetType SpreadSingle = SetType(1)
+ << (uint32_t(Single) * TotalPhys);
+ static const SetType SpreadDouble = SetType(1)
+ << (uint32_t(Double) * TotalPhys);
+ static const SetType Spread = SpreadSingle | SpreadDouble;
+
+ static const SetType AllPhysMask = ((SetType(1) << TotalPhys) - 1);
+ static const SetType AllMask = AllPhysMask * Spread;
+ static const SetType AllSingleMask = AllPhysMask * SpreadSingle;
+ static const SetType AllDoubleMask = AllPhysMask * SpreadDouble;
+ static const SetType NoneMask = SetType(0);
+
+ // TODO(loong64): Much less than ARM64 here.
+ static const SetType NonVolatileMask =
+ SetType((1 << FloatRegisters::f24) | (1 << FloatRegisters::f25) |
+ (1 << FloatRegisters::f26) | (1 << FloatRegisters::f27) |
+ (1 << FloatRegisters::f28) | (1 << FloatRegisters::f29) |
+ (1 << FloatRegisters::f30) | (1 << FloatRegisters::f31)) *
+ Spread;
+
+ static const SetType VolatileMask = AllMask & ~NonVolatileMask;
+
+ static const SetType WrapperMask = VolatileMask;
+
+ // f23 is the scratch register.
+ static const SetType NonAllocatableMask =
+ (SetType(1) << FloatRegisters::f23) * Spread;
+
+ static const SetType AllocatableMask = AllMask & ~NonAllocatableMask;
+
+ // Content spilled during bailouts.
+ union RegisterContent {
+ float s;
+ double d;
+ };
+
+ static constexpr Encoding encoding(Code c) {
+ // assert() not available in constexpr function.
+ // assert(c < Total);
+ return Encoding(c & 31);
+ }
+
+ static constexpr Kind kind(Code c) {
+ // assert() not available in constexpr function.
+ // assert(c < Total && ((c >> 5) & 3) < NumTypes);
+ return Kind((c >> 5) & 3);
+ }
+
+ static constexpr Code fromParts(uint32_t encoding, uint32_t kind,
+ uint32_t invalid) {
+ return Code((invalid << 7) | (kind << 5) | encoding);
+ }
+};
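
Two encodings meet in this class: a Code packs (invalid << 7) | (kind << 5) | encoding into one byte, while a SetType holds one 32-register bank per Kind, and Spread duplicates a physical mask across both banks by multiplication. A worked sketch from the constants above (the comment values follow from Double == 0 and Single == 1):

// Round-trip the double view of f5 through a packed Code.
FloatRegisters::Code c =
    FloatRegisters::fromParts(FloatRegisters::f5, FloatRegisters::Double, 0);
// c == (0 << 7) | (0 << 5) | 5 == 5
FloatRegisters::Encoding e = FloatRegisters::encoding(c);  // f5: c & 31
FloatRegisters::Kind k = FloatRegisters::kind(c);          // Double: (c >> 5) & 3

// Spread copies a physical mask into both kind banks.
FloatRegisters::SetType both =
    (FloatRegisters::SetType(1) << FloatRegisters::f5) * FloatRegisters::Spread;
// both == (bit 5: double f5) | (bit 37: single f5)
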
+
+static const uint32_t SpillSlotSize =
+ std::max(sizeof(Registers::RegisterContent),
+ sizeof(FloatRegisters::RegisterContent));
+
+static const uint32_t ShadowStackSpace = 0;
+static const uint32_t SizeOfReturnAddressAfterCall = 0;
+
+// When our only strategy for far jumps is to encode the offset directly, and
+// not to insert any jump islands during assembly for even further jumps, the
+// architecture restricts us to -2^27 .. 2^27-4, to fit into a signed 28-bit
+// value. We further reduce this range to give the far-jump inserting code
+// some breathing room.
+static const uint32_t JumpImmediateRange = ((1 << 27) - (20 * 1024 * 1024));
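
The arithmetic behind the constant: b and bl carry a 26-bit immediate counting 4-byte instructions, i.e. a signed 28-bit byte offset in -2^27 .. 2^27-4 (roughly ±128 MiB), from which 20 MiB is set aside as headroom. A sketch of the identity:

static_assert(JumpImmediateRange ==
                  uint32_t(128 * 1024 * 1024) - uint32_t(20 * 1024 * 1024),
              "2^27 bytes minus 20 MiB of far-jump headroom");
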
+
+struct FloatRegister {
+ typedef FloatRegisters Codes;
+ typedef size_t Code;
+ typedef Codes::Encoding Encoding;
+ typedef Codes::SetType SetType;
+
+ static uint32_t SetSize(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType must be 64 bits");
+ x |= x >> FloatRegisters::TotalPhys;
+ x &= FloatRegisters::AllPhysMask;
+ return mozilla::CountPopulation32(x);
+ }
+
+ static uint32_t FirstBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType");
+ return mozilla::CountTrailingZeroes64(x);
+ }
+ static uint32_t LastBit(SetType x) {
+ static_assert(sizeof(SetType) == 8, "SetType");
+ return 63 - mozilla::CountLeadingZeroes64(x);
+ }
+
+ private:
+ // These fields only hold valid values: an invalid register is always
+ // represented as a valid encoding and kind with the invalid_ bit set.
+ uint8_t encoding_; // 32 encodings
+ uint8_t kind_; // Double, Single; more later
+ bool invalid_;
+
+ typedef Codes::Kind Kind;
+
+ public:
+ constexpr FloatRegister(Encoding encoding, Kind kind)
+ : encoding_(encoding), kind_(kind), invalid_(false) {
+ // assert(uint32_t(encoding) < Codes::TotalPhys);
+ }
+
+ constexpr FloatRegister()
+ : encoding_(0), kind_(FloatRegisters::Double), invalid_(true) {}
+
+ static FloatRegister FromCode(uint32_t i) {
+ MOZ_ASSERT(i < Codes::Total);
+ return FloatRegister(FloatRegisters::encoding(i), FloatRegisters::kind(i));
+ }
+
+ bool isSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Single;
+ }
+ bool isDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == FloatRegisters::Double;
+ }
+ bool isSimd128() const {
+ MOZ_ASSERT(!invalid_);
+ return false;
+ }
+ bool isInvalid() const { return invalid_; }
+
+ FloatRegister asSingle() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Single);
+ }
+ FloatRegister asDouble() const {
+ MOZ_ASSERT(!invalid_);
+ return FloatRegister(Encoding(encoding_), FloatRegisters::Double);
+ }
+ FloatRegister asSimd128() const { MOZ_CRASH(); }
+
+ constexpr uint32_t size() const {
+ MOZ_ASSERT(!invalid_);
+ if (kind_ == FloatRegisters::Double) {
+ return sizeof(double);
+ }
+ MOZ_ASSERT(kind_ == FloatRegisters::Single);
+ return sizeof(float);
+ }
+
+ constexpr Code code() const {
+ // assert(!invalid_);
+ return Codes::fromParts(encoding_, kind_, invalid_);
+ }
+
+ constexpr Encoding encoding() const {
+ MOZ_ASSERT(!invalid_);
+ return Encoding(encoding_);
+ }
+
+ const char* name() const { return FloatRegisters::GetName(code()); }
+ bool volatile_() const {
+ MOZ_ASSERT(!invalid_);
+ return !!((SetType(1) << code()) & FloatRegisters::VolatileMask);
+ }
+ constexpr bool operator!=(FloatRegister other) const {
+ return code() != other.code();
+ }
+ constexpr bool operator==(FloatRegister other) const {
+ return code() == other.code();
+ }
+
+ bool aliases(FloatRegister other) const {
+ return other.encoding_ == encoding_;
+ }
+  // Check whether two floating point registers' kinds are equivalent.
+ bool equiv(FloatRegister other) const {
+ MOZ_ASSERT(!invalid_);
+ return kind_ == other.kind_;
+ }
+
+ uint32_t numAliased() const { return Codes::NumTypes; }
+ uint32_t numAlignedAliased() { return numAliased(); }
+
+ FloatRegister aliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(!invalid_);
+ MOZ_ASSERT(aliasIdx < numAliased());
+ return FloatRegister(Encoding(encoding_),
+ Kind((aliasIdx + kind_) % numAliased()));
+ }
+ FloatRegister alignedAliased(uint32_t aliasIdx) {
+ MOZ_ASSERT(aliasIdx < numAliased());
+ return aliased(aliasIdx);
+ }
+ SetType alignedOrDominatedAliasedSet() const {
+ return Codes::Spread << encoding_;
+ }
+
+ static constexpr RegTypeName DefaultType = RegTypeName::Float64;
+
+ template <RegTypeName Name = DefaultType>
+ static SetType LiveAsIndexableSet(SetType s) {
+ return SetType(0);
+ }
+
+ template <RegTypeName Name = DefaultType>
+ static SetType AllocatableAsIndexableSet(SetType s) {
+    static_assert(Name != RegTypeName::Any, "Allocatable sets are not iterable");
+ return LiveAsIndexableSet<Name>(s);
+ }
+
+ static TypedRegisterSet<FloatRegister> ReduceSetForPush(
+ const TypedRegisterSet<FloatRegister>& s);
+ static uint32_t GetPushSizeInBytes(const TypedRegisterSet<FloatRegister>& s);
+ uint32_t getRegisterDumpOffsetInBytes();
+};
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float32>(SetType set) {
+ return set & FloatRegisters::AllSingleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Float64>(SetType set) {
+ return set & FloatRegisters::AllDoubleMask;
+}
+
+template <>
+inline FloatRegister::SetType
+FloatRegister::LiveAsIndexableSet<RegTypeName::Any>(SetType set) {
+ return set;
+}
+
+// LoongArch doesn't have double registers that cannot be treated as float32.
+inline bool hasUnaliasedDouble() { return false; }
+
+// LoongArch doesn't have double registers that alias multiple floats.
+inline bool hasMultiAlias() { return false; }
+
+uint32_t GetLOONG64Flags();
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_Architecture_loong64_h */
diff --git a/js/src/jit/loong64/Assembler-loong64.cpp b/js/src/jit/loong64/Assembler-loong64.cpp
new file mode 100644
index 0000000000..6c7a5f53da
--- /dev/null
+++ b/js/src/jit/loong64/Assembler-loong64.cpp
@@ -0,0 +1,2478 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/Assembler-loong64.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+
+#include "gc/Marking.h"
+#include "jit/AutoWritableJitCode.h"
+#include "jit/ExecutableAllocator.h"
+#include "vm/Realm.h"
+
+using mozilla::DebugOnly;
+
+using namespace js;
+using namespace js::jit;
+
+// Note this is used for inter-wasm calls and may pass arguments and results
+// in floating point registers even if the system ABI does not.
+
+// TODO(loong64): This is inconsistent with LoongArch's calling convention.
+// In the native convention, the first eight floating-point parameters are
+// passed in f0-f7, and any further floating-point parameters are passed the
+// same way as integer parameters. Here, the remaining floating-point
+// parameters are simply passed on the stack.
+ABIArg ABIArgGenerator::next(MIRType type) {
+ switch (type) {
+ case MIRType::Int32:
+ case MIRType::Int64:
+ case MIRType::Pointer:
+ case MIRType::WasmAnyRef:
+ case MIRType::StackResults: {
+ if (intRegIndex_ == NumIntArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(uintptr_t);
+ break;
+ }
+ current_ = ABIArg(Register::FromCode(intRegIndex_ + a0.encoding()));
+ intRegIndex_++;
+ break;
+ }
+ case MIRType::Float32:
+ case MIRType::Double: {
+ if (floatRegIndex_ == NumFloatArgRegs) {
+ current_ = ABIArg(stackOffset_);
+ stackOffset_ += sizeof(double);
+ break;
+ }
+ current_ = ABIArg(FloatRegister(
+ FloatRegisters::Encoding(floatRegIndex_ + f0.encoding()),
+ type == MIRType::Double ? FloatRegisters::Double
+ : FloatRegisters::Single));
+ floatRegIndex_++;
+ break;
+ }
+ case MIRType::Simd128: {
+ MOZ_CRASH("LoongArch does not support simd yet.");
+ break;
+ }
+ default:
+ MOZ_CRASH("Unexpected argument type");
+ }
+ return current_;
+}
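
A sketch of how the generator walks a signature such as (Int32, Double, Int64) under the convention above; the default-constructed generator state is assumed here, and the integer and float register counters advance independently:

ABIArgGenerator gen;
ABIArg arg0 = gen.next(MIRType::Int32);   // a0: first integer argument register
ABIArg arg1 = gen.next(MIRType::Double);  // f0, tagged as a Double register
ABIArg arg2 = gen.next(MIRType::Int64);   // a1: the double did not consume it
// Once a0-a7 (or f0-f7) run out, next() hands back growing stack offsets.
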
+
+// Encode a standard register when it is used as rd, rj, or the extra
+// register rk. These encoders should never be called with InvalidReg.
+uint32_t js::jit::RJ(Register r) {
+ MOZ_ASSERT(r != InvalidReg);
+ return r.encoding() << RJShift;
+}
+
+uint32_t js::jit::RK(Register r) {
+ MOZ_ASSERT(r != InvalidReg);
+ return r.encoding() << RKShift;
+}
+
+uint32_t js::jit::RD(Register r) {
+ MOZ_ASSERT(r != InvalidReg);
+ return r.encoding() << RDShift;
+}
+
+uint32_t js::jit::FJ(FloatRegister r) { return r.encoding() << RJShift; }
+
+uint32_t js::jit::FK(FloatRegister r) { return r.encoding() << RKShift; }
+
+uint32_t js::jit::FD(FloatRegister r) { return r.encoding() << RDShift; }
+
+uint32_t js::jit::FA(FloatRegister r) { return r.encoding() << FAShift; }
+
+uint32_t js::jit::SA2(uint32_t value) {
+ MOZ_ASSERT(value < 4);
+ return (value & SA2Mask) << SAShift;
+}
+
+uint32_t js::jit::SA3(uint32_t value) {
+ MOZ_ASSERT(value < 8);
+ return (value & SA3Mask) << SAShift;
+}
+
+Register js::jit::toRK(Instruction& i) {
+ return Register::FromCode((i.encode() & RKMask) >> RKShift);
+}
+
+Register js::jit::toRJ(Instruction& i) {
+ return Register::FromCode((i.encode() & RJMask) >> RJShift);
+}
+
+Register js::jit::toRD(Instruction& i) {
+ return Register::FromCode((i.encode() & RDMask) >> RDShift);
+}
+
+Register js::jit::toR(Instruction& i) {
+ return Register::FromCode(i.encode() & RegMask);
+}
+
+void InstImm::extractImm16(BOffImm16* dest) { *dest = BOffImm16(*this); }
+
+void AssemblerLOONG64::finish() {
+ MOZ_ASSERT(!isFinished);
+ isFinished = true;
+}
+
+bool AssemblerLOONG64::appendRawCode(const uint8_t* code, size_t numBytes) {
+ return m_buffer.appendRawCode(code, numBytes);
+}
+
+bool AssemblerLOONG64::reserve(size_t size) {
+ // This buffer uses fixed-size chunks so there's no point in reserving
+ // now vs. on-demand.
+ return !oom();
+}
+
+bool AssemblerLOONG64::swapBuffer(wasm::Bytes& bytes) {
+ // For now, specialize to the one use case. As long as wasm::Bytes is a
+ // Vector, not a linked-list of chunks, there's not much we can do other
+ // than copy.
+ MOZ_ASSERT(bytes.empty());
+ if (!bytes.resize(bytesNeeded())) {
+ return false;
+ }
+ m_buffer.executableCopy(bytes.begin());
+ return true;
+}
+
+void AssemblerLOONG64::copyJumpRelocationTable(uint8_t* dest) {
+ if (jumpRelocations_.length()) {
+ memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length());
+ }
+}
+
+void AssemblerLOONG64::copyDataRelocationTable(uint8_t* dest) {
+ if (dataRelocations_.length()) {
+ memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length());
+ }
+}
+
+AssemblerLOONG64::Condition AssemblerLOONG64::InvertCondition(Condition cond) {
+ switch (cond) {
+ case Equal:
+ return NotEqual;
+ case NotEqual:
+ return Equal;
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ case LessThan:
+ return GreaterThanOrEqual;
+ case LessThanOrEqual:
+ return GreaterThan;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return BelowOrEqual;
+ case AboveOrEqual:
+ return Below;
+ case Below:
+ return AboveOrEqual;
+ case BelowOrEqual:
+ return Above;
+ case Signed:
+ return NotSigned;
+ case NotSigned:
+ return Signed;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerLOONG64::DoubleCondition AssemblerLOONG64::InvertCondition(
+ DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+AssemblerLOONG64::Condition AssemblerLOONG64::InvertCmpCondition(
+ Condition cond) {
+ switch (cond) {
+ case Equal:
+ case NotEqual:
+ return cond;
+ case LessThan:
+ return GreaterThan;
+ case LessThanOrEqual:
+ return GreaterThanOrEqual;
+ case GreaterThan:
+ return LessThanOrEqual;
+ case GreaterThanOrEqual:
+ return LessThan;
+ case Above:
+ return Below;
+ case AboveOrEqual:
+ return BelowOrEqual;
+ case Below:
+ return Above;
+ case BelowOrEqual:
+ return AboveOrEqual;
+ default:
+ MOZ_CRASH("no meaningful swapped-operand condition");
+ }
+}
+
+BOffImm16::BOffImm16(InstImm inst)
+ : data((inst.encode() >> Imm16Shift) & Imm16Mask) {}
+
+Instruction* BOffImm16::getDest(Instruction* src) const {
+ return &src[(((int32_t)data << 16) >> 16) + 1];
+}
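
getDest recovers a signed instruction displacement from the 16-bit field with the shift-left/arithmetic-shift-right idiom (the +1 mirrors the bias applied when a BOffImm16 is encoded, defined elsewhere in this port). A minimal sketch of the sign-extension step:

// Sign-extend a 16-bit value held in the low bits of a uint32_t.
int32_t signExtend16(uint32_t data) {
  return ((int32_t)data << 16) >> 16;  // 0xFFFF -> -1, 0x7FFF -> 32767
}
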
+
+bool AssemblerLOONG64::oom() const {
+ return AssemblerShared::oom() || m_buffer.oom() || jumpRelocations_.oom() ||
+ dataRelocations_.oom();
+}
+
+// Size of the instruction stream, in bytes.
+size_t AssemblerLOONG64::size() const { return m_buffer.size(); }
+
+// Size of the relocation table, in bytes.
+size_t AssemblerLOONG64::jumpRelocationTableBytes() const {
+ return jumpRelocations_.length();
+}
+
+size_t AssemblerLOONG64::dataRelocationTableBytes() const {
+ return dataRelocations_.length();
+}
+
+// Total size of the code plus the relocation tables, in bytes.
+size_t AssemblerLOONG64::bytesNeeded() const {
+ return size() + jumpRelocationTableBytes() + dataRelocationTableBytes();
+}
+
+// Write a blob of binary data into the instruction stream.
+BufferOffset AssemblerLOONG64::writeInst(uint32_t x, uint32_t* dest) {
+ MOZ_ASSERT(hasCreator());
+ if (dest == nullptr) {
+ return m_buffer.putInt(x);
+ }
+
+ WriteInstStatic(x, dest);
+ return BufferOffset();
+}
+
+void AssemblerLOONG64::WriteInstStatic(uint32_t x, uint32_t* dest) {
+ MOZ_ASSERT(dest != nullptr);
+ *dest = x;
+}
+
+BufferOffset AssemblerLOONG64::haltingAlign(int alignment) {
+ // TODO(loong64): Implement a proper halting align.
+ return nopAlign(alignment);
+}
+
+BufferOffset AssemblerLOONG64::nopAlign(int alignment) {
+ BufferOffset ret;
+ MOZ_ASSERT(m_buffer.isAligned(4));
+ if (alignment == 8) {
+ if (!m_buffer.isAligned(alignment)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned()) {
+ ret = tmp;
+ }
+ }
+ } else {
+ MOZ_ASSERT((alignment & (alignment - 1)) == 0);
+ while (size() & (alignment - 1)) {
+ BufferOffset tmp = as_nop();
+ if (!ret.assigned()) {
+ ret = tmp;
+ }
+ }
+ }
+ return ret;
+}
+
+// Logical operations.
+BufferOffset AssemblerLOONG64::as_and(Register rd, Register rj, Register rk) {
+ spew("and %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_and, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_or(Register rd, Register rj, Register rk) {
+ spew("or %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_or, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_xor(Register rd, Register rj, Register rk) {
+ spew("xor %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_xor, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_nor(Register rd, Register rj, Register rk) {
+ spew("nor %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_nor, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_andn(Register rd, Register rj, Register rk) {
+ spew("andn %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_andn, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_orn(Register rd, Register rj, Register rk) {
+ spew("orn %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_orn, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_andi(Register rd, Register rj, int32_t ui12) {
+ MOZ_ASSERT(is_uintN(ui12, 12));
+ spew("andi %3s,%3s,0x%x", rd.name(), rj.name(), ui12);
+ return writeInst(InstImm(op_andi, ui12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ori(Register rd, Register rj, int32_t ui12) {
+ MOZ_ASSERT(is_uintN(ui12, 12));
+ spew("ori %3s,%3s,0x%x", rd.name(), rj.name(), ui12);
+ return writeInst(InstImm(op_ori, ui12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_xori(Register rd, Register rj, int32_t ui12) {
+ MOZ_ASSERT(is_uintN(ui12, 12));
+ spew("xori %3s,%3s,0x%x", rd.name(), rj.name(), ui12);
+ return writeInst(InstImm(op_xori, ui12, rj, rd, 12).encode());
+}
+
+// Branch and jump instructions
+BufferOffset AssemblerLOONG64::as_b(JOffImm26 off) {
+ spew("b %d", off.decode());
+ return writeInst(InstJump(op_b, off).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bl(JOffImm26 off) {
+ spew("bl %d", off.decode());
+ return writeInst(InstJump(op_bl, off).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_jirl(Register rd, Register rj,
+ BOffImm16 off) {
+ spew("jirl %3s, %3s, %d", rd.name(), rj.name(), off.decode());
+ return writeInst(InstImm(op_jirl, off, rj, rd).encode());
+}
+
+InstImm AssemblerLOONG64::getBranchCode(JumpOrCall jumpOrCall) {
+ // jirl or beq
+ if (jumpOrCall == BranchIsCall) {
+ return InstImm(op_jirl, BOffImm16(0), zero, ra);
+ }
+
+ return InstImm(op_beq, BOffImm16(0), zero, zero);
+}
+
+InstImm AssemblerLOONG64::getBranchCode(Register rj, Register rd, Condition c) {
+ // beq, bne
+ MOZ_ASSERT(c == AssemblerLOONG64::Equal || c == AssemblerLOONG64::NotEqual);
+ return InstImm(c == AssemblerLOONG64::Equal ? op_beq : op_bne, BOffImm16(0),
+ rj, rd);
+}
+
+InstImm AssemblerLOONG64::getBranchCode(Register rj, Condition c) {
+ // beq, bne, blt, bge
+ switch (c) {
+ case AssemblerLOONG64::Equal:
+ case AssemblerLOONG64::Zero:
+ case AssemblerLOONG64::BelowOrEqual:
+ return InstImm(op_beq, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::NotEqual:
+ case AssemblerLOONG64::NonZero:
+ case AssemblerLOONG64::Above:
+ return InstImm(op_bne, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::GreaterThan:
+ return InstImm(op_blt, BOffImm16(0), zero, rj);
+ case AssemblerLOONG64::GreaterThanOrEqual:
+ case AssemblerLOONG64::NotSigned:
+ return InstImm(op_bge, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::LessThan:
+ case AssemblerLOONG64::Signed:
+ return InstImm(op_blt, BOffImm16(0), rj, zero);
+ case AssemblerLOONG64::LessThanOrEqual:
+ return InstImm(op_bge, BOffImm16(0), zero, rj);
+ default:
+ MOZ_CRASH("Condition not supported.");
+ }
+}
+
+// Code semantics must conform to compareFloatingPoint.
+InstImm AssemblerLOONG64::getBranchCode(FPConditionBit cj) {
+ return InstImm(op_bcz, 0, cj, true); // bcnez
+}
+
+// Arithmetic instructions
+BufferOffset AssemblerLOONG64::as_add_w(Register rd, Register rj, Register rk) {
+ spew("add_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_add_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_add_d(Register rd, Register rj, Register rk) {
+ spew("add_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_add_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sub_w(Register rd, Register rj, Register rk) {
+ spew("sub_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sub_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sub_d(Register rd, Register rj, Register rk) {
+ spew("sub_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sub_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_addi_w(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("addi_w %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_addi_w, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_addi_d(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("addi_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_addi_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_addu16i_d(Register rd, Register rj,
+ int32_t si16) {
+ MOZ_ASSERT(Imm16::IsInSignedRange(si16));
+ spew("addu16i_d %3s,%3s,0x%x", rd.name(), rj.name(), si16);
+ return writeInst(InstImm(op_addu16i_d, Imm16(si16), rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_alsl_w(Register rd, Register rj, Register rk,
+ uint32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+ spew("alsl_w %3s,%3s,0x%x", rd.name(), rj.name(), sa2);
+ return writeInst(InstReg(op_alsl_w, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_alsl_wu(Register rd, Register rj, Register rk,
+ uint32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+ spew("alsl_wu %3s,%3s,0x%x", rd.name(), rj.name(), sa2);
+ return writeInst(InstReg(op_alsl_wu, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_alsl_d(Register rd, Register rj, Register rk,
+ uint32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+ spew("alsl_d %3s,%3s,%3s,0x%x", rd.name(), rj.name(), rk.name(), sa2);
+ return writeInst(InstReg(op_alsl_d, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_lu12i_w(Register rd, int32_t si20) {
+ spew("lu12i_w %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_lu12i_w, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_lu32i_d(Register rd, int32_t si20) {
+ spew("lu32i_d %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_lu32i_d, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_lu52i_d(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_uintN(si12, 12));
+ spew("lu52i_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_lu52i_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slt(Register rd, Register rj, Register rk) {
+ spew("slt %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_slt, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sltu(Register rd, Register rj, Register rk) {
+ spew("sltu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sltu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slti(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("slti %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_slti, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sltui(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("sltui %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_sltui, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcaddi(Register rd, int32_t si20) {
+ spew("pcaddi %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcaddi, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcaddu12i(Register rd, int32_t si20) {
+ spew("pcaddu12i %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcaddu12i, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcaddu18i(Register rd, int32_t si20) {
+ spew("pcaddu18i %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcaddu18i, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_pcalau12i(Register rd, int32_t si20) {
+ spew("pcalau12i %3s,0x%x", rd.name(), si20);
+ return writeInst(InstImm(op_pcalau12i, si20, rd, false).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mul_w(Register rd, Register rj, Register rk) {
+ spew("mul_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mul_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_w(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_wu(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mul_d(Register rd, Register rj, Register rk) {
+ spew("mul_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mul_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_d(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulh_du(Register rd, Register rj,
+ Register rk) {
+ spew("mulh_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulh_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulw_d_w(Register rd, Register rj,
+ Register rk) {
+ spew("mulw_d_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulw_d_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mulw_d_wu(Register rd, Register rj,
+ Register rk) {
+ spew("mulw_d_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mulw_d_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_w(Register rd, Register rj, Register rk) {
+ spew("div_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_w(Register rd, Register rj, Register rk) {
+ spew("mod_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_wu(Register rd, Register rj,
+ Register rk) {
+ spew("div_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_wu(Register rd, Register rj,
+ Register rk) {
+ spew("mod_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_d(Register rd, Register rj, Register rk) {
+ spew("div_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_d(Register rd, Register rj, Register rk) {
+ spew("mod_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_div_du(Register rd, Register rj,
+ Register rk) {
+ spew("div_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_div_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_mod_du(Register rd, Register rj,
+ Register rk) {
+ spew("mod_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_mod_du, rk, rj, rd).encode());
+}
+
+// Shift instructions
+BufferOffset AssemblerLOONG64::as_sll_w(Register rd, Register rj, Register rk) {
+ spew("sll_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sll_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srl_w(Register rd, Register rj, Register rk) {
+ spew("srl_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_srl_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sra_w(Register rd, Register rj, Register rk) {
+ spew("sra_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sra_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotr_w(Register rd, Register rj,
+ Register rk) {
+ spew("rotr_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_rotr_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slli_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("slli_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_slli_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srli_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("srli_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_srli_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srai_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("srai_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_srai_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotri_w(Register rd, Register rj,
+ int32_t ui5) {
+ MOZ_ASSERT(is_uintN(ui5, 5));
+ spew("rotri_w %3s,%3s,0x%x", rd.name(), rj.name(), ui5);
+ return writeInst(InstImm(op_rotri_w, ui5, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sll_d(Register rd, Register rj, Register rk) {
+ spew("sll_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sll_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srl_d(Register rd, Register rj, Register rk) {
+ spew("srl_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_srl_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sra_d(Register rd, Register rj, Register rk) {
+ spew("sra_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_sra_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotr_d(Register rd, Register rj,
+ Register rk) {
+ spew("rotr_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_rotr_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_slli_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("slli_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_slli_d, ui6, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srli_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("srli_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_srli_d, ui6, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_srai_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("srai_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_srai_d, ui6, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_rotri_d(Register rd, Register rj,
+ int32_t ui6) {
+ MOZ_ASSERT(is_uintN(ui6, 6));
+ spew("rotri_d %3s,%3s,0x%x", rd.name(), rj.name(), ui6);
+ return writeInst(InstImm(op_rotri_d, ui6, rj, rd, 6).encode());
+}
+
+// Bit operation instructions
+BufferOffset AssemblerLOONG64::as_ext_w_b(Register rd, Register rj) {
+ spew("ext_w_b %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ext_w_b, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ext_w_h(Register rd, Register rj) {
+ spew("ext_w_h %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ext_w_h, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clo_w(Register rd, Register rj) {
+ spew("clo_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clo_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clz_w(Register rd, Register rj) {
+ spew("clz_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clz_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_cto_w(Register rd, Register rj) {
+ spew("cto_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_cto_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ctz_w(Register rd, Register rj) {
+ spew("ctz_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ctz_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clo_d(Register rd, Register rj) {
+ spew("clo_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clo_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_clz_d(Register rd, Register rj) {
+ spew("clz_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_clz_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_cto_d(Register rd, Register rj) {
+ spew("cto_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_cto_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ctz_d(Register rd, Register rj) {
+ spew("ctz_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_ctz_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bytepick_w(Register rd, Register rj,
+ Register rk, int32_t sa2) {
+ MOZ_ASSERT(sa2 < 4);
+ spew("bytepick_w %3s,%3s,%3s, 0x%x", rd.name(), rj.name(), rk.name(), sa2);
+ return writeInst(InstReg(op_bytepick_w, sa2, rk, rj, rd, 2).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bytepick_d(Register rd, Register rj,
+ Register rk, int32_t sa3) {
+ MOZ_ASSERT(sa3 < 8);
+ spew("bytepick_d %3s,%3s,%3s, 0x%x", rd.name(), rj.name(), rk.name(), sa3);
+ return writeInst(InstReg(op_bytepick_d, sa3, rk, rj, rd, 3).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_2h(Register rd, Register rj) {
+ spew("revb_2h %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_2h, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_4h(Register rd, Register rj) {
+ spew("revb_4h %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_4h, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_2w(Register rd, Register rj) {
+ spew("revb_2w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_2w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revb_d(Register rd, Register rj) {
+ spew("revb_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revb_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revh_2w(Register rd, Register rj) {
+ spew("revh_2w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revh_2w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_revh_d(Register rd, Register rj) {
+ spew("revh_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_revh_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_4b(Register rd, Register rj) {
+ spew("bitrev_4b %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_4b, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_8b(Register rd, Register rj) {
+ spew("bitrev_8b %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_8b, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_w(Register rd, Register rj) {
+ spew("bitrev_w %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_w, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bitrev_d(Register rd, Register rj) {
+ spew("bitrev_d %3s,%3s", rd.name(), rj.name());
+ return writeInst(InstReg(op_bitrev_d, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrins_w(Register rd, Register rj,
+ int32_t msbw, int32_t lsbw) {
+ MOZ_ASSERT(lsbw <= msbw);
+ spew("bstrins_w %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbw, lsbw);
+ return writeInst(InstImm(op_bstr_w, msbw, lsbw, rj, rd, 5).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrins_d(Register rd, Register rj,
+ int32_t msbd, int32_t lsbd) {
+ MOZ_ASSERT(lsbd <= msbd);
+ spew("bstrins_d %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbd, lsbd);
+ return writeInst(InstImm(op_bstrins_d, msbd, lsbd, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrpick_w(Register rd, Register rj,
+ int32_t msbw, int32_t lsbw) {
+ MOZ_ASSERT(lsbw <= msbw);
+ spew("bstrpick_w %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbw, lsbw);
+ return writeInst(InstImm(op_bstr_w, msbw, lsbw, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_bstrpick_d(Register rd, Register rj,
+ int32_t msbd, int32_t lsbd) {
+ MOZ_ASSERT(lsbd <= msbd);
+ spew("bstrpick_d %3s,%3s,0x%x,0x%x", rd.name(), rj.name(), msbd, lsbd);
+ return writeInst(InstImm(op_bstrpick_d, msbd, lsbd, rj, rd, 6).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_maskeqz(Register rd, Register rj,
+ Register rk) {
+ spew("maskeqz %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_maskeqz, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_masknez(Register rd, Register rj,
+ Register rk) {
+ spew("masknez %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_masknez, rk, rj, rd).encode());
+}
+
+// Load and store instructions
+BufferOffset AssemblerLOONG64::as_ld_b(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_b %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_b, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_h(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_h %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_h, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_w(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_w %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_w, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_d(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_bu(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_bu %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_bu, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_hu(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_hu %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_hu, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ld_wu(Register rd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("ld_wu %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_ld_wu, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_b(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_b %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_b, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_h(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_h %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_h, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_w(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_w %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_w, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_st_d(Register rd, Register rj, int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("st_d %3s,%3s,0x%x", rd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_st_d, si12, rj, rd, 12).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_b(Register rd, Register rj, Register rk) {
+ spew("ldx_b %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_b, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_h(Register rd, Register rj, Register rk) {
+ spew("ldx_h %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_h, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_w(Register rd, Register rj, Register rk) {
+ spew("ldx_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_d(Register rd, Register rj, Register rk) {
+ spew("ldx_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_bu(Register rd, Register rj,
+ Register rk) {
+ spew("ldx_bu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_bu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_hu(Register rd, Register rj,
+ Register rk) {
+ spew("ldx_hu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_hu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldx_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ldx_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ldx_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_b(Register rd, Register rj, Register rk) {
+ spew("stx_b %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_b, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_h(Register rd, Register rj, Register rk) {
+ spew("stx_h %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_h, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_w(Register rd, Register rj, Register rk) {
+ spew("stx_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stx_d(Register rd, Register rj, Register rk) {
+ spew("stx_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_stx_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldptr_w(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("ldptr_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_ldptr_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ldptr_d(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("ldptr_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_ldptr_d, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stptr_w(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("stptr_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_stptr_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_stptr_d(Register rd, Register rj,
+ int32_t si14) {
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ spew("stptr_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ return writeInst(InstImm(op_stptr_d, si14 >> 2, rj, rd, 14).encode());
+}
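
The ldptr/stptr family accepts a 16-bit byte offset but stores only 14 bits, scaled by 4: the asserts above require a 4-byte-aligned offset in the signed 16-bit range, and si14 >> 2 is what lands in the instruction field. A sketch (masm stands for any assembler instance and is hypothetical here):

// 0x7ffc is 4-byte aligned and within +/-32 KiB, so it encodes as
// 0x7ffc >> 2 == 0x1fff in the 14-bit field.
masm.as_ldptr_d(a0, sp, 0x7ffc);
// masm.as_ldptr_d(a0, sp, 0x7ffe);  // Would assert: not 4-byte aligned.
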
+
+BufferOffset AssemblerLOONG64::as_preld(int32_t hint, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("preld 0x%x,%3s,0x%x", hint, rj.name(), si12);
+ return writeInst(InstImm(op_preld, si12, rj, hint).encode());
+}
+
+// Atomic instructions
+BufferOffset AssemblerLOONG64::as_amswap_w(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amswap_d(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_w(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_d(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_w(Register rd, Register rj,
+ Register rk) {
+ spew("amand_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_d(Register rd, Register rj,
+ Register rk) {
+ spew("amand_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_w(Register rd, Register rj,
+ Register rk) {
+ spew("amor_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_d(Register rd, Register rj,
+ Register rk) {
+ spew("amor_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_w(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_d(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amswap_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amswap_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amswap_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amswap_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amadd_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amadd_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amadd_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amand_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amand_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amand_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amand_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amor_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amor_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amor_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amor_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_amxor_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("amxor_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_amxor_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_w(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_w %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_w, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_d(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_d %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_d, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammax_db_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammax_db_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammax_db_du, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_wu(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_wu %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_wu, rk, rj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ammin_db_du(Register rd, Register rj,
+ Register rk) {
+ spew("ammin_db_du %3s,%3s,%3s", rd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_ammin_db_du, rk, rj, rd).encode());
+}
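+
+// Semantics of the am* family above, per the LoongArch reference manual:
+// each instruction atomically loads the old word/doubleword at [rj] into
+// rd and stores op(old, rk) back, where op is swap/add/and/or/xor/max/min
+// (the *u forms compare unsigned). The _db variants additionally provide
+// data-barrier ordering; the plain forms guarantee atomicity only.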
+
+BufferOffset AssemblerLOONG64::as_ll_w(Register rd, Register rj, int32_t si14) {
+ spew("ll_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_ll_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ll_d(Register rd, Register rj, int32_t si14) {
+ spew("ll_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_ll_d, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sc_w(Register rd, Register rj, int32_t si14) {
+ spew("sc_w %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_sc_w, si14 >> 2, rj, rd, 14).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_sc_d(Register rd, Register rj, int32_t si14) {
+ spew("sc_d %3s,%3s,0x%x", rd.name(), rj.name(), si14);
+ MOZ_ASSERT(is_intN(si14, 16) && ((si14 & 0x3) == 0));
+ return writeInst(InstImm(op_sc_d, si14 >> 2, rj, rd, 14).encode());
+}
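+
+// ll/sc form the load-linked/store-conditional pair. A typical atomic
+// read-modify-write loop built on them looks like this in LoongArch
+// assembly (a minimal sketch; labels and registers are illustrative):
+//   retry:
+//     ll.w   $t0, $a0, 0     # load-linked, start the reservation
+//     add.w  $t1, $t0, $a1   # compute the updated value
+//     sc.w   $t1, $a0, 0     # store-conditional; $t1 <- 1 on success
+//     beqz   $t1, retry      # reservation lost, retry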
+
+// Barrier instructions
+BufferOffset AssemblerLOONG64::as_dbar(int32_t hint) {
+ MOZ_ASSERT(is_uintN(hint, 15));
+ spew("dbar 0x%x", hint);
+ return writeInst(InstImm(op_dbar, hint).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ibar(int32_t hint) {
+ MOZ_ASSERT(is_uintN(hint, 15));
+ spew("ibar 0x%x", hint);
+ return writeInst(InstImm(op_ibar, hint).encode());
+}
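+
+// The 15-bit hint selects the barrier strength; hint 0 is the conservative
+// full barrier and is the safe default (illustrative):
+//   dbar 0    # full data barrier: orders all earlier and later accesses
+//   ibar 0    # instruction-fetch barrier, e.g. after patching code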
+
+/* =============================================================== */
+
+// FP Arithmetic instructions
+BufferOffset AssemblerLOONG64::as_fadd_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fadd_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fadd_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fadd_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fadd_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fadd_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsub_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fsub_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fsub_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsub_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fsub_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fsub_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmul_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmul_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmul_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmul_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmul_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmul_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fdiv_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fdiv_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fdiv_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fdiv_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fdiv_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fdiv_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmadd_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmadd_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmadd_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmadd_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmadd_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmadd_d, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmsub_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmsub_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmsub_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmsub_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fmsub_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fmsub_d, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmadd_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmadd_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmadd_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmadd_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmadd_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmadd_d, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmsub_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmsub_s %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmsub_s, fa, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fnmsub_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FloatRegister fa) {
+ spew("fnmsub_d %3s,%3s,%3s,%3s", fd.name(), fj.name(), fk.name(),
+ fa.name());
+ return writeInst(InstReg(op_fnmsub_d, fa, fk, fj, fd).encode());
+}
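+
+// The fused operations above compute, with a single rounding step:
+//   fmadd:  fd = fj * fk + fa       fmsub:  fd = fj * fk - fa
+//   fnmadd: fd = -(fj * fk + fa)    fnmsub: fd = -(fj * fk - fa)
+// Per the LoongArch reference manual, the negated forms negate the entire
+// fused result, not just the product.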
+
+BufferOffset AssemblerLOONG64::as_fmax_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmax_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmax_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmax_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmax_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmax_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmin_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmin_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmin_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmin_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmin_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmin_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmaxa_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmaxa_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmaxa_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmaxa_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmaxa_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmaxa_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmina_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmina_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmina_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmina_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk) {
+ spew("fmina_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fmina_d, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fabs_s(FloatRegister fd, FloatRegister fj) {
+ spew("fabs_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fabs_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fabs_d(FloatRegister fd, FloatRegister fj) {
+ spew("fabs_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fabs_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fneg_s(FloatRegister fd, FloatRegister fj) {
+ spew("fneg_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fneg_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fneg_d(FloatRegister fd, FloatRegister fj) {
+ spew("fneg_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fneg_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsqrt_s(FloatRegister fd, FloatRegister fj) {
+ spew("fsqrt_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fsqrt_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsqrt_d(FloatRegister fd, FloatRegister fj) {
+ spew("fsqrt_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fsqrt_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fcopysign_s(FloatRegister fd,
+ FloatRegister fj,
+ FloatRegister fk) {
+ spew("fcopysign_s %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fcopysign_s, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fcopysign_d(FloatRegister fd,
+ FloatRegister fj,
+ FloatRegister fk) {
+ spew("fcopysign_d %3s,%3s,%3s", fd.name(), fj.name(), fk.name());
+ return writeInst(InstReg(op_fcopysign_d, fk, fj, fd).encode());
+}
+
+// FP compare instructions
+// fcmp.cond.s and fcmp.cond.d instructions
+BufferOffset AssemblerLOONG64::as_fcmp_cor(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cor_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, COR, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cor_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, COR, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_ceq(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_ceq_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CEQ, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_ceq_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CEQ, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cne(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cne_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CNE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cne_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CNE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cle(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cle_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CLE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cle_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CLE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_clt(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_clt_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CLT, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_clt_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CLT, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cun(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cun_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CUN, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cun_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CUN, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cueq(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cueq_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CUEQ, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cueq_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CUEQ, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cune(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cune_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CUNE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cune_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CUNE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cule(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cule_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CULE, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cule_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CULE, fk, fj, cd).encode());
+ }
+}
+
+BufferOffset AssemblerLOONG64::as_fcmp_cult(FloatFormat fmt, FloatRegister fj,
+ FloatRegister fk,
+ FPConditionBit cd) {
+ if (fmt == DoubleFloat) {
+ spew("fcmp_cult_d FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_d, CULT, fk, fj, cd).encode());
+ } else {
+ spew("fcmp_cult_s FCC%d,%3s,%3s", cd, fj.name(), fk.name());
+ return writeInst(InstReg(op_fcmp_cond_s, CULT, fk, fj, cd).encode());
+ }
+}
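+
+// The fcmp.cond.{s,d} forms above write their result into one of the eight
+// FCC condition-flag registers rather than a GPR. A typical sequence that
+// compares and then branches (illustrative):
+//   fcmp.clt.d $fcc0, $fa0, $fa1   # $fcc0 <- ($fa0 < $fa1)
+//   bcnez      $fcc0, taken        # branch if the flag is set
+// All conditions emitted here are the quiet ("C") variants, which do not
+// raise an invalid-operation exception on quiet NaN operands.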
+
+// FP conversion instructions
+BufferOffset AssemblerLOONG64::as_fcvt_s_d(FloatRegister fd, FloatRegister fj) {
+ spew("fcvt_s_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fcvt_s_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fcvt_d_s(FloatRegister fd, FloatRegister fj) {
+ spew("fcvt_d_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fcvt_d_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_s_w(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_s_w %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_s_w, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_s_l(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_s_l %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_s_l, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_d_w(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_d_w %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_d_w, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ffint_d_l(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ffint_d_l %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ffint_d_l, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftint_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftint_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftint_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrm_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrm_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrm_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrp_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrp_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrp_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrz_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrz_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrz_l_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrne_w_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_w_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_w_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrne_w_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_w_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_w_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrne_l_s(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_l_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_l_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_ftintrne_l_d(FloatRegister fd,
+ FloatRegister fj) {
+ spew("ftintrne_l_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_ftintrne_l_d, fj, fd).encode());
+}
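+
+// Rounding-mode suffixes used by the ftint* emitters above:
+//   (none)  current FCSR rounding mode
+//   rm      toward minus infinity (floor)
+//   rp      toward plus infinity (ceil)
+//   rz      toward zero (truncate)
+//   rne     to nearest, ties to even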
+
+BufferOffset AssemblerLOONG64::as_frint_s(FloatRegister fd, FloatRegister fj) {
+ spew("frint_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_frint_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_frint_d(FloatRegister fd, FloatRegister fj) {
+ spew("frint_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_frint_d, fj, fd).encode());
+}
+
+// FP mov instructions
+BufferOffset AssemblerLOONG64::as_fmov_s(FloatRegister fd, FloatRegister fj) {
+ spew("fmov_s %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fmov_s, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fmov_d(FloatRegister fd, FloatRegister fj) {
+ spew("fmov_d %3s,%3s", fd.name(), fj.name());
+ return writeInst(InstReg(op_fmov_d, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fsel(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk, FPConditionBit ca) {
+ spew("fsel %3s,%3s,%3s,%d", fd.name(), fj.name(), fk.name(), ca);
+ return writeInst(InstReg(op_fsel, ca, fk, fj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2fr_w(FloatRegister fd, Register rj) {
+ spew("movgr2fr_w %3s,%3s", fd.name(), rj.name());
+ return writeInst(InstReg(op_movgr2fr_w, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2fr_d(FloatRegister fd, Register rj) {
+ spew("movgr2fr_d %3s,%3s", fd.name(), rj.name());
+ return writeInst(InstReg(op_movgr2fr_d, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2frh_w(FloatRegister fd, Register rj) {
+ spew("movgr2frh_w %3s,%3s", fd.name(), rj.name());
+ return writeInst(InstReg(op_movgr2frh_w, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfr2gr_s(Register rd, FloatRegister fj) {
+ spew("movfr2gr_s %3s,%3s", rd.name(), fj.name());
+ return writeInst(InstReg(op_movfr2gr_s, fj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfr2gr_d(Register rd, FloatRegister fj) {
+ spew("movfr2gr_d %3s,%3s", rd.name(), fj.name());
+ return writeInst(InstReg(op_movfr2gr_d, fj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfrh2gr_s(Register rd, FloatRegister fj) {
+ spew("movfrh2gr_s %3s,%3s", rd.name(), fj.name());
+ return writeInst(InstReg(op_movfrh2gr_s, fj, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2fcsr(Register rj) {
+ spew("movgr2fcsr %3s", rj.name());
+ return writeInst(InstReg(op_movgr2fcsr, rj, FCSR).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfcsr2gr(Register rd) {
+ spew("movfcsr2gr %3s", rd.name());
+ return writeInst(InstReg(op_movfcsr2gr, FCSR, rd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movfr2cf(FPConditionBit cd,
+ FloatRegister fj) {
+ spew("movfr2cf %d,%3s", cd, fj.name());
+ return writeInst(InstReg(op_movfr2cf, fj, cd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movcf2fr(FloatRegister fd,
+ FPConditionBit cj) {
+ spew("movcf2fr %3s,%d", fd.name(), cj);
+ return writeInst(InstReg(op_movcf2fr, cj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movgr2cf(FPConditionBit cd, Register rj) {
+ spew("movgr2cf %d,%3s", cd, rj.name());
+ return writeInst(InstReg(op_movgr2cf, rj, cd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_movcf2gr(Register rd, FPConditionBit cj) {
+ spew("movcf2gr %3s,%d", rd.name(), cj);
+ return writeInst(InstReg(op_movcf2gr, cj, rd).encode());
+}
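+
+// The movfr2cf/movcf2fr and movgr2cf/movcf2gr pairs above copy a single
+// bit between an FPR or GPR and one of the eight FCC condition flags (per
+// the LoongArch reference manual); together with fsel and bcnez/bceqz they
+// route fcmp results into both FP and integer control flow.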
+
+// FP load/store instructions
+BufferOffset AssemblerLOONG64::as_fld_s(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fld_s %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fld_s, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fld_d(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fld_d %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fld_d, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fst_s(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fst_s %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fst_s, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fst_d(FloatRegister fd, Register rj,
+ int32_t si12) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ spew("fst_d %3s,%3s,0x%x", fd.name(), rj.name(), si12);
+ return writeInst(InstImm(op_fst_d, si12, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fldx_s(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fldx_s %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fldx_s, rk, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fldx_d(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fldx_d %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fldx_d, rk, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fstx_s(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fstx_s %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fstx_s, rk, rj, fd).encode());
+}
+
+BufferOffset AssemblerLOONG64::as_fstx_d(FloatRegister fd, Register rj,
+ Register rk) {
+ spew("fstx_d %3s,%3s,%3s", fd.name(), rj.name(), rk.name());
+ return writeInst(InstReg(op_fstx_d, rk, rj, fd).encode());
+}
+
+/* ========================================================================= */
+
+void AssemblerLOONG64::bind(Label* label, BufferOffset boff) {
+ spew(".set Llabel %p", label);
+  // If our caller didn't give us an explicit target to bind to,
+  // then we want to bind to the location of the next instruction.
+ BufferOffset dest = boff.assigned() ? boff : nextOffset();
+ if (label->used()) {
+ int32_t next;
+
+    // A used label holds a link to the branch that uses it.
+ BufferOffset b(label);
+ do {
+ // Even a 0 offset may be invalid if we're out of memory.
+ if (oom()) {
+ return;
+ }
+
+ Instruction* inst = editSrc(b);
+
+      // Second word holds a pointer to the next branch in the label's chain.
+ next = inst[1].encode();
+ bind(reinterpret_cast<InstImm*>(inst), b.getOffset(), dest.getOffset());
+
+ b = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+ }
+ label->bind(dest.getOffset());
+}
+
+void AssemblerLOONG64::retarget(Label* label, Label* target) {
+ spew("retarget %p -> %p", label, target);
+ if (label->used() && !oom()) {
+ if (target->bound()) {
+ bind(label, BufferOffset(target));
+ } else if (target->used()) {
+ // The target is not bound but used. Prepend label's branch list
+ // onto target's.
+ int32_t next;
+ BufferOffset labelBranchOffset(label);
+
+ // Find the head of the use chain for label.
+ do {
+ Instruction* inst = editSrc(labelBranchOffset);
+
+        // Second word holds a pointer to the next branch in the chain.
+ next = inst[1].encode();
+ labelBranchOffset = BufferOffset(next);
+ } while (next != LabelBase::INVALID_OFFSET);
+
+ // Then patch the head of label's use chain to the tail of
+ // target's use chain, prepending the entire use chain of target.
+ Instruction* inst = editSrc(labelBranchOffset);
+ int32_t prev = target->offset();
+ target->use(label->offset());
+ inst[1].setData(prev);
+ } else {
+ // The target is unbound and unused. We can just take the head of
+ // the list hanging off of label, and dump that into target.
+ target->use(label->offset());
+ }
+ }
+ label->reset();
+}
+
+void dbg_break() {}
+
+void AssemblerLOONG64::as_break(uint32_t code) {
+ MOZ_ASSERT(code <= MAX_BREAK_CODE);
+ spew("break %d", code);
+ writeInst(InstImm(op_break, code).encode());
+}
+
+// This just stomps over memory with 32 bits of raw data. Its purpose is to
+// overwrite the call of JITed code with 32 bits worth of an offset. It is
+// only meant to function on code that has been invalidated, so it should be
+// totally safe. Since that instruction will never be executed again, an
+// ICache flush should not be necessary.
+void AssemblerLOONG64::PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm) {
+ // Raw is going to be the return address.
+ uint32_t* raw = (uint32_t*)label.raw();
+ // Overwrite the 4 bytes before the return address, which will
+ // end up being the call instruction.
+ *(raw - 1) = imm.value;
+}
+
+uint8_t* AssemblerLOONG64::NextInstruction(uint8_t* inst_, uint32_t* count) {
+ Instruction* inst = reinterpret_cast<Instruction*>(inst_);
+ if (count != nullptr) {
+ *count += sizeof(Instruction);
+ }
+ return reinterpret_cast<uint8_t*>(inst->next());
+}
+
+void AssemblerLOONG64::ToggleToJmp(CodeLocationLabel inst_) {
+ InstImm* inst = (InstImm*)inst_.raw();
+
+ MOZ_ASSERT(inst->extractBitField(31, 26) == (uint32_t)op_addu16i_d >> 26);
+ // We converted beq to addu16i_d, so now we restore it.
+ inst->setOpcode(op_beq, 6);
+}
+
+void AssemblerLOONG64::ToggleToCmp(CodeLocationLabel inst_) {
+ InstImm* inst = (InstImm*)inst_.raw();
+
+  // toggledJump is always used for short jumps.
+ MOZ_ASSERT(inst->extractBitField(31, 26) == (uint32_t)op_beq >> 26);
+ // Replace "beq $zero, $zero, offset" with "addu16i_d $zero, $zero, offset"
+ inst->setOpcode(op_addu16i_d, 6);
+}
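+
+// The beq <-> addu16i_d toggling above works because the two encodings
+// differ only in the 6-bit major opcode: both carry a 16-bit immediate and
+// two register fields in the same bit positions. Flipping the opcode turns
+// an always-taken "beq $zero, $zero, offset" into an effectively dead
+// "addu16i_d $zero, $zero, offset" (writes to $zero are discarded) and
+// back, without disturbing the offset field.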
+
+// Since there are no pools in the LoongArch64 implementation, this should
+// be simple.
+Instruction* Instruction::next() { return this + 1; }
+
+InstImm AssemblerLOONG64::invertBranch(InstImm branch, BOffImm16 skipOffset) {
+ uint32_t rj = 0;
+ OpcodeField opcode = (OpcodeField)((branch.extractBitField(31, 26)) << 26);
+ switch (opcode) {
+ case op_beq:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bne, 6);
+ return branch;
+ case op_bne:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beq, 6);
+ return branch;
+ case op_bge:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_blt, 6);
+ return branch;
+ case op_bgeu:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bltu, 6);
+ return branch;
+ case op_blt:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bge, 6);
+ return branch;
+ case op_bltu:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bgeu, 6);
+ return branch;
+ case op_beqz:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_bnez, 6);
+ return branch;
+ case op_bnez:
+ branch.setBOffImm16(skipOffset);
+ branch.setOpcode(op_beqz, 6);
+ return branch;
+ case op_bcz:
+ branch.setBOffImm16(skipOffset);
+ rj = branch.extractRJ();
+ if (rj & 0x8) {
+ branch.setRJ(rj & 0x17);
+ } else {
+ branch.setRJ(rj | 0x8);
+ }
+ return branch;
+ default:
+ MOZ_CRASH("Error creating long branch.");
+ }
+}
+
+#ifdef JS_JITSPEW
+void AssemblerLOONG64::decodeBranchInstAndSpew(InstImm branch) {
+ OpcodeField opcode = (OpcodeField)((branch.extractBitField(31, 26)) << 26);
+ uint32_t rd_id;
+ uint32_t rj_id;
+ uint32_t cj_id;
+ uint32_t immi = branch.extractImm16Value();
+ switch (opcode) {
+ case op_beq:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("beq 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bne:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bne 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bge:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bge 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bgeu:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bgeu 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_blt:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("blt 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_bltu:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bltu 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ case op_beqz:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("beqz 0x%x,%3s,0x%x", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), rd_id);
+ break;
+ case op_bnez:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("bnez 0x%x,%3s,0x%x", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), rd_id);
+ break;
+ case op_bcz:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ cj_id = branch.extractBitField(CJShift + CJBits - 1, CJShift);
+ if (rj_id & 0x8) {
+ spew("bcnez 0x%x,FCC%d,0x%x", (int32_t(immi << 18) >> 16) + 4, cj_id,
+ rd_id);
+ } else {
+ spew("bceqz 0x%x,FCC%d,0x%x", (int32_t(immi << 18) >> 16) + 4, cj_id,
+ rd_id);
+ }
+ break;
+ case op_jirl:
+ rd_id = branch.extractRD();
+ rj_id = branch.extractRJ();
+ spew("beqz 0x%x,%3s,%3s", (int32_t(immi << 18) >> 16) + 4,
+ Registers::GetName(rj_id), Registers::GetName(rd_id));
+ break;
+ default:
+ MOZ_CRASH("Error disassemble branch.");
+ }
+}
+#endif
+
+void Assembler::executableCopy(uint8_t* buffer) {
+ MOZ_ASSERT(isFinished);
+ m_buffer.executableCopy(buffer);
+}
+
+uintptr_t Assembler::GetPointer(uint8_t* instPtr) {
+ Instruction* inst = (Instruction*)instPtr;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+static JitCode* CodeFromJump(Instruction* jump) {
+ uint8_t* target = (uint8_t*)Assembler::ExtractLoad64Value(jump);
+ return JitCode::FromExecutable(target);
+}
+
+void Assembler::TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ while (reader.more()) {
+ JitCode* child =
+ CodeFromJump((Instruction*)(code->raw() + reader.readUnsigned()));
+ TraceManuallyBarrieredEdge(trc, &child, "rel32");
+ }
+}
+
+static void TraceOneDataRelocation(JSTracer* trc,
+ mozilla::Maybe<AutoWritableJitCode>& awjc,
+ JitCode* code, Instruction* inst) {
+ void* ptr = (void*)Assembler::ExtractLoad64Value(inst);
+ void* prior = ptr;
+
+ // Data relocations can be for Values or for raw pointers. If a Value is
+ // zero-tagged, we can trace it as if it were a raw pointer. If a Value
+ // is not zero-tagged, we have to interpret it as a Value to ensure that the
+ // tag bits are masked off to recover the actual pointer.
+ uintptr_t word = reinterpret_cast<uintptr_t>(ptr);
+ if (word >> JSVAL_TAG_SHIFT) {
+ // This relocation is a Value with a non-zero tag.
+ Value v = Value::fromRawBits(word);
+ TraceManuallyBarrieredEdge(trc, &v, "jit-masm-value");
+ ptr = (void*)v.bitsAsPunboxPointer();
+ } else {
+ // This relocation is a raw pointer or a Value with a zero tag.
+ // No barrier needed since these are constants.
+ TraceManuallyBarrieredGenericPointerEdge(
+ trc, reinterpret_cast<gc::Cell**>(&ptr), "jit-masm-ptr");
+ }
+
+ if (ptr != prior) {
+ if (awjc.isNothing()) {
+ awjc.emplace(code);
+ }
+ Assembler::UpdateLoad64Value(inst, uint64_t(ptr));
+ }
+}
+
+/* static */
+void Assembler::TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader) {
+ mozilla::Maybe<AutoWritableJitCode> awjc;
+ while (reader.more()) {
+ size_t offset = reader.readUnsigned();
+ Instruction* inst = (Instruction*)(code->raw() + offset);
+ TraceOneDataRelocation(trc, awjc, code, inst);
+ }
+}
+
+void Assembler::Bind(uint8_t* rawCode, const CodeLabel& label) {
+ if (label.patchAt().bound()) {
+ auto mode = label.linkMode();
+ intptr_t offset = label.patchAt().offset();
+ intptr_t target = label.target().offset();
+
+ if (mode == CodeLabel::RawPointer) {
+ *reinterpret_cast<const void**>(rawCode + offset) = rawCode + target;
+ } else {
+ MOZ_ASSERT(mode == CodeLabel::MoveImmediate ||
+ mode == CodeLabel::JumpImmediate);
+ Instruction* inst = (Instruction*)(rawCode + offset);
+ Assembler::UpdateLoad64Value(inst, (uint64_t)(rawCode + target));
+ }
+ }
+}
+
+void Assembler::bind(InstImm* inst, uintptr_t branch, uintptr_t target) {
+ int64_t offset = target - branch;
+ InstImm inst_jirl = InstImm(op_jirl, BOffImm16(0), zero, ra);
+ InstImm inst_beq = InstImm(op_beq, BOffImm16(0), zero, zero);
+
+  // If the encoded offset is 4, then the jump must be short.
+ if (BOffImm16(inst[0]).decode() == 4) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+ inst[0].setBOffImm16(BOffImm16(offset));
+    inst[1].makeNop();  // Clear the label chain link stored here.
+ return;
+ }
+
+  // Generate the long jump for calls because the return address has to be
+  // the address after the reserved block.
+ if (inst[0].encode() == inst_jirl.encode()) {
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+    inst[3].makeNop();  // One nop, so the jirl sits at the end of the block.
+ inst[4] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+ return;
+ }
+
+ if (BOffImm16::IsInRange(offset)) {
+    // Skip trailing nops.
+ bool skipNops = (inst[0].encode() != inst_jirl.encode() &&
+ inst[0].encode() != inst_beq.encode());
+
+ inst[0].setBOffImm16(BOffImm16(offset));
+ inst[1].makeNop();
+
+ if (skipNops) {
+ inst[2] = InstImm(op_bge, BOffImm16(3 * sizeof(uint32_t)), zero, zero);
+ // There are 2 nops after this
+ }
+ return;
+ }
+
+ if (inst[0].encode() == inst_beq.encode()) {
+    // Handle a long unconditional jump; only four instructions are needed.
+ addLongJump(BufferOffset(branch), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[3] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, zero);
+ } else {
+    // Handle a long conditional jump.
+ inst[0] = invertBranch(inst[0], BOffImm16(5 * sizeof(uint32_t)));
+ // No need for a "nop" here because we can clobber scratch.
+ addLongJump(BufferOffset(branch + sizeof(uint32_t)), BufferOffset(target));
+ Assembler::WriteLoad64Instructions(&inst[1], ScratchRegister,
+ LabelBase::INVALID_OFFSET);
+ inst[4] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, zero);
+ }
+}
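+
+// For reference, the patched sequences produced by bind() above are
+// (each slot is one 32-bit instruction):
+//   short branch:       [0] branch with patched offset  [1] nop
+//   long call:          [0-2] load target  [3] nop  [4] jirl ra
+//   long unconditional: [0-2] load target  [3] jirl zero
+//   long conditional:   [0] inverted branch over the next four slots
+//                       [1-3] load target  [4] jirl zero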
+
+void Assembler::processCodeLabels(uint8_t* rawCode) {
+ for (const CodeLabel& label : codeLabels_) {
+ Bind(rawCode, label);
+ }
+}
+
+uint32_t Assembler::PatchWrite_NearCallSize() {
+  // Loading an address needs 3 instructions, plus a jump.
+ return (3 + 1) * sizeof(uint32_t);
+}
+
+void Assembler::PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall) {
+ Instruction* inst = (Instruction*)start.raw();
+ uint8_t* dest = toCall.raw();
+
+ // Overwrite whatever instruction used to be here with a call.
+ // Always use long jump for two reasons:
+ // - Jump has to be the same size because of PatchWrite_NearCallSize.
+  //   - Return address has to be at the end of the replaced block.
+ // Short jump wouldn't be more efficient.
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)dest);
+ inst[3] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+}
+
+uint64_t Assembler::ExtractLoad64Value(Instruction* inst0) {
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i2 = (InstImm*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+ MOZ_ASSERT((i2->extractBitField(31, 25)) == ((uint32_t)op_lu32i_d >> 25));
+
+ if ((i3->extractBitField(31, 22)) == ((uint32_t)op_lu52i_d >> 22)) {
+ // Li64
+ uint64_t value =
+ (uint64_t(i0->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 12) |
+ (uint64_t(
+ i1->extractBitField(Imm12Bits + Imm12Shift - 1, Imm12Shift))) |
+ (uint64_t(i2->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 32) |
+ (uint64_t(i3->extractBitField(Imm12Bits + Imm12Shift - 1, Imm12Shift))
+ << 52);
+ return value;
+ } else {
+ // Li48
+ uint64_t value =
+ (uint64_t(i0->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 12) |
+ (uint64_t(
+ i1->extractBitField(Imm12Bits + Imm12Shift - 1, Imm12Shift))) |
+ (uint64_t(i2->extractBitField(Imm20Bits + Imm20Shift - 1, Imm20Shift))
+ << 32);
+
+ return uint64_t((int64_t(value) << 16) >> 16);
+ }
+}
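+
+// Worked example of the Li64 pattern decoded above: for the constant
+// 0x0123456789abcdef the four immediate fields are
+//   lu12i.w rd, 0x89abc       # bits [31:12]
+//   ori     rd, rd, 0xdef     # bits [11:0]
+//   lu32i.d rd, 0x34567       # bits [51:32]
+//   lu52i.d rd, rd, 0x012     # bits [63:52]
+// The Li48 pattern stops after lu32i.d and sign-extends from bit 47, which
+// is the (int64_t(value) << 16) >> 16 step above.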
+
+void Assembler::UpdateLoad64Value(Instruction* inst0, uint64_t value) {
+  // TODO: unify with ma_liPatchable.
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i2 = (InstImm*)i1->next();
+ InstImm* i3 = (InstImm*)i2->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+ MOZ_ASSERT((i2->extractBitField(31, 25)) == ((uint32_t)op_lu32i_d >> 25));
+
+ if ((i3->extractBitField(31, 22)) == ((uint32_t)op_lu52i_d >> 22)) {
+ // Li64
+ *i0 = InstImm(op_lu12i_w, (int32_t)((value >> 12) & 0xfffff),
+ Register::FromCode(i0->extractRD()), false);
+ *i1 = InstImm(op_ori, (int32_t)(value & 0xfff),
+ Register::FromCode(i1->extractRJ()),
+ Register::FromCode(i1->extractRD()), 12);
+ *i2 = InstImm(op_lu32i_d, (int32_t)((value >> 32) & 0xfffff),
+ Register::FromCode(i2->extractRD()), false);
+ *i3 = InstImm(op_lu52i_d, (int32_t)((value >> 52) & 0xfff),
+ Register::FromCode(i3->extractRJ()),
+ Register::FromCode(i3->extractRD()), 12);
+ } else {
+ // Li48
+ *i0 = InstImm(op_lu12i_w, (int32_t)((value >> 12) & 0xfffff),
+ Register::FromCode(i0->extractRD()), false);
+ *i1 = InstImm(op_ori, (int32_t)(value & 0xfff),
+ Register::FromCode(i1->extractRJ()),
+ Register::FromCode(i1->extractRD()), 12);
+ *i2 = InstImm(op_lu32i_d, (int32_t)((value >> 32) & 0xfffff),
+ Register::FromCode(i2->extractRD()), false);
+ }
+}
+
+void Assembler::WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value) {
+ Instruction* inst1 = inst0->next();
+ Instruction* inst2 = inst1->next();
+ *inst0 = InstImm(op_lu12i_w, (int32_t)((value >> 12) & 0xfffff), reg, false);
+ *inst1 = InstImm(op_ori, (int32_t)(value & 0xfff), reg, reg, 12);
+ *inst2 = InstImm(op_lu32i_d, (int32_t)((value >> 32) & 0xfffff), reg, false);
+}
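+
+// Only the three-instruction Li48 pattern is written here; callers that
+// need a jump fill the following slot(s) themselves (a nop plus jirl for
+// calls), as in Assembler::bind and PatchWrite_NearCall above.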
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ ImmPtr newValue, ImmPtr expectedValue) {
+ PatchDataWithValueCheck(label, PatchedImmPtr(newValue.value),
+ PatchedImmPtr(expectedValue.value));
+}
+
+void Assembler::PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue) {
+ Instruction* inst = (Instruction*)label.raw();
+
+ // Extract old Value
+ DebugOnly<uint64_t> value = Assembler::ExtractLoad64Value(inst);
+ MOZ_ASSERT(value == uint64_t(expectedValue.value));
+
+ // Replace with new value
+ Assembler::UpdateLoad64Value(inst, uint64_t(newValue.value));
+}
+
+uint64_t Assembler::ExtractInstructionImmediate(uint8_t* code) {
+ InstImm* inst = (InstImm*)code;
+ return Assembler::ExtractLoad64Value(inst);
+}
+
+void Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) {
+ Instruction* inst = (Instruction*)inst_.raw();
+ InstImm* i0 = (InstImm*)inst;
+ InstImm* i1 = (InstImm*)i0->next();
+ InstImm* i2 = (InstImm*)i1->next();
+ Instruction* i3 = (Instruction*)i2->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+ MOZ_ASSERT((i2->extractBitField(31, 25)) == ((uint32_t)op_lu32i_d >> 25));
+
+ if (enabled) {
+ MOZ_ASSERT((i3->extractBitField(31, 25)) != ((uint32_t)op_lu12i_w >> 25));
+ InstImm jirl = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+ *i3 = jirl;
+ } else {
+ InstNOP nop;
+ *i3 = nop;
+ }
+}
diff --git a/js/src/jit/loong64/Assembler-loong64.h b/js/src/jit/loong64/Assembler-loong64.h
new file mode 100644
index 0000000000..4e0b8d6b66
--- /dev/null
+++ b/js/src/jit/loong64/Assembler-loong64.h
@@ -0,0 +1,1889 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_Assembler_loong64_h
+#define jit_loong64_Assembler_loong64_h
+
+#include "mozilla/Sprintf.h"
+#include <iterator>
+
+#include "jit/CompactBuffer.h"
+#include "jit/JitCode.h"
+#include "jit/JitSpewer.h"
+#include "jit/loong64/Architecture-loong64.h"
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "jit/shared/IonAssemblerBuffer.h"
+#include "wasm/WasmTypeDecls.h"
+
+namespace js {
+namespace jit {
+
+static constexpr Register zero{Registers::zero};
+static constexpr Register ra{Registers::ra};
+static constexpr Register tp{Registers::tp};
+static constexpr Register sp{Registers::sp};
+static constexpr Register a0{Registers::a0};
+static constexpr Register a1{Registers::a1};
+static constexpr Register a2{Registers::a2};
+static constexpr Register a3{Registers::a3};
+static constexpr Register a4{Registers::a4};
+static constexpr Register a5{Registers::a5};
+static constexpr Register a6{Registers::a6};
+static constexpr Register a7{Registers::a7};
+static constexpr Register t0{Registers::t0};
+static constexpr Register t1{Registers::t1};
+static constexpr Register t2{Registers::t2};
+static constexpr Register t3{Registers::t3};
+static constexpr Register t4{Registers::t4};
+static constexpr Register t5{Registers::t5};
+static constexpr Register t6{Registers::t6};
+static constexpr Register t7{Registers::t7};
+static constexpr Register t8{Registers::t8};
+static constexpr Register rx{Registers::rx};
+static constexpr Register fp{Registers::fp};
+static constexpr Register s0{Registers::s0};
+static constexpr Register s1{Registers::s1};
+static constexpr Register s2{Registers::s2};
+static constexpr Register s3{Registers::s3};
+static constexpr Register s4{Registers::s4};
+static constexpr Register s5{Registers::s5};
+static constexpr Register s6{Registers::s6};
+static constexpr Register s7{Registers::s7};
+static constexpr Register s8{Registers::s8};
+
+static constexpr FloatRegister f0{FloatRegisters::f0, FloatRegisters::Double};
+static constexpr FloatRegister f1{FloatRegisters::f1, FloatRegisters::Double};
+static constexpr FloatRegister f2{FloatRegisters::f2, FloatRegisters::Double};
+static constexpr FloatRegister f3{FloatRegisters::f3, FloatRegisters::Double};
+static constexpr FloatRegister f4{FloatRegisters::f4, FloatRegisters::Double};
+static constexpr FloatRegister f5{FloatRegisters::f5, FloatRegisters::Double};
+static constexpr FloatRegister f6{FloatRegisters::f6, FloatRegisters::Double};
+static constexpr FloatRegister f7{FloatRegisters::f7, FloatRegisters::Double};
+static constexpr FloatRegister f8{FloatRegisters::f8, FloatRegisters::Double};
+static constexpr FloatRegister f9{FloatRegisters::f9, FloatRegisters::Double};
+static constexpr FloatRegister f10{FloatRegisters::f10, FloatRegisters::Double};
+static constexpr FloatRegister f11{FloatRegisters::f11, FloatRegisters::Double};
+static constexpr FloatRegister f12{FloatRegisters::f12, FloatRegisters::Double};
+static constexpr FloatRegister f13{FloatRegisters::f13, FloatRegisters::Double};
+static constexpr FloatRegister f14{FloatRegisters::f14, FloatRegisters::Double};
+static constexpr FloatRegister f15{FloatRegisters::f15, FloatRegisters::Double};
+static constexpr FloatRegister f16{FloatRegisters::f16, FloatRegisters::Double};
+static constexpr FloatRegister f17{FloatRegisters::f17, FloatRegisters::Double};
+static constexpr FloatRegister f18{FloatRegisters::f18, FloatRegisters::Double};
+static constexpr FloatRegister f19{FloatRegisters::f19, FloatRegisters::Double};
+static constexpr FloatRegister f20{FloatRegisters::f20, FloatRegisters::Double};
+static constexpr FloatRegister f21{FloatRegisters::f21, FloatRegisters::Double};
+static constexpr FloatRegister f22{FloatRegisters::f22, FloatRegisters::Double};
+static constexpr FloatRegister f23{FloatRegisters::f23, FloatRegisters::Double};
+static constexpr FloatRegister f24{FloatRegisters::f24, FloatRegisters::Double};
+static constexpr FloatRegister f25{FloatRegisters::f25, FloatRegisters::Double};
+static constexpr FloatRegister f26{FloatRegisters::f26, FloatRegisters::Double};
+static constexpr FloatRegister f27{FloatRegisters::f27, FloatRegisters::Double};
+static constexpr FloatRegister f28{FloatRegisters::f28, FloatRegisters::Double};
+static constexpr FloatRegister f29{FloatRegisters::f29, FloatRegisters::Double};
+static constexpr FloatRegister f30{FloatRegisters::f30, FloatRegisters::Double};
+static constexpr FloatRegister f31{FloatRegisters::f31, FloatRegisters::Double};
+
+static constexpr Register InvalidReg{Registers::Invalid};
+static constexpr FloatRegister InvalidFloatReg;
+
+static constexpr Register StackPointer = sp;
+static constexpr Register FramePointer = fp;
+static constexpr Register ReturnReg = a0;
+static constexpr Register64 ReturnReg64(ReturnReg);
+static constexpr FloatRegister ReturnFloat32Reg{FloatRegisters::f0,
+ FloatRegisters::Single};
+static constexpr FloatRegister ReturnDoubleReg = f0;
+static constexpr FloatRegister ReturnSimd128Reg = InvalidFloatReg;
+
+static constexpr Register ScratchRegister = t7;
+static constexpr Register SecondScratchReg = t8;
+
+// Helper classes for ScratchRegister usage. Asserts that only one piece
+// of code thinks it has exclusive ownership of each scratch register.
+struct ScratchRegisterScope : public AutoRegisterScope {
+ explicit ScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, ScratchRegister) {}
+};
+
+struct SecondScratchRegisterScope : public AutoRegisterScope {
+ explicit SecondScratchRegisterScope(MacroAssembler& masm)
+ : AutoRegisterScope(masm, SecondScratchReg) {}
+};
+
+static constexpr FloatRegister ScratchFloat32Reg{FloatRegisters::f23,
+ FloatRegisters::Single};
+static constexpr FloatRegister ScratchDoubleReg = f23;
+static constexpr FloatRegister ScratchSimd128Reg = InvalidFloatReg;
+
+struct ScratchFloat32Scope : public AutoFloatRegisterScope {
+ explicit ScratchFloat32Scope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchFloat32Reg) {}
+};
+
+struct ScratchDoubleScope : public AutoFloatRegisterScope {
+ explicit ScratchDoubleScope(MacroAssembler& masm)
+ : AutoFloatRegisterScope(masm, ScratchDoubleReg) {}
+};
+
+// Use arg reg from EnterJIT function as OsrFrameReg.
+static constexpr Register OsrFrameReg = a3;
+static constexpr Register PreBarrierReg = a1;
+static constexpr Register InterpreterPCReg = t0;
+static constexpr Register CallTempReg0 = t0;
+static constexpr Register CallTempReg1 = t1;
+static constexpr Register CallTempReg2 = t2;
+static constexpr Register CallTempReg3 = t3;
+static constexpr Register CallTempReg4 = t4;
+static constexpr Register CallTempReg5 = t5;
+static constexpr Register CallTempNonArgRegs[] = {t0, t1, t2, t3};
+static const uint32_t NumCallTempNonArgRegs = std::size(CallTempNonArgRegs);
+
+static constexpr Register IntArgReg0 = a0;
+static constexpr Register IntArgReg1 = a1;
+static constexpr Register IntArgReg2 = a2;
+static constexpr Register IntArgReg3 = a3;
+static constexpr Register IntArgReg4 = a4;
+static constexpr Register IntArgReg5 = a5;
+static constexpr Register IntArgReg6 = a6;
+static constexpr Register IntArgReg7 = a7;
+static constexpr Register HeapReg = s7;
+
+// Registers used by RegExpMatcher and RegExpExecMatch stubs (do not use
+// JSReturnOperand).
+static constexpr Register RegExpMatcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpMatcherStringReg = CallTempReg1;
+static constexpr Register RegExpMatcherLastIndexReg = CallTempReg2;
+
+// Registers used by RegExpExecTest stub (do not use ReturnReg).
+static constexpr Register RegExpExecTestRegExpReg = CallTempReg0;
+static constexpr Register RegExpExecTestStringReg = CallTempReg1;
+
+// Registers used by RegExpSearcher stub (do not use ReturnReg).
+static constexpr Register RegExpSearcherRegExpReg = CallTempReg0;
+static constexpr Register RegExpSearcherStringReg = CallTempReg1;
+static constexpr Register RegExpSearcherLastIndexReg = CallTempReg2;
+
+static constexpr Register JSReturnReg_Type = a3;
+static constexpr Register JSReturnReg_Data = a2;
+static constexpr Register JSReturnReg = a2;
+static constexpr ValueOperand JSReturnOperand = ValueOperand(JSReturnReg);
+
+// These registers may be volatile or nonvolatile.
+static constexpr Register ABINonArgReg0 = t0;
+static constexpr Register ABINonArgReg1 = t1;
+static constexpr Register ABINonArgReg2 = t2;
+static constexpr Register ABINonArgReg3 = t3;
+
+// These registers may be volatile or nonvolatile.
+// Note: these three registers are all guaranteed to be different.
+static constexpr Register ABINonArgReturnReg0 = t0;
+static constexpr Register ABINonArgReturnReg1 = t1;
+static constexpr Register ABINonVolatileReg = s0;
+
+// This register is guaranteed to be clobberable during the prologue and
+// epilogue of an ABI call, which must preserve the ABI argument, return,
+// and non-volatile registers.
+static constexpr Register ABINonArgReturnVolatileReg = ra;
+
+// This register may be volatile or nonvolatile.
+// Avoid f23 which is the scratch register.
+static constexpr FloatRegister ABINonArgDoubleReg{FloatRegisters::f21,
+ FloatRegisters::Double};
+
+// Instance pointer argument register for WebAssembly functions. This must not
+// alias any other register used for passing function arguments or return
+// values. Preserved by WebAssembly functions. Must be nonvolatile.
+static constexpr Register InstanceReg = s4;
+
+// Registers used for wasm table calls. These registers must be disjoint
+// from the ABI argument registers, InstanceReg and each other.
+static constexpr Register WasmTableCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmTableCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmTableCallSigReg = ABINonArgReg2;
+static constexpr Register WasmTableCallIndexReg = ABINonArgReg3;
+
+// Registers used for ref calls.
+static constexpr Register WasmCallRefCallScratchReg0 = ABINonArgReg0;
+static constexpr Register WasmCallRefCallScratchReg1 = ABINonArgReg1;
+static constexpr Register WasmCallRefReg = ABINonArgReg3;
+
+// Registers used for wasm tail calls operations.
+static constexpr Register WasmTailCallInstanceScratchReg = ABINonArgReg1;
+static constexpr Register WasmTailCallRAScratchReg = ra;
+static constexpr Register WasmTailCallFPScratchReg = ABINonArgReg3;
+
+// Register used as a scratch along the return path in the fast js -> wasm stub
+// code. This must not overlap ReturnReg, JSReturnOperand, or InstanceReg.
+// It must be a volatile register.
+static constexpr Register WasmJitEntryReturnScratch = t1;
+
+static constexpr uint32_t ABIStackAlignment = 16;
+static constexpr uint32_t CodeAlignment = 16;
+static constexpr uint32_t JitStackAlignment = 16;
+
+static constexpr uint32_t JitStackValueAlignment =
+ JitStackAlignment / sizeof(Value);
+static_assert(JitStackAlignment % sizeof(Value) == 0 &&
+ JitStackValueAlignment >= 1,
+ "Stack alignment should be a non-zero multiple of sizeof(Value)");
+
+// TODO(loong64): this is just a filler to prevent a build failure. The
+// LoongArch SIMD alignment requirements still need to be explored.
+static constexpr uint32_t SimdMemoryAlignment = 16;
+
+static_assert(CodeAlignment % SimdMemoryAlignment == 0,
+ "Code alignment should be larger than any of the alignments which "
+ "are used for the constant sections of the code buffer. Thus it "
+ "should be larger than the alignment for SIMD constants.");
+
+static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
+static const uint32_t WasmTrapInstructionLength = 4;
+
+// See comments in wasm::GenerateFunctionPrologue. The difference between these
+// is the size of the largest callable prologue on the platform.
+static constexpr uint32_t WasmCheckedCallEntryOffset = 0u;
+
+static constexpr Scale ScalePointer = TimesEight;
+
+// TODO(loong64): Add LoongArch instruction types description.
+
+// LoongArch instruction encoding constants.
+static const uint32_t RJShift = 5;
+static const uint32_t RJBits = 5;
+static const uint32_t RKShift = 10;
+static const uint32_t RKBits = 5;
+static const uint32_t RDShift = 0;
+static const uint32_t RDBits = 5;
+static const uint32_t FJShift = 5;
+static const uint32_t FJBits = 5;
+static const uint32_t FKShift = 10;
+static const uint32_t FKBits = 5;
+static const uint32_t FDShift = 0;
+static const uint32_t FDBits = 5;
+static const uint32_t FAShift = 15;
+static const uint32_t FABits = 5;
+static const uint32_t CJShift = 5;
+static const uint32_t CJBits = 3;
+static const uint32_t CDShift = 0;
+static const uint32_t CDBits = 3;
+static const uint32_t CAShift = 15;
+static const uint32_t CABits = 3;
+static const uint32_t CONDShift = 15;
+static const uint32_t CONDBits = 5;
+
+static const uint32_t SAShift = 15;
+static const uint32_t SA2Bits = 2;
+static const uint32_t SA3Bits = 3;
+static const uint32_t LSBWShift = 10;
+static const uint32_t LSBWBits = 5;
+static const uint32_t LSBDShift = 10;
+static const uint32_t LSBDBits = 6;
+static const uint32_t MSBWShift = 16;
+static const uint32_t MSBWBits = 5;
+static const uint32_t MSBDShift = 16;
+static const uint32_t MSBDBits = 6;
+static const uint32_t Imm5Shift = 10;
+static const uint32_t Imm5Bits = 5;
+static const uint32_t Imm6Shift = 10;
+static const uint32_t Imm6Bits = 6;
+static const uint32_t Imm12Shift = 10;
+static const uint32_t Imm12Bits = 12;
+static const uint32_t Imm14Shift = 10;
+static const uint32_t Imm14Bits = 14;
+static const uint32_t Imm15Shift = 0;
+static const uint32_t Imm15Bits = 15;
+static const uint32_t Imm16Shift = 10;
+static const uint32_t Imm16Bits = 16;
+static const uint32_t Imm20Shift = 5;
+static const uint32_t Imm20Bits = 20;
+static const uint32_t Imm21Shift = 0;
+static const uint32_t Imm21Bits = 21;
+static const uint32_t Imm26Shift = 0;
+static const uint32_t Imm26Bits = 26;
+static const uint32_t CODEShift = 0;
+static const uint32_t CODEBits = 15;
+
+// LoongArch instruction field bit masks.
+static const uint32_t RJMask = (1 << RJBits) - 1;
+static const uint32_t RKMask = (1 << RKBits) - 1;
+static const uint32_t RDMask = (1 << RDBits) - 1;
+static const uint32_t SA2Mask = (1 << SA2Bits) - 1;
+static const uint32_t SA3Mask = (1 << SA3Bits) - 1;
+static const uint32_t CONDMask = (1 << CONDBits) - 1;
+static const uint32_t LSBWMask = (1 << LSBWBits) - 1;
+static const uint32_t LSBDMask = (1 << LSBDBits) - 1;
+static const uint32_t MSBWMask = (1 << MSBWBits) - 1;
+static const uint32_t MSBDMask = (1 << MSBDBits) - 1;
+static const uint32_t CODEMask = (1 << CODEBits) - 1;
+static const uint32_t Imm5Mask = (1 << Imm5Bits) - 1;
+static const uint32_t Imm6Mask = (1 << Imm6Bits) - 1;
+static const uint32_t Imm12Mask = (1 << Imm12Bits) - 1;
+static const uint32_t Imm14Mask = (1 << Imm14Bits) - 1;
+static const uint32_t Imm15Mask = (1 << Imm15Bits) - 1;
+static const uint32_t Imm16Mask = (1 << Imm16Bits) - 1;
+static const uint32_t Imm20Mask = (1 << Imm20Bits) - 1;
+static const uint32_t Imm21Mask = (1 << Imm21Bits) - 1;
+static const uint32_t Imm26Mask = (1 << Imm26Bits) - 1;
+static const uint32_t BOffImm16Mask = ((1 << Imm16Bits) - 1) << Imm16Shift;
+static const uint32_t BOffImm21Mask = ((1 << Imm21Bits) - 1) << Imm21Shift;
+static const uint32_t BOffImm26Mask = ((1 << Imm26Bits) - 1) << Imm26Shift;
+static const uint32_t RegMask = Registers::Total - 1;
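+
+// For example (editorial sketch), the register fields of a raw instruction
+// word are recovered with the shift/mask pairs above:
+//
+//   uint32_t rd = (insn >> RDShift) & RDMask;  // bits [4:0]
+//   uint32_t rj = (insn >> RJShift) & RJMask;  // bits [9:5]
+//   uint32_t rk = (insn >> RKShift) & RKMask;  // bits [14:10]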
+
+// TODO(loong64) Change to syscall?
+static const uint32_t MAX_BREAK_CODE = 1024 - 1;
+static const uint32_t WASM_TRAP = 6; // BRK_OVERFLOW
+
+// TODO(loong64) Change to LoongArch instruction type.
+class Instruction;
+class InstReg;
+class InstImm;
+class InstJump;
+
+uint32_t RJ(Register r);
+uint32_t RK(Register r);
+uint32_t RD(Register r);
+uint32_t FJ(FloatRegister r);
+uint32_t FK(FloatRegister r);
+uint32_t FD(FloatRegister r);
+uint32_t FA(FloatRegister r);
+uint32_t SA2(uint32_t value);
+uint32_t SA2(FloatRegister r);
+uint32_t SA3(uint32_t value);
+uint32_t SA3(FloatRegister r);
+
+Register toRK(Instruction& i);
+Register toRJ(Instruction& i);
+Register toRD(Instruction& i);
+Register toR(Instruction& i);
+
+// LoongArch enums for instruction fields
+enum OpcodeField {
+ op_beqz = 0x10U << 26,
+ op_bnez = 0x11U << 26,
+ op_bcz = 0x12U << 26, // bceqz & bcnez
+ op_jirl = 0x13U << 26,
+ op_b = 0x14U << 26,
+ op_bl = 0x15U << 26,
+ op_beq = 0x16U << 26,
+ op_bne = 0x17U << 26,
+ op_blt = 0x18U << 26,
+ op_bge = 0x19U << 26,
+ op_bltu = 0x1aU << 26,
+ op_bgeu = 0x1bU << 26,
+
+ op_addu16i_d = 0x4U << 26,
+
+ op_lu12i_w = 0xaU << 25,
+ op_lu32i_d = 0xbU << 25,
+ op_pcaddi = 0xcU << 25,
+ op_pcalau12i = 0xdU << 25,
+ op_pcaddu12i = 0xeU << 25,
+ op_pcaddu18i = 0xfU << 25,
+ op_ll_w = 0x20U << 24,
+ op_sc_w = 0x21U << 24,
+ op_ll_d = 0x22U << 24,
+ op_sc_d = 0x23U << 24,
+ op_ldptr_w = 0x24U << 24,
+ op_stptr_w = 0x25U << 24,
+ op_ldptr_d = 0x26U << 24,
+ op_stptr_d = 0x27U << 24,
+ op_bstrins_d = 0x2U << 22,
+ op_bstrpick_d = 0x3U << 22,
+ op_slti = 0x8U << 22,
+ op_sltui = 0x9U << 22,
+ op_addi_w = 0xaU << 22,
+ op_addi_d = 0xbU << 22,
+ op_lu52i_d = 0xcU << 22,
+ op_andi = 0xdU << 22,
+ op_ori = 0xeU << 22,
+ op_xori = 0xfU << 22,
+ op_ld_b = 0xa0U << 22,
+ op_ld_h = 0xa1U << 22,
+ op_ld_w = 0xa2U << 22,
+ op_ld_d = 0xa3U << 22,
+ op_st_b = 0xa4U << 22,
+ op_st_h = 0xa5U << 22,
+ op_st_w = 0xa6U << 22,
+ op_st_d = 0xa7U << 22,
+ op_ld_bu = 0xa8U << 22,
+ op_ld_hu = 0xa9U << 22,
+ op_ld_wu = 0xaaU << 22,
+ op_preld = 0xabU << 22,
+ op_fld_s = 0xacU << 22,
+ op_fst_s = 0xadU << 22,
+ op_fld_d = 0xaeU << 22,
+ op_fst_d = 0xafU << 22,
+ op_bstr_w = 0x3U << 21, // BSTRINS_W & BSTRPICK_W
+ op_fmadd_s = 0x81U << 20,
+ op_fmadd_d = 0x82U << 20,
+ op_fmsub_s = 0x85U << 20,
+ op_fmsub_d = 0x86U << 20,
+ op_fnmadd_s = 0x89U << 20,
+ op_fnmadd_d = 0x8aU << 20,
+ op_fnmsub_s = 0x8dU << 20,
+ op_fnmsub_d = 0x8eU << 20,
+ op_fcmp_cond_s = 0xc1U << 20,
+ op_fcmp_cond_d = 0xc2U << 20,
+
+ op_bytepick_d = 0x3U << 18,
+ op_fsel = 0x340U << 18,
+
+ op_bytepick_w = 0x4U << 17,
+ op_alsl_w = 0x2U << 17,
+ op_alsl_wu = 0x3U << 17,
+ op_alsl_d = 0x16U << 17,
+
+ op_slli_d = 0x41U << 16,
+ op_srli_d = 0x45U << 16,
+ op_srai_d = 0x49U << 16,
+
+ op_slli_w = 0x81U << 15,
+ op_srli_w = 0x89U << 15,
+ op_srai_w = 0x91U << 15,
+ op_add_w = 0x20U << 15,
+ op_add_d = 0x21U << 15,
+ op_sub_w = 0x22U << 15,
+ op_sub_d = 0x23U << 15,
+ op_slt = 0x24U << 15,
+ op_sltu = 0x25U << 15,
+ op_maskeqz = 0x26U << 15,
+ op_masknez = 0x27U << 15,
+ op_nor = 0x28U << 15,
+ op_and = 0x29U << 15,
+ op_or = 0x2aU << 15,
+ op_xor = 0x2bU << 15,
+ op_orn = 0x2cU << 15,
+ op_andn = 0x2dU << 15,
+ op_sll_w = 0x2eU << 15,
+ op_srl_w = 0x2fU << 15,
+ op_sra_w = 0x30U << 15,
+ op_sll_d = 0x31U << 15,
+ op_srl_d = 0x32U << 15,
+ op_sra_d = 0x33U << 15,
+ op_rotr_w = 0x36U << 15,
+ op_rotr_d = 0x37U << 15,
+ op_rotri_w = 0x99U << 15,
+ op_rotri_d = 0x4DU << 16,
+ op_mul_w = 0x38U << 15,
+ op_mulh_w = 0x39U << 15,
+ op_mulh_wu = 0x3aU << 15,
+ op_mul_d = 0x3bU << 15,
+ op_mulh_d = 0x3cU << 15,
+ op_mulh_du = 0x3dU << 15,
+ op_mulw_d_w = 0x3eU << 15,
+ op_mulw_d_wu = 0x3fU << 15,
+ op_div_w = 0x40U << 15,
+ op_mod_w = 0x41U << 15,
+ op_div_wu = 0x42U << 15,
+ op_mod_wu = 0x43U << 15,
+ op_div_d = 0x44U << 15,
+ op_mod_d = 0x45U << 15,
+ op_div_du = 0x46U << 15,
+ op_mod_du = 0x47U << 15,
+ op_break = 0x54U << 15,
+ op_syscall = 0x56U << 15,
+ op_fadd_s = 0x201U << 15,
+ op_fadd_d = 0x202U << 15,
+ op_fsub_s = 0x205U << 15,
+ op_fsub_d = 0x206U << 15,
+ op_fmul_s = 0x209U << 15,
+ op_fmul_d = 0x20aU << 15,
+ op_fdiv_s = 0x20dU << 15,
+ op_fdiv_d = 0x20eU << 15,
+ op_fmax_s = 0x211U << 15,
+ op_fmax_d = 0x212U << 15,
+ op_fmin_s = 0x215U << 15,
+ op_fmin_d = 0x216U << 15,
+ op_fmaxa_s = 0x219U << 15,
+ op_fmaxa_d = 0x21aU << 15,
+ op_fmina_s = 0x21dU << 15,
+ op_fmina_d = 0x21eU << 15,
+ op_fcopysign_s = 0x225U << 15,
+ op_fcopysign_d = 0x226U << 15,
+ op_ldx_b = 0x7000U << 15,
+ op_ldx_h = 0x7008U << 15,
+ op_ldx_w = 0x7010U << 15,
+ op_ldx_d = 0x7018U << 15,
+ op_stx_b = 0x7020U << 15,
+ op_stx_h = 0x7028U << 15,
+ op_stx_w = 0x7030U << 15,
+ op_stx_d = 0x7038U << 15,
+ op_ldx_bu = 0x7040U << 15,
+ op_ldx_hu = 0x7048U << 15,
+ op_ldx_wu = 0x7050U << 15,
+ op_fldx_s = 0x7060U << 15,
+ op_fldx_d = 0x7068U << 15,
+ op_fstx_s = 0x7070U << 15,
+ op_fstx_d = 0x7078U << 15,
+ op_amswap_w = 0x70c0U << 15,
+ op_amswap_d = 0x70c1U << 15,
+ op_amadd_w = 0x70c2U << 15,
+ op_amadd_d = 0x70c3U << 15,
+ op_amand_w = 0x70c4U << 15,
+ op_amand_d = 0x70c5U << 15,
+ op_amor_w = 0x70c6U << 15,
+ op_amor_d = 0x70c7U << 15,
+ op_amxor_w = 0x70c8U << 15,
+ op_amxor_d = 0x70c9U << 15,
+ op_ammax_w = 0x70caU << 15,
+ op_ammax_d = 0x70cbU << 15,
+ op_ammin_w = 0x70ccU << 15,
+ op_ammin_d = 0x70cdU << 15,
+ op_ammax_wu = 0x70ceU << 15,
+ op_ammax_du = 0x70cfU << 15,
+ op_ammin_wu = 0x70d0U << 15,
+ op_ammin_du = 0x70d1U << 15,
+ op_amswap_db_w = 0x70d2U << 15,
+ op_amswap_db_d = 0x70d3U << 15,
+ op_amadd_db_w = 0x70d4U << 15,
+ op_amadd_db_d = 0x70d5U << 15,
+ op_amand_db_w = 0x70d6U << 15,
+ op_amand_db_d = 0x70d7U << 15,
+ op_amor_db_w = 0x70d8U << 15,
+ op_amor_db_d = 0x70d9U << 15,
+ op_amxor_db_w = 0x70daU << 15,
+ op_amxor_db_d = 0x70dbU << 15,
+ op_ammax_db_w = 0x70dcU << 15,
+ op_ammax_db_d = 0x70ddU << 15,
+ op_ammin_db_w = 0x70deU << 15,
+ op_ammin_db_d = 0x70dfU << 15,
+ op_ammax_db_wu = 0x70e0U << 15,
+ op_ammax_db_du = 0x70e1U << 15,
+ op_ammin_db_wu = 0x70e2U << 15,
+ op_ammin_db_du = 0x70e3U << 15,
+ op_dbar = 0x70e4U << 15,
+ op_ibar = 0x70e5U << 15,
+ op_clo_w = 0x4U << 10,
+ op_clz_w = 0x5U << 10,
+ op_cto_w = 0x6U << 10,
+ op_ctz_w = 0x7U << 10,
+ op_clo_d = 0x8U << 10,
+ op_clz_d = 0x9U << 10,
+ op_cto_d = 0xaU << 10,
+ op_ctz_d = 0xbU << 10,
+ op_revb_2h = 0xcU << 10,
+ op_revb_4h = 0xdU << 10,
+ op_revb_2w = 0xeU << 10,
+ op_revb_d = 0xfU << 10,
+ op_revh_2w = 0x10U << 10,
+ op_revh_d = 0x11U << 10,
+ op_bitrev_4b = 0x12U << 10,
+ op_bitrev_8b = 0x13U << 10,
+ op_bitrev_w = 0x14U << 10,
+ op_bitrev_d = 0x15U << 10,
+ op_ext_w_h = 0x16U << 10,
+ op_ext_w_b = 0x17U << 10,
+ op_fabs_s = 0x4501U << 10,
+ op_fabs_d = 0x4502U << 10,
+ op_fneg_s = 0x4505U << 10,
+ op_fneg_d = 0x4506U << 10,
+ op_fsqrt_s = 0x4511U << 10,
+ op_fsqrt_d = 0x4512U << 10,
+ op_fmov_s = 0x4525U << 10,
+ op_fmov_d = 0x4526U << 10,
+ op_movgr2fr_w = 0x4529U << 10,
+ op_movgr2fr_d = 0x452aU << 10,
+ op_movgr2frh_w = 0x452bU << 10,
+ op_movfr2gr_s = 0x452dU << 10,
+ op_movfr2gr_d = 0x452eU << 10,
+ op_movfrh2gr_s = 0x452fU << 10,
+ op_movgr2fcsr = 0x4530U << 10,
+ op_movfcsr2gr = 0x4532U << 10,
+ op_movfr2cf = 0x4534U << 10,
+ op_movgr2cf = 0x4536U << 10,
+ op_fcvt_s_d = 0x4646U << 10,
+ op_fcvt_d_s = 0x4649U << 10,
+ op_ftintrm_w_s = 0x4681U << 10,
+ op_ftintrm_w_d = 0x4682U << 10,
+ op_ftintrm_l_s = 0x4689U << 10,
+ op_ftintrm_l_d = 0x468aU << 10,
+ op_ftintrp_w_s = 0x4691U << 10,
+ op_ftintrp_w_d = 0x4692U << 10,
+ op_ftintrp_l_s = 0x4699U << 10,
+ op_ftintrp_l_d = 0x469aU << 10,
+ op_ftintrz_w_s = 0x46a1U << 10,
+ op_ftintrz_w_d = 0x46a2U << 10,
+ op_ftintrz_l_s = 0x46a9U << 10,
+ op_ftintrz_l_d = 0x46aaU << 10,
+ op_ftintrne_w_s = 0x46b1U << 10,
+ op_ftintrne_w_d = 0x46b2U << 10,
+ op_ftintrne_l_s = 0x46b9U << 10,
+ op_ftintrne_l_d = 0x46baU << 10,
+ op_ftint_w_s = 0x46c1U << 10,
+ op_ftint_w_d = 0x46c2U << 10,
+ op_ftint_l_s = 0x46c9U << 10,
+ op_ftint_l_d = 0x46caU << 10,
+ op_ffint_s_w = 0x4744U << 10,
+ op_ffint_s_l = 0x4746U << 10,
+ op_ffint_d_w = 0x4748U << 10,
+ op_ffint_d_l = 0x474aU << 10,
+ op_frint_s = 0x4791U << 10,
+ op_frint_d = 0x4792U << 10,
+ op_movcf2fr = 0x114d4U << 8,
+ op_movcf2gr = 0x114dcU << 8,
+};
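+
+// Encoding sketch (editorial): a three-register instruction such as add.d is
+// the opcode OR-ed with the shifted register fields, which is exactly what
+// the InstReg constructors below do:
+//
+//   // add.d rd, rj, rk
+//   uint32_t insn = op_add_d | (rk.code() << RKShift) |
+//                   (rj.code() << RJShift) | (rd.code() << RDShift);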
+
+class Operand;
+
+// A BOffImm16 is a 16 bit immediate that is used for branches.
+class BOffImm16 {
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 16) >> 14);
+ }
+
+ explicit BOffImm16(int offset) : data((offset) >> 2 & Imm16Mask) {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset) < int(unsigned(INT16_MIN) << 2)) {
+ return false;
+ }
+ if ((offset) > (INT16_MAX << 2)) {
+ return false;
+ }
+ return true;
+ }
+ static const uint32_t INVALID = 0x00020000;
+ BOffImm16() : data(INVALID) {}
+
+ bool isInvalid() { return data == INVALID; }
+ Instruction* getDest(Instruction* src) const;
+
+ BOffImm16(InstImm inst);
+};
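+
+// Round-trip sketch (editorial): BOffImm16 stores the offset in words, so a
+// byte offset of -8 encodes and decodes as follows:
+//
+//   BOffImm16 off(-8);  // data = (-8 >> 2) & Imm16Mask = 0xfffe
+//   off.encode();       // 0xfffe, as placed in the 16-bit offset field
+//   off.decode();       // sign-extends back to -8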
+
+// A JOffImm26 is a 26 bit immediate that is used for unconditional jumps.
+class JOffImm26 {
+ uint32_t data;
+
+ public:
+ uint32_t encode() {
+ MOZ_ASSERT(!isInvalid());
+ return data;
+ }
+ int32_t decode() {
+ MOZ_ASSERT(!isInvalid());
+ return (int32_t(data << 6) >> 4);
+ }
+
+ explicit JOffImm26(int offset) : data((offset) >> 2 & Imm26Mask) {
+ MOZ_ASSERT((offset & 0x3) == 0);
+ MOZ_ASSERT(IsInRange(offset));
+ }
+ static bool IsInRange(int offset) {
+ if ((offset) < -134217728) {
+ return false;
+ }
+ if ((offset) > 134217724) {
+ return false;
+ }
+ return true;
+ }
+ static const uint32_t INVALID = 0x20000000;
+ JOffImm26() : data(INVALID) {}
+
+ bool isInvalid() { return data == INVALID; }
+ Instruction* getDest(Instruction* src);
+};
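+
+// Range note (editorial): the 26-bit word offset covers byte offsets in
+// [-2^27, 2^27 - 4], i.e. roughly +/-128 MiB around the branch, which is
+// what IsInRange above checks.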
+
+class Imm16 {
+ uint16_t value;
+
+ public:
+ Imm16();
+ Imm16(uint32_t imm) : value(imm) {}
+ uint32_t encode() { return value; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT16_MIN && imm <= INT16_MAX;
+ }
+
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT16_MAX; }
+};
+
+class Imm8 {
+ uint8_t value;
+
+ public:
+ Imm8();
+ Imm8(uint32_t imm) : value(imm) {}
+ uint32_t encode(uint32_t shift) { return value << shift; }
+ int32_t decodeSigned() { return value; }
+ uint32_t decodeUnsigned() { return value; }
+ static bool IsInSignedRange(int32_t imm) {
+ return imm >= INT8_MIN && imm <= INT8_MAX;
+ }
+ static bool IsInUnsignedRange(uint32_t imm) { return imm <= UINT8_MAX; }
+ static Imm8 Lower(Imm16 imm) { return Imm8(imm.decodeSigned() & 0xff); }
+ static Imm8 Upper(Imm16 imm) {
+ return Imm8((imm.decodeSigned() >> 8) & 0xff);
+ }
+};
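+
+// For example (editorial): splitting Imm16(0x1234) into bytes gives
+// Imm8::Lower(...) == 0x34 and Imm8::Upper(...) == 0x12, for code that must
+// materialize an immediate in byte-sized pieces.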
+
+class Operand {
+ public:
+ enum Tag { REG, FREG, MEM };
+
+ private:
+ Tag tag : 3;
+ uint32_t reg : 5;
+ int32_t offset;
+
+ public:
+ Operand(Register reg_) : tag(REG), reg(reg_.code()) {}
+
+ Operand(FloatRegister freg) : tag(FREG), reg(freg.code()) {}
+
+ Operand(Register base, Imm32 off)
+ : tag(MEM), reg(base.code()), offset(off.value) {}
+
+ Operand(Register base, int32_t off)
+ : tag(MEM), reg(base.code()), offset(off) {}
+
+ Operand(const Address& addr)
+ : tag(MEM), reg(addr.base.code()), offset(addr.offset) {}
+
+ Tag getTag() const { return tag; }
+
+ Register toReg() const {
+ MOZ_ASSERT(tag == REG);
+ return Register::FromCode(reg);
+ }
+
+ FloatRegister toFReg() const {
+ MOZ_ASSERT(tag == FREG);
+ return FloatRegister::FromCode(reg);
+ }
+
+ void toAddr(Register* r, Imm32* dest) const {
+ MOZ_ASSERT(tag == MEM);
+ *r = Register::FromCode(reg);
+ *dest = Imm32(offset);
+ }
+ Address toAddress() const {
+ MOZ_ASSERT(tag == MEM);
+ return Address(Register::FromCode(reg), offset);
+ }
+ int32_t disp() const {
+ MOZ_ASSERT(tag == MEM);
+ return offset;
+ }
+
+ int32_t base() const {
+ MOZ_ASSERT(tag == MEM);
+ return reg;
+ }
+ Register baseReg() const {
+ MOZ_ASSERT(tag == MEM);
+ return Register::FromCode(reg);
+ }
+};
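+
+// Usage sketch (editorial): an Operand wraps either a register or a
+// base+offset memory reference:
+//
+//   Operand gpr(a0);               // REG
+//   Operand mem(Address(sp, 8));   // MEM: base sp, displacement 8
+//   mem.toAddress();               // recovers Address(sp, 8)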
+
+// Integer range checks.
+inline bool is_intN(int32_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < 64));
+ int64_t limit = static_cast<int64_t>(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+
+inline bool is_uintN(int32_t x, unsigned n) {
+ MOZ_ASSERT((0 < n) && (n < (sizeof(x) * 8)));
+ return !(x >> n);
+}
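+
+// For example (editorial): is_intN(-2048, 12) and is_intN(2047, 12) are
+// true while is_intN(2048, 12) is false; is_uintN(4095, 12) is true while
+// is_uintN(4096, 12) is false. These match the si12/ui12 operand ranges
+// used throughout the assembler below.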
+
+inline Imm32 Imm64::firstHalf() const { return low(); }
+
+inline Imm32 Imm64::secondHalf() const { return hi(); }
+
+static constexpr int32_t SliceSize = 1024;
+typedef js::jit::AssemblerBuffer<SliceSize, Instruction> LOONGBuffer;
+
+class LOONGBufferWithExecutableCopy : public LOONGBuffer {
+ public:
+ void executableCopy(uint8_t* buffer) {
+ if (this->oom()) {
+ return;
+ }
+
+ for (Slice* cur = head; cur != nullptr; cur = cur->getNext()) {
+ memcpy(buffer, &cur->instructions, cur->length());
+ buffer += cur->length();
+ }
+ }
+
+ bool appendRawCode(const uint8_t* code, size_t numBytes) {
+ if (this->oom()) {
+ return false;
+ }
+ while (numBytes > SliceSize) {
+ this->putBytes(SliceSize, code);
+ numBytes -= SliceSize;
+ code += SliceSize;
+ }
+ this->putBytes(numBytes, code);
+ return !this->oom();
+ }
+};
+
+class AssemblerLOONG64 : public AssemblerShared {
+ public:
+ // TODO(loong64): Should we remove these conditions here?
+ enum Condition {
+ Equal,
+ NotEqual,
+ Above,
+ AboveOrEqual,
+ Below,
+ BelowOrEqual,
+ GreaterThan,
+ GreaterThanOrEqual,
+ GreaterThanOrEqual_Signed,
+ GreaterThanOrEqual_NotSigned,
+ LessThan,
+ LessThan_Signed,
+ LessThan_NotSigned,
+ LessThanOrEqual,
+ Overflow,
+ CarrySet,
+ CarryClear,
+ Signed,
+ NotSigned,
+ Zero,
+ NonZero,
+ Always,
+ };
+
+ enum DoubleCondition {
+ DoubleOrdered,
+ DoubleEqual,
+ DoubleNotEqual,
+ DoubleGreaterThan,
+ DoubleGreaterThanOrEqual,
+ DoubleLessThan,
+ DoubleLessThanOrEqual,
+ DoubleUnordered,
+ DoubleEqualOrUnordered,
+ DoubleNotEqualOrUnordered,
+ DoubleGreaterThanOrUnordered,
+ DoubleGreaterThanOrEqualOrUnordered,
+ DoubleLessThanOrUnordered,
+ DoubleLessThanOrEqualOrUnordered
+ };
+
+ enum FPUCondition {
+ kNoFPUCondition = -1,
+
+ CAF = 0x00,
+ SAF = 0x01,
+ CLT = 0x02,
+ SLT = 0x03,
+ CEQ = 0x04,
+ SEQ = 0x05,
+ CLE = 0x06,
+ SLE = 0x07,
+ CUN = 0x08,
+ SUN = 0x09,
+ CULT = 0x0a,
+ SULT = 0x0b,
+ CUEQ = 0x0c,
+ SUEQ = 0x0d,
+ CULE = 0x0e,
+ SULE = 0x0f,
+ CNE = 0x10,
+ SNE = 0x11,
+ COR = 0x14,
+ SOR = 0x15,
+ CUNE = 0x18,
+ SUNE = 0x19,
+ };
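+
+ // Note (editorial): in the fcmp.cond.{s,d} names above, the C* conditions
+ // are quiet comparisons, while the matching S* conditions are their
+ // signaling counterparts that also raise an invalid-operation exception on
+ // quiet NaN operands.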
+
+ enum FPConditionBit { FCC0 = 0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
+
+ enum FPControl { FCSR = 0 };
+
+ enum FCSRBit { CauseI = 24, CauseU, CauseO, CauseZ, CauseV };
+
+ enum FloatFormat { SingleFloat, DoubleFloat };
+
+ enum JumpOrCall { BranchIsJump, BranchIsCall };
+
+ enum FloatTestKind { TestForTrue, TestForFalse };
+
+ // :( this should be protected, but since CodeGenerator
+ // wants to use it, it needs to go out here :(
+
+ BufferOffset nextOffset() { return m_buffer.nextOffset(); }
+
+ protected:
+ Instruction* editSrc(BufferOffset bo) { return m_buffer.getInst(bo); }
+
+ // Structure for fixing up pc-relative loads/jumps when the machine code
+ // gets moved (executable copy, gc, etc.).
+ struct RelativePatch {
+ // The offset within the code buffer where the value that
+ // we want to fix up is loaded.
+ BufferOffset offset;
+ void* target;
+ RelocationKind kind;
+
+ RelativePatch(BufferOffset offset, void* target, RelocationKind kind)
+ : offset(offset), target(target), kind(kind) {}
+ };
+
+ js::Vector<RelativePatch, 8, SystemAllocPolicy> jumps_;
+
+ CompactBufferWriter jumpRelocations_;
+ CompactBufferWriter dataRelocations_;
+
+ LOONGBufferWithExecutableCopy m_buffer;
+
+#ifdef JS_JITSPEW
+ Sprinter* printer;
+#endif
+
+ public:
+ AssemblerLOONG64()
+ : m_buffer(),
+#ifdef JS_JITSPEW
+ printer(nullptr),
+#endif
+ isFinished(false) {
+ }
+
+ static Condition InvertCondition(Condition cond);
+ static DoubleCondition InvertCondition(DoubleCondition cond);
+ // This is changing the condition codes for cmp a, b to the same codes for cmp
+ // b, a.
+ static Condition InvertCmpCondition(Condition cond);
+
+ // As opposed to the x86/x64 version, the data relocation has to be
+ // executed before recovering the pointer, not after.
+ void writeDataRelocation(ImmGCPtr ptr) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (ptr.value) {
+ if (gc::IsInsideNursery(ptr.value)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(nextOffset().getOffset());
+ }
+ }
+
+ void assertNoGCThings() const {
+#ifdef DEBUG
+ MOZ_ASSERT(dataRelocations_.length() == 0);
+ for (auto& j : jumps_) {
+ MOZ_ASSERT(j.kind == RelocationKind::HARDCODED);
+ }
+#endif
+ }
+
+ public:
+ void setUnlimitedBuffer() { m_buffer.setUnlimited(); }
+ bool oom() const;
+
+ void setPrinter(Sprinter* sp) {
+#ifdef JS_JITSPEW
+ printer = sp;
+#endif
+ }
+
+#ifdef JS_JITSPEW
+ inline void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {
+ if (MOZ_UNLIKELY(printer || JitSpewEnabled(JitSpew_Codegen))) {
+ va_list va;
+ va_start(va, fmt);
+ spew(fmt, va);
+ va_end(va);
+ }
+ }
+
+ void decodeBranchInstAndSpew(InstImm branch);
+#else
+ MOZ_ALWAYS_INLINE void spew(const char* fmt, ...) MOZ_FORMAT_PRINTF(2, 3) {}
+#endif
+
+#ifdef JS_JITSPEW
+ MOZ_COLD void spew(const char* fmt, va_list va) MOZ_FORMAT_PRINTF(2, 0) {
+ // Buffer to hold the formatted string. Note that this may contain
+ // '%' characters, so do not pass it directly to printf functions.
+ char buf[200];
+
+ int i = VsprintfLiteral(buf, fmt, va);
+ if (i > -1) {
+ if (printer) {
+ printer->printf("%s\n", buf);
+ }
+ js::jit::JitSpew(js::jit::JitSpew_Codegen, "%s", buf);
+ }
+ }
+#endif
+
+ Register getStackPointer() const { return StackPointer; }
+
+ protected:
+ bool isFinished;
+
+ public:
+ void finish();
+ bool appendRawCode(const uint8_t* code, size_t numBytes);
+ bool reserve(size_t size);
+ bool swapBuffer(wasm::Bytes& bytes);
+ void executableCopy(void* buffer);
+ void copyJumpRelocationTable(uint8_t* dest);
+ void copyDataRelocationTable(uint8_t* dest);
+
+ // Size of the instruction stream, in bytes.
+ size_t size() const;
+ // Size of the jump relocation table, in bytes.
+ size_t jumpRelocationTableBytes() const;
+ size_t dataRelocationTableBytes() const;
+
+ // Size of the data table, in bytes.
+ size_t bytesNeeded() const;
+
+ // Write a blob of binary into the instruction stream *OR* into a
+ // destination address. If dest is nullptr (the default), then the
+ // instruction gets written into the instruction stream. If dest is not
+ // null, it is interpreted as a pointer to the location where we want the
+ // instruction to be written.
+ BufferOffset writeInst(uint32_t x, uint32_t* dest = nullptr);
+ // A static variant for the cases where we don't want to have an assembler
+ // object at all. Normally, you would use the dummy (nullptr) object.
+ static void WriteInstStatic(uint32_t x, uint32_t* dest);
+
+ public:
+ BufferOffset haltingAlign(int alignment);
+ BufferOffset nopAlign(int alignment);
+ BufferOffset as_nop() { return as_andi(zero, zero, 0); }
+
+ // Branch and jump instructions
+ BufferOffset as_b(JOffImm26 off);
+ BufferOffset as_bl(JOffImm26 off);
+ BufferOffset as_jirl(Register rd, Register rj, BOffImm16 off);
+
+ InstImm getBranchCode(JumpOrCall jumpOrCall); // b, bl
+ InstImm getBranchCode(Register rd, Register rj,
+ Condition c); // beq, bne, bge, bgeu, blt, bltu
+ InstImm getBranchCode(Register rj, Condition c); // beqz, bnez
+ InstImm getBranchCode(FPConditionBit cj); // bceqz, bcnez
+
+ // Arithmetic instructions
+ BufferOffset as_add_w(Register rd, Register rj, Register rk);
+ BufferOffset as_add_d(Register rd, Register rj, Register rk);
+ BufferOffset as_sub_w(Register rd, Register rj, Register rk);
+ BufferOffset as_sub_d(Register rd, Register rj, Register rk);
+
+ BufferOffset as_addi_w(Register rd, Register rj, int32_t si12);
+ BufferOffset as_addi_d(Register rd, Register rj, int32_t si12);
+ BufferOffset as_addu16i_d(Register rd, Register rj, int32_t si16);
+
+ BufferOffset as_alsl_w(Register rd, Register rj, Register rk, uint32_t sa2);
+ BufferOffset as_alsl_wu(Register rd, Register rj, Register rk, uint32_t sa2);
+ BufferOffset as_alsl_d(Register rd, Register rj, Register rk, uint32_t sa2);
+
+ BufferOffset as_lu12i_w(Register rd, int32_t si20);
+ BufferOffset as_lu32i_d(Register rd, int32_t si20);
+ BufferOffset as_lu52i_d(Register rd, Register rj, int32_t si12);
+
+ BufferOffset as_slt(Register rd, Register rj, Register rk);
+ BufferOffset as_sltu(Register rd, Register rj, Register rk);
+ BufferOffset as_slti(Register rd, Register rj, int32_t si12);
+ BufferOffset as_sltui(Register rd, Register rj, int32_t si12);
+
+ BufferOffset as_pcaddi(Register rd, int32_t si20);
+ BufferOffset as_pcaddu12i(Register rd, int32_t si20);
+ BufferOffset as_pcaddu18i(Register rd, int32_t si20);
+ BufferOffset as_pcalau12i(Register rd, int32_t si20);
+
+ BufferOffset as_mul_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_mul_d(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_d(Register rd, Register rj, Register rk);
+ BufferOffset as_mulh_du(Register rd, Register rj, Register rk);
+
+ BufferOffset as_mulw_d_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mulw_d_wu(Register rd, Register rj, Register rk);
+
+ BufferOffset as_div_w(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_w(Register rd, Register rj, Register rk);
+ BufferOffset as_div_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_div_d(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_d(Register rd, Register rj, Register rk);
+ BufferOffset as_div_du(Register rd, Register rj, Register rk);
+ BufferOffset as_mod_du(Register rd, Register rj, Register rk);
+
+ // Logical instructions
+ BufferOffset as_and(Register rd, Register rj, Register rk);
+ BufferOffset as_or(Register rd, Register rj, Register rk);
+ BufferOffset as_xor(Register rd, Register rj, Register rk);
+ BufferOffset as_nor(Register rd, Register rj, Register rk);
+ BufferOffset as_andn(Register rd, Register rj, Register rk);
+ BufferOffset as_orn(Register rd, Register rj, Register rk);
+
+ BufferOffset as_andi(Register rd, Register rj, int32_t ui12);
+ BufferOffset as_ori(Register rd, Register rj, int32_t ui12);
+ BufferOffset as_xori(Register rd, Register rj, int32_t ui12);
+
+ // Shift instructions
+ BufferOffset as_sll_w(Register rd, Register rj, Register rk);
+ BufferOffset as_srl_w(Register rd, Register rj, Register rk);
+ BufferOffset as_sra_w(Register rd, Register rj, Register rk);
+ BufferOffset as_rotr_w(Register rd, Register rj, Register rk);
+
+ BufferOffset as_slli_w(Register rd, Register rj, int32_t ui5);
+ BufferOffset as_srli_w(Register rd, Register rj, int32_t ui5);
+ BufferOffset as_srai_w(Register rd, Register rj, int32_t ui5);
+ BufferOffset as_rotri_w(Register rd, Register rj, int32_t ui5);
+
+ BufferOffset as_sll_d(Register rd, Register rj, Register rk);
+ BufferOffset as_srl_d(Register rd, Register rj, Register rk);
+ BufferOffset as_sra_d(Register rd, Register rj, Register rk);
+ BufferOffset as_rotr_d(Register rd, Register rj, Register rk);
+
+ BufferOffset as_slli_d(Register rd, Register rj, int32_t ui6);
+ BufferOffset as_srli_d(Register rd, Register rj, int32_t ui6);
+ BufferOffset as_srai_d(Register rd, Register rj, int32_t ui6);
+ BufferOffset as_rotri_d(Register rd, Register rj, int32_t ui6);
+
+ // Bit operation instructions
+ BufferOffset as_ext_w_b(Register rd, Register rj);
+ BufferOffset as_ext_w_h(Register rd, Register rj);
+
+ BufferOffset as_clo_w(Register rd, Register rj);
+ BufferOffset as_clz_w(Register rd, Register rj);
+ BufferOffset as_cto_w(Register rd, Register rj);
+ BufferOffset as_ctz_w(Register rd, Register rj);
+ BufferOffset as_clo_d(Register rd, Register rj);
+ BufferOffset as_clz_d(Register rd, Register rj);
+ BufferOffset as_cto_d(Register rd, Register rj);
+ BufferOffset as_ctz_d(Register rd, Register rj);
+
+ BufferOffset as_bytepick_w(Register rd, Register rj, Register rk,
+ int32_t sa2);
+ BufferOffset as_bytepick_d(Register rd, Register rj, Register rk,
+ int32_t sa3);
+
+ BufferOffset as_revb_2h(Register rd, Register rj);
+ BufferOffset as_revb_4h(Register rd, Register rj);
+ BufferOffset as_revb_2w(Register rd, Register rj);
+ BufferOffset as_revb_d(Register rd, Register rj);
+
+ BufferOffset as_revh_2w(Register rd, Register rj);
+ BufferOffset as_revh_d(Register rd, Register rj);
+
+ BufferOffset as_bitrev_4b(Register rd, Register rj);
+ BufferOffset as_bitrev_8b(Register rd, Register rj);
+
+ BufferOffset as_bitrev_w(Register rd, Register rj);
+ BufferOffset as_bitrev_d(Register rd, Register rj);
+
+ BufferOffset as_bstrins_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw);
+ BufferOffset as_bstrins_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd);
+ BufferOffset as_bstrpick_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw);
+ BufferOffset as_bstrpick_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd);
+
+ BufferOffset as_maskeqz(Register rd, Register rj, Register rk);
+ BufferOffset as_masknez(Register rd, Register rj, Register rk);
+
+ // Load and store instructions
+ BufferOffset as_ld_b(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_h(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_w(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_d(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_bu(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_hu(Register rd, Register rj, int32_t si12);
+ BufferOffset as_ld_wu(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_b(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_h(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_w(Register rd, Register rj, int32_t si12);
+ BufferOffset as_st_d(Register rd, Register rj, int32_t si12);
+
+ BufferOffset as_ldx_b(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_h(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_bu(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_hu(Register rd, Register rj, Register rk);
+ BufferOffset as_ldx_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_b(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_h(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_w(Register rd, Register rj, Register rk);
+ BufferOffset as_stx_d(Register rd, Register rj, Register rk);
+
+ BufferOffset as_ldptr_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_ldptr_d(Register rd, Register rj, int32_t si14);
+ BufferOffset as_stptr_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_stptr_d(Register rd, Register rj, int32_t si14);
+
+ BufferOffset as_preld(int32_t hint, Register rj, int32_t si12);
+
+ // Atomic instructions
+ BufferOffset as_amswap_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amswap_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_du(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_du(Register rd, Register rj, Register rk);
+
+ BufferOffset as_amswap_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amswap_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amadd_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amand_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amor_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_amxor_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_w(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_d(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammax_db_du(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_wu(Register rd, Register rj, Register rk);
+ BufferOffset as_ammin_db_du(Register rd, Register rj, Register rk);
+
+ BufferOffset as_ll_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_ll_d(Register rd, Register rj, int32_t si14);
+ BufferOffset as_sc_w(Register rd, Register rj, int32_t si14);
+ BufferOffset as_sc_d(Register rd, Register rj, int32_t si14);
+
+ // Barrier instructions
+ BufferOffset as_dbar(int32_t hint);
+ BufferOffset as_ibar(int32_t hint);
+
+ // FP Arithmetic instructions
+ BufferOffset as_fadd_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fadd_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fsub_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fsub_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmul_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmul_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fdiv_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fdiv_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+
+ BufferOffset as_fmadd_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fmadd_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fmsub_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fmsub_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmadd_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmadd_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmsub_s(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+ BufferOffset as_fnmsub_d(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FloatRegister fa);
+
+ BufferOffset as_fmax_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmax_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmin_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmin_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+
+ BufferOffset as_fmaxa_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmaxa_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmina_s(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+ BufferOffset as_fmina_d(FloatRegister fd, FloatRegister fj, FloatRegister fk);
+
+ BufferOffset as_fabs_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fabs_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fneg_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fneg_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_fsqrt_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fsqrt_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fcopysign_s(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk);
+ BufferOffset as_fcopysign_d(FloatRegister fd, FloatRegister fj,
+ FloatRegister fk);
+
+ // FP compare instructions (fcmp.cond.s fcmp.cond.d)
+ BufferOffset as_fcmp_cor(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_ceq(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cne(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cle(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_clt(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cun(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cueq(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cune(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cule(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+ BufferOffset as_fcmp_cult(FloatFormat fmt, FloatRegister fj, FloatRegister fk,
+ FPConditionBit cd);
+
+ // FP conversion instructions
+ BufferOffset as_fcvt_s_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fcvt_d_s(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_ffint_s_w(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ffint_s_l(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ffint_d_w(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ffint_d_l(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftint_l_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_ftintrm_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrm_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrm_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrm_l_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrp_l_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrz_l_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_w_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_w_d(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_l_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_ftintrne_l_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_frint_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_frint_d(FloatRegister fd, FloatRegister fj);
+
+ // FP mov instructions
+ BufferOffset as_fmov_s(FloatRegister fd, FloatRegister fj);
+ BufferOffset as_fmov_d(FloatRegister fd, FloatRegister fj);
+
+ BufferOffset as_fsel(FloatRegister fd, FloatRegister fj, FloatRegister fk,
+ FPConditionBit ca);
+
+ BufferOffset as_movgr2fr_w(FloatRegister fd, Register rj);
+ BufferOffset as_movgr2fr_d(FloatRegister fd, Register rj);
+ BufferOffset as_movgr2frh_w(FloatRegister fd, Register rj);
+
+ BufferOffset as_movfr2gr_s(Register rd, FloatRegister fj);
+ BufferOffset as_movfr2gr_d(Register rd, FloatRegister fj);
+ BufferOffset as_movfrh2gr_s(Register rd, FloatRegister fj);
+
+ BufferOffset as_movgr2fcsr(Register rj);
+ BufferOffset as_movfcsr2gr(Register rd);
+
+ BufferOffset as_movfr2cf(FPConditionBit cd, FloatRegister fj);
+ BufferOffset as_movcf2fr(FloatRegister fd, FPConditionBit cj);
+
+ BufferOffset as_movgr2cf(FPConditionBit cd, Register rj);
+ BufferOffset as_movcf2gr(Register rd, FPConditionBit cj);
+
+ // FP load/store instructions
+ BufferOffset as_fld_s(FloatRegister fd, Register rj, int32_t si12);
+ BufferOffset as_fld_d(FloatRegister fd, Register rj, int32_t si12);
+ BufferOffset as_fst_s(FloatRegister fd, Register rj, int32_t si12);
+ BufferOffset as_fst_d(FloatRegister fd, Register rj, int32_t si12);
+
+ BufferOffset as_fldx_s(FloatRegister fd, Register rj, Register rk);
+ BufferOffset as_fldx_d(FloatRegister fd, Register rj, Register rk);
+ BufferOffset as_fstx_s(FloatRegister fd, Register rj, Register rk);
+ BufferOffset as_fstx_d(FloatRegister fd, Register rj, Register rk);
+
+ // label operations
+ void bind(Label* label, BufferOffset boff = BufferOffset());
+ virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
+ void bind(CodeLabel* label) { label->target()->bind(currentOffset()); }
+ uint32_t currentOffset() { return nextOffset().getOffset(); }
+ void retarget(Label* label, Label* target);
+
+ void call(Label* label);
+ void call(void* target);
+
+ void as_break(uint32_t code);
+
+ public:
+ static bool SupportsFloatingPoint() {
+#if defined(__loongarch_hard_float) || defined(JS_SIMULATOR_LOONG64)
+ return true;
+#else
+ return false;
+#endif
+ }
+ static bool SupportsUnalignedAccesses() { return true; }
+ static bool SupportsFastUnalignedFPAccesses() { return true; }
+
+ static bool HasRoundInstruction(RoundingMode mode) { return false; }
+
+ protected:
+ InstImm invertBranch(InstImm branch, BOffImm16 skipOffset);
+ void addPendingJump(BufferOffset src, ImmPtr target, RelocationKind kind) {
+ enoughMemory_ &= jumps_.append(RelativePatch(src, target.value, kind));
+ if (kind == RelocationKind::JITCODE) {
+ jumpRelocations_.writeUnsigned(src.getOffset());
+ }
+ }
+
+ void addLongJump(BufferOffset src, BufferOffset dst) {
+ CodeLabel cl;
+ cl.patchAt()->bind(src.getOffset());
+ cl.target()->bind(dst.getOffset());
+ cl.setLinkMode(CodeLabel::JumpImmediate);
+ addCodeLabel(std::move(cl));
+ }
+
+ public:
+ void flushBuffer() {}
+
+ void comment(const char* msg) { spew("; %s", msg); }
+
+ static uint32_t NopSize() { return 4; }
+
+ static void PatchWrite_Imm32(CodeLocationLabel label, Imm32 imm);
+
+ static uint8_t* NextInstruction(uint8_t* instruction,
+ uint32_t* count = nullptr);
+
+ static void ToggleToJmp(CodeLocationLabel inst_);
+ static void ToggleToCmp(CodeLocationLabel inst_);
+
+ void verifyHeapAccessDisassembly(uint32_t begin, uint32_t end,
+ const Disassembler::HeapAccess& heapAccess) {
+ // Implement this if we implement a disassembler.
+ }
+}; // AssemblerLOONG64
+
+// andi r0, r0, 0
+const uint32_t NopInst = 0x03400000;
+
+// An Instruction is a structure for both encoding and decoding any and all
+// LoongArch instructions.
+class Instruction {
+ public:
+ uint32_t data;
+
+ protected:
+ // Standard constructor
+ Instruction(uint32_t data_) : data(data_) {}
+ // You should never create an instruction directly. You should create a
+ // more specific instruction which will eventually call one of these
+ // constructors for you.
+
+ public:
+ uint32_t encode() const { return data; }
+
+ void makeNop() { data = NopInst; }
+
+ void setData(uint32_t data) { this->data = data; }
+
+ const Instruction& operator=(const Instruction& src) {
+ data = src.data;
+ return *this;
+ }
+
+ // Extract a single bit from the instruction.
+ uint32_t extractBit(uint32_t bit) { return (encode() >> bit) & 1; }
+ // Extract a bit field out of the instruction.
+ uint32_t extractBitField(uint32_t hi, uint32_t lo) {
+ return (encode() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Get the next instruction in the instruction stream.
+ // This does neat things like ignore constant pools and their guards.
+ Instruction* next();
+
+ // Sometimes, an API wants a uint32_t (or a pointer to it) rather than
+ // an instruction. raw() just coerces this into a pointer to a uint32_t.
+ const uint32_t* raw() const { return &data; }
+ uint32_t size() const { return 4; }
+}; // Instruction
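+
+// For example (editorial): extractBitField(RJShift + RJBits - 1, RJShift)
+// masks out bits [9:5], the rj field of a register-type instruction; the
+// extractRJ()/extractRK()/extractRD() helpers below are thin wrappers over
+// exactly this call.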
+
+// Make sure that it is the right size.
+static_assert(sizeof(Instruction) == 4,
+ "Size of Instruction class has to be 4 bytes.");
+
+class InstNOP : public Instruction {
+ public:
+ InstNOP() : Instruction(NopInst) {}
+};
+
+// Class for register type instructions.
+class InstReg : public Instruction {
+ public:
+ InstReg(OpcodeField op, Register rj, Register rd)
+ : Instruction(op | RJ(rj) | RD(rd)) {}
+ InstReg(OpcodeField op, Register rk, Register rj, Register rd)
+ : Instruction(op | RK(rk) | RJ(rj) | RD(rd)) {}
+ InstReg(OpcodeField op, uint32_t sa, Register rk, Register rj, Register rd,
+ uint32_t sa_bit)
+ : Instruction(sa_bit == 2 ? op | SA2(sa) | RK(rk) | RJ(rj) | RD(rd)
+ : op | SA3(sa) | RK(rk) | RJ(rj) | RD(rd)) {
+ MOZ_ASSERT(sa_bit == 2 || sa_bit == 3);
+ }
+ InstReg(OpcodeField op, Register rj, Register rd, bool HasRd)
+ : Instruction(HasRd ? op | RJ(rj) | RD(rd) : op | RK(rj) | RJ(rd)) {}
+
+ // For floating-point
+ InstReg(OpcodeField op, Register rj, FloatRegister fd)
+ : Instruction(op | RJ(rj) | FD(fd)) {}
+ InstReg(OpcodeField op, FloatRegister fj, FloatRegister fd)
+ : Instruction(op | FJ(fj) | FD(fd)) {}
+ InstReg(OpcodeField op, FloatRegister fk, FloatRegister fj, FloatRegister fd)
+ : Instruction(op | FK(fk) | FJ(fj) | FD(fd)) {}
+ InstReg(OpcodeField op, Register rk, Register rj, FloatRegister fd)
+ : Instruction(op | RK(rk) | RJ(rj) | FD(fd)) {}
+ InstReg(OpcodeField op, FloatRegister fa, FloatRegister fk, FloatRegister fj,
+ FloatRegister fd)
+ : Instruction(op | FA(fa) | FK(fk) | FJ(fj) | FD(fd)) {}
+ InstReg(OpcodeField op, AssemblerLOONG64::FPConditionBit ca, FloatRegister fk,
+ FloatRegister fj, FloatRegister fd)
+ : Instruction(op | ca << CAShift | FK(fk) | FJ(fj) | FD(fd)) {
+ MOZ_ASSERT(op == op_fsel);
+ }
+ InstReg(OpcodeField op, FloatRegister fj, Register rd)
+ : Instruction(op | FJ(fj) | RD(rd)) {
+ MOZ_ASSERT((op == op_movfr2gr_s) || (op == op_movfr2gr_d) ||
+ (op == op_movfrh2gr_s));
+ }
+ InstReg(OpcodeField op, Register rj, uint32_t fd)
+ : Instruction(op | RJ(rj) | fd) {
+ MOZ_ASSERT(op == op_movgr2fcsr);
+ }
+ InstReg(OpcodeField op, uint32_t fj, Register rd)
+ : Instruction(op | (fj << FJShift) | RD(rd)) {
+ MOZ_ASSERT(op == op_movfcsr2gr);
+ }
+ InstReg(OpcodeField op, FloatRegister fj, AssemblerLOONG64::FPConditionBit cd)
+ : Instruction(op | FJ(fj) | cd) {
+ MOZ_ASSERT(op == op_movfr2cf);
+ }
+ InstReg(OpcodeField op, AssemblerLOONG64::FPConditionBit cj, FloatRegister fd)
+ : Instruction(op | (cj << CJShift) | FD(fd)) {
+ MOZ_ASSERT(op == op_movcf2fr);
+ }
+ InstReg(OpcodeField op, Register rj, AssemblerLOONG64::FPConditionBit cd)
+ : Instruction(op | RJ(rj) | cd) {
+ MOZ_ASSERT(op == op_movgr2cf);
+ }
+ InstReg(OpcodeField op, AssemblerLOONG64::FPConditionBit cj, Register rd)
+ : Instruction(op | (cj << CJShift) | RD(rd)) {
+ MOZ_ASSERT(op == op_movcf2gr);
+ }
+ InstReg(OpcodeField op, int32_t cond, FloatRegister fk, FloatRegister fj,
+ AssemblerLOONG64::FPConditionBit cd)
+ : Instruction(op | (cond & CONDMask) << CONDShift | FK(fk) | FJ(fj) |
+ (cd & RDMask)) {
+ MOZ_ASSERT(is_uintN(cond, 5));
+ }
+
+ uint32_t extractRK() {
+ return extractBitField(RKShift + RKBits - 1, RKShift);
+ }
+ uint32_t extractRJ() {
+ return extractBitField(RJShift + RJBits - 1, RJShift);
+ }
+ uint32_t extractRD() {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractSA2() {
+ return extractBitField(SAShift + SA2Bits - 1, SAShift);
+ }
+ uint32_t extractSA3() {
+ return extractBitField(SAShift + SA3Bits - 1, SAShift);
+ }
+};
+
+// Class for branch, load and store instructions with immediate offset.
+class InstImm : public Instruction {
+ public:
+ void extractImm16(BOffImm16* dest);
+ uint32_t genImm(int32_t value, uint32_t value_bits) {
+ uint32_t imm = value & Imm5Mask;
+ if (value_bits == 6) {
+ imm = value & Imm6Mask;
+ } else if (value_bits == 12) {
+ imm = value & Imm12Mask;
+ } else if (value_bits == 14) {
+ imm = value & Imm14Mask;
+ }
+
+ return imm;
+ }
+
+ InstImm(OpcodeField op, int32_t value, Register rj, Register rd,
+ uint32_t value_bits)
+ : Instruction(op | genImm(value, value_bits) << RKShift | RJ(rj) |
+ RD(rd)) {
+ MOZ_ASSERT(value_bits == 5 || value_bits == 6 || value_bits == 12 ||
+ value_bits == 14);
+ }
+ InstImm(OpcodeField op, BOffImm16 off, Register rj, Register rd)
+ : Instruction(op | (off.encode() & Imm16Mask) << Imm16Shift | RJ(rj) |
+ RD(rd)) {}
+ InstImm(OpcodeField op, int32_t si21, Register rj, bool NotHasRd)
+ : Instruction(NotHasRd ? op | (si21 & Imm16Mask) << RKShift | RJ(rj) |
+ (si21 & Imm21Mask) >> 16
+ : op | (si21 & Imm20Mask) << Imm20Shift | RD(rj)) {
+ if (NotHasRd) {
+ MOZ_ASSERT(op == op_beqz || op == op_bnez);
+ MOZ_ASSERT(is_intN(si21, 21));
+ } else {
+ MOZ_ASSERT(op == op_lu12i_w || op == op_lu32i_d || op == op_pcaddi ||
+ op == op_pcaddu12i || op == op_pcaddu18i ||
+ op == op_pcalau12i);
+ // si20
+ MOZ_ASSERT(is_intN(si21, 20) || is_uintN(si21, 20));
+ }
+ }
+ InstImm(OpcodeField op, int32_t si21, AssemblerLOONG64::FPConditionBit cj,
+ bool isNotEqual)
+ : Instruction(isNotEqual
+ ? op | (si21 & Imm16Mask) << RKShift |
+ (cj + 8) << CJShift | (si21 & Imm21Mask) >> 16
+ : op | (si21 & Imm16Mask) << RKShift | cj << CJShift |
+ (si21 & Imm21Mask) >> 16) {
+ MOZ_ASSERT(is_intN(si21, 21));
+ MOZ_ASSERT(op == op_bcz);
+ MOZ_ASSERT(cj >= 0 && cj <= 7);
+ }
+ InstImm(OpcodeField op, Imm16 off, Register rj, Register rd)
+ : Instruction(op | (off.encode() & Imm16Mask) << Imm16Shift | RJ(rj) |
+ RD(rd)) {}
+ InstImm(OpcodeField op, int32_t bit15)
+ : Instruction(op | (bit15 & Imm15Mask)) {
+ MOZ_ASSERT(is_uintN(bit15, 15));
+ }
+
+ InstImm(OpcodeField op, int32_t bit26, bool jump)
+ : Instruction(op | (bit26 & Imm16Mask) << Imm16Shift |
+ (bit26 & Imm26Mask) >> 16) {
+ MOZ_ASSERT(is_intN(bit26, 26));
+ }
+ InstImm(OpcodeField op, int32_t si12, Register rj, int32_t hint)
+ : Instruction(op | (si12 & Imm12Mask) << Imm12Shift | RJ(rj) |
+ (hint & RDMask)) {
+ MOZ_ASSERT(op == op_preld);
+ }
+ InstImm(OpcodeField op, int32_t msb, int32_t lsb, Register rj, Register rd,
+ uint32_t sb_bits)
+ : Instruction((sb_bits == 5)
+ ? op | (msb & MSBWMask) << MSBWShift |
+ (lsb & LSBWMask) << LSBWShift | RJ(rj) | RD(rd)
+ : op | (msb & MSBDMask) << MSBDShift |
+ (lsb & LSBDMask) << LSBDShift | RJ(rj) | RD(rd)) {
+ MOZ_ASSERT(sb_bits == 5 || sb_bits == 6);
+ MOZ_ASSERT(op == op_bstr_w || op == op_bstrins_d || op == op_bstrpick_d);
+ }
+ InstImm(OpcodeField op, int32_t msb, int32_t lsb, Register rj, Register rd)
+ : Instruction(op | (msb & MSBWMask) << MSBWShift |
+ ((lsb + 0x20) & LSBDMask) << LSBWShift | RJ(rj) | RD(rd)) {
+ MOZ_ASSERT(op == op_bstr_w);
+ }
+
+ // For floating-point loads and stores.
+ InstImm(OpcodeField op, int32_t si12, Register rj, FloatRegister fd)
+ : Instruction(op | (si12 & Imm12Mask) << Imm12Shift | RJ(rj) | FD(fd)) {
+ MOZ_ASSERT(is_intN(si12, 12));
+ }
+
+ void setOpcode(OpcodeField op, uint32_t opBits) {
+ // opBits not greater than 24.
+ MOZ_ASSERT(opBits < 25);
+ uint32_t OpcodeShift = 32 - opBits;
+ uint32_t OpcodeMask = ((1 << opBits) - 1) << OpcodeShift;
+ data = (data & ~OpcodeMask) | op;
+ }
+ uint32_t extractRK() {
+ return extractBitField(RKShift + RKBits - 1, RKShift);
+ }
+ uint32_t extractRJ() {
+ return extractBitField(RJShift + RJBits - 1, RJShift);
+ }
+ void setRJ(uint32_t rj) { data = (data & ~RJMask) | (rj << RJShift); }
+ uint32_t extractRD() {
+ return extractBitField(RDShift + RDBits - 1, RDShift);
+ }
+ uint32_t extractImm16Value() {
+ return extractBitField(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+ void setBOffImm16(BOffImm16 off) {
+ // Reset immediate field and replace it
+ data = (data & ~BOffImm16Mask) | (off.encode() << Imm16Shift);
+ }
+ void setImm21(int32_t off) {
+ // Reset immediate field and replace it
+ uint32_t low16 = (off >> 2) & Imm16Mask;
+ int32_t high5 = (off >> 18) & Imm5Mask;
+ uint32_t fcc_info = (data >> 5) & 0x1F;
+ data = (data & ~BOffImm26Mask) | (low16 << Imm16Shift) | high5 |
+ (fcc_info << 5);
+ }
+};
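+
+// Note on the branch encodings above: a 21-bit conditional-branch offset is
+// split across two fields of the instruction word, offs[15:0] in bits
+// [25:10] and offs[20:16] in bits [4:0]; the 26-bit unconditional offset
+// additionally puts offs[25:16] in bits [9:0]. A minimal sketch of the
+// 21-bit split (hypothetical helper, assuming the standard LoongArch
+// layout, i.e. Imm16Shift == 10):
+//
+//   uint32_t EncodeOffs21(int32_t offs) {  // offs in instruction units
+//     uint32_t low16 = uint32_t(offs) & 0xffff;
+//     uint32_t high5 = (uint32_t(offs) >> 16) & 0x1f;
+//     return (low16 << 10) | high5;
+//   }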
+
+// Class for Jump type instructions.
+class InstJump : public Instruction {
+ public:
+ InstJump(OpcodeField op, JOffImm26 off)
+ : Instruction(op | (off.encode() & Imm16Mask) << Imm16Shift |
+ (off.encode() & Imm26Mask) >> 16) {
+ MOZ_ASSERT(op == op_b || op == op_bl);
+ }
+
+ void setJOffImm26(JOffImm26 off) {
+    // Reset the immediate field and replace it.
+ data = (data & ~BOffImm26Mask) |
+ ((off.encode() & Imm16Mask) << Imm16Shift) |
+ ((off.encode() >> 16) & 0x3ff);
+ }
+ uint32_t extractImm26Value() {
+ return extractBitField(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+};
+
+class ABIArgGenerator {
+ public:
+ ABIArgGenerator()
+ : intRegIndex_(0), floatRegIndex_(0), stackOffset_(0), current_() {}
+
+ ABIArg next(MIRType argType);
+ ABIArg& current() { return current_; }
+ uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
+ void increaseStackOffset(uint32_t bytes) { stackOffset_ += bytes; }
+
+ protected:
+ unsigned intRegIndex_;
+ unsigned floatRegIndex_;
+ uint32_t stackOffset_;
+ ABIArg current_;
+};
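+
+// Typical use of ABIArgGenerator (an illustrative sketch; 'argTypes' is a
+// hypothetical container): walk a call signature once and let the generator
+// assign each argument a register or a stack offset:
+//
+//   ABIArgGenerator abi;
+//   for (MIRType type : argTypes) {
+//     ABIArg arg = abi.next(type);
+//     // ... place the value according to arg.kind() ...
+//   }
+//   uint32_t stackBytes = abi.stackBytesConsumedSoFar();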
+
+class Assembler : public AssemblerLOONG64 {
+ public:
+ Assembler() : AssemblerLOONG64() {}
+
+ static uintptr_t GetPointer(uint8_t*);
+
+ using AssemblerLOONG64::bind;
+
+ static void Bind(uint8_t* rawCode, const CodeLabel& label);
+
+ void processCodeLabels(uint8_t* rawCode);
+
+ static void TraceJumpRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+ static void TraceDataRelocations(JSTracer* trc, JitCode* code,
+ CompactBufferReader& reader);
+
+ void bind(InstImm* inst, uintptr_t branch, uintptr_t target);
+
+ // Copy the assembly code to the given buffer, and perform any pending
+ // relocations relying on the target address.
+ void executableCopy(uint8_t* buffer);
+
+ static uint32_t PatchWrite_NearCallSize();
+
+ static uint64_t ExtractLoad64Value(Instruction* inst0);
+ static void UpdateLoad64Value(Instruction* inst0, uint64_t value);
+ static void WriteLoad64Instructions(Instruction* inst0, Register reg,
+ uint64_t value);
+
+ static void PatchWrite_NearCall(CodeLocationLabel start,
+ CodeLocationLabel toCall);
+ static void PatchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue,
+ ImmPtr expectedValue);
+ static void PatchDataWithValueCheck(CodeLocationLabel label,
+ PatchedImmPtr newValue,
+ PatchedImmPtr expectedValue);
+
+ static uint64_t ExtractInstructionImmediate(uint8_t* code);
+
+ static void ToggleCall(CodeLocationLabel inst_, bool enabled);
+}; // Assembler
+
+static const uint32_t NumIntArgRegs = 8;
+static const uint32_t NumFloatArgRegs = 8;
+
+static inline bool GetIntArgReg(uint32_t usedIntArgs, Register* out) {
+ if (usedIntArgs < NumIntArgRegs) {
+ *out = Register::FromCode(a0.code() + usedIntArgs);
+ return true;
+ }
+ return false;
+}
+
+static inline bool GetFloatArgReg(uint32_t usedFloatArgs, FloatRegister* out) {
+ if (usedFloatArgs < NumFloatArgRegs) {
+ *out = FloatRegister::FromCode(f0.code() + usedFloatArgs);
+ return true;
+ }
+ return false;
+}
+
+// Get a register in which we plan to put a quantity that will be used as an
+// integer argument. This differs from GetIntArgReg in that if we have no more
+// actual argument registers to use we will fall back on using whatever
+// CallTempReg* don't overlap the argument registers, and only fail once those
+// run out too.
+static inline bool GetTempRegForIntArg(uint32_t usedIntArgs,
+ uint32_t usedFloatArgs, Register* out) {
+ // NOTE: We can't properly determine which regs are used if there are
+ // float arguments. If this is needed, we will have to guess.
+ MOZ_ASSERT(usedFloatArgs == 0);
+
+ if (GetIntArgReg(usedIntArgs, out)) {
+ return true;
+ }
+ // Unfortunately, we have to assume things about the point at which
+ // GetIntArgReg returns false, because we need to know how many registers it
+ // can allocate.
+ usedIntArgs -= NumIntArgRegs;
+ if (usedIntArgs >= NumCallTempNonArgRegs) {
+ return false;
+ }
+ *out = CallTempNonArgRegs[usedIntArgs];
+ return true;
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_Assembler_loong64_h */
diff --git a/js/src/jit/loong64/CodeGenerator-loong64.cpp b/js/src/jit/loong64/CodeGenerator-loong64.cpp
new file mode 100644
index 0000000000..4c4dfd18ff
--- /dev/null
+++ b/js/src/jit/loong64/CodeGenerator-loong64.cpp
@@ -0,0 +1,2796 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/CodeGenerator-loong64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsnum.h"
+
+#include "jit/CodeGenerator.h"
+#include "jit/InlineScriptTree.h"
+#include "jit/JitRuntime.h"
+#include "jit/MIR.h"
+#include "jit/MIRGraph.h"
+#include "vm/JSContext.h"
+#include "vm/Realm.h"
+#include "vm/Shape.h"
+
+#include "jit/shared/CodeGenerator-shared-inl.h"
+#include "vm/JSScript-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using JS::GenericNaN;
+using mozilla::FloorLog2;
+using mozilla::NegativeInfinity;
+
+// shared
+CodeGeneratorLOONG64::CodeGeneratorLOONG64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm)
+ : CodeGeneratorShared(gen, graph, masm) {}
+
+Operand CodeGeneratorLOONG64::ToOperand(const LAllocation& a) {
+ if (a.isGeneralReg()) {
+ return Operand(a.toGeneralReg()->reg());
+ }
+ if (a.isFloatReg()) {
+ return Operand(a.toFloatReg()->reg());
+ }
+ return Operand(ToAddress(a));
+}
+
+Operand CodeGeneratorLOONG64::ToOperand(const LAllocation* a) {
+ return ToOperand(*a);
+}
+
+Operand CodeGeneratorLOONG64::ToOperand(const LDefinition* def) {
+ return ToOperand(def->output());
+}
+
+#ifdef JS_PUNBOX64
+Operand CodeGeneratorLOONG64::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToOperand(input.value());
+}
+#else
+Register64 CodeGeneratorLOONG64::ToOperandOrRegister64(
+ const LInt64Allocation input) {
+ return ToRegister64(input);
+}
+#endif
+
+void CodeGeneratorLOONG64::branchToBlock(Assembler::FloatFormat fmt,
+ FloatRegister lhs, FloatRegister rhs,
+ MBasicBlock* mir,
+ Assembler::DoubleCondition cond) {
+ // Skip past trivial blocks.
+ Label* label = skipTrivialBlocks(mir)->lir()->label();
+ if (fmt == Assembler::DoubleFloat) {
+ masm.branchDouble(cond, lhs, rhs, label);
+ } else {
+ masm.branchFloat(cond, lhs, rhs, label);
+ }
+}
+
+void OutOfLineBailout::accept(CodeGeneratorLOONG64* codegen) {
+ codegen->visitOutOfLineBailout(this);
+}
+
+MoveOperand CodeGeneratorLOONG64::toMoveOperand(LAllocation a) const {
+ if (a.isGeneralReg()) {
+ return MoveOperand(ToRegister(a));
+ }
+ if (a.isFloatReg()) {
+ return MoveOperand(ToFloatRegister(a));
+ }
+ MoveOperand::Kind kind = a.isStackArea() ? MoveOperand::Kind::EffectiveAddress
+ : MoveOperand::Kind::Memory;
+ Address address = ToAddress(a);
+ MOZ_ASSERT((address.offset & 3) == 0);
+
+ return MoveOperand(address, kind);
+}
+
+void CodeGeneratorLOONG64::bailoutFrom(Label* label, LSnapshot* snapshot) {
+ MOZ_ASSERT_IF(!masm.oom(), label->used());
+ MOZ_ASSERT_IF(!masm.oom(), !label->bound());
+
+ encode(snapshot);
+
+ InlineScriptTree* tree = snapshot->mir()->block()->trackedTree();
+ OutOfLineBailout* ool = new (alloc()) OutOfLineBailout(snapshot);
+ addOutOfLineCode(ool,
+ new (alloc()) BytecodeSite(tree, tree->script()->code()));
+
+ masm.retarget(label, ool->entry());
+}
+
+void CodeGeneratorLOONG64::bailout(LSnapshot* snapshot) {
+ Label label;
+ masm.jump(&label);
+ bailoutFrom(&label, snapshot);
+}
+
+bool CodeGeneratorLOONG64::generateOutOfLineCode() {
+ if (!CodeGeneratorShared::generateOutOfLineCode()) {
+ return false;
+ }
+
+ if (deoptLabel_.used()) {
+ // All non-table-based bailouts will go here.
+ masm.bind(&deoptLabel_);
+
+ // Push the frame size, so the handler can recover the IonScript.
+ // Frame size is stored in 'ra' and pushed by GenerateBailoutThunk
+ // We have to use 'ra' because generateBailoutTable will implicitly do
+ // the same.
+ masm.move32(Imm32(frameSize()), ra);
+
+ TrampolinePtr handler = gen->jitRuntime()->getGenericBailoutHandler();
+ masm.jump(handler);
+ }
+
+ return !masm.oom();
+}
+
+class js::jit::OutOfLineTableSwitch
+ : public OutOfLineCodeBase<CodeGeneratorLOONG64> {
+ MTableSwitch* mir_;
+ CodeLabel jumpLabel_;
+
+ void accept(CodeGeneratorLOONG64* codegen) {
+ codegen->visitOutOfLineTableSwitch(this);
+ }
+
+ public:
+ OutOfLineTableSwitch(MTableSwitch* mir) : mir_(mir) {}
+
+ MTableSwitch* mir() const { return mir_; }
+
+ CodeLabel* jumpLabel() { return &jumpLabel_; }
+};
+
+void CodeGeneratorLOONG64::emitTableSwitchDispatch(MTableSwitch* mir,
+ Register index,
+ Register base) {
+ Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();
+
+  // Subtract the low bound so the index is zero-based.
+ if (mir->low() != 0) {
+ masm.subPtr(Imm32(mir->low()), index);
+ }
+
+  // Jump to the default case if the input is out of range.
+ int32_t cases = mir->numCases();
+ masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);
+
+ // To fill in the CodeLabels for the case entries, we need to first
+ // generate the case entries (we don't yet know their offsets in the
+ // instruction stream).
+ OutOfLineTableSwitch* ool = new (alloc()) OutOfLineTableSwitch(mir);
+ addOutOfLineCode(ool, mir);
+
+  // Compute the address where the pointer to the right case is stored.
+ masm.ma_li(base, ool->jumpLabel());
+
+ BaseIndex pointer(base, index, ScalePointer);
+
+ // Jump to the right case
+ masm.branchToComputedAddress(pointer);
+}
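+
+// The dispatch above is an indirect jump through a table of absolute code
+// addresses. Roughly, in C (illustrative only; 'table' and 'numCases' are
+// hypothetical, and the last line uses the GCC computed-goto extension):
+//
+//   index -= low;                       // rebase the index to zero
+//   if (uint32_t(index) >= numCases) {  // unsigned compare catches index < 0
+//     goto defaultCase;
+//   }
+//   goto *table[index];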
+
+template <typename T>
+void CodeGeneratorLOONG64::emitWasmLoad(T* lir) {
+ const MWasmLoad* mir = lir->mir();
+ SecondScratchRegisterScope scratch2(masm);
+
+ Register memoryBase = ToRegister(lir->memoryBase());
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ masm.wasmLoad(mir->access(), memoryBase, ptr, ptrScratch,
+ ToAnyRegister(lir->output()));
+}
+
+template <typename T>
+void CodeGeneratorLOONG64::emitWasmStore(T* lir) {
+ const MWasmStore* mir = lir->mir();
+ SecondScratchRegisterScope scratch2(masm);
+
+ Register memoryBase = ToRegister(lir->memoryBase());
+ Register ptr = ToRegister(lir->ptr());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ if (mir->base()->type() == MIRType::Int32) {
+ masm.move32To64ZeroExtend(ptr, Register64(scratch2));
+ ptr = scratch2;
+ ptrScratch = ptrScratch != InvalidReg ? scratch2 : InvalidReg;
+ }
+
+ // ptr is a GPR and is either a 32-bit value zero-extended to 64-bit, or a
+ // true 64-bit value.
+ masm.wasmStore(mir->access(), ToAnyRegister(lir->value()), memoryBase, ptr,
+ ptrScratch);
+}
+
+void CodeGeneratorLOONG64::generateInvalidateEpilogue() {
+ // Ensure that there is enough space in the buffer for the OsiPoint
+ // patching to occur. Otherwise, we could overwrite the invalidation
+  // epilogue.
+ for (size_t i = 0; i < sizeof(void*); i += Assembler::NopSize()) {
+ masm.nop();
+ }
+
+ masm.bind(&invalidate_);
+
+  // Push the return address of the point at which we bailed out onto the
+  // stack.
+ masm.Push(ra);
+
+ // Push the Ion script onto the stack (when we determine what that
+ // pointer is).
+ invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
+
+ // Jump to the invalidator which will replace the current frame.
+ TrampolinePtr thunk = gen->jitRuntime()->getInvalidationThunk();
+
+ masm.jump(thunk);
+}
+
+void CodeGeneratorLOONG64::visitOutOfLineBailout(OutOfLineBailout* ool) {
+ // Push snapshotOffset and make sure stack is aligned.
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()),
+ Address(StackPointer, 0));
+
+ masm.jump(&deoptLabel_);
+}
+
+void CodeGeneratorLOONG64::visitOutOfLineTableSwitch(
+ OutOfLineTableSwitch* ool) {
+ MTableSwitch* mir = ool->mir();
+
+ masm.haltingAlign(sizeof(void*));
+ masm.bind(ool->jumpLabel());
+ masm.addCodeLabel(*ool->jumpLabel());
+
+ for (size_t i = 0; i < mir->numCases(); i++) {
+ LBlock* caseblock = skipTrivialBlocks(mir->getCase(i))->lir();
+ Label* caseheader = caseblock->label();
+ uint32_t caseoffset = caseheader->offset();
+
+ // The entries of the jump table need to be absolute addresses and thus
+ // must be patched after codegen is finished.
+ CodeLabel cl;
+ masm.writeCodePointer(&cl);
+ cl.target()->bind(caseoffset);
+ masm.addCodeLabel(cl);
+ }
+}
+
+void CodeGeneratorLOONG64::visitOutOfLineWasmTruncateCheck(
+ OutOfLineWasmTruncateCheck* ool) {
+ if (ool->toType() == MIRType::Int32) {
+ masm.outOfLineWasmTruncateToInt32Check(
+ ool->input(), ool->output(), ool->fromType(), ool->flags(),
+ ool->rejoin(), ool->bytecodeOffset());
+ } else {
+ MOZ_ASSERT(ool->toType() == MIRType::Int64);
+ masm.outOfLineWasmTruncateToInt64Check(
+ ool->input(), ool->output64(), ool->fromType(), ool->flags(),
+ ool->rejoin(), ool->bytecodeOffset());
+ }
+}
+
+ValueOperand CodeGeneratorLOONG64::ToValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getOperand(pos)));
+}
+
+ValueOperand CodeGeneratorLOONG64::ToTempValue(LInstruction* ins, size_t pos) {
+ return ValueOperand(ToRegister(ins->getTemp(pos)));
+}
+
+void CodeGenerator::visitBox(LBox* box) {
+ const LAllocation* in = box->getOperand(0);
+ ValueOperand result = ToOutValue(box);
+
+ masm.moveValue(TypedOrValueRegister(box->type(), ToAnyRegister(in)), result);
+}
+
+void CodeGenerator::visitUnbox(LUnbox* unbox) {
+ MUnbox* mir = unbox->mir();
+
+ Register result = ToRegister(unbox->output());
+
+ if (mir->fallible()) {
+ const ValueOperand value = ToValue(unbox, LUnbox::Input);
+ Label bail;
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.fallibleUnboxInt32(value, result, &bail);
+ break;
+ case MIRType::Boolean:
+ masm.fallibleUnboxBoolean(value, result, &bail);
+ break;
+ case MIRType::Object:
+ masm.fallibleUnboxObject(value, result, &bail);
+ break;
+ case MIRType::String:
+ masm.fallibleUnboxString(value, result, &bail);
+ break;
+ case MIRType::Symbol:
+ masm.fallibleUnboxSymbol(value, result, &bail);
+ break;
+ case MIRType::BigInt:
+ masm.fallibleUnboxBigInt(value, result, &bail);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ bailoutFrom(&bail, unbox->snapshot());
+ return;
+ }
+
+ LAllocation* input = unbox->getOperand(LUnbox::Input);
+ if (input->isRegister()) {
+ Register inputReg = ToRegister(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputReg, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputReg, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputReg, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputReg, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputReg, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputReg, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+ return;
+ }
+
+ Address inputAddr = ToAddress(input);
+ switch (mir->type()) {
+ case MIRType::Int32:
+ masm.unboxInt32(inputAddr, result);
+ break;
+ case MIRType::Boolean:
+ masm.unboxBoolean(inputAddr, result);
+ break;
+ case MIRType::Object:
+ masm.unboxObject(inputAddr, result);
+ break;
+ case MIRType::String:
+ masm.unboxString(inputAddr, result);
+ break;
+ case MIRType::Symbol:
+ masm.unboxSymbol(inputAddr, result);
+ break;
+ case MIRType::BigInt:
+ masm.unboxBigInt(inputAddr, result);
+ break;
+ default:
+ MOZ_CRASH("Given MIRType cannot be unboxed.");
+ }
+}
+
+void CodeGeneratorLOONG64::splitTagForTest(const ValueOperand& value,
+ ScratchTagScope& tag) {
+ masm.splitTag(value.valueReg(), tag);
+}
+
+void CodeGenerator::visitCompareI64(LCompareI64* lir) {
+ MCompare* mir = lir->mir();
+ const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+ MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+ type == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ Register output = ToRegister(lir->output());
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ masm.cmpPtrSet(cond, lhsReg, ImmWord(ToInt64(rhs)), output);
+ } else if (rhs.value().isGeneralReg()) {
+ masm.cmpPtrSet(cond, lhsReg, ToRegister64(rhs).reg, output);
+ } else {
+ masm.cmpPtrSet(cond, lhsReg, ToAddress(rhs.value()), output);
+ }
+}
+
+void CodeGenerator::visitCompareI64AndBranch(LCompareI64AndBranch* lir) {
+ MCompare* mir = lir->cmpMir();
+ const mozilla::DebugOnly<MCompare::CompareType> type = mir->compareType();
+ MOZ_ASSERT(type == MCompare::Compare_Int64 ||
+ type == MCompare::Compare_UInt64);
+
+ const LInt64Allocation lhs = lir->getInt64Operand(LCompareI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LCompareI64::Rhs);
+ Register lhsReg = ToRegister64(lhs).reg;
+ bool isSigned = mir->compareType() == MCompare::Compare_Int64;
+ Assembler::Condition cond = JSOpToCondition(lir->jsop(), isSigned);
+
+ if (IsConstant(rhs)) {
+ emitBranch(lhsReg, ImmWord(ToInt64(rhs)), cond, lir->ifTrue(),
+ lir->ifFalse());
+ } else if (rhs.value().isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister64(rhs).reg, cond, lir->ifTrue(),
+ lir->ifFalse());
+ } else {
+ emitBranch(lhsReg, ToAddress(rhs.value()), cond, lir->ifTrue(),
+ lir->ifFalse());
+ }
+}
+
+void CodeGenerator::visitCompare(LCompare* comp) {
+ MCompare* mir = comp->mir();
+ Assembler::Condition cond = JSOpToCondition(mir->compareType(), comp->jsop());
+ const LAllocation* left = comp->getOperand(0);
+ const LAllocation* right = comp->getOperand(1);
+ const LDefinition* def = comp->getDef(0);
+
+ if (mir->compareType() == MCompare::Compare_Object ||
+ mir->compareType() == MCompare::Compare_Symbol ||
+ mir->compareType() == MCompare::Compare_UIntPtr ||
+ mir->compareType() == MCompare::Compare_WasmAnyRef) {
+ if (right->isConstant()) {
+ MOZ_ASSERT(mir->compareType() == MCompare::Compare_UIntPtr);
+ masm.cmpPtrSet(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmpPtrSet(cond, ToRegister(left), ToRegister(right),
+ ToRegister(def));
+ } else {
+ masm.cmpPtrSet(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+ return;
+ }
+
+ if (right->isConstant()) {
+ masm.cmp32Set(cond, ToRegister(left), Imm32(ToInt32(right)),
+ ToRegister(def));
+ } else if (right->isGeneralReg()) {
+ masm.cmp32Set(cond, ToRegister(left), ToRegister(right), ToRegister(def));
+ } else {
+ masm.cmp32Set(cond, ToRegister(left), ToAddress(right), ToRegister(def));
+ }
+}
+
+void CodeGenerator::visitCompareAndBranch(LCompareAndBranch* comp) {
+ const MCompare* mir = comp->cmpMir();
+ const MCompare::CompareType type = mir->compareType();
+ const LAllocation* lhs = comp->left();
+ const LAllocation* rhs = comp->right();
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+ Register lhsReg = ToRegister(lhs);
+ const Assembler::Condition cond = JSOpToCondition(type, comp->jsop());
+
+ if (type == MCompare::Compare_Object || type == MCompare::Compare_Symbol ||
+ type == MCompare::Compare_UIntPtr ||
+ type == MCompare::Compare_WasmAnyRef) {
+ if (rhs->isConstant()) {
+ emitBranch(ToRegister(lhs), Imm32(ToInt32(rhs)), cond, ifTrue, ifFalse);
+ } else if (rhs->isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+ } else {
+ MOZ_CRASH("NYI");
+ }
+ return;
+ }
+
+ if (rhs->isConstant()) {
+ emitBranch(lhsReg, Imm32(ToInt32(comp->right())), cond, ifTrue, ifFalse);
+ } else if (comp->right()->isGeneralReg()) {
+ emitBranch(lhsReg, ToRegister(rhs), cond, ifTrue, ifFalse);
+ } else {
+    // TODO(loong64): emitBranch with 32-bit comparison
+ ScratchRegisterScope scratch(masm);
+ masm.load32(ToAddress(rhs), scratch);
+ emitBranch(lhsReg, Register(scratch), cond, ifTrue, ifFalse);
+ }
+}
+
+void CodeGenerator::visitDivOrModI64(LDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Handle divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ // Handle an integer overflow exception from INT64_MIN / -1.
+ if (lir->canBeNegativeOverflow()) {
+ Label notOverflow;
+ masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notOverflow);
+ masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notOverflow);
+ if (lir->mir()->isMod()) {
+ masm.as_xor(output, output, output);
+ } else {
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, lir->bytecodeOffset());
+ }
+ masm.jump(&done);
+ masm.bind(&notOverflow);
+ }
+
+ if (lir->mir()->isMod()) {
+ masm.as_mod_d(output, lhs, rhs);
+ } else {
+ masm.as_div_d(output, lhs, rhs);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitUDivOrModI64(LUDivOrModI64* lir) {
+ Register lhs = ToRegister(lir->lhs());
+ Register rhs = ToRegister(lir->rhs());
+ Register output = ToRegister(lir->output());
+
+ Label done;
+
+ // Prevent divide by zero.
+ if (lir->canBeDivideByZero()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, lir->bytecodeOffset());
+ masm.bind(&nonZero);
+ }
+
+ if (lir->mir()->isMod()) {
+ masm.as_mod_du(output, lhs, rhs);
+ } else {
+ masm.as_div_du(output, lhs, rhs);
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGeneratorLOONG64::emitBigIntDiv(LBigIntDiv* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+ masm.as_div_d(/* result= */ dividend, dividend, divisor);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGeneratorLOONG64::emitBigIntMod(LBigIntMod* ins, Register dividend,
+ Register divisor, Register output,
+ Label* fail) {
+ // Callers handle division by zero and integer overflow.
+ masm.as_mod_d(/* result= */ dividend, dividend, divisor);
+
+ // Create and return the result.
+ masm.newGCBigInt(output, divisor, initialBigIntHeap(), fail);
+ masm.initializeBigInt(output, dividend);
+}
+
+void CodeGenerator::visitWasmLoadI64(LWasmLoadI64* lir) {
+ const MWasmLoad* mir = lir->mir();
+
+ Register memoryBase = ToRegister(lir->memoryBase());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ masm.wasmLoadI64(mir->access(), memoryBase, ptrReg, ptrScratch,
+ ToOutRegister64(lir));
+}
+
+void CodeGenerator::visitWasmStoreI64(LWasmStoreI64* lir) {
+ const MWasmStore* mir = lir->mir();
+
+ Register memoryBase = ToRegister(lir->memoryBase());
+ Register ptrScratch = InvalidReg;
+ if (!lir->ptrCopy()->isBogusTemp()) {
+ ptrScratch = ToRegister(lir->ptrCopy());
+ }
+
+ Register ptrReg = ToRegister(lir->ptr());
+ if (mir->base()->type() == MIRType::Int32) {
+ // See comment in visitWasmLoad re the type of 'base'.
+ masm.move32ZeroExtendToPtr(ptrReg, ptrReg);
+ }
+
+ masm.wasmStoreI64(mir->access(), ToRegister64(lir->value()), memoryBase,
+ ptrReg, ptrScratch);
+}
+
+void CodeGenerator::visitWasmSelectI64(LWasmSelectI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+
+ Register cond = ToRegister(lir->condExpr());
+ const LInt64Allocation falseExpr = lir->falseExpr();
+
+ Register64 out = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lir->trueExpr()) == out,
+ "true expr is reused for input");
+
+ if (falseExpr.value().isRegister()) {
+ masm.moveIfZero(out.reg, ToRegister(falseExpr.value()), cond);
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+ masm.loadPtr(ToAddress(falseExpr.value()), out.reg);
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmReinterpretFromI64(LWasmReinterpretFromI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Double);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Int64);
+ masm.as_movgr2fr_d(ToFloatRegister(lir->output()), ToRegister(lir->input()));
+}
+
+void CodeGenerator::visitWasmReinterpretToI64(LWasmReinterpretToI64* lir) {
+ MOZ_ASSERT(lir->mir()->type() == MIRType::Int64);
+ MOZ_ASSERT(lir->mir()->input()->type() == MIRType::Double);
+ masm.as_movfr2gr_d(ToRegister(lir->output()), ToFloatRegister(lir->input()));
+}
+
+void CodeGenerator::visitExtendInt32ToInt64(LExtendInt32ToInt64* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->isUnsigned()) {
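+    // bstrpick.d 31..0 extracts the low 32 bits, zero-extended.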
+ masm.as_bstrpick_d(output, ToRegister(input), 31, 0);
+ } else {
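+    // slli.w by zero sign-extends the 32-bit value to 64 bits.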
+ masm.as_slli_w(output, ToRegister(input), 0);
+ }
+}
+
+void CodeGenerator::visitWrapInt64ToInt32(LWrapInt64ToInt32* lir) {
+ const LAllocation* input = lir->getOperand(0);
+ Register output = ToRegister(lir->output());
+
+ if (lir->mir()->bottomHalf()) {
+ if (input->isMemory()) {
+ masm.load32(ToAddress(input), output);
+ } else {
+ masm.as_slli_w(output, ToRegister(input), 0);
+ }
+ } else {
+ MOZ_CRASH("Not implemented.");
+ }
+}
+
+void CodeGenerator::visitSignExtendInt64(LSignExtendInt64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ switch (lir->mode()) {
+ case MSignExtendInt64::Byte:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move8SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Half:
+ masm.move32To64SignExtend(input.reg, output);
+ masm.move16SignExtend(output.reg, output.reg);
+ break;
+ case MSignExtendInt64::Word:
+ masm.move32To64SignExtend(input.reg, output);
+ break;
+ }
+}
+
+void CodeGenerator::visitWasmExtendU32Index(LWasmExtendU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move32To64ZeroExtend(input, Register64(output));
+}
+
+void CodeGenerator::visitWasmWrapU32Index(LWasmWrapU32Index* lir) {
+ Register input = ToRegister(lir->input());
+ Register output = ToRegister(lir->output());
+ MOZ_ASSERT(input == output);
+ masm.move64To32(Register64(input), output);
+}
+
+void CodeGenerator::visitClzI64(LClzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.clz64(input, output.reg);
+}
+
+void CodeGenerator::visitCtzI64(LCtzI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register64 output = ToOutRegister64(lir);
+ masm.ctz64(input, output.reg);
+}
+
+void CodeGenerator::visitNotI64(LNotI64* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ Register output = ToRegister(lir->output());
+
+ masm.ma_cmp_set(output, input.reg, zero, Assembler::Equal);
+}
+
+void CodeGenerator::visitWasmTruncateToInt64(LWasmTruncateToInt64* lir) {
+ FloatRegister input = ToFloatRegister(lir->input());
+ Register64 output = ToOutRegister64(lir);
+
+ MWasmTruncateToInt64* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ Label* oolRejoin = ool->rejoin();
+ bool isSaturating = mir->isSaturating();
+
+ if (fromType == MIRType::Double) {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateDoubleToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateDoubleToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ } else {
+ if (mir->isUnsigned()) {
+ masm.wasmTruncateFloat32ToUInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ } else {
+ masm.wasmTruncateFloat32ToInt64(input, output, isSaturating, oolEntry,
+ oolRejoin, InvalidFloatReg);
+ }
+ }
+}
+
+void CodeGenerator::visitInt64ToFloatingPoint(LInt64ToFloatingPoint* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ FloatRegister output = ToFloatRegister(lir->output());
+
+ MIRType outputType = lir->mir()->type();
+ MOZ_ASSERT(outputType == MIRType::Double || outputType == MIRType::Float32);
+
+ if (outputType == MIRType::Double) {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToDouble(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToDouble(input, output);
+ }
+ } else {
+ if (lir->mir()->isUnsigned()) {
+ masm.convertUInt64ToFloat32(input, output, Register::Invalid());
+ } else {
+ masm.convertInt64ToFloat32(input, output);
+ }
+ }
+}
+
+void CodeGenerator::visitTestI64AndBranch(LTestI64AndBranch* lir) {
+ Register64 input = ToRegister64(lir->getInt64Operand(0));
+ MBasicBlock* ifTrue = lir->ifTrue();
+ MBasicBlock* ifFalse = lir->ifFalse();
+
+ emitBranch(input.reg, Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitTestIAndBranch(LTestIAndBranch* test) {
+ const LAllocation* opd = test->getOperand(0);
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ emitBranch(ToRegister(opd), Imm32(0), Assembler::NonZero, ifTrue, ifFalse);
+}
+
+void CodeGenerator::visitMinMaxD(LMinMaxD* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxDouble(second, first, true);
+ } else {
+ masm.minDouble(second, first, true);
+ }
+}
+
+void CodeGenerator::visitMinMaxF(LMinMaxF* ins) {
+ FloatRegister first = ToFloatRegister(ins->first());
+ FloatRegister second = ToFloatRegister(ins->second());
+
+ MOZ_ASSERT(first == ToFloatRegister(ins->output()));
+
+ if (ins->mir()->isMax()) {
+ masm.maxFloat32(second, first, true);
+ } else {
+ masm.minFloat32(second, first, true);
+ }
+}
+
+void CodeGenerator::visitAddI(LAddI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+  // If there is no snapshot, we don't need to check for overflow.
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_add_w(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_add_w(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_add32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitAddI64(LAddI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LAddI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LAddI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.add64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.add64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitSubI(LSubI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+
+ MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());
+
+  // If there is no snapshot, we don't need to check for overflow.
+
+ if (!ins->snapshot()) {
+ if (rhs->isConstant()) {
+ masm.ma_sub_w(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)));
+ } else {
+ masm.as_sub_w(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ }
+ return;
+ }
+
+ Label overflow;
+ if (rhs->isConstant()) {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ Imm32(ToInt32(rhs)), &overflow);
+ } else {
+ masm.ma_sub32TestOverflow(ToRegister(dest), ToRegister(lhs),
+ ToRegister(rhs), &overflow);
+ }
+
+ bailoutFrom(&overflow, ins->snapshot());
+}
+
+void CodeGenerator::visitSubI64(LSubI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LSubI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LSubI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (IsConstant(rhs)) {
+ masm.sub64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ return;
+ }
+
+ masm.sub64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+}
+
+void CodeGenerator::visitMulI(LMulI* ins) {
+ const LAllocation* lhs = ins->lhs();
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+ MMul* mul = ins->mir();
+
+ MOZ_ASSERT_IF(mul->mode() == MMul::Integer,
+ !mul->canBeNegativeZero() && !mul->canOverflow());
+
+ if (rhs->isConstant()) {
+ int32_t constant = ToInt32(rhs);
+ Register src = ToRegister(lhs);
+
+ // Bailout on -0.0
+ if (mul->canBeNegativeZero() && constant <= 0) {
+ Assembler::Condition cond =
+ (constant == 0) ? Assembler::LessThan : Assembler::Equal;
+ bailoutCmp32(cond, src, Imm32(0), ins->snapshot());
+ }
+
+ switch (constant) {
+ case -1:
+ if (mul->canOverflow()) {
+ bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN),
+ ins->snapshot());
+ }
+
+ masm.as_sub_w(dest, zero, src);
+ break;
+ case 0:
+ masm.move32(zero, dest);
+ break;
+ case 1:
+ masm.move32(src, dest);
+ break;
+ case 2:
+ if (mul->canOverflow()) {
+ Label mulTwoOverflow;
+ masm.ma_add32TestOverflow(dest, src, src, &mulTwoOverflow);
+
+ bailoutFrom(&mulTwoOverflow, ins->snapshot());
+ } else {
+ masm.as_add_w(dest, src, src);
+ }
+ break;
+ default:
+ uint32_t shift = FloorLog2(constant);
+
+ if (!mul->canOverflow() && (constant > 0)) {
+ // If it cannot overflow, we can do lots of optimizations.
+ uint32_t rest = constant - (1 << shift);
+
+ // See if the constant has one bit set, meaning it can be
+ // encoded as a bitshift.
+ if ((1 << shift) == constant) {
+ masm.as_slli_w(dest, src, shift % 32);
+ return;
+ }
+
+ // If the constant cannot be encoded as (1<<C1), see if it can
+ // be encoded as (1<<C1) | (1<<C2), which can be computed
+ // using an add and a shift.
+ uint32_t shift_rest = FloorLog2(rest);
+ if (src != dest && (1u << shift_rest) == rest) {
+ masm.as_slli_w(dest, src, (shift - shift_rest) % 32);
+ masm.add32(src, dest);
+ if (shift_rest != 0) {
+ masm.as_slli_w(dest, dest, shift_rest % 32);
+ }
+ return;
+ }
+ }
+
+ if (mul->canOverflow() && (constant > 0) && (src != dest)) {
+ // To stay on the safe side, only optimize things that are a
+ // power of 2.
+
+ if ((1 << shift) == constant) {
+ ScratchRegisterScope scratch(masm);
+ // dest = lhs * pow(2, shift)
+ masm.as_slli_w(dest, src, shift % 32);
+          // At runtime, check (lhs == dest >> shift); if this does
+          // not hold, some bits were lost due to overflow, and the
+          // computation should be resumed as a double.
+ masm.as_srai_w(scratch, dest, shift % 32);
+ bailoutCmp32(Assembler::NotEqual, src, Register(scratch),
+ ins->snapshot());
+ return;
+ }
+ }
+
+ if (mul->canOverflow()) {
+ Label mulConstOverflow;
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
+ &mulConstOverflow);
+
+ bailoutFrom(&mulConstOverflow, ins->snapshot());
+ } else {
+ masm.ma_mul(dest, src, Imm32(ToInt32(rhs)));
+ }
+ break;
+ }
+ } else {
+ Label multRegOverflow;
+
+ if (mul->canOverflow()) {
+ masm.ma_mul32TestOverflow(dest, ToRegister(lhs), ToRegister(rhs),
+ &multRegOverflow);
+ bailoutFrom(&multRegOverflow, ins->snapshot());
+ } else {
+ masm.as_mul_w(dest, ToRegister(lhs), ToRegister(rhs));
+ }
+
+ if (mul->canBeNegativeZero()) {
+ Label done;
+ masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);
+
+      // The result is -0 if lhs or rhs is negative.
+      // In that case the result must be a double value, so bail out.
+ Register scratch = SecondScratchReg;
+ masm.as_or(scratch, ToRegister(lhs), ToRegister(rhs));
+ bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot());
+
+ masm.bind(&done);
+ }
+ }
+}
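+
+// The constant-multiply cases above are standard strength reduction:
+// x * 2^k == x << k, and for constants with two set bits,
+// x * (2^a + 2^b) == ((x << (a - b)) + x) << b with a > b. A sketch of the
+// latter (hypothetical helper; unsigned so the wrap-around is well defined
+// and matches the non-overflow case above):
+//
+//   uint32_t MulShiftAdd(uint32_t x, uint32_t a, uint32_t b) {
+//     return ((x << (a - b)) + x) << b;
+//   }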
+
+void CodeGenerator::visitMulI64(LMulI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LMulI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LMulI64::Rhs);
+ const Register64 output = ToOutRegister64(lir);
+ MOZ_ASSERT(ToRegister64(lhs) == output);
+
+ if (IsConstant(rhs)) {
+ int64_t constant = ToInt64(rhs);
+ switch (constant) {
+ case -1:
+ masm.neg64(ToRegister64(lhs));
+ return;
+ case 0:
+ masm.xor64(ToRegister64(lhs), ToRegister64(lhs));
+ return;
+ case 1:
+ // nop
+ return;
+ case 2:
+ masm.as_add_d(output.reg, ToRegister64(lhs).reg, ToRegister64(lhs).reg);
+ return;
+ default:
+ if (constant > 0) {
+ if (mozilla::IsPowerOfTwo(static_cast<uint64_t>(constant + 1))) {
+ ScratchRegisterScope scratch(masm);
+ masm.movePtr(ToRegister64(lhs).reg, scratch);
+ masm.as_slli_d(output.reg, ToRegister64(lhs).reg,
+ FloorLog2(constant + 1));
+ masm.sub64(scratch, output);
+ return;
+ } else if (mozilla::IsPowerOfTwo(
+ static_cast<uint64_t>(constant - 1))) {
+ int32_t shift = mozilla::FloorLog2(constant - 1);
+ if (shift < 5) {
+ masm.as_alsl_d(output.reg, ToRegister64(lhs).reg,
+ ToRegister64(lhs).reg, shift - 1);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.movePtr(ToRegister64(lhs).reg, scratch);
+ masm.as_slli_d(output.reg, ToRegister64(lhs).reg, shift);
+ masm.add64(scratch, output);
+ }
+ return;
+ }
+          // Use a shift if the constant is a power of 2.
+ int32_t shift = mozilla::FloorLog2(constant);
+ if (int64_t(1) << shift == constant) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ return;
+ }
+ }
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(Imm64(constant), ToRegister64(lhs), temp);
+ }
+ } else {
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+ masm.mul64(ToOperandOrRegister64(rhs), ToRegister64(lhs), temp);
+ }
+}
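+
+// For 2^k +/- 1 multipliers the code above uses one shift and one add or
+// subtract: x * (2^k + 1) == (x << k) + x and x * (2^k - 1) == (x << k) - x.
+// For small k the shift and add fuse into a single alsl.d, which (assuming
+// the documented semantics) computes rd = (rj << (sa + 1)) + rk; that is
+// why 'shift - 1' is passed as the shift-amount field.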
+
+void CodeGenerator::visitDivI(LDivI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register temp = ToRegister(ins->getTemp(0));
+ MDiv* mir = ins->mir();
+
+ Label done;
+
+ // Handle divide by zero.
+ if (mir->canBeDivideByZero()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else if (mir->canTruncateInfinities()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot());
+ }
+ }
+
+ // Handle an integer overflow exception from -2147483648 / -1.
+ if (mir->canBeNegativeOverflow()) {
+ Label notMinInt;
+ masm.move32(Imm32(INT32_MIN), temp);
+ masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);
+
+ masm.move32(Imm32(-1), temp);
+ if (mir->trapOnError()) {
+ Label ok;
+ masm.ma_b(rhs, temp, &ok, Assembler::NotEqual);
+ masm.wasmTrap(wasm::Trap::IntegerOverflow, mir->bytecodeOffset());
+ masm.bind(&ok);
+ } else if (mir->canTruncateOverflow()) {
+ // (-INT32_MIN)|0 == INT32_MIN
+ Label skip;
+ masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(INT32_MIN), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot());
+ }
+ masm.bind(&notMinInt);
+ }
+
+ // Handle negative 0. (0/-Y)
+ if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
+ Label nonzero;
+ masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
+ bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot());
+ masm.bind(&nonzero);
+ }
+  // Note: the safety checks above could not be triggered in testing, as Ion
+  // seems to be smarter and requires double arithmetic in such cases.
+
+  // All checks passed. Let's call div.
+ if (mir->canTruncateRemainder()) {
+ masm.as_div_w(dest, lhs, rhs);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+
+ Label remainderNonZero;
+ masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
+ bailoutFrom(&remainderNonZero, ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitDivPowTwoI(LDivPowTwoI* ins) {
+ Register lhs = ToRegister(ins->numerator());
+ Register dest = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->getTemp(0));
+ int32_t shift = ins->shift();
+
+ if (shift != 0) {
+ MDiv* mir = ins->mir();
+ if (!mir->isTruncated()) {
+ // If the remainder is going to be != 0, bailout since this must
+ // be a double.
+ masm.as_slli_w(tmp, lhs, (32 - shift) % 32);
+ bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot());
+ }
+
+ if (!mir->canBeNegativeDividend()) {
+      // The numerator is never negative, so it needs no adjusting. Do the
+      // shift.
+ masm.as_srai_w(dest, lhs, shift % 32);
+ return;
+ }
+
+ // Adjust the value so that shifting produces a correctly rounded result
+ // when the numerator is negative. See 10-1 "Signed Division by a Known
+ // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
+ if (shift > 1) {
+ masm.as_srai_w(tmp, lhs, 31);
+ masm.as_srli_w(tmp, tmp, (32 - shift) % 32);
+ masm.add32(lhs, tmp);
+ } else {
+ masm.as_srli_w(tmp, lhs, (32 - shift) % 32);
+ masm.add32(lhs, tmp);
+ }
+
+ // Do the shift.
+ masm.as_srai_w(dest, tmp, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+}
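+
+// The adjustment above is the classic round-toward-zero correction for
+// signed division by a power of two: bias a negative numerator by
+// (2^shift - 1) before the arithmetic shift. A self-contained sketch
+// (hypothetical helper, not part of this file):
+//
+//   int32_t SignedDivPow2(int32_t x, uint32_t shift) {
+//     int32_t bias = (x >> 31) & ((1 << shift) - 1);  // 0 when x >= 0
+//     return (x + bias) >> shift;  // now rounds toward zero
+//   }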
+
+void CodeGenerator::visitModI(LModI* ins) {
+ // Extract the registers from this instruction
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register dest = ToRegister(ins->output());
+ Register callTemp = ToRegister(ins->callTemp());
+ MMod* mir = ins->mir();
+ Label done, prevent;
+
+ masm.move32(lhs, callTemp);
+
+  // Prevent INT_MIN % -1.
+ // The integer division will give INT_MIN, but we want -(double)INT_MIN.
+ if (mir->canBeNegativeDividend()) {
+ masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
+ if (mir->isTruncated()) {
+ // (INT_MIN % -1)|0 == 0
+ Label skip;
+ masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot());
+ }
+ masm.bind(&prevent);
+ }
+
+  // 0/X (with X < 0) is bad because both of these values *should* be
+  // doubles, and the result should be -0.0, which cannot be represented in
+  // integers. X/0 is bad because it will give garbage (or abort), when it
+  // should give either Infinity, -Infinity or NaN.
+
+  // Prevent 0 / X (with X < 0) and X / 0.
+  // To test X / Y, compare Y with 0. There are three cases:
+  // If (Y < 0), compare X with 0 and bail if X == 0.
+  // If (Y == 0), simply bail.
+  // If (Y > 0), don't bail.
+
+ if (mir->canBeDivideByZero()) {
+ if (mir->isTruncated()) {
+ if (mir->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, mir->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ Label skip;
+ masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ }
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ if (mir->canBeNegativeDividend()) {
+ Label notNegative;
+ masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
+ if (mir->isTruncated()) {
+ // NaN|0 == 0 and (0 % -X)|0 == 0
+ Label skip;
+ masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
+ masm.move32(Imm32(0), dest);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&skip);
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.bind(&notNegative);
+ }
+
+ masm.as_mod_w(dest, lhs, rhs);
+
+ // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
+ if (mir->canBeNegativeDividend()) {
+ if (mir->isTruncated()) {
+ // -0.0|0 == 0
+ } else {
+ MOZ_ASSERT(mir->fallible());
+ // See if X < 0
+ masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
+ bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot());
+ }
+ }
+ masm.bind(&done);
+}
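+
+// For reference, the JS semantics the checks above implement (all with |0
+// truncation): (INT32_MIN % -1)|0 == 0, (x % 0)|0 == 0 since x % 0 is NaN,
+// and (0 % y)|0 == 0 for y < 0, where the untruncated result would be -0.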
+
+void CodeGenerator::visitModPowTwoI(LModPowTwoI* ins) {
+ Register in = ToRegister(ins->getOperand(0));
+ Register out = ToRegister(ins->getDef(0));
+ MMod* mir = ins->mir();
+ Label negative, done;
+
+ masm.move32(in, out);
+ masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
+ // Switch based on sign of the lhs.
+  // Positive numbers just need a bitmask.
+ masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
+ {
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.ma_b(&done, ShortJump);
+ }
+
+  // Negative numbers need a negate, bitmask, negate.
+ {
+ masm.bind(&negative);
+ masm.neg32(out);
+ masm.and32(Imm32((1 << ins->shift()) - 1), out);
+ masm.neg32(out);
+ }
+ if (mir->canBeNegativeDividend()) {
+ if (!mir->isTruncated()) {
+ MOZ_ASSERT(mir->fallible());
+ bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot());
+ } else {
+ // -0|0 == 0
+ }
+ }
+ masm.bind(&done);
+}
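+
+// Sketch of the negate-mask-negate sequence above: a truncated x % 2^shift
+// keeps the sign of x (hypothetical helper; the JIT's neg32 wraps on
+// INT32_MIN, which this sketch excludes, and shift < 31):
+//
+//   int32_t ModPow2(int32_t x, uint32_t shift) {  // x != INT32_MIN
+//     int32_t mask = (1 << shift) - 1;
+//     return x < 0 ? -(-x & mask) : (x & mask);
+//   }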
+
+void CodeGenerator::visitModMaskI(LModMaskI* ins) {
+ Register src = ToRegister(ins->getOperand(0));
+ Register dest = ToRegister(ins->getDef(0));
+ Register tmp0 = ToRegister(ins->getTemp(0));
+ Register tmp1 = ToRegister(ins->getTemp(1));
+ MMod* mir = ins->mir();
+
+ if (!mir->isTruncated() && mir->canBeNegativeDividend()) {
+ MOZ_ASSERT(mir->fallible());
+
+ Label bail;
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), &bail);
+ bailoutFrom(&bail, ins->snapshot());
+ } else {
+ masm.ma_mod_mask(src, dest, tmp0, tmp1, ins->shift(), nullptr);
+ }
+}
+
+void CodeGenerator::visitBitNotI(LBitNotI* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ const LDefinition* dest = ins->getDef(0);
+ MOZ_ASSERT(!input->isConstant());
+
+ masm.as_nor(ToRegister(dest), ToRegister(input), zero);
+}
+
+void CodeGenerator::visitBitNotI64(LBitNotI64* ins) {
+ const LAllocation* input = ins->getOperand(0);
+ MOZ_ASSERT(!input->isConstant());
+ Register inputReg = ToRegister(input);
+ MOZ_ASSERT(inputReg == ToRegister(ins->output()));
+ masm.as_nor(inputReg, inputReg, zero);
+}
+
+void CodeGenerator::visitBitOpI(LBitOpI* ins) {
+ const LAllocation* lhs = ins->getOperand(0);
+ const LAllocation* rhs = ins->getOperand(1);
+ const LDefinition* dest = ins->getDef(0);
+  // All of these bitops should be either imm32's or integer registers.
+ switch (ins->bitop()) {
+ case JSOp::BitOr:
+ if (rhs->isConstant()) {
+ masm.ma_or(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)),
+ true);
+ } else {
+ masm.as_or(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
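+        // slli.w with a zero shift re-sign-extends the 32-bit result.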
+ masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ case JSOp::BitXor:
+ if (rhs->isConstant()) {
+ masm.ma_xor(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)),
+ true);
+ } else {
+ masm.as_xor(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ case JSOp::BitAnd:
+ if (rhs->isConstant()) {
+ masm.ma_and(ToRegister(dest), ToRegister(lhs), Imm32(ToInt32(rhs)),
+ true);
+ } else {
+ masm.as_and(ToRegister(dest), ToRegister(lhs), ToRegister(rhs));
+ masm.as_slli_w(ToRegister(dest), ToRegister(dest), 0);
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitBitOpI64(LBitOpI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LBitOpI64::Lhs);
+ const LInt64Allocation rhs = lir->getInt64Operand(LBitOpI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ switch (lir->bitop()) {
+ case JSOp::BitOr:
+ if (IsConstant(rhs)) {
+ masm.or64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.or64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitXor:
+ if (IsConstant(rhs)) {
+ masm.xor64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.xor64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::BitAnd:
+ if (IsConstant(rhs)) {
+ masm.and64(Imm64(ToInt64(rhs)), ToRegister64(lhs));
+ } else {
+ masm.and64(ToOperandOrRegister64(rhs), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("unexpected binary opcode");
+ }
+}
+
+void CodeGenerator::visitShiftI(LShiftI* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ const LAllocation* rhs = ins->rhs();
+ Register dest = ToRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ int32_t shift = ToInt32(rhs) & 0x1F;
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.as_slli_w(dest, lhs, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.as_srai_w(dest, lhs, shift % 32);
+ } else {
+ masm.move32(lhs, dest);
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.as_srli_w(dest, lhs, shift % 32);
+ } else {
+ // x >>> 0 can overflow.
+ if (ins->mir()->toUrsh()->fallible()) {
+ bailoutCmp32(Assembler::LessThan, lhs, Imm32(0), ins->snapshot());
+ }
+ masm.move32(lhs, dest);
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ } else {
+    // The shift amount should be AND'ed into the 0-31 range.
+ masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));
+
+ switch (ins->bitop()) {
+ case JSOp::Lsh:
+ masm.as_sll_w(dest, lhs, dest);
+ break;
+ case JSOp::Rsh:
+ masm.as_sra_w(dest, lhs, dest);
+ break;
+ case JSOp::Ursh:
+ masm.as_srl_w(dest, lhs, dest);
+ if (ins->mir()->toUrsh()->fallible()) {
+ // x >>> 0 can overflow.
+ bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ }
+}
+
+void CodeGenerator::visitShiftI64(LShiftI64* lir) {
+ const LInt64Allocation lhs = lir->getInt64Operand(LShiftI64::Lhs);
+ LAllocation* rhs = lir->getOperand(LShiftI64::Rhs);
+
+ MOZ_ASSERT(ToOutRegister64(lir) == ToRegister64(lhs));
+
+ if (rhs->isConstant()) {
+ int32_t shift = int32_t(rhs->toConstant()->toInt64() & 0x3F);
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ if (shift) {
+ masm.lshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Rsh:
+ if (shift) {
+ masm.rshift64Arithmetic(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ case JSOp::Ursh:
+ if (shift) {
+ masm.rshift64(Imm32(shift), ToRegister64(lhs));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+ return;
+ }
+
+ switch (lir->bitop()) {
+ case JSOp::Lsh:
+ masm.lshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Rsh:
+ masm.rshift64Arithmetic(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ case JSOp::Ursh:
+ masm.rshift64(ToRegister(rhs), ToRegister64(lhs));
+ break;
+ default:
+ MOZ_CRASH("Unexpected shift op");
+ }
+}
+
+void CodeGenerator::visitRotateI64(LRotateI64* lir) {
+ MRotate* mir = lir->mir();
+ LAllocation* count = lir->count();
+
+ Register64 input = ToRegister64(lir->input());
+ Register64 output = ToOutRegister64(lir);
+ Register temp = ToTempRegisterOrInvalid(lir->temp());
+
+ MOZ_ASSERT(input == output);
+
+ if (count->isConstant()) {
+ int32_t c = int32_t(count->toConstant()->toInt64() & 0x3F);
+ if (!c) {
+ return;
+ }
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(Imm32(c), input, output, temp);
+ } else {
+ masm.rotateRight64(Imm32(c), input, output, temp);
+ }
+ } else {
+ if (mir->isLeftRotate()) {
+ masm.rotateLeft64(ToRegister(count), input, output, temp);
+ } else {
+ masm.rotateRight64(ToRegister(count), input, output, temp);
+ }
+ }
+}
+
+void CodeGenerator::visitUrshD(LUrshD* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register temp = ToRegister(ins->temp());
+
+ const LAllocation* rhs = ins->rhs();
+ FloatRegister out = ToFloatRegister(ins->output());
+
+ if (rhs->isConstant()) {
+ masm.as_srli_w(temp, lhs, ToInt32(rhs) % 32);
+ } else {
+ masm.as_srl_w(temp, lhs, ToRegister(rhs));
+ }
+
+ masm.convertUInt32ToDouble(temp, out);
+}
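+
+// x >>> y yields a uint32, which can exceed INT32_MAX, so the result goes
+// straight to a double instead of an int32. Roughly (illustrative):
+//
+//   double UrshD(int32_t x, int32_t y) {
+//     return double(uint32_t(x) >> (y & 31));
+//   }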
+
+void CodeGenerator::visitClzI(LClzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_clz_w(output, input);
+}
+
+void CodeGenerator::visitCtzI(LCtzI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_ctz_w(output, input);
+}
+
+void CodeGenerator::visitPopcntI(LPopcntI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+ Register tmp = ToRegister(ins->temp0());
+
+ masm.popcnt32(input, output, tmp);
+}
+
+void CodeGenerator::visitPopcntI64(LPopcntI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ Register64 output = ToOutRegister64(ins);
+ Register tmp = ToRegister(ins->getTemp(0));
+
+ masm.popcnt64(input, output, tmp);
+}
+
+void CodeGenerator::visitPowHalfD(LPowHalfD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+ ScratchDoubleScope fpscratch(masm);
+
+ Label done, skip;
+
+  // Math.pow(-Infinity, 0.5) == Infinity.
+ masm.loadConstantDouble(NegativeInfinity<double>(), fpscratch);
+ masm.ma_bc_d(input, fpscratch, &skip, Assembler::DoubleNotEqualOrUnordered,
+ ShortJump);
+ masm.as_fneg_d(output, fpscratch);
+ masm.ma_b(&done, ShortJump);
+
+ masm.bind(&skip);
+ // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
+ // Adding 0 converts any -0 to 0.
+ masm.loadConstantDouble(0.0, fpscratch);
+ masm.as_fadd_d(output, input, fpscratch);
+ masm.as_fsqrt_d(output, output);
+
+ masm.bind(&done);
+}
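+
+// The special cases above follow the ES spec for Math.pow(x, 0.5):
+// pow(-Infinity, 0.5) is +Infinity (plain sqrt would give NaN), and adding
+// +0.0 first turns a -0 input into +0 so that pow(-0, 0.5) == +0. As a
+// sketch (hypothetical helper):
+//
+//   double PowHalf(double x) {
+//     if (x == -INFINITY) {
+//       return INFINITY;
+//     }
+//     return sqrt(x + 0.0);  // x + 0.0 maps -0 to +0
+//   }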
+
+void CodeGenerator::visitMathD(LMathD* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.as_fadd_d(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.as_fsub_d(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.as_fmul_d(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.as_fdiv_d(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitMathF(LMathF* math) {
+ FloatRegister src1 = ToFloatRegister(math->getOperand(0));
+ FloatRegister src2 = ToFloatRegister(math->getOperand(1));
+ FloatRegister output = ToFloatRegister(math->getDef(0));
+
+ switch (math->jsop()) {
+ case JSOp::Add:
+ masm.as_fadd_s(output, src1, src2);
+ break;
+ case JSOp::Sub:
+ masm.as_fsub_s(output, src1, src2);
+ break;
+ case JSOp::Mul:
+ masm.as_fmul_s(output, src1, src2);
+ break;
+ case JSOp::Div:
+ masm.as_fdiv_s(output, src1, src2);
+ break;
+ default:
+ MOZ_CRASH("unexpected opcode");
+ }
+}
+
+void CodeGenerator::visitTruncateDToInt32(LTruncateDToInt32* ins) {
+ emitTruncateDouble(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitTruncateFToInt32(LTruncateFToInt32* ins) {
+ emitTruncateFloat32(ToFloatRegister(ins->input()), ToRegister(ins->output()),
+ ins->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateDToInt32(
+ LWasmBuiltinTruncateDToInt32* lir) {
+ emitTruncateDouble(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmBuiltinTruncateFToInt32(
+ LWasmBuiltinTruncateFToInt32* lir) {
+ emitTruncateFloat32(ToFloatRegister(lir->getOperand(0)),
+ ToRegister(lir->getDef(0)), lir->mir());
+}
+
+void CodeGenerator::visitWasmTruncateToInt32(LWasmTruncateToInt32* lir) {
+ auto input = ToFloatRegister(lir->input());
+ auto output = ToRegister(lir->output());
+
+ MWasmTruncateToInt32* mir = lir->mir();
+ MIRType fromType = mir->input()->type();
+
+ MOZ_ASSERT(fromType == MIRType::Double || fromType == MIRType::Float32);
+
+ auto* ool = new (alloc()) OutOfLineWasmTruncateCheck(mir, input, output);
+ addOutOfLineCode(ool, mir);
+
+ Label* oolEntry = ool->entry();
+ if (mir->isUnsigned()) {
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToUInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+ return;
+ }
+
+ if (fromType == MIRType::Double) {
+ masm.wasmTruncateDoubleToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else if (fromType == MIRType::Float32) {
+ masm.wasmTruncateFloat32ToInt32(input, output, mir->isSaturating(),
+ oolEntry);
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ masm.bind(ool->rejoin());
+}
+
+void CodeGenerator::visitCopySignF(LCopySignF* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ masm.moveFromFloat32(lhs, lhsi);
+ masm.moveFromFloat32(rhs, rhsi);
+
+  // Combine: insert bits 30:0 (exponent and mantissa) of lhs into rhs,
+  // keeping the sign bit of rhs.
+ masm.as_bstrins_w(rhsi, lhsi, 30, 0);
+
+ masm.moveToFloat32(rhsi, output);
+}
+
+void CodeGenerator::visitCopySignD(LCopySignD* ins) {
+ FloatRegister lhs = ToFloatRegister(ins->getOperand(0));
+ FloatRegister rhs = ToFloatRegister(ins->getOperand(1));
+ FloatRegister output = ToFloatRegister(ins->getDef(0));
+
+ Register lhsi = ToRegister(ins->getTemp(0));
+ Register rhsi = ToRegister(ins->getTemp(1));
+
+ // Manipulate high words of double inputs.
+ masm.moveFromDoubleHi(lhs, lhsi);
+ masm.moveFromDoubleHi(rhs, rhsi);
+
+  // Combine: insert bits 30:0 of the lhs high word into the rhs high word,
+  // keeping the sign bit of rhs.
+ masm.as_bstrins_w(rhsi, lhsi, 30, 0);
+
+ masm.moveToDoubleHi(rhsi, output);
+}
+
+void CodeGenerator::visitValue(LValue* value) {
+ const ValueOperand out = ToOutValue(value);
+
+ masm.moveValue(value->value(), out);
+}
+
+void CodeGenerator::visitDouble(LDouble* ins) {
+ const LDefinition* out = ins->getDef(0);
+
+ masm.loadConstantDouble(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitFloat32(LFloat32* ins) {
+ const LDefinition* out = ins->getDef(0);
+ masm.loadConstantFloat32(ins->value(), ToFloatRegister(out));
+}
+
+void CodeGenerator::visitTestDAndBranch(LTestDAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+ ScratchDoubleScope fpscratch(masm);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantDouble(0.0, fpscratch);
+ // If 0, or NaN, the result is false.
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, input, fpscratch, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, input, fpscratch, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitTestFAndBranch(LTestFAndBranch* test) {
+ FloatRegister input = ToFloatRegister(test->input());
+ ScratchFloat32Scope fpscratch(masm);
+
+ MBasicBlock* ifTrue = test->ifTrue();
+ MBasicBlock* ifFalse = test->ifFalse();
+
+ masm.loadConstantFloat32(0.0f, fpscratch);
+  // If 0, or NaN, the result is false.
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, input, fpscratch, ifTrue,
+ Assembler::DoubleNotEqual);
+ } else {
+ branchToBlock(Assembler::SingleFloat, input, fpscratch, ifFalse,
+ Assembler::DoubleEqualOrUnordered);
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareD(LCompareD* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_double(dest, lhs, rhs, cond);
+}
+
+void CodeGenerator::visitCompareF(LCompareF* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+ Register dest = ToRegister(comp->output());
+
+ Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
+ masm.ma_cmp_set_float32(dest, lhs, rhs, cond);
+}
+
+void CodeGenerator::visitCompareDAndBranch(LCompareDAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitCompareFAndBranch(LCompareFAndBranch* comp) {
+ FloatRegister lhs = ToFloatRegister(comp->left());
+ FloatRegister rhs = ToFloatRegister(comp->right());
+
+ Assembler::DoubleCondition cond =
+ JSOpToDoubleCondition(comp->cmpMir()->jsop());
+ MBasicBlock* ifTrue = comp->ifTrue();
+ MBasicBlock* ifFalse = comp->ifFalse();
+
+ if (isNextBlock(ifFalse->lir())) {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifTrue, cond);
+ } else {
+ branchToBlock(Assembler::SingleFloat, lhs, rhs, ifFalse,
+ Assembler::InvertCondition(cond));
+ jumpToBlock(ifTrue);
+ }
+}
+
+void CodeGenerator::visitBitAndAndBranch(LBitAndAndBranch* lir) {
+ ScratchRegisterScope scratch(masm);
+ if (lir->right()->isConstant()) {
+ masm.ma_and(scratch, ToRegister(lir->left()), Imm32(ToInt32(lir->right())));
+ } else {
+ masm.as_and(scratch, ToRegister(lir->left()), ToRegister(lir->right()));
+ }
+ emitBranch(scratch, Register(scratch), lir->cond(), lir->ifTrue(),
+ lir->ifFalse());
+}
+
+void CodeGenerator::visitWasmUint32ToDouble(LWasmUint32ToDouble* lir) {
+ masm.convertUInt32ToDouble(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitWasmUint32ToFloat32(LWasmUint32ToFloat32* lir) {
+ masm.convertUInt32ToFloat32(ToRegister(lir->input()),
+ ToFloatRegister(lir->output()));
+}
+
+void CodeGenerator::visitNotI(LNotI* ins) {
+ masm.cmp32Set(Assembler::Equal, ToRegister(ins->input()), Imm32(0),
+ ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitNotD(LNotD* ins) {
+  // Since this operation is a logical NOT, set the output when the double
+  // is falsey, i.e. 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+ ScratchDoubleScope fpscratch(masm);
+
+ masm.loadConstantDouble(0.0, fpscratch);
+ masm.ma_cmp_set_double(dest, in, fpscratch,
+ Assembler::DoubleEqualOrUnordered);
+}
+
+void CodeGenerator::visitNotF(LNotF* ins) {
+  // Since this operation is a logical NOT, set the output when the float32
+  // is falsey, i.e. 0.0, -0.0 or NaN.
+ FloatRegister in = ToFloatRegister(ins->input());
+ Register dest = ToRegister(ins->output());
+ ScratchFloat32Scope fpscratch(masm);
+
+ masm.loadConstantFloat32(0.0f, fpscratch);
+ masm.ma_cmp_set_float32(dest, in, fpscratch,
+ Assembler::DoubleEqualOrUnordered);
+}
+
+void CodeGenerator::visitMemoryBarrier(LMemoryBarrier* ins) {
+ masm.memoryBarrier(ins->type());
+}
+
+void CodeGenerator::visitWasmLoad(LWasmLoad* lir) { emitWasmLoad(lir); }
+
+void CodeGenerator::visitWasmStore(LWasmStore* lir) { emitWasmStore(lir); }
+
+void CodeGenerator::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins) {
+ const MAsmJSLoadHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasMemoryBase());
+
+ const LAllocation* ptr = ins->ptr();
+ const LDefinition* output = ins->output();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ Register ptrReg = ToRegister(ptr);
+ Scalar::Type accessType = mir->accessType();
+ bool isFloat = accessType == Scalar::Float32 || accessType == Scalar::Float64;
+ Label done;
+
+ if (mir->needsBoundsCheck()) {
+ Label boundsCheckPassed;
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.wasmBoundsCheck32(Assembler::Below, ptrReg, boundsCheckLimitReg,
+ &boundsCheckPassed);
+ // Return a default value in case of a bounds-check failure.
+ if (isFloat) {
+ if (accessType == Scalar::Float32) {
+ masm.loadConstantFloat32(GenericNaN(), ToFloatRegister(output));
+ } else {
+ masm.loadConstantDouble(GenericNaN(), ToFloatRegister(output));
+ }
+ } else {
+ masm.mov(zero, ToRegister(output));
+ }
+ masm.jump(&done);
+ masm.bind(&boundsCheckPassed);
+ }
+
+ // TODO(loong64): zero-extend index in asm.js?
+ SecondScratchRegisterScope scratch2(masm);
+ masm.move32To64ZeroExtend(ptrReg, Register64(scratch2));
+
+ switch (accessType) {
+ case Scalar::Int8:
+ masm.as_ldx_b(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Uint8:
+ masm.as_ldx_bu(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Int16:
+ masm.as_ldx_h(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Uint16:
+ masm.as_ldx_hu(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.as_ldx_w(ToRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Float64:
+ masm.as_fldx_d(ToFloatRegister(output), HeapReg, scratch2);
+ break;
+ case Scalar::Float32:
+ masm.as_fldx_s(ToFloatRegister(output), HeapReg, scratch2);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins) {
+ const MAsmJSStoreHeap* mir = ins->mir();
+ MOZ_ASSERT(!mir->hasMemoryBase());
+
+ const LAllocation* value = ins->value();
+ const LAllocation* ptr = ins->ptr();
+ const LAllocation* boundsCheckLimit = ins->boundsCheckLimit();
+
+ Register ptrReg = ToRegister(ptr);
+
+ Label done;
+ if (mir->needsBoundsCheck()) {
+ Register boundsCheckLimitReg = ToRegister(boundsCheckLimit);
+ masm.wasmBoundsCheck32(Assembler::AboveOrEqual, ptrReg, boundsCheckLimitReg,
+ &done);
+ }
+
+ // TODO(loong64): zero-extend index in asm.js?
+ SecondScratchRegisterScope scratch2(masm);
+ masm.move32To64ZeroExtend(ptrReg, Register64(scratch2));
+
+ switch (mir->accessType()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ masm.as_stx_b(ToRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ masm.as_stx_h(ToRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ masm.as_stx_w(ToRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Float64:
+ masm.as_fstx_d(ToFloatRegister(value), HeapReg, scratch2);
+ break;
+ case Scalar::Float32:
+ masm.as_fstx_s(ToFloatRegister(value), HeapReg, scratch2);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ if (done.used()) {
+ masm.bind(&done);
+ }
+}
+
+void CodeGenerator::visitWasmCompareExchangeHeap(
+ LWasmCompareExchangeHeap* ins) {
+ MWasmCompareExchangeHeap* mir = ins->mir();
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register ptrReg = ToRegister(ins->ptr());
+ BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register oldval = ToRegister(ins->oldValue());
+ Register newval = ToRegister(ins->newValue());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmCompareExchange(mir->access(), srcAddr, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicExchangeHeap(LWasmAtomicExchangeHeap* ins) {
+ MWasmAtomicExchangeHeap* mir = ins->mir();
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register ptrReg = ToRegister(ins->ptr());
+ Register value = ToRegister(ins->value());
+ BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ masm.wasmAtomicExchange(mir->access(), srcAddr, value, valueTemp, offsetTemp,
+ maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeap(LWasmAtomicBinopHeap* ins) {
+ MOZ_ASSERT(ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+
+ masm.wasmAtomicFetchOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp, ToRegister(ins->output()));
+}
+
+void CodeGenerator::visitWasmAtomicBinopHeapForEffect(
+ LWasmAtomicBinopHeapForEffect* ins) {
+ MOZ_ASSERT(!ins->mir()->hasUses());
+ MOZ_ASSERT(ins->addrTemp()->isBogusTemp());
+
+ MWasmAtomicBinopHeap* mir = ins->mir();
+ Register memoryBase = ToRegister(ins->memoryBase());
+ Register ptrReg = ToRegister(ins->ptr());
+ Register valueTemp = ToTempRegisterOrInvalid(ins->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(ins->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(ins->maskTemp());
+
+ BaseIndex srcAddr(memoryBase, ptrReg, TimesOne, mir->access().offset());
+ masm.wasmAtomicEffectOp(mir->access(), mir->operation(),
+ ToRegister(ins->value()), srcAddr, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void CodeGenerator::visitWasmStackArg(LWasmStackArg* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ if (ins->arg()->isConstant()) {
+ masm.storePtr(ImmWord(ToInt32(ins->arg())),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ if (ins->arg()->isGeneralReg()) {
+ masm.storePtr(ToRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ } else if (mir->input()->type() == MIRType::Double) {
+ masm.storeDouble(ToFloatRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ } else {
+ masm.storeFloat32(ToFloatRegister(ins->arg()),
+ Address(StackPointer, mir->spOffset()));
+ }
+ }
+}
+
+void CodeGenerator::visitWasmStackArgI64(LWasmStackArgI64* ins) {
+ const MWasmStackArg* mir = ins->mir();
+ Address dst(StackPointer, mir->spOffset());
+ if (IsConstant(ins->arg())) {
+ masm.store64(Imm64(ToInt64(ins->arg())), dst);
+ } else {
+ masm.store64(ToRegister64(ins->arg()), dst);
+ }
+}
+
+void CodeGenerator::visitWasmSelect(LWasmSelect* ins) {
+ MIRType mirType = ins->mir()->type();
+
+ Register cond = ToRegister(ins->condExpr());
+ const LAllocation* falseExpr = ins->falseExpr();
+
+ if (mirType == MIRType::Int32 || mirType == MIRType::WasmAnyRef) {
+ Register out = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
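+    // |out| already holds the true expr; overwrite it with the false expr
+    // when the condition is zero.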
+ if (falseExpr->isRegister()) {
+ masm.moveIfZero(out, ToRegister(falseExpr), cond);
+ } else {
+ masm.cmp32Load32(Assembler::Zero, cond, cond, ToAddress(falseExpr), out);
+ }
+ return;
+ }
+
+ FloatRegister out = ToFloatRegister(ins->output());
+ MOZ_ASSERT(ToFloatRegister(ins->trueExpr()) == out,
+ "true expr input is reused for output");
+
+ if (falseExpr->isFloatReg()) {
+ if (mirType == MIRType::Float32) {
+ masm.ma_fmovz(Assembler::SingleFloat, out, ToFloatRegister(falseExpr),
+ cond);
+ } else if (mirType == MIRType::Double) {
+ masm.ma_fmovz(Assembler::DoubleFloat, out, ToFloatRegister(falseExpr),
+ cond);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+ } else {
+ Label done;
+ masm.ma_b(cond, cond, &done, Assembler::NonZero, ShortJump);
+
+ if (mirType == MIRType::Float32) {
+ masm.loadFloat32(ToAddress(falseExpr), out);
+ } else if (mirType == MIRType::Double) {
+ masm.loadDouble(ToAddress(falseExpr), out);
+ } else {
+ MOZ_CRASH("unhandled type in visitWasmSelect!");
+ }
+
+ masm.bind(&done);
+ }
+}
+
+// We expect to handle only the case where compare is {U,}Int32 and select is
+// {U,}Int32, and the "true" input is reused for the output.
+void CodeGenerator::visitWasmCompareAndSelect(LWasmCompareAndSelect* ins) {
+ bool cmpIs32bit = ins->compareType() == MCompare::Compare_Int32 ||
+ ins->compareType() == MCompare::Compare_UInt32;
+ bool selIs32bit = ins->mir()->type() == MIRType::Int32;
+
+ MOZ_RELEASE_ASSERT(
+ cmpIs32bit && selIs32bit,
+ "CodeGenerator::visitWasmCompareAndSelect: unexpected types");
+
+ Register trueExprAndDest = ToRegister(ins->output());
+ MOZ_ASSERT(ToRegister(ins->ifTrueExpr()) == trueExprAndDest,
+ "true expr input is reused for output");
+
+ Assembler::Condition cond = Assembler::InvertCondition(
+ JSOpToCondition(ins->compareType(), ins->jsop()));
+ const LAllocation* rhs = ins->rightExpr();
+ const LAllocation* falseExpr = ins->ifFalseExpr();
+ Register lhs = ToRegister(ins->leftExpr());
+
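+  // The condition is inverted so that the move below writes the false expr
+  // into the output exactly when the original comparison fails; otherwise
+  // the true expr already in the output is kept.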
+ masm.cmp32Move32(cond, lhs, ToRegister(rhs), ToRegister(falseExpr),
+ trueExprAndDest);
+}
+
+void CodeGenerator::visitWasmReinterpret(LWasmReinterpret* lir) {
+ MOZ_ASSERT(gen->compilingWasm());
+ MWasmReinterpret* ins = lir->mir();
+
+ MIRType to = ins->type();
+ mozilla::DebugOnly<MIRType> from = ins->input()->type();
+
+ switch (to) {
+ case MIRType::Int32:
+ MOZ_ASSERT(from == MIRType::Float32);
+ masm.as_movfr2gr_s(ToRegister(lir->output()),
+ ToFloatRegister(lir->input()));
+ break;
+ case MIRType::Float32:
+ MOZ_ASSERT(from == MIRType::Int32);
+ masm.as_movgr2fr_w(ToFloatRegister(lir->output()),
+ ToRegister(lir->input()));
+ break;
+ case MIRType::Double:
+ case MIRType::Int64:
+ MOZ_CRASH("not handled by this LIR opcode");
+ default:
+ MOZ_CRASH("unexpected WasmReinterpret");
+ }
+}
+
+void CodeGenerator::visitUDivOrMod(LUDivOrMod* ins) {
+ Register lhs = ToRegister(ins->lhs());
+ Register rhs = ToRegister(ins->rhs());
+ Register output = ToRegister(ins->output());
+ Label done;
+
+ // Prevent divide by zero.
+ if (ins->canBeDivideByZero()) {
+ if (ins->mir()->isTruncated()) {
+ if (ins->trapOnError()) {
+ Label nonZero;
+ masm.ma_b(rhs, rhs, &nonZero, Assembler::NonZero);
+ masm.wasmTrap(wasm::Trap::IntegerDivideByZero, ins->bytecodeOffset());
+ masm.bind(&nonZero);
+ } else {
+ // Infinity|0 == 0
+ Label notzero;
+ masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
+ masm.move32(Imm32(0), output);
+ masm.ma_b(&done, ShortJump);
+ masm.bind(&notzero);
+ }
+ } else {
+ bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot());
+ }
+ }
+
+ masm.as_mod_wu(output, lhs, rhs);
+
+  // If the remainder is non-zero, bail out: the exact quotient is not an
+  // integer and must be represented as a double.
+ if (ins->mir()->isDiv()) {
+ if (!ins->mir()->toDiv()->canTruncateRemainder()) {
+ bailoutCmp32(Assembler::NonZero, output, output, ins->snapshot());
+ }
+ // Get quotient
+ masm.as_div_wu(output, lhs, rhs);
+ }
+
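+  // An unsigned result with the sign bit set does not fit in an int32, so
+  // without truncation we must bail out and compute the result as a double.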
+ if (!ins->mir()->isTruncated()) {
+ bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot());
+ }
+
+ masm.bind(&done);
+}
+
+void CodeGenerator::visitEffectiveAddress(LEffectiveAddress* ins) {
+ const MEffectiveAddress* mir = ins->mir();
+ Register base = ToRegister(ins->base());
+ Register index = ToRegister(ins->index());
+ Register output = ToRegister(ins->output());
+
+ BaseIndex address(base, index, mir->scale(), mir->displacement());
+ masm.computeEffectiveAddress(address, output);
+}
+
+void CodeGenerator::visitNegI(LNegI* ins) {
+ Register input = ToRegister(ins->input());
+ Register output = ToRegister(ins->output());
+
+ masm.as_sub_w(output, zero, input);
+}
+
+void CodeGenerator::visitNegI64(LNegI64* ins) {
+ Register64 input = ToRegister64(ins->getInt64Operand(0));
+ MOZ_ASSERT(input == ToOutRegister64(ins));
+ masm.neg64(input);
+}
+
+void CodeGenerator::visitNegD(LNegD* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_fneg_d(output, input);
+}
+
+void CodeGenerator::visitNegF(LNegF* ins) {
+ FloatRegister input = ToFloatRegister(ins->input());
+ FloatRegister output = ToFloatRegister(ins->output());
+
+ masm.as_fneg_s(output, input);
+}
+
+void CodeGenerator::visitWasmAddOffset(LWasmAddOffset* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register base = ToRegister(lir->base());
+ Register out = ToRegister(lir->output());
+
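+  // A carry out of the 32-bit add means the effective address wrapped
+  // around, which must trap as an out-of-bounds access.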
+ Label ok;
+ masm.ma_add32TestCarry(Assembler::CarryClear, out, base, Imm32(mir->offset()),
+ &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitWasmAddOffset64(LWasmAddOffset64* lir) {
+ MWasmAddOffset* mir = lir->mir();
+ Register64 base = ToRegister64(lir->base());
+ Register64 out = ToOutRegister64(lir);
+
+ Label ok;
+ masm.ma_addPtrTestCarry(Assembler::CarryClear, out.reg, base.reg,
+ ImmWord(mir->offset()), &ok);
+ masm.wasmTrap(wasm::Trap::OutOfBounds, mir->bytecodeOffset());
+ masm.bind(&ok);
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop(
+ LAtomicTypedArrayElementBinop* lir) {
+ MOZ_ASSERT(!lir->mir()->isForEffect());
+
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register elements = ToRegister(lir->elements());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp2());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(
+ LAtomicTypedArrayElementBinopForEffect* lir) {
+ MOZ_ASSERT(lir->mir()->isForEffect());
+
+ Register elements = ToRegister(lir->elements());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Register value = ToRegister(lir->value());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address mem = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ } else {
+ BaseIndex mem(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOpJS(arrayType, Synchronization::Full(),
+ lir->mir()->operation(), value, mem, valueTemp,
+ offsetTemp, maskTemp);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement(
+ LCompareExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchangeJS(arrayType, Synchronization::Full(), dest, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, outTemp,
+ output);
+ }
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement(
+ LAtomicExchangeTypedArrayElement* lir) {
+ Register elements = ToRegister(lir->elements());
+ AnyRegister output = ToAnyRegister(lir->output());
+ Register outTemp = ToTempRegisterOrInvalid(lir->temp());
+
+ Register value = ToRegister(lir->value());
+ Register valueTemp = ToTempRegisterOrInvalid(lir->valueTemp());
+ Register offsetTemp = ToTempRegisterOrInvalid(lir->offsetTemp());
+ Register maskTemp = ToTempRegisterOrInvalid(lir->maskTemp());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchangeJS(arrayType, Synchronization::Full(), dest, value,
+ valueTemp, offsetTemp, maskTemp, outTemp, output);
+ }
+}
+
+void CodeGenerator::visitCompareExchangeTypedArrayElement64(
+ LCompareExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register oldval = ToRegister(lir->oldval());
+ Register newval = ToRegister(lir->newval());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut(out);
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(oldval, temp1);
+ masm.loadBigInt64(newval, tempOut);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.compareExchange64(Synchronization::Full(), dest, temp1, tempOut,
+ temp2);
+ }
+
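+  // Box the 64-bit value left in temp2 into a BigInt result.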
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicExchangeTypedArrayElement64(
+ LAtomicExchangeTypedArrayElement64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = Register64(ToRegister(lir->temp2()));
+ Register out = ToRegister(lir->output());
+ Scalar::Type arrayType = lir->mir()->arrayType();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicExchange64(Synchronization::Full(), dest, temp1, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinop64(
+ LAtomicTypedArrayElementBinop64* lir) {
+ MOZ_ASSERT(lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+ Register out = ToRegister(lir->output());
+ Register64 tempOut = Register64(out);
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicFetchOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ tempOut, temp2);
+ }
+
+ emitCreateBigInt(lir, arrayType, temp2, out, temp1.scratchReg());
+}
+
+void CodeGenerator::visitAtomicTypedArrayElementBinopForEffect64(
+ LAtomicTypedArrayElementBinopForEffect64* lir) {
+ MOZ_ASSERT(!lir->mir()->hasUses());
+
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+ Register64 temp2 = ToRegister64(lir->temp2());
+
+ Scalar::Type arrayType = lir->mir()->arrayType();
+ AtomicOp atomicOp = lir->mir()->operation();
+
+ masm.loadBigInt64(value, temp1);
+
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), arrayType);
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(arrayType));
+ masm.atomicEffectOp64(Synchronization::Full(), atomicOp, temp1, dest,
+ temp2);
+ }
+}
+
+void CodeGenerator::visitAtomicLoad64(LAtomicLoad64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register temp = ToRegister(lir->temp());
+ Register64 temp64 = ToRegister64(lir->temp64());
+ Register out = ToRegister(lir->output());
+ const MLoadUnboxedScalar* mir = lir->mir();
+
+ Scalar::Type storageType = mir->storageType();
+
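+  // The 64-bit atomic load is a plain load bracketed by memory barriers.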
+ auto sync = Synchronization::Load();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address source =
+ ToAddress(elements, lir->index(), storageType, mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ } else {
+ BaseIndex source(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(storageType), mir->offsetAdjustment());
+ masm.load64(source, temp64);
+ }
+ masm.memoryBarrierAfter(sync);
+ emitCreateBigInt(lir, storageType, temp64, out, temp);
+}
+
+void CodeGenerator::visitAtomicStore64(LAtomicStore64* lir) {
+ Register elements = ToRegister(lir->elements());
+ Register value = ToRegister(lir->value());
+ Register64 temp1 = ToRegister64(lir->temp1());
+
+ Scalar::Type writeType = lir->mir()->writeType();
+
+ masm.loadBigInt64(value, temp1);
+ auto sync = Synchronization::Store();
+ masm.memoryBarrierBefore(sync);
+ if (lir->index()->isConstant()) {
+ Address dest = ToAddress(elements, lir->index(), writeType);
+ masm.store64(temp1, dest);
+ } else {
+ BaseIndex dest(elements, ToRegister(lir->index()),
+ ScaleFromScalarType(writeType));
+ masm.store64(temp1, dest);
+ }
+ masm.memoryBarrierAfter(sync);
+}
+
+void CodeGenerator::visitWasmCompareExchangeI64(LWasmCompareExchangeI64* lir) {
+ Register memoryBase = ToRegister(lir->memoryBase());
+ Register ptr = ToRegister(lir->ptr());
+ Register64 oldValue = ToRegister64(lir->oldValue());
+ Register64 newValue = ToRegister64(lir->newValue());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(memoryBase, ptr, TimesOne, offset);
+ masm.wasmCompareExchange64(lir->mir()->access(), addr, oldValue, newValue,
+ output);
+}
+
+void CodeGenerator::visitWasmAtomicExchangeI64(LWasmAtomicExchangeI64* lir) {
+ Register memoryBase = ToRegister(lir->memoryBase());
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(memoryBase, ptr, TimesOne, offset);
+ masm.wasmAtomicExchange64(lir->mir()->access(), addr, value, output);
+}
+
+void CodeGenerator::visitWasmAtomicBinopI64(LWasmAtomicBinopI64* lir) {
+ Register memoryBase = ToRegister(lir->memoryBase());
+ Register ptr = ToRegister(lir->ptr());
+ Register64 value = ToRegister64(lir->value());
+ Register64 output = ToOutRegister64(lir);
+ Register64 temp(ToRegister(lir->getTemp(0)));
+ uint32_t offset = lir->mir()->access().offset();
+
+ BaseIndex addr(memoryBase, ptr, TimesOne, offset);
+
+ masm.wasmAtomicFetchOp64(lir->mir()->access(), lir->mir()->operation(), value,
+ addr, temp, output);
+}
+
+void CodeGenerator::visitNearbyInt(LNearbyInt*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitNearbyIntF(LNearbyIntF*) { MOZ_CRASH("NYI"); }
+
+void CodeGenerator::visitSimd128(LSimd128* ins) { MOZ_CRASH("No SIMD"); }
+
+void CodeGenerator::visitWasmTernarySimd128(LWasmTernarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128(LWasmBinarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmBinarySimd128WithConstant(
+ LWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmVariableShiftSimd128(
+ LWasmVariableShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmConstantShiftSimd128(
+ LWasmConstantShiftSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmSignReplicationSimd128(
+ LWasmSignReplicationSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmShuffleSimd128(LWasmShuffleSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmPermuteSimd128(LWasmPermuteSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceLaneSimd128(LWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReplaceInt64LaneSimd128(
+ LWasmReplaceInt64LaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmScalarToSimd128(LWasmScalarToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmInt64ToSimd128(LWasmInt64ToSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmUnarySimd128(LWasmUnarySimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128(LWasmReduceSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceAndBranchSimd128(
+ LWasmReduceAndBranchSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmReduceSimd128ToInt64(
+ LWasmReduceSimd128ToInt64* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmLoadLaneSimd128(LWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
+
+void CodeGenerator::visitWasmStoreLaneSimd128(LWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("No SIMD");
+}
diff --git a/js/src/jit/loong64/CodeGenerator-loong64.h b/js/src/jit/loong64/CodeGenerator-loong64.h
new file mode 100644
index 0000000000..5c75f65cd2
--- /dev/null
+++ b/js/src/jit/loong64/CodeGenerator-loong64.h
@@ -0,0 +1,209 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_CodeGenerator_loong64_h
+#define jit_loong64_CodeGenerator_loong64_h
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/shared/CodeGenerator-shared.h"
+
+namespace js {
+namespace jit {
+
+class CodeGeneratorLOONG64;
+class OutOfLineBailout;
+class OutOfLineTableSwitch;
+
+using OutOfLineWasmTruncateCheck =
+ OutOfLineWasmTruncateCheckBase<CodeGeneratorLOONG64>;
+
+class CodeGeneratorLOONG64 : public CodeGeneratorShared {
+ friend class MoveResolverLA;
+
+ protected:
+ CodeGeneratorLOONG64(MIRGenerator* gen, LIRGraph* graph,
+ MacroAssembler* masm);
+
+ NonAssertingLabel deoptLabel_;
+
+ Operand ToOperand(const LAllocation& a);
+ Operand ToOperand(const LAllocation* a);
+ Operand ToOperand(const LDefinition* def);
+
+#ifdef JS_PUNBOX64
+ Operand ToOperandOrRegister64(const LInt64Allocation input);
+#else
+ Register64 ToOperandOrRegister64(const LInt64Allocation input);
+#endif
+
+ MoveOperand toMoveOperand(LAllocation a) const;
+
+ template <typename T1, typename T2>
+ void bailoutCmp32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branch32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutTest32(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchTest32(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ template <typename T1, typename T2>
+ void bailoutCmpPtr(Assembler::Condition c, T1 lhs, T2 rhs,
+ LSnapshot* snapshot) {
+ Label bail;
+ masm.branchPtr(c, lhs, rhs, &bail);
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutTestPtr(Assembler::Condition c, Register lhs, Register rhs,
+ LSnapshot* snapshot) {
+    // TODO(loong64): branchTestPtr is not used here because it triggers
+    // '-Wundefined-inline'.
+ MOZ_ASSERT(c == Assembler::Zero || c == Assembler::NonZero ||
+ c == Assembler::Signed || c == Assembler::NotSigned);
+ Label bail;
+ if (lhs == rhs) {
+ masm.ma_b(lhs, rhs, &bail, c);
+ } else {
+ ScratchRegisterScope scratch(masm);
+ masm.as_and(scratch, lhs, rhs);
+ masm.ma_b(scratch, scratch, &bail, c);
+ }
+ bailoutFrom(&bail, snapshot);
+ }
+ void bailoutIfFalseBool(Register reg, LSnapshot* snapshot) {
+ Label bail;
+ ScratchRegisterScope scratch(masm);
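+    // Only the low byte of the boolean is significant; mask it before
+    // testing for zero.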
+ masm.ma_and(scratch, reg, Imm32(0xFF));
+ masm.ma_b(scratch, scratch, &bail, Assembler::Zero);
+ bailoutFrom(&bail, snapshot);
+ }
+
+ void bailoutFrom(Label* label, LSnapshot* snapshot);
+ void bailout(LSnapshot* snapshot);
+
+ bool generateOutOfLineCode();
+
+ template <typename T>
+ void branchToBlock(Register lhs, T rhs, MBasicBlock* mir,
+ Assembler::Condition cond) {
+ masm.ma_b(lhs, rhs, skipTrivialBlocks(mir)->lir()->label(), cond);
+ }
+ void branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, MBasicBlock* mir,
+ Assembler::DoubleCondition cond);
+
+ // Emits a branch that directs control flow to the true block if |cond| is
+ // true, and the false block if |cond| is false.
+ template <typename T>
+ void emitBranch(Register lhs, T rhs, Assembler::Condition cond,
+ MBasicBlock* mirTrue, MBasicBlock* mirFalse) {
+ if (isNextBlock(mirFalse->lir())) {
+ branchToBlock(lhs, rhs, mirTrue, cond);
+ } else {
+ branchToBlock(lhs, rhs, mirFalse, Assembler::InvertCondition(cond));
+ jumpToBlock(mirTrue);
+ }
+ }
+ void testZeroEmitBranch(Assembler::Condition cond, Register reg,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ emitBranch(reg, Imm32(0), cond, ifTrue, ifFalse);
+ }
+
+ void emitTableSwitchDispatch(MTableSwitch* mir, Register index,
+ Register base);
+
+ template <typename T>
+ void emitWasmLoad(T* ins);
+ template <typename T>
+ void emitWasmStore(T* ins);
+
+ void generateInvalidateEpilogue();
+
+ // Generating a result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register outTemp,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, AnyRegister output);
+
+ // Generating no result.
+ template <typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+ const S& value, const T& mem,
+ Register flagTemp, Register valueTemp,
+ Register offsetTemp, Register maskTemp);
+
+ public:
+ // Out of line visitors.
+ void visitOutOfLineBailout(OutOfLineBailout* ool);
+ void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
+ void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
+
+ protected:
+ void testNullEmitBranch(Assembler::Condition cond, const ValueOperand& value,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_NULL), cond, ifTrue, ifFalse);
+ }
+ void testUndefinedEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_UNDEFINED), cond, ifTrue,
+ ifFalse);
+ }
+ void testObjectEmitBranch(Assembler::Condition cond,
+ const ValueOperand& value, MBasicBlock* ifTrue,
+ MBasicBlock* ifFalse) {
+ MOZ_ASSERT(value.valueReg() != SecondScratchReg);
+ masm.splitTag(value.valueReg(), SecondScratchReg);
+ emitBranch(SecondScratchReg, ImmTag(JSVAL_TAG_OBJECT), cond, ifTrue,
+ ifFalse);
+ }
+
+ void emitBigIntDiv(LBigIntDiv* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+ void emitBigIntMod(LBigIntMod* ins, Register dividend, Register divisor,
+ Register output, Label* fail);
+
+ template <typename T>
+ void emitWasmLoadI64(T* ins);
+ template <typename T>
+ void emitWasmStoreI64(T* ins);
+
+ ValueOperand ToValue(LInstruction* ins, size_t pos);
+ ValueOperand ToTempValue(LInstruction* ins, size_t pos);
+
+ // Functions for LTestVAndBranch.
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag);
+};
+
+typedef CodeGeneratorLOONG64 CodeGeneratorSpecific;
+
+// An out-of-line bailout thunk.
+class OutOfLineBailout : public OutOfLineCodeBase<CodeGeneratorLOONG64> {
+ protected:
+ LSnapshot* snapshot_;
+
+ public:
+ OutOfLineBailout(LSnapshot* snapshot) : snapshot_(snapshot) {}
+
+ void accept(CodeGeneratorLOONG64* codegen) override;
+
+ LSnapshot* snapshot() const { return snapshot_; }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_CodeGenerator_loong64_h */
diff --git a/js/src/jit/loong64/LIR-loong64.h b/js/src/jit/loong64/LIR-loong64.h
new file mode 100644
index 0000000000..ff7dc0930e
--- /dev/null
+++ b/js/src/jit/loong64/LIR-loong64.h
@@ -0,0 +1,408 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_LIR_loong64_h
+#define jit_loong64_LIR_loong64_h
+
+namespace js {
+namespace jit {
+
+class LUnbox : public LInstructionHelper<1, 1, 0> {
+ protected:
+ LUnbox(LNode::Opcode opcode, const LAllocation& input)
+ : LInstructionHelper(opcode) {
+ setOperand(0, input);
+ }
+
+ public:
+ LIR_HEADER(Unbox);
+
+ explicit LUnbox(const LAllocation& input) : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+
+ static const size_t Input = 0;
+
+ MUnbox* mir() const { return mir_->toUnbox(); }
+ const char* extraName() const { return StringFromMIRType(mir()->type()); }
+};
+
+class LUnboxFloatingPoint : public LUnbox {
+ MIRType type_;
+
+ public:
+ LIR_HEADER(UnboxFloatingPoint);
+
+ LUnboxFloatingPoint(const LAllocation& input, MIRType type)
+ : LUnbox(classOpcode, input), type_(type) {}
+
+ MIRType type() const { return type_; }
+};
+
+// Convert a 32-bit unsigned integer to a double.
+class LWasmUint32ToDouble : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToDouble)
+
+ explicit LWasmUint32ToDouble(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+// Convert a 32-bit unsigned integer to a float32.
+class LWasmUint32ToFloat32 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmUint32ToFloat32)
+
+ explicit LWasmUint32ToFloat32(const LAllocation& input)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, input);
+ }
+};
+
+class LDivI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivI);
+
+ LDivI(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LDivPowTwoI : public LInstructionHelper<1, 1, 1> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(DivPowTwoI)
+
+ LDivPowTwoI(const LAllocation& lhs, int32_t shift, const LDefinition& temp)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp);
+ }
+
+ const LAllocation* numerator() { return getOperand(0); }
+ int32_t shift() const { return shift_; }
+ MDiv* mir() const { return mir_->toDiv(); }
+};
+
+class LModI : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(ModI);
+
+ LModI(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& callTemp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, callTemp);
+ }
+
+ const LDefinition* callTemp() { return getTemp(0); }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModPowTwoI : public LInstructionHelper<1, 1, 0> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModPowTwoI);
+
+ LModPowTwoI(const LAllocation& lhs, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+class LModMaskI : public LInstructionHelper<1, 1, 2> {
+ const int32_t shift_;
+
+ public:
+ LIR_HEADER(ModMaskI);
+
+ LModMaskI(const LAllocation& lhs, const LDefinition& temp0,
+ const LDefinition& temp1, int32_t shift)
+ : LInstructionHelper(classOpcode), shift_(shift) {
+ setOperand(0, lhs);
+ setTemp(0, temp0);
+ setTemp(1, temp1);
+ }
+
+ int32_t shift() const { return shift_; }
+ MMod* mir() const { return mir_->toMod(); }
+};
+
+// Table switch driven by an integer index.
+class LTableSwitch : public LInstructionHelper<0, 1, 2> {
+ public:
+ LIR_HEADER(TableSwitch);
+
+ LTableSwitch(const LAllocation& in, const LDefinition& inputCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ setTemp(0, inputCopy);
+ setTemp(1, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+ const LAllocation* index() { return getOperand(0); }
+ const LDefinition* tempInt() { return getTemp(0); }
+ // This is added to share the same CodeGenerator prefixes.
+ const LDefinition* tempPointer() { return getTemp(1); }
+};
+
+// Table switch driven by a boxed Value input.
+class LTableSwitchV : public LInstructionHelper<0, BOX_PIECES, 3> {
+ public:
+ LIR_HEADER(TableSwitchV);
+
+ LTableSwitchV(const LBoxAllocation& input, const LDefinition& inputCopy,
+ const LDefinition& floatCopy,
+ const LDefinition& jumpTablePointer, MTableSwitch* ins)
+ : LInstructionHelper(classOpcode) {
+ setBoxOperand(InputValue, input);
+ setTemp(0, inputCopy);
+ setTemp(1, floatCopy);
+ setTemp(2, jumpTablePointer);
+ setMir(ins);
+ }
+
+ MTableSwitch* mir() const { return mir_->toTableSwitch(); }
+
+ static const size_t InputValue = 0;
+
+ const LDefinition* tempInt() { return getTemp(0); }
+ const LDefinition* tempFloat() { return getTemp(1); }
+ const LDefinition* tempPointer() { return getTemp(2); }
+};
+
+class LMulI : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(MulI);
+
+ LMulI() : LBinaryMath(classOpcode) {}
+
+ MMul* mir() { return mir_->toMul(); }
+};
+
+class LUDivOrMod : public LBinaryMath<0> {
+ public:
+ LIR_HEADER(UDivOrMod);
+
+ LUDivOrMod() : LBinaryMath(classOpcode) {}
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+
+ bool trapOnError() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->trapOnError();
+ }
+ return mir_->toDiv()->trapOnError();
+ }
+
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmCompareExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES + INT64_PIECES,
+ 0> {
+ public:
+ LIR_HEADER(WasmCompareExchangeI64);
+
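+  // Operand layout: 0 = ptr, 1 = oldValue, 1 + INT64_PIECES = newValue,
+  // 1 + 2 * INT64_PIECES = memoryBase.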
+ LWasmCompareExchangeI64(const LAllocation& ptr,
+ const LInt64Allocation& oldValue,
+ const LInt64Allocation& newValue,
+ const LAllocation& memoryBase)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, oldValue);
+ setInt64Operand(1 + INT64_PIECES, newValue);
+ setOperand(1 + 2 * INT64_PIECES, memoryBase);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation oldValue() { return getInt64Operand(1); }
+ const LInt64Allocation newValue() {
+ return getInt64Operand(1 + INT64_PIECES);
+ }
+ const LAllocation* memoryBase() { return getOperand(1 + 2 * INT64_PIECES); }
+ const MWasmCompareExchangeHeap* mir() const {
+ return mir_->toWasmCompareExchangeHeap();
+ }
+};
+
+class LWasmAtomicExchangeI64
+ : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 0> {
+ public:
+ LIR_HEADER(WasmAtomicExchangeI64);
+
+ LWasmAtomicExchangeI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LAllocation& memoryBase)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ setOperand(1 + INT64_PIECES, memoryBase);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const LAllocation* memoryBase() { return getOperand(1 + INT64_PIECES); }
+ const MWasmAtomicExchangeHeap* mir() const {
+ return mir_->toWasmAtomicExchangeHeap();
+ }
+};
+
+class LWasmAtomicBinopI64
+ : public LInstructionHelper<INT64_PIECES, 2 + INT64_PIECES, 2> {
+ public:
+ LIR_HEADER(WasmAtomicBinopI64);
+
+ LWasmAtomicBinopI64(const LAllocation& ptr, const LInt64Allocation& value,
+ const LAllocation& memoryBase)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, ptr);
+ setInt64Operand(1, value);
+ setOperand(1 + INT64_PIECES, memoryBase);
+ }
+
+ const LAllocation* ptr() { return getOperand(0); }
+ const LInt64Allocation value() { return getInt64Operand(1); }
+ const LAllocation* memoryBase() { return getOperand(1 + INT64_PIECES); }
+ const MWasmAtomicBinopHeap* mir() const {
+ return mir_->toWasmAtomicBinopHeap();
+ }
+};
+
+class LDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(DivOrModI64)
+
+ LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ bool canBeNegativeOverflow() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeNegativeDividend();
+ }
+ return mir_->toDiv()->canBeNegativeOverflow();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LUDivOrModI64 : public LBinaryMath<1> {
+ public:
+ LIR_HEADER(UDivOrModI64);
+
+ LUDivOrModI64(const LAllocation& lhs, const LAllocation& rhs,
+ const LDefinition& temp)
+ : LBinaryMath(classOpcode) {
+ setOperand(0, lhs);
+ setOperand(1, rhs);
+ setTemp(0, temp);
+ }
+
+ const LDefinition* remainder() { return getTemp(0); }
+ const char* extraName() const {
+ return mir()->isTruncated() ? "Truncated" : nullptr;
+ }
+
+ MBinaryArithInstruction* mir() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ return static_cast<MBinaryArithInstruction*>(mir_);
+ }
+ bool canBeDivideByZero() const {
+ if (mir_->isMod()) {
+ return mir_->toMod()->canBeDivideByZero();
+ }
+ return mir_->toDiv()->canBeDivideByZero();
+ }
+ wasm::BytecodeOffset bytecodeOffset() const {
+ MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
+ if (mir_->isMod()) {
+ return mir_->toMod()->bytecodeOffset();
+ }
+ return mir_->toDiv()->bytecodeOffset();
+ }
+};
+
+class LWasmTruncateToInt64 : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(WasmTruncateToInt64);
+
+ explicit LWasmTruncateToInt64(const LAllocation& in)
+ : LInstructionHelper(classOpcode) {
+ setOperand(0, in);
+ }
+
+ MWasmTruncateToInt64* mir() const { return mir_->toWasmTruncateToInt64(); }
+};
+
+class LInt64ToFloatingPoint : public LInstructionHelper<1, 1, 0> {
+ public:
+ LIR_HEADER(Int64ToFloatingPoint);
+
+ explicit LInt64ToFloatingPoint(const LInt64Allocation& in)
+ : LInstructionHelper(classOpcode) {
+ setInt64Operand(0, in);
+ }
+
+ MInt64ToFloatingPoint* mir() const { return mir_->toInt64ToFloatingPoint(); }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_LIR_loong64_h */
diff --git a/js/src/jit/loong64/Lowering-loong64.cpp b/js/src/jit/loong64/Lowering-loong64.cpp
new file mode 100644
index 0000000000..2b5499ebbc
--- /dev/null
+++ b/js/src/jit/loong64/Lowering-loong64.cpp
@@ -0,0 +1,1100 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/Lowering-loong64.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/Lowering.h"
+#include "jit/MIR.h"
+#include "jit/shared/Lowering-shared-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::FloorLog2;
+
+LTableSwitch* LIRGeneratorLOONG64::newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitch(in, inputCopy, temp(), tableswitch);
+}
+
+LTableSwitchV* LIRGeneratorLOONG64::newLTableSwitchV(
+ MTableSwitch* tableswitch) {
+ return new (alloc()) LTableSwitchV(useBox(tableswitch->getOperand(0)), temp(),
+ tempDouble(), temp(), tableswitch);
+}
+
+void LIRGeneratorLOONG64::lowerForShift(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(ins, mir);
+}
+
+template <size_t Temps>
+void LIRGeneratorLOONG64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+
+ static_assert(LShiftI64::Rhs == INT64_PIECES,
+ "Assume Rhs is located at INT64_PIECES.");
+ static_assert(LRotateI64::Count == INT64_PIECES,
+ "Assume Count is located at INT64_PIECES.");
+
+ ins->setOperand(INT64_PIECES, useRegisterOrConstant(rhs));
+
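+  // Reuse the lhs register for the output; this saves a move when lhs is
+  // not used again.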
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+template void LIRGeneratorLOONG64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+template void LIRGeneratorLOONG64::lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, 1>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+// x = !y
+void LIRGeneratorLOONG64::lowerForALU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+// z = x + y
+void LIRGeneratorLOONG64::lowerForALU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegisterOrConstant(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+void LIRGeneratorLOONG64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins, MDefinition* mir,
+ MDefinition* input) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(input));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorLOONG64::lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs) {
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES, willHaveDifferentLIRNodes(lhs, rhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+ defineInt64ReuseInput(ins, mir, 0);
+}
+
+void LIRGeneratorLOONG64::lowerForMulInt64(LMulI64* ins, MMul* mir,
+ MDefinition* lhs, MDefinition* rhs) {
+ bool needsTemp = false;
+ bool cannotAliasRhs = false;
+ bool reuseInput = true;
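+  // These flags are constant on LoongArch64; presumably they are kept to
+  // mirror the shared structure of other back-ends' lowerings.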
+
+ ins->setInt64Operand(0, useInt64RegisterAtStart(lhs));
+ ins->setInt64Operand(INT64_PIECES,
+ (willHaveDifferentLIRNodes(lhs, rhs) || cannotAliasRhs)
+ ? useInt64OrConstant(rhs)
+ : useInt64OrConstantAtStart(rhs));
+
+ if (needsTemp) {
+ ins->setTemp(0, temp());
+ }
+ if (reuseInput) {
+ defineInt64ReuseInput(ins, mir, 0);
+ } else {
+ defineInt64(ins, mir);
+ }
+}
+
+void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 1, 0>* ins,
+ MDefinition* mir, MDefinition* input) {
+ ins->setOperand(0, useRegister(input));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template <size_t Temps>
+void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 2, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs,
+ MDefinition* rhs) {
+ ins->setOperand(0, useRegister(lhs));
+ ins->setOperand(1, useRegister(rhs));
+ define(
+ ins, mir,
+ LDefinition(LDefinition::TypeFrom(mir->type()), LDefinition::REGISTER));
+}
+
+template void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 2, 0>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+template void LIRGeneratorLOONG64::lowerForFPU(LInstructionHelper<1, 2, 1>* ins,
+ MDefinition* mir,
+ MDefinition* lhs,
+ MDefinition* rhs);
+
+void LIRGeneratorLOONG64::lowerForCompareI64AndBranch(
+ MTest* mir, MCompare* comp, JSOp op, MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse) {
+ LCompareI64AndBranch* lir = new (alloc())
+ LCompareI64AndBranch(comp, op, useInt64Register(left),
+ useInt64OrConstant(right), ifTrue, ifFalse);
+ add(lir, mir);
+}
+
+void LIRGeneratorLOONG64::lowerForBitAndAndBranch(LBitAndAndBranch* baab,
+ MInstruction* mir,
+ MDefinition* lhs,
+ MDefinition* rhs) {
+ baab->setOperand(0, useRegisterAtStart(lhs));
+ baab->setOperand(1, useRegisterOrConstantAtStart(rhs));
+ add(baab, mir);
+}
+
+LBoxAllocation LIRGeneratorLOONG64::useBoxFixed(MDefinition* mir, Register reg1,
+ Register reg2,
+ bool useAtStart) {
+ MOZ_ASSERT(mir->type() == MIRType::Value);
+
+ ensureDefined(mir);
+ return LBoxAllocation(LUse(reg1, mir->virtualRegister(), useAtStart));
+}
+
+LAllocation LIRGeneratorLOONG64::useByteOpRegister(MDefinition* mir) {
+ return useRegister(mir);
+}
+
+LAllocation LIRGeneratorLOONG64::useByteOpRegisterAtStart(MDefinition* mir) {
+ return useRegisterAtStart(mir);
+}
+
+LAllocation LIRGeneratorLOONG64::useByteOpRegisterOrNonDoubleConstant(
+ MDefinition* mir) {
+ return useRegisterOrNonDoubleConstant(mir);
+}
+
+LDefinition LIRGeneratorLOONG64::tempByteOpRegister() { return temp(); }
+
+LDefinition LIRGeneratorLOONG64::tempToUnbox() { return temp(); }
+
+void LIRGeneratorLOONG64::lowerUntypedPhiInput(MPhi* phi,
+ uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorLOONG64::lowerInt64PhiInput(MPhi* phi, uint32_t inputPosition,
+ LBlock* block, size_t lirIndex) {
+ lowerTypedPhiInput(phi, inputPosition, block, lirIndex);
+}
+void LIRGeneratorLOONG64::defineInt64Phi(MPhi* phi, size_t lirIndex) {
+ defineTypedPhi(phi, lirIndex);
+}
+
+void LIRGeneratorLOONG64::lowerNegI(MInstruction* ins, MDefinition* input) {
+ define(new (alloc()) LNegI(useRegisterAtStart(input)), ins);
+}
+void LIRGeneratorLOONG64::lowerNegI64(MInstruction* ins, MDefinition* input) {
+ defineInt64ReuseInput(new (alloc()) LNegI64(useInt64RegisterAtStart(input)),
+ ins, 0);
+}
+
+void LIRGeneratorLOONG64::lowerMulI(MMul* mul, MDefinition* lhs,
+ MDefinition* rhs) {
+ LMulI* lir = new (alloc()) LMulI;
+ if (mul->fallible()) {
+ assignSnapshot(lir, mul->bailoutKind());
+ }
+
+ lowerForALU(lir, mul, lhs, rhs);
+}
+
+void LIRGeneratorLOONG64::lowerDivI(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDiv(div);
+ return;
+ }
+
+ // Division instructions are slow. Division by constant denominators can be
+ // rewritten to use other instructions.
+ if (div->rhs()->isConstant()) {
+ int32_t rhs = div->rhs()->toConstant()->toInt32();
+ // Check for division by a positive power of two, which is an easy and
+ // important case to optimize. Note that other optimizations are also
+ // possible; division by negative powers of two can be optimized in a
+ // similar manner as positive powers of two, and division by other
+ // constants can be optimized by a reciprocal multiplication technique.
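+    // For example, x / 8 becomes LDivPowTwoI with shift = 3; the generated
+    // shift must adjust negative dividends so the result rounds toward zero.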
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LDivPowTwoI* lir =
+ new (alloc()) LDivPowTwoI(useRegister(div->lhs()), shift, temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+ return;
+ }
+ }
+
+ LDivI* lir = new (alloc())
+ LDivI(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+ define(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerDivI64(MDiv* div) {
+ if (div->isUnsigned()) {
+ lowerUDivI64(div);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerModI(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUMod(mod);
+ return;
+ }
+
+ if (mod->rhs()->isConstant()) {
+ int32_t rhs = mod->rhs()->toConstant()->toInt32();
+ int32_t shift = FloorLog2(rhs);
+ if (rhs > 0 && 1 << shift == rhs) {
+ LModPowTwoI* lir =
+ new (alloc()) LModPowTwoI(useRegister(mod->lhs()), shift);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ } else if (shift < 31 && (1 << (shift + 1)) - 1 == rhs) {
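+      // Here rhs is one less than a power of two (e.g. 3, 7, 15), so the
+      // modulus can be computed with a mask-based technique (LModMaskI)
+      // instead of a full modulo operation.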
+ LModMaskI* lir = new (alloc())
+ LModMaskI(useRegister(mod->lhs()), temp(LDefinition::GENERAL),
+ temp(LDefinition::GENERAL), shift + 1);
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+ return;
+ }
+ }
+ LModI* lir =
+ new (alloc()) LModI(useRegister(mod->lhs()), useRegister(mod->rhs()),
+ temp(LDefinition::GENERAL));
+
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+ define(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerModI64(MMod* mod) {
+ if (mod->isUnsigned()) {
+ lowerUModI64(mod);
+ return;
+ }
+
+ LDivOrModI64* lir = new (alloc())
+ LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerUDiv(MDiv* div) {
+ MDefinition* lhs = div->getOperand(0);
+ MDefinition* rhs = div->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (div->fallible()) {
+ assignSnapshot(lir, div->bailoutKind());
+ }
+
+ define(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerUDivI64(MDiv* div) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()), temp());
+ defineInt64(lir, div);
+}
+
+void LIRGeneratorLOONG64::lowerUMod(MMod* mod) {
+ MDefinition* lhs = mod->getOperand(0);
+ MDefinition* rhs = mod->getOperand(1);
+
+ LUDivOrMod* lir = new (alloc()) LUDivOrMod;
+ lir->setOperand(0, useRegister(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ if (mod->fallible()) {
+ assignSnapshot(lir, mod->bailoutKind());
+ }
+
+ define(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerUModI64(MMod* mod) {
+ LUDivOrModI64* lir = new (alloc())
+ LUDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()), temp());
+ defineInt64(lir, mod);
+}
+
+void LIRGeneratorLOONG64::lowerUrshD(MUrsh* mir) {
+ MDefinition* lhs = mir->lhs();
+ MDefinition* rhs = mir->rhs();
+
+ MOZ_ASSERT(lhs->type() == MIRType::Int32);
+ MOZ_ASSERT(rhs->type() == MIRType::Int32);
+
+ LUrshD* lir = new (alloc())
+ LUrshD(useRegister(lhs), useRegisterOrConstant(rhs), temp());
+ define(lir, mir);
+}
+
+void LIRGeneratorLOONG64::lowerPowOfTwoI(MPow* mir) {
+ int32_t base = mir->input()->toConstant()->toInt32();
+ MDefinition* power = mir->power();
+
+ auto* lir = new (alloc()) LPowOfTwoI(useRegister(power), base);
+ assignSnapshot(lir, mir->bailoutKind());
+ define(lir, mir);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntDiv(MBigIntDiv* ins) {
+ auto* lir = new (alloc()) LBigIntDiv(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntMod(MBigIntMod* ins) {
+ auto* lir = new (alloc()) LBigIntMod(useRegister(ins->lhs()),
+ useRegister(ins->rhs()), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntLsh(MBigIntLsh* ins) {
+ auto* lir = new (alloc()) LBigIntLsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerBigIntRsh(MBigIntRsh* ins) {
+ auto* lir = new (alloc()) LBigIntRsh(
+ useRegister(ins->lhs()), useRegister(ins->rhs()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerTruncateDToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double);
+
+ define(new (alloc()) LTruncateDToInt32(useRegister(opd), tempDouble()), ins);
+}
+
+void LIRGeneratorLOONG64::lowerTruncateFToInt32(MTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Float32);
+
+ define(new (alloc()) LTruncateFToInt32(useRegister(opd), tempFloat32()), ins);
+}
+
+void LIRGeneratorLOONG64::lowerBuiltinInt64ToFloatingPoint(
+ MBuiltinInt64ToFloatingPoint* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerWasmSelectI(MWasmSelect* select) {
+ auto* lir = new (alloc())
+ LWasmSelect(useRegisterAtStart(select->trueExpr()),
+ useAny(select->falseExpr()), useRegister(select->condExpr()));
+ defineReuseInput(lir, select, LWasmSelect::TrueExprIndex);
+}
+
+void LIRGeneratorLOONG64::lowerWasmSelectI64(MWasmSelect* select) {
+ auto* lir = new (alloc()) LWasmSelectI64(
+ useInt64RegisterAtStart(select->trueExpr()),
+ useInt64(select->falseExpr()), useRegister(select->condExpr()));
+ defineInt64ReuseInput(lir, select, LWasmSelectI64::TrueExprIndex);
+}
+
+// On loong64 we specialize only the cases where the compare is {U,}Int32 and
+// the select is {U,}Int32.
+bool LIRGeneratorShared::canSpecializeWasmCompareAndSelect(
+ MCompare::CompareType compTy, MIRType insTy) {
+ return insTy == MIRType::Int32 && (compTy == MCompare::Compare_Int32 ||
+ compTy == MCompare::Compare_UInt32);
+}
+
+void LIRGeneratorShared::lowerWasmCompareAndSelect(MWasmSelect* ins,
+ MDefinition* lhs,
+ MDefinition* rhs,
+ MCompare::CompareType compTy,
+ JSOp jsop) {
+ MOZ_ASSERT(canSpecializeWasmCompareAndSelect(compTy, ins->type()));
+ auto* lir = new (alloc()) LWasmCompareAndSelect(
+ useRegister(lhs), useRegister(rhs), compTy, jsop,
+ useRegisterAtStart(ins->trueExpr()), useRegister(ins->falseExpr()));
+ defineReuseInput(lir, ins, LWasmCompareAndSelect::IfTrueExprIndex);
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinTruncateToInt32(
+ MWasmBuiltinTruncateToInt32* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ if (opd->type() == MIRType::Double) {
+ define(new (alloc()) LWasmBuiltinTruncateDToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+ return;
+ }
+
+ define(new (alloc()) LWasmBuiltinTruncateFToInt32(
+ useRegister(opd), useFixed(ins->instance(), InstanceReg),
+ LDefinition::BogusTemp()),
+ ins);
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinTruncateToInt64(
+ MWasmBuiltinTruncateToInt64* ins) {
+ MOZ_CRASH("We don't use it for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div) {
+ MOZ_CRASH("We don't use runtime div for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod) {
+ MOZ_CRASH("We don't use runtime mod for this architecture");
+}
+
+void LIRGeneratorLOONG64::lowerAtomicLoad64(MLoadUnboxedScalar* ins) {
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->storageType());
+
+ auto* lir = new (alloc()) LAtomicLoad64(elements, index, temp(), tempInt64());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGeneratorLOONG64::lowerAtomicStore64(MStoreUnboxedScalar* ins) {
+ LUse elements = useRegister(ins->elements());
+ LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->writeType());
+ LAllocation value = useRegister(ins->value());
+
+ add(new (alloc()) LAtomicStore64(elements, index, value, tempInt64()), ins);
+}
+
+void LIRGenerator::visitBox(MBox* box) {
+ MDefinition* opd = box->getOperand(0);
+
+ // If the operand is a constant, emit near its uses.
+ if (opd->isConstant() && box->canEmitAtUses()) {
+ emitAtUses(box);
+ return;
+ }
+
+ if (opd->isConstant()) {
+ define(new (alloc()) LValue(opd->toConstant()->toJSValue()), box,
+ LDefinition(LDefinition::BOX));
+ } else {
+ LBox* ins = new (alloc()) LBox(useRegister(opd), opd->type());
+ define(ins, box, LDefinition(LDefinition::BOX));
+ }
+}
+
+void LIRGenerator::visitUnbox(MUnbox* unbox) {
+ MDefinition* box = unbox->getOperand(0);
+ MOZ_ASSERT(box->type() == MIRType::Value);
+
+ LUnbox* lir;
+ if (IsFloatingPointType(unbox->type())) {
+ lir = new (alloc())
+ LUnboxFloatingPoint(useRegisterAtStart(box), unbox->type());
+ } else if (unbox->fallible()) {
+ // If the unbox is fallible, load the Value in a register first to
+ // avoid multiple loads.
+ lir = new (alloc()) LUnbox(useRegisterAtStart(box));
+ } else {
+ lir = new (alloc()) LUnbox(useAtStart(box));
+ }
+
+ if (unbox->fallible()) {
+ assignSnapshot(lir, unbox->bailoutKind());
+ }
+
+ define(lir, unbox);
+}
+
+void LIRGenerator::visitAbs(MAbs* ins) {
+ define(allocateAbs(ins, useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitCopySign(MCopySign* ins) {
+ MDefinition* lhs = ins->lhs();
+ MDefinition* rhs = ins->rhs();
+
+ MOZ_ASSERT(IsFloatingPointType(lhs->type()));
+ MOZ_ASSERT(lhs->type() == rhs->type());
+ MOZ_ASSERT(lhs->type() == ins->type());
+
+ LInstructionHelper<1, 2, 2>* lir;
+ if (lhs->type() == MIRType::Double) {
+ lir = new (alloc()) LCopySignD();
+ } else {
+ lir = new (alloc()) LCopySignF();
+ }
+
+ lir->setTemp(0, temp());
+ lir->setTemp(1, temp());
+
+ lir->setOperand(0, useRegisterAtStart(lhs));
+ lir->setOperand(1, useRegister(rhs));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitPowHalf(MPowHalf* ins) {
+ MDefinition* input = ins->input();
+ MOZ_ASSERT(input->type() == MIRType::Double);
+ LPowHalfD* lir = new (alloc()) LPowHalfD(useRegisterAtStart(input));
+ defineReuseInput(lir, ins, 0);
+}
+
+void LIRGenerator::visitExtendInt32ToInt64(MExtendInt32ToInt64* ins) {
+ defineInt64(
+ new (alloc()) LExtendInt32ToInt64(useRegisterAtStart(ins->input())), ins);
+}
+
+void LIRGenerator::visitSignExtendInt64(MSignExtendInt64* ins) {
+ defineInt64(new (alloc())
+ LSignExtendInt64(useInt64RegisterAtStart(ins->input())),
+ ins);
+}
+
+void LIRGenerator::visitInt64ToFloatingPoint(MInt64ToFloatingPoint* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Int64);
+ MOZ_ASSERT(IsFloatingPointType(ins->type()));
+
+ define(new (alloc()) LInt64ToFloatingPoint(useInt64Register(opd)), ins);
+}
+
+void LIRGenerator::visitSubstr(MSubstr* ins) {
+ LSubstr* lir = new (alloc())
+ LSubstr(useRegister(ins->string()), useRegister(ins->begin()),
+ useRegister(ins->length()), temp(), temp(), temp());
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+}
+
+void LIRGenerator::visitCompareExchangeTypedArrayElement(
+ MCompareExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation newval = useRegister(ins->newval());
+ const LAllocation oldval = useRegister(ins->oldval());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ auto* lir = new (alloc()) LCompareExchangeTypedArrayElement64(
+ elements, index, oldval, newval, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+  // If the target is a floating-point register, then we need a temp at the
+  // CodeGenerator level for creating the result.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LCompareExchangeTypedArrayElement* lir = new (alloc())
+ LCompareExchangeTypedArrayElement(elements, index, oldval, newval,
+ outTemp, valueTemp, offsetTemp,
+ maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicExchangeTypedArrayElement(
+ MAtomicExchangeTypedArrayElement* ins) {
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LDefinition temp2 = temp();
+
+ auto* lir = new (alloc()) LAtomicExchangeTypedArrayElement64(
+ elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+  // If the target is a floating-point register, then we need a temp at the
+  // CodeGenerator level for creating the result.
+
+ MOZ_ASSERT(ins->arrayType() <= Scalar::Uint32);
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32) {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ outTemp = temp();
+ }
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LAtomicExchangeTypedArrayElement* lir =
+ new (alloc()) LAtomicExchangeTypedArrayElement(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAtomicTypedArrayElementBinop(
+ MAtomicTypedArrayElementBinop* ins) {
+ MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
+ MOZ_ASSERT(ins->arrayType() != Scalar::Float64);
+
+ MOZ_ASSERT(ins->elements()->type() == MIRType::Elements);
+ MOZ_ASSERT(ins->index()->type() == MIRType::IntPtr);
+
+ const LUse elements = useRegister(ins->elements());
+ const LAllocation index =
+ useRegisterOrIndexConstant(ins->index(), ins->arrayType());
+ const LAllocation value = useRegister(ins->value());
+
+ if (Scalar::isBigIntType(ins->arrayType())) {
+ LInt64Definition temp1 = tempInt64();
+ LInt64Definition temp2 = tempInt64();
+
+ // Case 1: the result of the operation is not used.
+ //
+ // We can omit allocating the result BigInt.
+
+ if (ins->isForEffect()) {
+ auto* lir = new (alloc()) LAtomicTypedArrayElementBinopForEffect64(
+ elements, index, value, temp1, temp2);
+ add(lir, ins);
+ return;
+ }
+
+ // Case 2: the result of the operation is used.
+
+ auto* lir = new (alloc())
+ LAtomicTypedArrayElementBinop64(elements, index, value, temp1, temp2);
+ define(lir, ins);
+ assignSafepoint(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (Scalar::byteSize(ins->arrayType()) < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (ins->isForEffect()) {
+ LAtomicTypedArrayElementBinopForEffect* lir =
+ new (alloc()) LAtomicTypedArrayElementBinopForEffect(
+ elements, index, value, valueTemp, offsetTemp, maskTemp);
+ add(lir, ins);
+ return;
+ }
+
+ // For a Uint32Array with a known double result we need a temp for
+ // the intermediate output.
+
+ LDefinition outTemp = LDefinition::BogusTemp();
+
+ if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
+ outTemp = temp();
+ }
+
+ LAtomicTypedArrayElementBinop* lir =
+ new (alloc()) LAtomicTypedArrayElementBinop(
+ elements, index, value, outTemp, valueTemp, offsetTemp, maskTemp);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitReturnImpl(MDefinition* opd, bool isGenerator) {
+ MOZ_ASSERT(opd->type() == MIRType::Value);
+
+ LReturn* ins = new (alloc()) LReturn(isGenerator);
+ ins->setOperand(0, useFixed(opd, JSReturnReg));
+ add(ins);
+}
+
+void LIRGenerator::visitAsmJSLoadHeap(MAsmJSLoadHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+ // We have no memory-base value, meaning that HeapReg is to be used as the
+ // memory base. This follows from the definition of
+ // FunctionCompiler::maybeLoadMemoryBase() in WasmIonCompile.cpp.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ auto* lir =
+ new (alloc()) LAsmJSLoadHeap(baseAlloc, limitAlloc, LAllocation());
+ define(lir, ins);
+}
+
+void LIRGenerator::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins) {
+ MDefinition* base = ins->base();
+ MOZ_ASSERT(base->type() == MIRType::Int32);
+
+ MDefinition* boundsCheckLimit = ins->boundsCheckLimit();
+ MOZ_ASSERT_IF(ins->needsBoundsCheck(),
+ boundsCheckLimit->type() == MIRType::Int32);
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+
+ LAllocation limitAlloc = ins->needsBoundsCheck()
+ ? useRegisterAtStart(boundsCheckLimit)
+ : LAllocation();
+
+  // See the comment in LIRGenerator::visitAsmJSLoadHeap just above.
+ MOZ_ASSERT(!ins->hasMemoryBase());
+ add(new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()),
+ limitAlloc, LAllocation()),
+ ins);
+}
+
+void LIRGenerator::visitWasmLoad(MWasmLoad* ins) {
+ MDefinition* base = ins->base();
+  // 'base' is a GPR but may be of either type. If it is 32-bit, it is
+  // sign-extended on loongarch64, so we must explicitly promote it to
+  // 64 bits by zero-extension when using it as an index register in
+  // memory accesses.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ LAllocation memoryBase =
+ ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+ : LGeneralReg(HeapReg);
+
+  LAllocation ptr = useRegisterAtStart(base);
+
+ if (ins->type() == MIRType::Int64) {
+ auto* lir = new (alloc()) LWasmLoadI64(ptr, memoryBase);
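+    // With a non-zero access offset, a temp copy of the base register is
+    // reserved so code generation can compute the effective address in it.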
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ defineInt64(lir, ins);
+ return;
+ }
+
+ auto* lir = new (alloc()) LWasmLoad(ptr, memoryBase);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmStore(MWasmStore* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+
+ MDefinition* value = ins->value();
+ LAllocation memoryBase =
+ ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+ : LGeneralReg(HeapReg);
+
+ if (ins->access().type() == Scalar::Int64) {
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStoreI64(baseAlloc, valueAlloc, memoryBase);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+ return;
+ }
+
+ LAllocation baseAlloc = useRegisterAtStart(base);
+ LAllocation valueAlloc = useRegisterAtStart(value);
+ auto* lir = new (alloc()) LWasmStore(baseAlloc, valueAlloc, memoryBase);
+ if (ins->access().offset()) {
+ lir->setTemp(0, tempCopy(base, 0));
+ }
+
+ add(lir, ins);
+}
+
+void LIRGenerator::visitWasmNeg(MWasmNeg* ins) {
+ if (ins->type() == MIRType::Int32) {
+ define(new (alloc()) LNegI(useRegisterAtStart(ins->input())), ins);
+ } else if (ins->type() == MIRType::Float32) {
+ define(new (alloc()) LNegF(useRegisterAtStart(ins->input())), ins);
+ } else {
+ MOZ_ASSERT(ins->type() == MIRType::Double);
+ define(new (alloc()) LNegD(useRegisterAtStart(ins->input())), ins);
+ }
+}
+
+void LIRGenerator::visitWasmTruncateToInt64(MWasmTruncateToInt64* ins) {
+ MDefinition* opd = ins->input();
+ MOZ_ASSERT(opd->type() == MIRType::Double || opd->type() == MIRType::Float32);
+
+ defineInt64(new (alloc()) LWasmTruncateToInt64(useRegister(opd)), ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToDouble(MWasmUnsignedToDouble* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToDouble* lir =
+ new (alloc()) LWasmUint32ToDouble(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmUnsignedToFloat32(MWasmUnsignedToFloat32* ins) {
+ MOZ_ASSERT(ins->input()->type() == MIRType::Int32);
+ LWasmUint32ToFloat32* lir =
+ new (alloc()) LWasmUint32ToFloat32(useRegisterAtStart(ins->input()));
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmCompareExchangeHeap(MWasmCompareExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+ LAllocation memoryBase =
+ ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+ : LGeneralReg(HeapReg);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmCompareExchangeI64(
+ useRegister(base), useInt64Register(ins->oldValue()),
+ useInt64Register(ins->newValue()), memoryBase);
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmCompareExchangeHeap* lir = new (alloc())
+ LWasmCompareExchangeHeap(useRegister(base), useRegister(ins->oldValue()),
+ useRegister(ins->newValue()), valueTemp,
+ offsetTemp, maskTemp, memoryBase);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicExchangeHeap(MWasmAtomicExchangeHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+ LAllocation memoryBase =
+ ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+ : LGeneralReg(HeapReg);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicExchangeI64(
+ useRegister(base), useInt64Register(ins->value()), memoryBase);
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ LWasmAtomicExchangeHeap* lir = new (alloc())
+ LWasmAtomicExchangeHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp, memoryBase);
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmAtomicBinopHeap(MWasmAtomicBinopHeap* ins) {
+ MDefinition* base = ins->base();
+ // See comment in visitWasmLoad re the type of 'base'.
+ MOZ_ASSERT(base->type() == MIRType::Int32 || base->type() == MIRType::Int64);
+ LAllocation memoryBase =
+ ins->hasMemoryBase() ? LAllocation(useRegisterAtStart(ins->memoryBase()))
+ : LGeneralReg(HeapReg);
+
+ if (ins->access().type() == Scalar::Int64) {
+ auto* lir = new (alloc()) LWasmAtomicBinopI64(
+ useRegister(base), useInt64Register(ins->value()), memoryBase);
+ lir->setTemp(0, temp());
+ defineInt64(lir, ins);
+ return;
+ }
+
+ LDefinition valueTemp = LDefinition::BogusTemp();
+ LDefinition offsetTemp = LDefinition::BogusTemp();
+ LDefinition maskTemp = LDefinition::BogusTemp();
+
+ if (ins->access().byteSize() < 4) {
+ valueTemp = temp();
+ offsetTemp = temp();
+ maskTemp = temp();
+ }
+
+ if (!ins->hasUses()) {
+ LWasmAtomicBinopHeapForEffect* lir = new (alloc())
+ LWasmAtomicBinopHeapForEffect(useRegister(base),
+ useRegister(ins->value()), valueTemp,
+ offsetTemp, maskTemp, memoryBase);
+ add(lir, ins);
+ return;
+ }
+
+ LWasmAtomicBinopHeap* lir = new (alloc())
+ LWasmAtomicBinopHeap(useRegister(base), useRegister(ins->value()),
+ valueTemp, offsetTemp, maskTemp, memoryBase);
+
+ define(lir, ins);
+}
+
+void LIRGenerator::visitWasmTernarySimd128(MWasmTernarySimd128* ins) {
+ MOZ_CRASH("ternary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmBinarySimd128(MWasmBinarySimd128* ins) {
+ MOZ_CRASH("binary SIMD NYI");
+}
+
+#ifdef ENABLE_WASM_SIMD
+bool MWasmTernarySimd128::specializeBitselectConstantMaskAsShuffle(
+ int8_t shuffle[16]) {
+ return false;
+}
+#endif
+
+bool MWasmBinarySimd128::specializeForConstantRhs() {
+  // There are probably many cases we want to specialize here.
+ return false;
+}
+
+void LIRGenerator::visitWasmBinarySimd128WithConstant(
+ MWasmBinarySimd128WithConstant* ins) {
+ MOZ_CRASH("binary SIMD with constant NYI");
+}
+
+void LIRGenerator::visitWasmShiftSimd128(MWasmShiftSimd128* ins) {
+ MOZ_CRASH("shift SIMD NYI");
+}
+
+void LIRGenerator::visitWasmShuffleSimd128(MWasmShuffleSimd128* ins) {
+ MOZ_CRASH("shuffle SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReplaceLaneSimd128(MWasmReplaceLaneSimd128* ins) {
+ MOZ_CRASH("replace-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmScalarToSimd128(MWasmScalarToSimd128* ins) {
+ MOZ_CRASH("scalar-to-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmUnarySimd128(MWasmUnarySimd128* ins) {
+ MOZ_CRASH("unary SIMD NYI");
+}
+
+void LIRGenerator::visitWasmReduceSimd128(MWasmReduceSimd128* ins) {
+ MOZ_CRASH("reduce-SIMD NYI");
+}
+
+void LIRGenerator::visitWasmLoadLaneSimd128(MWasmLoadLaneSimd128* ins) {
+ MOZ_CRASH("load-lane SIMD NYI");
+}
+
+void LIRGenerator::visitWasmStoreLaneSimd128(MWasmStoreLaneSimd128* ins) {
+ MOZ_CRASH("store-lane SIMD NYI");
+}
diff --git a/js/src/jit/loong64/Lowering-loong64.h b/js/src/jit/loong64/Lowering-loong64.h
new file mode 100644
index 0000000000..6285b13291
--- /dev/null
+++ b/js/src/jit/loong64/Lowering-loong64.h
@@ -0,0 +1,110 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_Lowering_loong64_h
+#define jit_loong64_Lowering_loong64_h
+
+#include "jit/shared/Lowering-shared.h"
+
+namespace js {
+namespace jit {
+
+class LIRGeneratorLOONG64 : public LIRGeneratorShared {
+ protected:
+ LIRGeneratorLOONG64(MIRGenerator* gen, MIRGraph& graph, LIRGraph& lirGraph)
+ : LIRGeneratorShared(gen, graph, lirGraph) {}
+
+ LTableSwitch* newLTableSwitch(const LAllocation& in,
+ const LDefinition& inputCopy,
+ MTableSwitch* ins);
+ LTableSwitchV* newLTableSwitchV(MTableSwitch* ins);
+
+ void lowerForShift(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ template <size_t Temps>
+ void lowerForShiftInt64(
+ LInstructionHelper<INT64_PIECES, INT64_PIECES + 1, Temps>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForALU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* input);
+ void lowerForALU(LInstructionHelper<1, 2, 0>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+ void lowerForALUInt64(LInstructionHelper<INT64_PIECES, INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* input);
+ void lowerForALUInt64(
+ LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>* ins,
+ MDefinition* mir, MDefinition* lhs, MDefinition* rhs);
+ void lowerForMulInt64(LMulI64* ins, MMul* mir, MDefinition* lhs,
+ MDefinition* rhs);
+
+ void lowerForFPU(LInstructionHelper<1, 1, 0>* ins, MDefinition* mir,
+ MDefinition* src);
+ template <size_t Temps>
+ void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ void lowerForCompareI64AndBranch(MTest* mir, MCompare* comp, JSOp op,
+ MDefinition* left, MDefinition* right,
+ MBasicBlock* ifTrue, MBasicBlock* ifFalse);
+ void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
+ MDefinition* lhs, MDefinition* rhs);
+
+ // Returns a box allocation. reg2 is ignored on 64-bit platforms.
+ LBoxAllocation useBoxFixed(MDefinition* mir, Register reg1, Register reg2,
+ bool useAtStart = false);
+
+ LAllocation useByteOpRegister(MDefinition* mir);
+ LAllocation useByteOpRegisterAtStart(MDefinition* mir);
+ LAllocation useByteOpRegisterOrNonDoubleConstant(MDefinition* mir);
+ LDefinition tempByteOpRegister();
+
+ LDefinition tempToUnbox();
+
+ bool needTempForPostBarrier() { return true; }
+
+ void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block,
+ size_t lirIndex);
+ void lowerInt64PhiInput(MPhi*, uint32_t, LBlock*, size_t);
+ void defineInt64Phi(MPhi*, size_t);
+
+ void lowerNegI(MInstruction* ins, MDefinition* input);
+ void lowerNegI64(MInstruction* ins, MDefinition* input);
+ void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
+ void lowerDivI(MDiv* div);
+ void lowerDivI64(MDiv* div);
+ void lowerModI(MMod* mod);
+ void lowerModI64(MMod* mod);
+ void lowerUDiv(MDiv* div);
+ void lowerUDivI64(MDiv* div);
+ void lowerUMod(MMod* mod);
+ void lowerUModI64(MMod* mod);
+ void lowerUrshD(MUrsh* mir);
+ void lowerPowOfTwoI(MPow* mir);
+ void lowerBigIntDiv(MBigIntDiv* ins);
+ void lowerBigIntMod(MBigIntMod* ins);
+ void lowerBigIntLsh(MBigIntLsh* ins);
+ void lowerBigIntRsh(MBigIntRsh* ins);
+ void lowerTruncateDToInt32(MTruncateToInt32* ins);
+ void lowerTruncateFToInt32(MTruncateToInt32* ins);
+ void lowerBuiltinInt64ToFloatingPoint(MBuiltinInt64ToFloatingPoint* ins);
+ void lowerWasmSelectI(MWasmSelect* select);
+ void lowerWasmSelectI64(MWasmSelect* select);
+ void lowerWasmBuiltinTruncateToInt64(MWasmBuiltinTruncateToInt64* ins);
+ void lowerWasmBuiltinTruncateToInt32(MWasmBuiltinTruncateToInt32* ins);
+ void lowerWasmBuiltinDivI64(MWasmBuiltinDivI64* div);
+ void lowerWasmBuiltinModI64(MWasmBuiltinModI64* mod);
+
+ void lowerAtomicLoad64(MLoadUnboxedScalar* ins);
+ void lowerAtomicStore64(MStoreUnboxedScalar* ins);
+};
+
+typedef LIRGeneratorLOONG64 LIRGeneratorSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_Lowering_loong64_h */
diff --git a/js/src/jit/loong64/MacroAssembler-loong64-inl.h b/js/src/jit/loong64/MacroAssembler-loong64-inl.h
new file mode 100644
index 0000000000..7d4005af1b
--- /dev/null
+++ b/js/src/jit/loong64/MacroAssembler-loong64-inl.h
@@ -0,0 +1,2154 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_MacroAssembler_loong64_inl_h
+#define jit_loong64_MacroAssembler_loong64_inl_h
+
+#include "jit/loong64/MacroAssembler-loong64.h"
+
+namespace js {
+namespace jit {
+
+//{{{ check_macroassembler_style
+
+void MacroAssembler::move64(Register64 src, Register64 dest) {
+ movePtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::move64(Imm64 imm, Register64 dest) {
+ movePtr(ImmWord(imm.value), dest.reg);
+}
+
+void MacroAssembler::moveDoubleToGPR64(FloatRegister src, Register64 dest) {
+ moveFromDouble(src, dest.reg);
+}
+
+void MacroAssembler::moveGPR64ToDouble(Register64 src, FloatRegister dest) {
+ moveToDouble(src.reg, dest);
+}
+
+void MacroAssembler::move64To32(Register64 src, Register dest) {
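+  // slli.w with a shift of 0 writes the sign-extended low 32 bits of src,
+  // the canonical representation of 32-bit values on LoongArch64.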
+ as_slli_w(dest, src.reg, 0);
+}
+
+void MacroAssembler::move32To64ZeroExtend(Register src, Register64 dest) {
+ as_bstrpick_d(dest.reg, src, 31, 0);
+}
+
+void MacroAssembler::move8To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move8SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move16To64SignExtend(Register src, Register64 dest) {
+ move32To64SignExtend(src, dest);
+ move16SignExtend(dest.reg, dest.reg);
+}
+
+void MacroAssembler::move32To64SignExtend(Register src, Register64 dest) {
+ as_slli_w(dest.reg, src, 0);
+}
+
+void MacroAssembler::moveFloat32ToGPR(FloatRegister src, Register dest) {
+ moveFromFloat32(src, dest);
+}
+
+void MacroAssembler::moveGPRToFloat32(Register src, FloatRegister dest) {
+ moveToFloat32(src, dest);
+}
+
+void MacroAssembler::move8ZeroExtend(Register src, Register dest) {
+ as_bstrpick_d(dest, src, 7, 0);
+}
+
+void MacroAssembler::move8SignExtend(Register src, Register dest) {
+ as_ext_w_b(dest, src);
+}
+
+void MacroAssembler::move16SignExtend(Register src, Register dest) {
+ as_ext_w_h(dest, src);
+}
+
+void MacroAssembler::move32SignExtendToPtr(Register src, Register dest) {
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssembler::move32ZeroExtendToPtr(Register src, Register dest) {
+ as_bstrpick_d(dest, src, 31, 0);
+}
+
+// ===============================================================
+// Load instructions
+
+void MacroAssembler::load32SignExtendToPtr(const Address& src, Register dest) {
+ load32(src, dest);
+}
+
+void MacroAssembler::loadAbiReturnAddress(Register dest) { movePtr(ra, dest); }
+
+// ===============================================================
+// Logical instructions
+
+void MacroAssembler::not32(Register reg) { as_nor(reg, reg, zero); }
+
+void MacroAssembler::notPtr(Register reg) { as_nor(reg, reg, zero); }
+
+void MacroAssembler::andPtr(Register src, Register dest) {
+ as_and(dest, dest, src);
+}
+
+void MacroAssembler::andPtr(Imm32 imm, Register dest) {
+ ma_and(dest, dest, imm);
+}
+
+void MacroAssembler::and64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ImmWord(imm.value));
+ as_and(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::and64(Register64 src, Register64 dest) {
+ as_and(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::and64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(*this);
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ and64(scratch64, dest);
+ } else {
+ and64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::and32(Register src, Register dest) {
+ as_and(dest, dest, src);
+}
+
+void MacroAssembler::and32(Imm32 imm, Register dest) {
+ ma_and(dest, dest, imm);
+}
+
+void MacroAssembler::and32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ and32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::and32(const Address& src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(src, scratch2);
+ as_and(dest, dest, scratch2);
+}
+
+void MacroAssembler::or64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ImmWord(imm.value));
+ as_or(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::or32(Register src, Register dest) {
+ as_or(dest, dest, src);
+}
+
+void MacroAssembler::or32(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+void MacroAssembler::or32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ or32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::xor64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ImmWord(imm.value));
+ as_xor(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::orPtr(Register src, Register dest) {
+ as_or(dest, dest, src);
+}
+
+void MacroAssembler::orPtr(Imm32 imm, Register dest) { ma_or(dest, dest, imm); }
+
+void MacroAssembler::or64(Register64 src, Register64 dest) {
+ as_or(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::or64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ or64(scratch64, dest);
+ } else {
+ or64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xor64(Register64 src, Register64 dest) {
+ as_xor(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::xor64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ xor64(scratch64, dest);
+ } else {
+ xor64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::xorPtr(Register src, Register dest) {
+ as_xor(dest, dest, src);
+}
+
+void MacroAssembler::xorPtr(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+
+void MacroAssembler::xor32(Register src, Register dest) {
+ as_xor(dest, dest, src);
+}
+
+void MacroAssembler::xor32(Imm32 imm, Register dest) {
+ ma_xor(dest, dest, imm);
+}
+
+void MacroAssembler::xor32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ xor32(imm, scratch2);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::xor32(const Address& src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(src, scratch2);
+ xor32(scratch2, dest);
+}
+
+// ===============================================================
+// Swap instructions
+
+void MacroAssembler::byteSwap16SignExtend(Register reg) {
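+  // revb.2h swaps the bytes within each halfword; ext.w.h then sign-extends
+  // the low 16 bits into the full register.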
+ as_revb_2h(reg, reg);
+ as_ext_w_h(reg, reg);
+}
+
+void MacroAssembler::byteSwap16ZeroExtend(Register reg) {
+ as_revb_2h(reg, reg);
+ as_bstrpick_d(reg, reg, 15, 0);
+}
+
+void MacroAssembler::byteSwap32(Register reg) {
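+  // revb.2w byte-swaps each 32-bit word; the slli.w by 0 then
+  // re-canonicalizes the register by sign-extending the low word.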
+ as_revb_2w(reg, reg);
+ as_slli_w(reg, reg, 0);
+}
+
+void MacroAssembler::byteSwap64(Register64 reg64) {
+ as_revb_d(reg64.reg, reg64.reg);
+}
+
+// ===============================================================
+// Arithmetic functions
+
+void MacroAssembler::addPtr(Register src, Register dest) {
+ as_add_d(dest, dest, src);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, Register dest) {
+ ma_add_d(dest, dest, imm);
+}
+
+void MacroAssembler::addPtr(ImmWord imm, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(imm, scratch);
+ addPtr(scratch, dest);
+}
+
+void MacroAssembler::add64(Register64 src, Register64 dest) {
+ addPtr(src.reg, dest.reg);
+}
+
+void MacroAssembler::add64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ add64(scratch64, dest);
+ } else {
+ add64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::add64(Imm32 imm, Register64 dest) {
+ ma_add_d(dest.reg, dest.reg, imm);
+}
+
+void MacroAssembler::add64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ as_add_d(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::add32(Register src, Register dest) {
+ as_add_w(dest, dest, src);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register dest) {
+ ma_add_w(dest, dest, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, Register src, Register dest) {
+ ma_add_w(dest, src, imm);
+}
+
+void MacroAssembler::add32(Imm32 imm, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(dest, scratch2);
+ ma_add_w(scratch2, scratch2, imm);
+ store32(scratch2, dest);
+}
+
+void MacroAssembler::addPtr(Imm32 imm, const Address& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ loadPtr(dest, scratch);
+ addPtr(imm, scratch);
+ storePtr(scratch, dest);
+}
+
+void MacroAssembler::addPtr(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ loadPtr(src, scratch);
+ addPtr(scratch, dest);
+}
+
+void MacroAssembler::addDouble(FloatRegister src, FloatRegister dest) {
+ as_fadd_d(dest, dest, src);
+}
+
+void MacroAssembler::addFloat32(FloatRegister src, FloatRegister dest) {
+ as_fadd_s(dest, dest, src);
+}
+
+CodeOffset MacroAssembler::sub32FromStackPtrWithPatch(Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ MacroAssemblerLOONG64::ma_liPatchable(dest, Imm32(0));
+ as_sub_d(dest, StackPointer, dest);
+ return offset;
+}
+
+void MacroAssembler::patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) {
+  // Patch the lu12i.w/ori pair emitted by sub32FromStackPtrWithPatch:
+  // lu12i.w carries bits 31:12 of the immediate and ori carries bits 11:0.
+ Instruction* inst0 =
+ (Instruction*)m_buffer.getInst(BufferOffset(offset.offset()));
+
+ InstImm* i0 = (InstImm*)inst0;
+ InstImm* i1 = (InstImm*)i0->next();
+
+ MOZ_ASSERT((i0->extractBitField(31, 25)) == ((uint32_t)op_lu12i_w >> 25));
+ MOZ_ASSERT((i1->extractBitField(31, 22)) == ((uint32_t)op_ori >> 22));
+
+ *i0 = InstImm(op_lu12i_w, (int32_t)((imm.value >> 12) & 0xfffff),
+ Register::FromCode(i0->extractRD()), false);
+ *i1 = InstImm(op_ori, (int32_t)(imm.value & 0xfff),
+ Register::FromCode(i1->extractRJ()),
+ Register::FromCode(i1->extractRD()), 12);
+}
+
+void MacroAssembler::subPtr(Register src, Register dest) {
+ as_sub_d(dest, dest, src);
+}
+
+void MacroAssembler::subPtr(Imm32 imm, Register dest) {
+ ma_sub_d(dest, dest, imm);
+}
+
+void MacroAssembler::sub64(Register64 src, Register64 dest) {
+ as_sub_d(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::sub64(const Operand& src, Register64 dest) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ sub64(scratch64, dest);
+ } else {
+ sub64(Register64(src.toReg()), dest);
+ }
+}
+
+void MacroAssembler::sub64(Imm64 imm, Register64 dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ as_sub_d(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::sub32(Register src, Register dest) {
+ as_sub_w(dest, dest, src);
+}
+
+void MacroAssembler::sub32(Imm32 imm, Register dest) {
+ ma_sub_w(dest, dest, imm);
+}
+
+void MacroAssembler::sub32(const Address& src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(src, scratch2);
+ as_sub_w(dest, dest, scratch2);
+}
+
+void MacroAssembler::subPtr(Register src, const Address& dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(dest, scratch2);
+ subPtr(src, scratch2);
+ storePtr(scratch2, dest);
+}
+
+void MacroAssembler::subPtr(const Address& addr, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(addr, scratch2);
+ subPtr(scratch2, dest);
+}
+
+void MacroAssembler::subDouble(FloatRegister src, FloatRegister dest) {
+ as_fsub_d(dest, dest, src);
+}
+
+void MacroAssembler::subFloat32(FloatRegister src, FloatRegister dest) {
+ as_fsub_s(dest, dest, src);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.reg != scratch);
+ mov(ImmWord(imm.value), scratch);
+ as_mul_d(dest.reg, dest.reg, scratch);
+}
+
+void MacroAssembler::mul64(Imm64 imm, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ mul64(imm, dest);
+}
+
+void MacroAssembler::mul64(const Register64& src, const Register64& dest,
+ const Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_mul_d(dest.reg, dest.reg, src.reg);
+}
+
+void MacroAssembler::mul64(const Operand& src, const Register64& dest,
+ const Register temp) {
+ if (src.getTag() == Operand::MEM) {
+ ScratchRegisterScope scratch(asMasm());
+ Register64 scratch64(scratch);
+
+ load64(src.toAddress(), scratch64);
+ mul64(scratch64, dest, temp);
+ } else {
+ mul64(Register64(src.toReg()), dest, temp);
+ }
+}
+
+void MacroAssembler::mulPtr(Register rhs, Register srcDest) {
+ as_mul_d(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mulBy3(Register src, Register dest) {
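+  // dest = (src + src) + src; two adds are typically cheaper than a multiply.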
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ as_add_d(scratch, src, src);
+ as_add_d(dest, scratch, src);
+}
+
+void MacroAssembler::mul32(Register rhs, Register srcDest) {
+ as_mul_w(srcDest, srcDest, rhs);
+}
+
+void MacroAssembler::mul32(Imm32 imm, Register srcDest) {
+ ScratchRegisterScope scratch(asMasm());
+ move32(imm, scratch);
+ mul32(scratch, srcDest);
+}
+
+void MacroAssembler::mulHighUnsigned32(Imm32 imm, Register src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ move32(imm, scratch);
+ as_mulh_wu(dest, src, scratch);
+}
+
+void MacroAssembler::mulFloat32(FloatRegister src, FloatRegister dest) {
+ as_fmul_s(dest, dest, src);
+}
+
+void MacroAssembler::mulDouble(FloatRegister src, FloatRegister dest) {
+ as_fmul_d(dest, dest, src);
+}
+
+void MacroAssembler::mulDoublePtr(ImmPtr imm, Register temp,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ movePtr(imm, scratch);
+ loadDouble(Address(scratch, 0), fpscratch);
+ mulDouble(fpscratch, dest);
+}
+
+void MacroAssembler::inc64(AbsoluteAddress dest) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch, ImmWord(uintptr_t(dest.addr)));
+ as_ld_d(scratch2, scratch, 0);
+ as_addi_d(scratch2, scratch2, 1);
+ as_st_d(scratch2, scratch, 0);
+}
+
+void MacroAssembler::quotient32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+ as_div_wu(srcDest, srcDest, rhs);
+ } else {
+ as_div_w(srcDest, srcDest, rhs);
+ }
+}
+
+void MacroAssembler::remainder32(Register rhs, Register srcDest,
+ bool isUnsigned) {
+ if (isUnsigned) {
+ as_mod_wu(srcDest, srcDest, rhs);
+ } else {
+ as_mod_w(srcDest, srcDest, rhs);
+ }
+}
+
+void MacroAssembler::divFloat32(FloatRegister src, FloatRegister dest) {
+ as_fdiv_s(dest, dest, src);
+}
+
+void MacroAssembler::divDouble(FloatRegister src, FloatRegister dest) {
+ as_fdiv_d(dest, dest, src);
+}
+
+void MacroAssembler::neg64(Register64 reg) { as_sub_d(reg.reg, zero, reg.reg); }
+
+void MacroAssembler::negPtr(Register reg) { as_sub_d(reg, zero, reg); }
+
+void MacroAssembler::neg32(Register reg) { as_sub_w(reg, zero, reg); }
+
+void MacroAssembler::negateDouble(FloatRegister reg) { as_fneg_d(reg, reg); }
+
+void MacroAssembler::negateFloat(FloatRegister reg) { as_fneg_s(reg, reg); }
+
+void MacroAssembler::abs32(Register src, Register dest) {
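+  // Branchless abs: scratch = src >> 31 is all ones for negative src and
+  // zero otherwise, so (src ^ scratch) - scratch negates negative values
+  // and leaves non-negative ones unchanged.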
+ ScratchRegisterScope scratch(asMasm());
+ as_srai_w(scratch, src, 31);
+ as_xor(dest, src, scratch);
+ as_sub_w(dest, dest, scratch);
+}
+
+void MacroAssembler::absFloat32(FloatRegister src, FloatRegister dest) {
+ as_fabs_s(dest, src);
+}
+
+void MacroAssembler::absDouble(FloatRegister src, FloatRegister dest) {
+ as_fabs_d(dest, src);
+}
+
+void MacroAssembler::sqrtFloat32(FloatRegister src, FloatRegister dest) {
+ as_fsqrt_s(dest, src);
+}
+
+void MacroAssembler::sqrtDouble(FloatRegister src, FloatRegister dest) {
+ as_fsqrt_d(dest, src);
+}
+
+void MacroAssembler::minFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::minDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, false);
+}
+
+void MacroAssembler::maxFloat32(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxFloat32(srcDest, other, handleNaN, true);
+}
+
+void MacroAssembler::maxDouble(FloatRegister other, FloatRegister srcDest,
+ bool handleNaN) {
+ minMaxDouble(srcDest, other, handleNaN, true);
+}
+
+// ===============================================================
+// Shift functions
+
+void MacroAssembler::lshift32(Register src, Register dest) {
+ as_sll_w(dest, dest, src);
+}
+
+void MacroAssembler::lshift32(Imm32 imm, Register dest) {
+ as_slli_w(dest, dest, imm.value % 32);
+}
+
+void MacroAssembler::flexibleLshift32(Register src, Register dest) {
+ lshift32(src, dest);
+}
+
+void MacroAssembler::lshift64(Register shift, Register64 dest) {
+ as_sll_d(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::lshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_slli_d(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::lshiftPtr(Register shift, Register dest) {
+ as_sll_d(dest, dest, shift);
+}
+
+void MacroAssembler::lshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_slli_d(dest, dest, imm.value);
+}
+
+void MacroAssembler::rshift32(Register src, Register dest) {
+ as_srl_w(dest, dest, src);
+}
+
+void MacroAssembler::rshift32(Imm32 imm, Register dest) {
+ as_srli_w(dest, dest, imm.value % 32);
+}
+
+void MacroAssembler::flexibleRshift32(Register src, Register dest) {
+ rshift32(src, dest);
+}
+
+void MacroAssembler::rshift32Arithmetic(Register src, Register dest) {
+ as_sra_w(dest, dest, src);
+}
+
+void MacroAssembler::rshift32Arithmetic(Imm32 imm, Register dest) {
+ as_srai_w(dest, dest, imm.value % 32);
+}
+
+void MacroAssembler::flexibleRshift32Arithmetic(Register src, Register dest) {
+ rshift32Arithmetic(src, dest);
+}
+
+void MacroAssembler::rshift64(Register shift, Register64 dest) {
+ as_srl_d(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshift64(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srli_d(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshift64Arithmetic(Imm32 imm, Register64 dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srai_d(dest.reg, dest.reg, imm.value);
+}
+
+void MacroAssembler::rshift64Arithmetic(Register shift, Register64 dest) {
+ as_sra_d(dest.reg, dest.reg, shift);
+}
+
+void MacroAssembler::rshiftPtr(Register shift, Register dest) {
+ as_srl_d(dest, dest, shift);
+}
+
+void MacroAssembler::rshiftPtr(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srli_d(dest, dest, imm.value);
+}
+
+void MacroAssembler::rshiftPtrArithmetic(Imm32 imm, Register dest) {
+ MOZ_ASSERT(0 <= imm.value && imm.value < 64);
+ as_srai_d(dest, dest, imm.value);
+}
+
+// ===============================================================
+// Rotation functions
+
+void MacroAssembler::rotateLeft(Register count, Register input, Register dest) {
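+  // LoongArch only has rotate-right. rotr takes its count modulo 32, and
+  // -count is congruent to 32 - count, so rotating right by the negated
+  // count performs the left rotation.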
+ ScratchRegisterScope scratch(asMasm());
+ as_sub_w(scratch, zero, count);
+ as_rotr_w(dest, input, scratch);
+}
+
+void MacroAssembler::rotateLeft(Imm32 count, Register input, Register dest) {
+ as_rotri_w(dest, input, (32 - count.value) & 31);
+}
+
+void MacroAssembler::rotateLeft64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ ScratchRegisterScope scratch(asMasm());
+ as_sub_d(scratch, zero, count);
+ as_rotr_d(dest.reg, src.reg, scratch);
+}
+
+void MacroAssembler::rotateLeft64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_rotri_d(dest.reg, src.reg, (64 - count.value) & 63);
+}
+
+void MacroAssembler::rotateRight(Register count, Register input,
+ Register dest) {
+ as_rotr_w(dest, input, count);
+}
+
+void MacroAssembler::rotateRight(Imm32 count, Register input, Register dest) {
+ as_rotri_w(dest, input, count.value & 31);
+}
+
+void MacroAssembler::rotateRight64(Register count, Register64 src,
+ Register64 dest, Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_rotr_d(dest.reg, src.reg, count);
+}
+
+void MacroAssembler::rotateRight64(Imm32 count, Register64 src, Register64 dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ as_rotri_d(dest.reg, src.reg, count.value & 63);
+}
+
+// Bit counting functions
+
+void MacroAssembler::clz64(Register64 src, Register dest) {
+ as_clz_d(dest, src.reg);
+}
+
+void MacroAssembler::ctz64(Register64 src, Register dest) {
+ as_ctz_d(dest, src.reg);
+}
+
+void MacroAssembler::popcnt64(Register64 input, Register64 output,
+ Register tmp) {
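+  // Parallel (SWAR) popcount: compute pairwise bit counts (0x5555... mask),
+  // sum them into 2-bit then 4-bit fields (0x3333... and 0x0f0f... masks),
+  // and multiply by 0x0101... so the total accumulates in the top byte,
+  // which the final shift by 56 extracts. The bstrins.d instructions
+  // replicate each 32-bit mask constant into the upper word.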
+ ScratchRegisterScope scratch(asMasm());
+ as_or(output.reg, input.reg, zero);
+ as_srai_d(tmp, input.reg, 1);
+ ma_li(scratch, Imm32(0x55555555));
+ as_bstrins_d(scratch, scratch, 63, 32);
+ as_and(tmp, tmp, scratch);
+ as_sub_d(output.reg, output.reg, tmp);
+ as_srai_d(tmp, output.reg, 2);
+ ma_li(scratch, Imm32(0x33333333));
+ as_bstrins_d(scratch, scratch, 63, 32);
+ as_and(output.reg, output.reg, scratch);
+ as_and(tmp, tmp, scratch);
+ as_add_d(output.reg, output.reg, tmp);
+ as_srli_d(tmp, output.reg, 4);
+ as_add_d(output.reg, output.reg, tmp);
+ ma_li(scratch, Imm32(0xF0F0F0F));
+ as_bstrins_d(scratch, scratch, 63, 32);
+ as_and(output.reg, output.reg, scratch);
+ ma_li(tmp, Imm32(0x1010101));
+ as_bstrins_d(tmp, tmp, 63, 32);
+ as_mul_d(output.reg, output.reg, tmp);
+ as_srai_d(output.reg, output.reg, 56);
+}
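+
+// SWAR sketch of the sequence above (illustrative, assuming uint64_t x):
+//   x -= (x >> 1) & 0x5555555555555555;                        // 2-bit sums
+//   x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333);
+//   x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0F;                   // byte sums
+//   x = (x * 0x0101010101010101) >> 56;                        // total
+// The as_bstrins_d steps merely replicate each 32-bit mask into the upper
+// half to form the 64-bit constants.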
+
+void MacroAssembler::clz32(Register src, Register dest, bool knownNotZero) {
+ as_clz_w(dest, src);
+}
+
+void MacroAssembler::ctz32(Register src, Register dest, bool knownNotZero) {
+ as_ctz_w(dest, src);
+}
+
+void MacroAssembler::popcnt32(Register input, Register output, Register tmp) {
+ // Equivalent to GCC output of mozilla::CountPopulation32()
+ as_or(output, input, zero);
+ as_srai_w(tmp, input, 1);
+ ma_and(tmp, tmp, Imm32(0x55555555));
+ as_sub_w(output, output, tmp);
+ as_srai_w(tmp, output, 2);
+ ma_and(output, output, Imm32(0x33333333));
+ ma_and(tmp, tmp, Imm32(0x33333333));
+ as_add_w(output, output, tmp);
+ as_srli_w(tmp, output, 4);
+ as_add_w(output, output, tmp);
+ ma_and(output, output, Imm32(0xF0F0F0F));
+ as_slli_w(tmp, output, 8);
+ as_add_w(output, output, tmp);
+ as_slli_w(tmp, output, 16);
+ as_add_w(output, output, tmp);
+ as_srai_w(output, output, 24);
+}
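+
+// The three final adds above replace the multiply used in the 64-bit
+// version: with per-byte counts already in place, the total is summed as
+// (illustrative, assuming uint32_t x)
+//   x += x << 8; x += x << 16; x >>= 24;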
+
+// ===============================================================
+// Condition functions
+
+void MacroAssembler::cmp8Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint8_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int8_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
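+
+// In cmp8Set above (and cmp16Set below), unsigned conditions compare the
+// zero-extended load against the immediate narrowed to uint8_t/uint16_t,
+// while signed conditions use the sign-extended load with int8_t/int16_t,
+// so both operands agree on how the narrow value is widened.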
+
+void MacroAssembler::cmp16Set(Condition cond, Address lhs, Imm32 rhs,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(uint16_t(rhs.value)), cond);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ ma_cmp_set(dest, scratch2, Imm32(int16_t(rhs.value)), cond);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+// Also see below for specializations of cmpPtrSet.
+template <typename T1, typename T2>
+void MacroAssembler::cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+void MacroAssembler::cmp64Set(Condition cond, Address lhs, Imm64 rhs,
+ Register dest) {
+ ma_cmp_set(dest, lhs, ImmWord(uint64_t(rhs.value)), cond);
+}
+
+template <typename T1, typename T2>
+void MacroAssembler::cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) {
+ ma_cmp_set(dest, lhs, rhs, cond);
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::branch8(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint8_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int8_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch8(Condition cond, const BaseIndex& lhs, Register rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ computeScaledAddress(lhs, scratch2);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load8ZeroExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load8SignExtend(Address(scratch2, lhs.offset), scratch2);
+ branch32(cond, scratch2, rhs, label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+void MacroAssembler::branch16(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(scratch2 != lhs.base);
+
+ switch (cond) {
+ case Assembler::Equal:
+ case Assembler::NotEqual:
+ case Assembler::Above:
+ case Assembler::AboveOrEqual:
+ case Assembler::Below:
+ case Assembler::BelowOrEqual:
+ load16ZeroExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(uint16_t(rhs.value)), label);
+ break;
+
+ case Assembler::GreaterThan:
+ case Assembler::GreaterThanOrEqual:
+ case Assembler::LessThan:
+ case Assembler::LessThanOrEqual:
+ load16SignExtend(lhs, scratch2);
+ branch32(cond, scratch2, Imm32(int16_t(rhs.value)), label);
+ break;
+
+ default:
+ MOZ_CRASH("unexpected condition");
+ }
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branch32(Condition cond, Register lhs, Imm32 imm,
+ L label) {
+ ma_b(lhs, imm, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Register rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ ma_b(scratch2, rhs, label, cond);
+}
+
+void MacroAssembler::branch32(Condition cond, wasm::SymbolicAddress addr,
+ Imm32 imm, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(addr, scratch2);
+ ma_b(scratch2, imm, label, cond);
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Imm64 val,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, ImmWord(val.value), success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, Register64 lhs, Register64 rhs,
+ Label* success, Label* fail) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal ||
+ cond == Assembler::LessThan ||
+ cond == Assembler::LessThanOrEqual ||
+ cond == Assembler::GreaterThan ||
+ cond == Assembler::GreaterThanOrEqual ||
+ cond == Assembler::Below || cond == Assembler::BelowOrEqual ||
+ cond == Assembler::Above || cond == Assembler::AboveOrEqual,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs.reg, rhs.reg, success);
+ if (fail) {
+ jump(fail);
+ }
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs, Imm64 val,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, ImmWord(val.value), label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ Register64 rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+
+ branchPtr(cond, lhs, rhs.reg, label);
+}
+
+void MacroAssembler::branch64(Condition cond, const Address& lhs,
+ const Address& rhs, Register scratch,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::NotEqual || cond == Assembler::Equal,
+ "other condition codes not supported");
+ MOZ_ASSERT(lhs.base != scratch);
+ MOZ_ASSERT(rhs.base != scratch);
+
+ loadPtr(rhs, scratch);
+ branchPtr(cond, lhs, scratch, label);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmPtr rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmGCPtr rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchPtr(Condition cond, Register lhs, ImmWord rhs,
+ Label* label) {
+ ma_b(lhs, rhs, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, Register rhs,
+ L label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmPtr rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const Address& lhs, ImmWord rhs,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const AbsoluteAddress& lhs,
+ ImmWord rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, wasm::SymbolicAddress lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ Register rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPtr(Condition cond, const BaseIndex& lhs,
+ ImmWord rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchPtr(cond, scratch2, rhs, label);
+}
+
+void MacroAssembler::branchPrivatePtr(Condition cond, const Address& lhs,
+ Register rhs, Label* label) {
+ branchPtr(cond, lhs, rhs, label);
+}
+
+void MacroAssembler::branchFloat(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ ma_bc_s(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchTruncateFloat32MaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ as_ftintrz_l_s(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, dest);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+
+ as_slli_w(dest, dest, 0);
+}
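+
+// In the sequence above, as_bstrpick_w isolates the FCSR CauseV (invalid)
+// bit, which the truncation raises when the input is NaN or out of int64
+// range; results that pass are then reduced mod 2^32 by the sign-extending
+// as_slli_w(dest, dest, 0).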
+
+void MacroAssembler::branchTruncateFloat32ToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ convertFloat32ToInt32(src, dest, fail, false);
+}
+
+void MacroAssembler::branchDouble(DoubleCondition cond, FloatRegister lhs,
+ FloatRegister rhs, Label* label) {
+ ma_bc_d(lhs, rhs, label, cond);
+}
+
+void MacroAssembler::branchTruncateDoubleMaybeModUint32(FloatRegister src,
+ Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ as_ftintrz_l_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, dest);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+
+ as_slli_w(dest, dest, 0);
+}
+
+void MacroAssembler::branchTruncateDoubleToInt32(FloatRegister src,
+ Register dest, Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ // Convert scalar to signed 64-bit fixed-point, rounding toward zero.
+ // In the case of overflow, the output is saturated.
+ // In the case of NaN and -0, the output is zero.
+ as_ftintrz_l_d(fpscratch, src);
+ moveFromDouble(fpscratch, dest);
+
+ // Fail on overflow cases.
+ as_slli_w(scratch, dest, 0);
+ ma_b(dest, scratch, fail, Assembler::NotEqual);
+}
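+
+// In the overflow check above, as_slli_w(scratch, dest, 0) sign-extends the
+// low 32 bits of the truncated result, so the NotEqual compare fails exactly
+// when that result does not fit in an int32.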
+
+template <typename T>
+void MacroAssembler::branchAdd32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_add32TestOverflow(dest, dest, src, overflow);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_add32TestCarry(cond, dest, dest, src, overflow);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+// The type of 'T src' may be a Register or an Imm32, depending on the
+// caller.
+template <typename T>
+void MacroAssembler::branchSub32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ switch (cond) {
+ case Overflow:
+ ma_sub32TestOverflow(dest, dest, src, overflow);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ ma_sub_w(dest, dest, src);
+ ma_b(dest, dest, overflow, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void MacroAssembler::branchMul32(Condition cond, T src, Register dest,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mul32TestOverflow(dest, dest, src, overflow);
+}
+
+template <typename T>
+void MacroAssembler::branchRshift32(Condition cond, T src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero);
+ rshift32(src, dest);
+ branch32(cond == Zero ? Equal : NotEqual, dest, Imm32(0), label);
+}
+
+void MacroAssembler::branchNeg32(Condition cond, Register reg, Label* label) {
+ MOZ_ASSERT(cond == Overflow);
+ neg32(reg);
+ branch32(Assembler::Equal, reg, Imm32(INT32_MIN), label);
+}
+
+template <typename T>
+void MacroAssembler::branchAddPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_addPtrTestOverflow(dest, dest, src, label);
+ break;
+ case CarryClear:
+ case CarrySet:
+ ma_addPtrTestCarry(cond, dest, dest, src, label);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+template <typename T>
+void MacroAssembler::branchSubPtr(Condition cond, T src, Register dest,
+ Label* label) {
+ switch (cond) {
+ case Overflow:
+ ma_subPtrTestOverflow(dest, dest, src, label);
+ break;
+ case NonZero:
+ case Zero:
+ case Signed:
+ case NotSigned:
+ subPtr(src, dest);
+ ma_b(dest, dest, label, cond);
+ break;
+ default:
+ MOZ_CRASH("NYI");
+ }
+}
+
+void MacroAssembler::branchMulPtr(Condition cond, Register src, Register dest,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Overflow);
+ ma_mulPtrTestOverflow(dest, dest, src, label);
+}
+
+void MacroAssembler::decBranchPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ subPtr(rhs, lhs);
+ branchPtr(cond, lhs, Imm32(0), label);
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ as_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+ }
+}
+
+template <class L>
+void MacroAssembler::branchTest32(Condition cond, Register lhs, Imm32 rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_and(scratch2, lhs, rhs);
+ ma_b(scratch2, scratch2, label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const Address& lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ and32(rhs, scratch2);
+ ma_b(scratch2, scratch2, label, cond);
+}
+
+void MacroAssembler::branchTest32(Condition cond, const AbsoluteAddress& lhs,
+ Imm32 rhs, Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(lhs, scratch2);
+ and32(rhs, scratch2);
+ ma_b(scratch2, scratch2, label, cond);
+}
+
+template <class L>
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Register rhs,
+ L label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ if (lhs == rhs) {
+ ma_b(lhs, rhs, label, cond);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ as_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+ }
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
+ Label* label) {
+ MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed ||
+ cond == NotSigned);
+ ScratchRegisterScope scratch(asMasm());
+ ma_and(scratch, lhs, rhs);
+ ma_b(scratch, scratch, label, cond);
+}
+
+void MacroAssembler::branchTestPtr(Condition cond, const Address& lhs,
+ Imm32 rhs, Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ branchTestPtr(cond, scratch2, rhs, label);
+}
+
+template <class L>
+void MacroAssembler::branchTest64(Condition cond, Register64 lhs,
+ Register64 rhs, Register temp, L label) {
+ branchTestPtr(cond, lhs.reg, rhs.reg, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_UNDEFINED), label, cond);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestUndefined(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestUndefined(Condition cond,
+ const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestUndefined(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_INT32), label, cond);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestInt32(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestInt32(cond, tag, label);
+}
+
+void MacroAssembler::branchTestInt32Truthy(bool b, const ValueOperand& value,
+ Label* label) {
+ ScratchRegisterScope scratch(*this);
+ as_bstrpick_d(scratch, value.valueReg(), 31, 0);
+ ma_b(scratch, scratch, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = (cond == Equal) ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JSVAL_TAG_MAX_DOUBLE), label, actual);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestDouble(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDouble(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestDouble(cond, tag, label);
+}
+
+void MacroAssembler::branchTestDoubleTruthy(bool b, FloatRegister value,
+ Label* label) {
+ ScratchDoubleScope fpscratch(*this);
+ ma_lid(fpscratch, 0.0);
+ DoubleCondition cond = b ? DoubleNotEqual : DoubleEqualOrUnordered;
+ ma_bc_d(value, fpscratch, label, cond);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ Condition actual = cond == Equal ? BelowOrEqual : Above;
+ ma_b(tag, ImmTag(JS::detail::ValueUpperInclNumberTag), label, actual);
+}
+
+void MacroAssembler::branchTestNumber(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNumber(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BOOLEAN), label, cond);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBoolean(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBoolean(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBoolean(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBooleanTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBoolean(value, scratch2);
+ ma_b(scratch2, scratch2, label, b ? NonZero : Zero);
+}
+
+void MacroAssembler::branchTestString(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_STRING), label, cond);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestString(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestString(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestString(cond, tag, label);
+}
+
+void MacroAssembler::branchTestStringTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxString(value, scratch2);
+ load32(Address(scratch2, JSString::offsetOfLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_SYMBOL), label, cond);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestSymbol(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestSymbol(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestSymbol(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_BIGINT), label, cond);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestBigInt(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigInt(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestBigInt(cond, tag, label);
+}
+
+void MacroAssembler::branchTestBigIntTruthy(bool b, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ unboxBigInt(value, scratch2);
+ load32(Address(scratch2, BigInt::offsetOfDigitLength()), scratch2);
+ ma_b(scratch2, Imm32(0), label, b ? NotEqual : Equal);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_NULL), label, cond);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestNull(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestNull(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestNull(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_OBJECT), label, cond);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestObject(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestObject(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestObject(cond, tag, label);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond,
+ const ValueOperand& value,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ branchTestPrimitive(cond, scratch2, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const Address& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond, const BaseIndex& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+void MacroAssembler::branchTestGCThing(Condition cond,
+ const ValueOperand& address,
+ Label* label) {
+ branchTestGCThingImpl(cond, address, label);
+}
+
+template <typename T>
+void MacroAssembler::branchTestGCThingImpl(Condition cond, const T& address,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ ma_b(tag, ImmTag(JS::detail::ValueLowerInclGCThingTag), label,
+ (cond == Equal) ? AboveOrEqual : Below);
+}
+
+void MacroAssembler::branchTestPrimitive(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JS::detail::ValueUpperExclPrimitiveTag), label,
+ (cond == Equal) ? Below : AboveOrEqual);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, Register tag,
+ Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ma_b(tag, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const BaseIndex& address,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(address, scratch2);
+ branchTestMagic(cond, tag, label);
+}
+
+template <class L>
+void MacroAssembler::branchTestMagic(Condition cond, const ValueOperand& value,
+ L label) {
+ SecondScratchRegisterScope scratch2(*this);
+ splitTag(value, scratch2);
+ ma_b(scratch2, ImmTag(JSVAL_TAG_MAGIC), label, cond);
+}
+
+void MacroAssembler::branchTestMagic(Condition cond, const Address& valaddr,
+ JSWhyMagic why, Label* label) {
+ uint64_t magic = MagicValue(why).asRawBits();
+ SecondScratchRegisterScope scratch(*this);
+ loadPtr(valaddr, scratch);
+ ma_b(scratch, ImmWord(magic), label, cond);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const BaseIndex& lhs,
+ const ValueOperand& rhs, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ branchPtr(cond, lhs, rhs.valueReg(), label);
+}
+
+template <typename T>
+void MacroAssembler::testNumberSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JS::detail::ValueUpperInclNumberTag),
+ cond == Equal ? BelowOrEqual : Above);
+}
+
+template <typename T>
+void MacroAssembler::testBooleanSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BOOLEAN), cond);
+}
+
+template <typename T>
+void MacroAssembler::testStringSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_STRING), cond);
+}
+
+template <typename T>
+void MacroAssembler::testSymbolSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_SYMBOL), cond);
+}
+
+template <typename T>
+void MacroAssembler::testBigIntSet(Condition cond, const T& src,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(*this);
+ Register tag = extractTag(src, scratch2);
+ ma_cmp_set(dest, tag, ImmTag(JSVAL_TAG_BIGINT), cond);
+}
+
+void MacroAssembler::branchToComputedAddress(const BaseIndex& addr) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(addr, scratch2);
+ branch(scratch2);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ cmp32Set(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
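+
+// cmp32Move32 composes a conditional move from two primitives: cmp32Set
+// materializes the condition as 0 or 1 in scratch2, then moveIfNotZero
+// copies src into dest when that bit is set.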
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ cmp32Set(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmp32Move32(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(*this);
+ MOZ_ASSERT(lhs != scratch2 && src != scratch2 && dest != scratch2);
+ load32(rhs, scratch2);
+ cmp32Move32(cond, lhs, scratch2, src, dest);
+}
+
+void MacroAssembler::cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
+ Register src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ cmp32Set(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
+ Register src, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ cmpPtrSet(cond, lhs, rhs, scratch2);
+ moveIfNotZero(dest, src, scratch2);
+}
+
+void MacroAssembler::cmpPtrMovePtr(Condition cond, Register lhs,
+ const Address& rhs, Register src,
+ Register dest) {
+ MOZ_CRASH("NYI");
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs,
+ const Address& rhs, const Address& src,
+ Register dest) {
+ ScratchRegisterScope scratch(*this);
+ MOZ_ASSERT(lhs != scratch && dest != scratch);
+ load32(rhs, scratch);
+ cmp32Load32(cond, lhs, scratch, src, dest);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Register rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ load32(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::cmp32Load32(Condition cond, Register lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ load32(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
+ const Address& src, Register dest) {
+ Label skip;
+ branch32(Assembler::InvertCondition(cond), lhs, rhs, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::test32LoadPtr(Condition cond, const Address& addr,
+ Imm32 mask, const Address& src,
+ Register dest) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreStringMitigations);
+ Label skip;
+ branchTest32(Assembler::InvertCondition(cond), addr, mask, &skip);
+ loadPtr(src, dest);
+ bind(&skip);
+}
+
+void MacroAssembler::test32MovePtr(Condition cond, const Address& addr,
+ Imm32 mask, Register src, Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreMovePtr(Condition cond, Register src,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreZeroRegister(Condition cond, Register scratch,
+ Register dest) {
+ MOZ_CRASH();
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheck32(Register index, const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branch32(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index, Register length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+void MacroAssembler::spectreBoundsCheckPtr(Register index,
+ const Address& length,
+ Register maybeScratch,
+ Label* failure) {
+ MOZ_RELEASE_ASSERT(!JitOptions.spectreIndexMasking);
+ branchPtr(Assembler::BelowOrEqual, length, index, failure);
+}
+
+// ========================================================================
+// Memory access primitives.
+
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedFloat32(
+ FloatRegister src, const Address& addr) {
+ return ma_fst_s(src, addr);
+}
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedFloat32(
+ FloatRegister src, const BaseIndex& addr) {
+ return ma_fst_s(src, addr);
+}
+
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedDouble(
+ FloatRegister src, const Address& addr) {
+ return ma_fst_d(src, addr);
+}
+FaultingCodeOffset MacroAssembler::storeUncanonicalizedDouble(
+ FloatRegister src, const BaseIndex& addr) {
+ return ma_fst_d(src, addr);
+}
+
+void MacroAssembler::memoryBarrier(MemoryBarrierBits barrier) {
+ if (barrier) {
+ as_dbar(0);
+ }
+}
+
+// ===============================================================
+// Clamping functions.
+
+void MacroAssembler::clampIntToUint8(Register reg) {
+ ScratchRegisterScope scratch(*this);
+ // If reg is < 0, then we want to clamp to 0.
+ as_slti(scratch, reg, 0);
+ as_masknez(reg, reg, scratch);
+
+ // If reg is >= 255, then we want to clamp to 255.
+ as_addi_d(reg, reg, -255);
+ as_slt(scratch, reg, zero);
+ as_maskeqz(reg, reg, scratch);
+ as_addi_d(reg, reg, 255);
+}
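+
+// Branchless sketch of the clamp above (illustrative, assuming int32_t v):
+//   v = (v < 0) ? 0 : v;   // slti + masknez
+//   v -= 255;
+//   v = (v < 0) ? v : 0;   // slt + maskeqz
+//   v += 255;              // yields min(max(original, 0), 255)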
+
+void MacroAssembler::fallibleUnboxPtr(const ValueOperand& src, Register dest,
+ JSValueType type, Label* fail) {
+ MOZ_ASSERT(type == JSVAL_TYPE_OBJECT || type == JSVAL_TYPE_STRING ||
+ type == JSVAL_TYPE_SYMBOL || type == JSVAL_TYPE_BIGINT);
+ // dest := src XOR mask
+ // scratch := dest >> JSVAL_TAG_SHIFT
+ // fail if scratch != 0
+ //
+ // Note: src and dest can be the same register
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src.valueReg() != scratch);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ as_xor(dest, src.valueReg(), scratch);
+ as_srli_d(scratch, dest, JSVAL_TAG_SHIFT);
+ ma_b(scratch, Imm32(0), fail, Assembler::NotEqual);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const Address& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+void MacroAssembler::fallibleUnboxPtr(const BaseIndex& src, Register dest,
+ JSValueType type, Label* fail) {
+ loadValue(src, ValueOperand(dest));
+ fallibleUnboxPtr(ValueOperand(dest), dest, type, fail);
+}
+
+//}}} check_macroassembler_style
+// ===============================================================
+
+// The specializations for cmpPtrSet are outside the braces because
+// check_macroassembler_style can't yet deal with specializations.
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ ImmPtr rhs, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ loadPtr(rhs, scratch);
+ cmpPtrSet(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmpPtrSet(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rhs != scratch2);
+ loadPtr(lhs, scratch2);
+ cmpPtrSet(cond, Register(scratch2), rhs, dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Register lhs,
+ Address rhs, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ load32(rhs, scratch);
+ cmp32Set(cond, lhs, Register(scratch), dest);
+}
+
+template <>
+inline void MacroAssembler::cmp32Set(Assembler::Condition cond, Address lhs,
+ Register rhs, Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rhs != scratch2);
+ load32(lhs, scratch2);
+ cmp32Set(cond, Register(scratch2), rhs, dest);
+}
+
+void MacroAssemblerLOONG64Compat::incrementInt32Value(const Address& addr) {
+ asMasm().add32(Imm32(1), addr);
+}
+
+void MacroAssemblerLOONG64Compat::retn(Imm32 n) {
+ // pc <- [sp]; sp += n
+ loadPtr(Address(StackPointer, 0), ra);
+ asMasm().addPtr(n, StackPointer);
+ as_jirl(zero, ra, BOffImm16(0));
+}
+
+// If source is a double, load into dest.
+// If source is int32, convert to double and store in dest.
+// Else, branch to failure.
+void MacroAssemblerLOONG64Compat::ensureDouble(const ValueOperand& source,
+ FloatRegister dest,
+ Label* failure) {
+ Label isDouble, done;
+ {
+ ScratchTagScope tag(asMasm(), source);
+ splitTagForTest(source, tag);
+ asMasm().branchTestDouble(Assembler::Equal, tag, &isDouble);
+ asMasm().branchTestInt32(Assembler::NotEqual, tag, failure);
+ }
+
+ convertInt32ToDouble(source.valueReg(), dest);
+ jump(&done);
+
+ bind(&isDouble);
+ unboxDouble(source, dest);
+
+ bind(&done);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_MacroAssembler_loong64_inl_h */
diff --git a/js/src/jit/loong64/MacroAssembler-loong64.cpp b/js/src/jit/loong64/MacroAssembler-loong64.cpp
new file mode 100644
index 0000000000..528c120058
--- /dev/null
+++ b/js/src/jit/loong64/MacroAssembler-loong64.cpp
@@ -0,0 +1,5555 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/MacroAssembler-loong64.h"
+
+#include "jsmath.h"
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/loong64/SharedICRegisters-loong64.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MoveEmitter.h"
+#include "util/Memory.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+void MacroAssembler::clampDoubleToUint8(FloatRegister input, Register output) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ as_ftintrne_l_d(fpscratch, input);
+ as_movfr2gr_d(output, fpscratch);
+ // If res < 0, then res = 0.
+ as_slt(scratch, output, zero);
+ as_masknez(output, output, scratch);
+ // If res > 255, then res = 255.
+ as_sltui(scratch, output, 255);
+ as_addi_d(output, output, -255);
+ as_maskeqz(output, output, scratch);
+ as_addi_d(output, output, 255);
+}
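+
+// Note: as_ftintrne rounds to nearest with ties to even, which matches the
+// ToUint8Clamp rounding ECMAScript specifies for Uint8ClampedArray stores.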
+
+bool MacroAssemblerLOONG64Compat::buildOOLFakeExitFrame(void* fakeReturnAddr) {
+ asMasm().PushFrameDescriptor(FrameType::IonJS); // descriptor_
+ asMasm().Push(ImmPtr(fakeReturnAddr));
+ asMasm().Push(FramePointer);
+ return true;
+}
+
+void MacroAssemblerLOONG64Compat::convertUInt32ToDouble(Register src,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ as_bstrpick_d(scratch, src, 31, 0);
+ asMasm().convertInt64ToDouble(Register64(scratch), dest);
+}
+
+void MacroAssemblerLOONG64Compat::convertUInt64ToDouble(Register src,
+ FloatRegister dest) {
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(src != scratch2);
+
+ ma_and(scratch, src, Imm32(1));
+ as_srli_d(scratch2, src, 1);
+ as_or(scratch, scratch, scratch2);
+ as_movgr2fr_d(dest, scratch);
+ as_ffint_d_l(dest, dest);
+ asMasm().addDouble(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_movgr2fr_d(dest, src);
+ as_ffint_d_l(dest, dest);
+
+ bind(&done);
+}
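+
+// A sketch of the negative path above (illustrative, assuming uint64_t u):
+// the signed conversion would misread the top bit, so the code computes
+//   (double)((u >> 1) | (u & 1)) * 2.0
+// Folding the low bit into the halved value keeps round-to-nearest correct.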
+
+void MacroAssemblerLOONG64Compat::convertUInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ as_bstrpick_d(scratch, src, 31, 0);
+ asMasm().convertInt64ToFloat32(Register64(scratch), dest);
+}
+
+void MacroAssemblerLOONG64Compat::convertDoubleToFloat32(FloatRegister src,
+ FloatRegister dest) {
+ as_fcvt_s_d(dest, src);
+}
+
+const int CauseBitPos = int(Assembler::CauseI);
+const int CauseBitCount = 1 + int(Assembler::CauseV) - int(Assembler::CauseI);
+const int CauseIOrVMask = ((1 << int(Assembler::CauseI)) |
+ (1 << int(Assembler::CauseV))) >>
+ int(Assembler::CauseI);
+
+// Checks whether a double is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, jumps to the given
+// failure label. This function overwrites the scratch float register.
+void MacroAssemblerLOONG64Compat::convertDoubleToInt32(FloatRegister src,
+ Register dest,
+ Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromDouble(src, dest);
+ as_rotri_d(dest, dest, 63);
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+ // Truncate double to int32; fail if the result is inexact or invalid.
+ as_ftintrz_w_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
+ as_andi(scratch, scratch,
+ CauseIOrVMask); // Mask for the Inexact and Invalid flags.
+ ma_b(scratch, zero, fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64Compat::convertDoubleToPtr(FloatRegister src,
+ Register dest, Label* fail,
+ bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromDouble(src, dest);
+ as_rotri_d(dest, dest, 63);
+ ma_b(dest, Imm32(1), fail, Assembler::Equal);
+ }
+
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ // Truncate double to int64; fail if the result is inexact or invalid.
+ as_ftintrz_l_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, dest);
+ as_bstrpick_d(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
+ as_andi(scratch, scratch,
+ CauseIOrVMask); // Mask for the Inexact and Invalid flags.
+ ma_b(scratch, zero, fail, Assembler::NotEqual);
+}
+
+// Checks whether a float32 is representable as a 32-bit integer. If so, the
+// integer is written to the output register. Otherwise, jumps to the given
+// failure label. This function overwrites the scratch float register.
+void MacroAssemblerLOONG64Compat::convertFloat32ToInt32(
+ FloatRegister src, Register dest, Label* fail, bool negativeZeroCheck) {
+ if (negativeZeroCheck) {
+ moveFromFloat32(src, dest);
+ ma_b(dest, Imm32(INT32_MIN), fail, Assembler::Equal);
+ }
+
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+ as_ftintrz_w_s(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ MOZ_ASSERT(CauseBitPos + CauseBitCount < 33);
+ MOZ_ASSERT(CauseBitPos < 32);
+ as_bstrpick_w(scratch, scratch, CauseBitPos + CauseBitCount - 1, CauseBitPos);
+ as_andi(scratch, scratch, CauseIOrVMask);
+ ma_b(scratch, zero, fail, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64Compat::convertFloat32ToDouble(FloatRegister src,
+ FloatRegister dest) {
+ as_fcvt_d_s(dest, src);
+}
+
+void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(Register src,
+ FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ as_ffint_s_w(dest, dest);
+}
+
+void MacroAssemblerLOONG64Compat::convertInt32ToFloat32(const Address& src,
+ FloatRegister dest) {
+ ma_fld_s(dest, src);
+ as_ffint_s_w(dest, dest);
+}
+
+void MacroAssemblerLOONG64Compat::movq(Register rj, Register rd) {
+ as_or(rd, rj, zero);
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, CodeLabel* label) {
+ BufferOffset bo = m_buffer.nextOffset();
+ ma_liPatchable(dest, ImmWord(/* placeholder */ 0));
+ label->patchAt()->bind(bo.getOffset());
+ label->setLinkMode(CodeLabel::MoveImmediate);
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, ImmWord imm) {
+ int64_t value = imm.value;
+
+ if (-1 == (value >> 11) || 0 == (value >> 11)) {
+ as_addi_w(dest, zero, value);
+ return;
+ }
+
+ if (0 == (value >> 12)) {
+ as_ori(dest, zero, value);
+ return;
+ }
+
+ if (-1 == (value >> 31) || 0 == (value >> 31)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ } else if (0 == (value >> 32)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ as_bstrins_d(dest, zero, 63, 32);
+ } else if (-1 == (value >> 51) || 0 == (value >> 51)) {
+ if (is_uintN((value >> 12) & 0xfffff, 20)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ }
+ as_lu32i_d(dest, (value >> 32) & 0xfffff);
+ } else if (0 == (value >> 52)) {
+ if (is_uintN((value >> 12) & 0xfffff, 20)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ }
+ as_lu32i_d(dest, (value >> 32) & 0xfffff);
+ as_bstrins_d(dest, zero, 63, 52);
+ } else {
+ if (is_uintN((value >> 12) & 0xfffff, 20)) {
+ as_lu12i_w(dest, (value >> 12) & 0xfffff);
+ }
+ if (is_uintN((value >> 32) & 0xfffff, 20)) {
+ as_lu32i_d(dest, (value >> 32) & 0xfffff);
+ }
+ as_lu52i_d(dest, dest, (value >> 52) & 0xfff);
+ }
+
+ if (is_uintN(value & 0xfff, 12)) {
+ as_ori(dest, dest, value & 0xfff);
+ }
+}
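+
+// A sketch of the decomposition above: ori fills bits [11:0], lu12i_w bits
+// [31:12] (sign-extending into the upper word), lu32i_d bits [51:32], and
+// lu52i_d bits [63:52]. A full 64-bit constant therefore needs at most four
+// instructions, and leading all-zero or all-one fields allow shorter forms.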
+
+// This method generates an lu32i_d, lu12i_w and ori instruction block that
+// can be modified by UpdateLoad64Value, either during compilation (e.g.
+// Assembler::bind), or during execution (e.g. jit::PatchJump).
+void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmPtr imm) {
+ return ma_liPatchable(dest, ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssemblerLOONG64::ma_liPatchable(Register dest, ImmWord imm,
+ LiFlags flags) {
+ // hi12, hi20, low20, low12
+ if (Li64 == flags) { // Li64: Imm data
+ m_buffer.ensureSpace(4 * sizeof(uint32_t));
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
+ as_ori(dest, dest, imm.value & 0xfff); // low12
+ as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
+ as_lu52i_d(dest, dest, imm.value >> 52 & 0xfff); // hi12
+ } else { // Li48 address
+ m_buffer.ensureSpace(3 * sizeof(uint32_t));
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff); // low20
+ as_ori(dest, dest, imm.value & 0xfff); // low12
+ as_lu32i_d(dest, imm.value >> 32 & 0xfffff); // hi20
+ }
+}
+
+// Memory access ops.
+
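+// Each accessor below follows the same shape: a signed 12-bit offset uses
+// the immediate-offset form directly, while a larger offset is first
+// materialized into a register (dest itself when it is not the base; the
+// scratch register otherwise, and always for stores, whose src and base
+// must be preserved) and the indexed reg+reg form is used instead.
+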
+FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_b(Register dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_ld_b(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_b(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_b(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_bu(Register dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_ld_bu(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_bu(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_bu(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_h(Register dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_ld_h(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_h(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_h(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_hu(Register dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_ld_hu(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_hu(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_hu(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_w(Register dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_ld_w(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_w(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_w(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_wu(Register dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_ld_wu(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_wu(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_wu(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_ld_d(Register dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_ld_d(dest, base, offset);
+ } else if (base != dest) {
+ ma_li(dest, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_d(dest, base, dest);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_ldx_d(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_st_b(Register src,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_st_b(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_stx_b(src, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_st_h(Register src,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_st_h(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_stx_h(src, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_st_w(Register src,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_st_w(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_stx_w(src, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_st_d(Register src,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_st_d(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = FaultingCodeOffset(currentOffset());
+ as_stx_d(src, base, scratch);
+ }
+ return fco;
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerLOONG64::ma_add_d(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(imm.value, 12)) {
+ as_addi_d(rd, rj, imm.value);
+ } else if (rd != rj) {
+ ma_li(rd, imm);
+ as_add_d(rd, rj, rd);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_add_d(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ as_add_d(scratch, rj, rk);
+ as_add_w(rd, rj, rk);
+ ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
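+
+// The check above relies on 64-bit registers holding sign-extended 32-bit
+// values: as_add_d computes the exact 64-bit sum while as_add_w sign-extends
+// the 32-bit sum, so the two results differ exactly when the 32-bit addition
+// overflows. For example, rj = 0x7fffffff and rk = 1 give 0x0000000080000000
+// from add_d but 0xffffffff80000000 from add_w, taking the overflow branch.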
+
+void MacroAssemblerLOONG64::ma_add32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ // Check for signed range because of as_addi_d
+ if (is_intN(imm.value, 12)) {
+ ScratchRegisterScope scratch(asMasm());
+ as_addi_d(scratch, rj, imm.value);
+ as_addi_w(rd, rj, imm.value);
+ ma_b(rd, scratch, overflow, Assembler::NotEqual);
+ } else {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_add32TestOverflow(rd, rj, scratch2, overflow);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ if (rj == rk) {
+ if (rj == rd) {
+ as_or(scratch, rj, zero);
+ rj = scratch;
+ }
+
+ as_add_d(rd, rj, rj);
+ as_xor(scratch, rj, rd);
+ ma_b(scratch, zero, overflow, Assembler::LessThan);
+ } else {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ MOZ_ASSERT(rd != scratch2);
+
+ if (rj == rd) {
+ as_or(scratch2, rj, zero);
+ rj = scratch2;
+ }
+
+ as_add_d(rd, rj, rk);
+    // Signed overflow occurred iff (rk < 0) != (rd < rj). Recompute rk's
+    // value from the sum, since rd may alias rk; rj is still intact here.
+    as_sub_d(scratch, rd, rj);
+    as_slt(scratch, scratch, zero);
+    as_slt(scratch2, rd, rj);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (imm.value == 0) {
+ as_ori(rd, rj, 0);
+ return;
+ }
+
+ if (rj == rd) {
+ as_ori(scratch2, rj, 0);
+ rj = scratch2;
+ }
+
+ ma_add_d(rd, rj, imm);
+
+ if (imm.value > 0) {
+ ma_b(rd, rj, overflow, Assembler::LessThan);
+ } else {
+ MOZ_ASSERT(imm.value < 0);
+ ma_b(rd, rj, overflow, Assembler::GreaterThan);
+ }
+}
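+
+// The monotonicity check above works because adding a positive immediate can
+// only produce rd < rj (signed) by wrapping: e.g. rj = INT64_MAX and imm = 1
+// yield rd = INT64_MIN < rj, taking the overflow branch; the negative case
+// is symmetric.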
+
+void MacroAssemblerLOONG64::ma_addPtrTestOverflow(Register rd, Register rj,
+ ImmWord imm,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ if (imm.value == 0) {
+ as_ori(rd, rj, 0);
+ return;
+ }
+
+ if (rj == rd) {
+ MOZ_ASSERT(rj != scratch2);
+ as_ori(scratch2, rj, 0);
+ rj = scratch2;
+ }
+
+ ma_li(rd, imm);
+ as_add_d(rd, rj, rd);
+
+ if (imm.value > 0) {
+ ma_b(rd, rj, overflow, Assembler::LessThan);
+ } else {
+ MOZ_ASSERT(imm.value < 0);
+ ma_b(rd, rj, overflow, Assembler::GreaterThan);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, Register rk,
+ Label* label) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != rk);
+ MOZ_ASSERT(rd != scratch);
+ as_add_d(rd, rj, rk);
+ as_sltu(scratch, rd, rk);
+ ma_b(scratch, Register(scratch), label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
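+
+// Unsigned addition wraps exactly when the result compares below either
+// addend; rk is the safe operand to compare against since rd may alias rj
+// but is asserted distinct from rk. For example, rj = UINT64_MAX and rk = 1
+// give rd = 0, sltu(rd, rk) = 1, and the CarrySet branch is taken.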
+
+void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, Imm32 imm,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // Check for signed range because of as_addi_d
+ if (is_intN(imm.value, 12)) {
+ as_addi_d(rd, rj, imm.value);
+ as_sltui(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(scratch2, imm);
+ ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_addPtrTestCarry(Condition cond, Register rd,
+ Register rj, ImmWord imm,
+ Label* label) {
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ // Check for signed range because of as_addi_d
+ if (is_intN(imm.value, 12)) {
+ as_addi_d(rd, rj, imm.value);
+ as_sltui(scratch2, rd, imm.value);
+ ma_b(scratch2, scratch2, label,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+ } else {
+ ma_li(scratch2, imm);
+ ma_addPtrTestCarry(cond, rd, rj, scratch2, label);
+ }
+}
+
+// Subtract.
+void MacroAssemblerLOONG64::ma_sub_d(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(-imm.value, 12)) {
+ as_addi_d(rd, rj, -imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ as_sub_d(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ as_sub_d(scratch, rj, rk);
+ as_sub_w(rd, rj, rk);
+ ma_b(rd, Register(scratch), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT_IF(rj == rd, rj != rk);
+ MOZ_ASSERT(rj != scratch2);
+ MOZ_ASSERT(rk != scratch2);
+ MOZ_ASSERT(rd != scratch2);
+
+ Register rj_copy = rj;
+
+ if (rj == rd) {
+ as_or(scratch2, rj, zero);
+ rj_copy = scratch2;
+ }
+
+ {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ as_sub_d(rd, rj, rk);
+    // If the signs of rj and rk are the same, no overflow is possible.
+    as_xor(scratch, rj_copy, rk);
+    // Check whether the signs of rd and rj differ.
+    as_xor(scratch2, rd, rj_copy);
+ as_and(scratch2, scratch2, scratch);
+ }
+
+ ma_b(scratch2, zero, overflow, Assembler::LessThan);
+}
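+
+// Signed subtraction rd = rj - rk overflows exactly when the operands have
+// different signs and rd's sign differs from rj's, i.e. when
+// (rj ^ rk) & (rd ^ rj) is negative. For example, rj = INT64_MIN and rk = 1
+// wrap to rd = INT64_MAX; both xor results have the sign bit set, so the
+// overflow branch is taken.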
+
+void MacroAssemblerLOONG64::ma_subPtrTestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ // TODO(loong64): Check subPtrTestOverflow
+ MOZ_ASSERT(imm.value != INT32_MIN);
+ ma_addPtrTestOverflow(rd, rj, Imm32(-imm.value), overflow);
+}
+
+void MacroAssemblerLOONG64::ma_mul_d(Register rd, Register rj, Imm32 imm) {
+ // li handles the relocation.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_mul_d(rd, rj, scratch);
+}
+
+void MacroAssemblerLOONG64::ma_mulh_d(Register rd, Register rj, Imm32 imm) {
+ // li handles the relocation.
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_mulh_d(rd, rj, scratch);
+}
+
+void MacroAssemblerLOONG64::ma_mulPtrTestOverflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rd != scratch);
+
+ if (rd == rj) {
+ as_or(scratch, rj, zero);
+ rj = scratch;
+ rk = (rd == rk) ? rj : rk;
+ } else if (rd == rk) {
+ as_or(scratch, rk, zero);
+ rk = scratch;
+ }
+
+ as_mul_d(rd, rj, rk);
+ as_mulh_d(scratch, rj, rk);
+ as_srai_d(scratch2, rd, 63);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
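+
+// A 64x64->128-bit signed product fits in 64 bits exactly when its high half
+// (mulh_d) equals the sign-extension of its low half (srai_d by 63). For
+// example, rj = rk = 2^32 gives mul_d = 0 and mulh_d = 1, and since
+// srai_d(0, 63) = 0 != 1 the overflow branch is taken.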
+
+// Memory.
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_load(
+ Register dest, Address address, LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int32_t encodedOffset;
+ Register base;
+ FaultingCodeOffset fco;
+
+  // TODO: use as_ldx_b/h/w/d, which could avoid the extra as_add_d.
+ switch (size) {
+ case SizeByte:
+ case SizeHalfWord:
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ fco = FaultingCodeOffset(currentOffset());
+ if (size == SizeByte) {
+ if (ZeroExtend == extension) {
+ as_ld_bu(dest, base, encodedOffset);
+ } else {
+ as_ld_b(dest, base, encodedOffset);
+ }
+ } else {
+ if (ZeroExtend == extension) {
+ as_ld_hu(dest, base, encodedOffset);
+ } else {
+ as_ld_h(dest, base, encodedOffset);
+ }
+ }
+ break;
+ case SizeWord:
+ case SizeDouble:
+ if ((address.offset & 0x3) == 0 &&
+ (size == SizeDouble ||
+ (size == SizeWord && SignExtend == extension))) {
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ fco = FaultingCodeOffset(currentOffset());
+ if (size == SizeWord) {
+ as_ldptr_w(dest, base, encodedOffset);
+ } else {
+ as_ldptr_d(dest, base, encodedOffset);
+ }
+ } else {
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ fco = FaultingCodeOffset(currentOffset());
+ if (size == SizeWord) {
+ if (ZeroExtend == extension) {
+ as_ld_wu(dest, base, encodedOffset);
+ } else {
+ as_ld_w(dest, base, encodedOffset);
+ }
+ } else {
+ as_ld_d(dest, base, encodedOffset);
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_load");
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_store(
+ Register data, Address address, LoadStoreSize size,
+ LoadStoreExtension extension) {
+ int32_t encodedOffset;
+ Register base;
+ FaultingCodeOffset fco;
+
+  // TODO: use as_stx_b/h/w/d, which could avoid the extra as_add_d.
+ switch (size) {
+ case SizeByte:
+ case SizeHalfWord:
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ fco = FaultingCodeOffset(currentOffset());
+ if (size == SizeByte) {
+ as_st_b(data, base, encodedOffset);
+ } else {
+ as_st_h(data, base, encodedOffset);
+ }
+ break;
+ case SizeWord:
+ case SizeDouble:
+ if ((address.offset & 0x3) == 0) {
+ if (!Imm16::IsInSignedRange(address.offset)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ fco = FaultingCodeOffset(currentOffset());
+ if (size == SizeWord) {
+ as_stptr_w(data, base, encodedOffset);
+ } else {
+ as_stptr_d(data, base, encodedOffset);
+ }
+ } else {
+ if (!is_intN(address.offset, 12)) {
+ ma_li(ScratchRegister, Imm32(address.offset));
+ as_add_d(ScratchRegister, address.base, ScratchRegister);
+ base = ScratchRegister;
+ encodedOffset = 0;
+ } else {
+ encodedOffset = address.offset;
+ base = address.base;
+ }
+
+ fco = FaultingCodeOffset(currentOffset());
+ if (size == SizeWord) {
+ as_st_w(data, base, encodedOffset);
+ } else {
+ as_st_d(data, base, encodedOffset);
+ }
+ }
+ break;
+ default:
+ MOZ_CRASH("Invalid argument for ma_store");
+ }
+ return fco;
+}
+
+void MacroAssemblerLOONG64Compat::computeScaledAddress(const BaseIndex& address,
+ Register dest) {
+ Register base = address.base;
+ Register index = address.index;
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+
+ if (shift) {
+ MOZ_ASSERT(shift <= 4);
+ as_alsl_d(dest, index, base, shift - 1);
+ } else {
+ as_add_d(dest, base, index);
+ }
+}
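+
+// ALSL.D adds its second source operand to the first shifted left by sa + 1,
+// so passing shift - 1 computes dest = (index << shift) + base in a single
+// instruction. For example, Scale == TimesEight (shift == 3) computes
+// dest = base + index * 8 via as_alsl_d(dest, index, base, 2).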
+
+void MacroAssemblerLOONG64::ma_pop(Register r) {
+ MOZ_ASSERT(r != StackPointer);
+ as_ld_d(r, StackPointer, 0);
+ as_addi_d(StackPointer, StackPointer, sizeof(intptr_t));
+}
+
+void MacroAssemblerLOONG64::ma_push(Register r) {
+ if (r == StackPointer) {
+ ScratchRegisterScope scratch(asMasm());
+ as_or(scratch, r, zero);
+ as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+ as_st_d(scratch, StackPointer, 0);
+ } else {
+ as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(intptr_t));
+ as_st_d(r, StackPointer, 0);
+ }
+}
+
+// Branch helpers used from within LoongArch-specific code.
+void MacroAssemblerLOONG64::ma_b(Register lhs, ImmWord imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ if (imm.value <= INT32_MAX) {
+ ma_b(lhs, Imm32(uint32_t(imm.value)), label, c, jumpKind);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ ma_li(scratch, imm);
+ ma_b(lhs, Register(scratch), label, c, jumpKind, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_b(Register lhs, Address addr, Label* label,
+ Condition c, JumpKind jumpKind) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ ma_ld_d(scratch, addr);
+ ma_b(lhs, Register(scratch), label, c, jumpKind, scratch);
+}
+
+void MacroAssemblerLOONG64::ma_b(Address addr, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_d(scratch2, addr);
+ ma_b(Register(scratch2), imm, label, c, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_b(Address addr, ImmGCPtr imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_d(scratch2, addr);
+ ma_b(Register(scratch2), imm, label, c, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_bl(Label* label) {
+ spew("branch .Llabel %p\n", label);
+ if (label->bound()) {
+    // Generate the long jump for calls because the return address has to be
+    // the address after the reserved block.
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ ScratchRegisterScope scratch(asMasm());
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(ra, scratch, BOffImm16(0));
+ return;
+ }
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+  // Make the whole branch continuous in the buffer. The five
+  // instructions are written below.
+ m_buffer.ensureSpace(5 * sizeof(uint32_t));
+
+ spew("bal .Llabel %p\n", label);
+ BufferOffset bo = writeInst(getBranchCode(BranchIsCall).encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for long jump.
+ as_nop();
+ as_nop();
+ as_nop();
+}
+
+void MacroAssemblerLOONG64::branchWithCode(InstImm code, Label* label,
+ JumpKind jumpKind,
+ Register scratch) {
+  // Simply output the pointer of the label as its id; note that once the
+  // label's destructor has run, the pointer may be reused.
+ spew("branch .Llabel %p", label);
+ MOZ_ASSERT(code.encode() !=
+ InstImm(op_jirl, BOffImm16(0), zero, ra).encode());
+ InstImm inst_beq = InstImm(op_beq, BOffImm16(0), zero, zero);
+
+ if (label->bound()) {
+ int32_t offset = label->offset() - m_buffer.nextOffset().getOffset();
+
+ if (BOffImm16::IsInRange(offset)) {
+ jumpKind = ShortJump;
+ }
+
+ // ShortJump
+ if (jumpKind == ShortJump) {
+ MOZ_ASSERT(BOffImm16::IsInRange(offset));
+
+ if (code.extractBitField(31, 26) == ((uint32_t)op_bcz >> 26)) {
+ code.setImm21(offset);
+ } else {
+ code.setBOffImm16(BOffImm16(offset));
+ }
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ writeInst(code.encode());
+ return;
+ }
+
+ // LongJump
+ if (code.encode() == inst_beq.encode()) {
+ // Handle long jump
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ if (scratch == Register::Invalid()) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
+ } else {
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
+ }
+ as_nop();
+ return;
+ }
+
+    // OpenLongJump
+    // Handle a long conditional branch: the inverted branch's target offset
+    // is relative to the branch itself and skips the long-jump sequence
+    // emitted below.
+ spew("invert branch .Llabel %p", label);
+ InstImm code_r = invertBranch(code, BOffImm16(5 * sizeof(uint32_t)));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code_r);
+#endif
+ writeInst(code_r.encode());
+ addLongJump(nextOffset(), BufferOffset(label->offset()));
+ if (scratch == Register::Invalid()) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
+ } else {
+ ma_liPatchable(scratch, ImmWord(LabelBase::INVALID_OFFSET));
+ as_jirl(zero, scratch, BOffImm16(0)); // jr scratch
+ }
+ as_nop();
+ return;
+ }
+
+  // Generate an open jump and link it to the label.
+
+ // Second word holds a pointer to the next branch in label's chain.
+ uint32_t nextInChain =
+ label->used() ? label->offset() : LabelBase::INVALID_OFFSET;
+
+ if (jumpKind == ShortJump) {
+    // Make the whole branch continuous in the buffer.
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+
+    // Indicate that this is a short jump with offset 4.
+ code.setBOffImm16(BOffImm16(4));
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode());
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ return;
+ }
+
+ bool conditional = code.encode() != inst_beq.encode();
+
+  // Make the whole branch continuous in the buffer. The five
+  // instructions are written below (including the conditional nop).
+ m_buffer.ensureSpace(5 * sizeof(uint32_t));
+
+#ifdef JS_JITSPEW
+ decodeBranchInstAndSpew(code);
+#endif
+ BufferOffset bo = writeInst(code.encode()); // invert
+ writeInst(nextInChain);
+ if (!oom()) {
+ label->use(bo.getOffset());
+ }
+ // Leave space for potential long jump.
+ as_nop();
+ as_nop();
+ if (conditional) {
+ as_nop();
+ }
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmWord imm,
+ Condition c) {
+ if (imm.value <= INT32_MAX) {
+ ma_cmp_set(rd, rj, Imm32(uint32_t(imm.value)), c);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_cmp_set(rd, rj, scratch, c);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, ImmPtr imm,
+ Condition c) {
+ ma_cmp_set(rd, rj, ImmWord(uintptr_t(imm.value)), c);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address, Imm32 imm,
+ Condition c) {
+ // TODO(loong64): 32-bit ma_cmp_set?
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_w(scratch2, address);
+ ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Address address,
+ ImmWord imm, Condition c) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_ld_d(scratch2, address);
+ ma_cmp_set(rd, Register(scratch2), imm, c);
+}
+
+// fp instructions
+void MacroAssemblerLOONG64::ma_lid(FloatRegister dest, double value) {
+ ImmWord imm(mozilla::BitwiseCast<uint64_t>(value));
+
+ if (imm.value != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ moveToDouble(scratch, dest);
+ } else {
+ moveToDouble(zero, dest);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_mv(FloatRegister src, ValueOperand dest) {
+ as_movfr2gr_d(dest.valueReg(), src);
+}
+
+void MacroAssemblerLOONG64::ma_mv(ValueOperand src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src.valueReg());
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_s(FloatRegister dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ js::wasm::FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fld_s(dest, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fldx_s(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_d(FloatRegister dest,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ js::wasm::FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fld_d(dest, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fldx_d(dest, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_s(FloatRegister src,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ js::wasm::FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fst_s(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fstx_s(src, base, scratch);
+ }
+ return fco;
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_d(FloatRegister src,
+ Address address) {
+ int32_t offset = address.offset;
+ Register base = address.base;
+ js::wasm::FaultingCodeOffset fco;
+
+ if (is_intN(offset, 12)) {
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fst_d(src, base, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(base != scratch);
+ ma_li(scratch, Imm32(offset));
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_fstx_d(src, base, scratch);
+ }
+ return fco;
+}
+
+void MacroAssemblerLOONG64::ma_pop(FloatRegister f) {
+ as_fld_d(f, StackPointer, 0);
+ as_addi_d(StackPointer, StackPointer, sizeof(double));
+}
+
+void MacroAssemblerLOONG64::ma_push(FloatRegister f) {
+ as_addi_d(StackPointer, StackPointer, (int32_t) - sizeof(double));
+ as_fst_d(f, StackPointer, 0);
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, ImmGCPtr ptr) {
+ writeDataRelocation(ptr);
+ asMasm().ma_liPatchable(dest, ImmPtr(ptr.value));
+}
+
+void MacroAssemblerLOONG64::ma_li(Register dest, Imm32 imm) {
+ if (is_intN(imm.value, 12)) {
+ as_addi_w(dest, zero, imm.value);
+ } else if (is_uintN(imm.value, 12)) {
+ as_ori(dest, zero, imm.value & 0xfff);
+ } else {
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
+ if (imm.value & 0xfff) {
+ as_ori(dest, dest, imm.value & 0xfff);
+ }
+ }
+}
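+
+// For example, ma_li(dest, Imm32(0x12345678)) takes the last case above and
+// emits:
+//   lu12i_w dest, 0x12345      # dest = 0x12345000 (low 12 bits zeroed)
+//   ori     dest, dest, 0x678  # dest = 0x12345678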
+
+// This method generates a lu12i_w/ori instruction pair that can be modified
+// by UpdateLuiOriValue, either during compilation (e.g. Assembler::bind) or
+// during execution (e.g. jit::PatchJump).
+void MacroAssemblerLOONG64::ma_liPatchable(Register dest, Imm32 imm) {
+ m_buffer.ensureSpace(2 * sizeof(uint32_t));
+ as_lu12i_w(dest, imm.value >> 12 & 0xfffff);
+ as_ori(dest, dest, imm.value & 0xfff);
+}
+
+void MacroAssemblerLOONG64::ma_fmovz(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fj, Register rk) {
+ Label done;
+ ma_b(rk, zero, &done, Assembler::NotEqual);
+ if (fmt == SingleFloat) {
+ as_fmov_s(fd, fj);
+ } else {
+ as_fmov_d(fd, fj);
+ }
+ bind(&done);
+}
+
+void MacroAssemblerLOONG64::ma_fmovn(FloatFormat fmt, FloatRegister fd,
+ FloatRegister fj, Register rk) {
+ Label done;
+ ma_b(rk, zero, &done, Assembler::Equal);
+ if (fmt == SingleFloat) {
+ as_fmov_s(fd, fj);
+ } else {
+ as_fmov_d(fd, fj);
+ }
+ bind(&done);
+}
+
+void MacroAssemblerLOONG64::ma_and(Register rd, Register rj, Imm32 imm,
+ bool bit32) {
+ if (is_uintN(imm.value, 12)) {
+ as_andi(rd, rj, imm.value);
+ } else if (rd != rj) {
+ ma_li(rd, imm);
+ as_and(rd, rj, rd);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_and(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_or(Register rd, Register rj, Imm32 imm,
+ bool bit32) {
+ if (is_uintN(imm.value, 12)) {
+ as_ori(rd, rj, imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_or(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_xor(Register rd, Register rj, Imm32 imm,
+ bool bit32) {
+ if (is_uintN(imm.value, 12)) {
+ as_xori(rd, rj, imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_xor(rd, rj, scratch);
+ }
+}
+
+// Arithmetic-based ops.
+
+// Add.
+void MacroAssemblerLOONG64::ma_add_w(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(imm.value, 12)) {
+ as_addi_w(rd, rj, imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_add_w(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
+ Register rj, Register rk,
+ Label* overflow) {
+ MOZ_ASSERT(cond == Assembler::CarrySet || cond == Assembler::CarryClear);
+ MOZ_ASSERT_IF(rd == rj, rk != rd);
+ ScratchRegisterScope scratch(asMasm());
+ as_add_w(rd, rj, rk);
+ as_sltu(scratch, rd, rd == rj ? rk : rj);
+ ma_b(Register(scratch), Register(scratch), overflow,
+ cond == Assembler::CarrySet ? Assembler::NonZero : Assembler::Zero);
+}
+
+void MacroAssemblerLOONG64::ma_add32TestCarry(Condition cond, Register rd,
+ Register rj, Imm32 imm,
+ Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(rj != scratch2);
+ ma_li(scratch2, imm);
+ ma_add32TestCarry(cond, rd, rj, scratch2, overflow);
+}
+
+// Subtract.
+void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Imm32 imm) {
+ if (is_intN(-imm.value, 12)) {
+ as_addi_w(rd, rj, -imm.value);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_sub_w(rd, rj, scratch);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_sub_w(Register rd, Register rj, Register rk) {
+ as_sub_w(rd, rj, rk);
+}
+
+void MacroAssemblerLOONG64::ma_sub32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ if (imm.value != INT32_MIN) {
+ asMasm().ma_add32TestOverflow(rd, rj, Imm32(-imm.value), overflow);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, Imm32(imm.value));
+ asMasm().ma_sub32TestOverflow(rd, rj, scratch, overflow);
+ }
+}
+
+void MacroAssemblerLOONG64::ma_mul(Register rd, Register rj, Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rj != scratch);
+ ma_li(scratch, imm);
+ as_mul_w(rd, rj, scratch);
+}
+
+void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
+ Register rk, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_mulh_w(scratch, rj, rk);
+ as_mul_w(rd, rj, rk);
+ as_srai_w(scratch2, rd, 31);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64::ma_mul32TestOverflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch, imm);
+ as_mulh_w(scratch2, rj, scratch);
+ as_mul_w(rd, rj, scratch);
+ as_srai_w(scratch, rd, 31);
+ ma_b(scratch, Register(scratch2), overflow, Assembler::NotEqual);
+}
+
+void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
+ Register rk,
+ Label* overflow) {
+ ScratchRegisterScope scratch(asMasm());
+ as_mod_w(scratch, rj, rk);
+ ma_b(scratch, scratch, overflow, Assembler::NonZero);
+ as_div_w(rd, rj, rk);
+}
+
+void MacroAssemblerLOONG64::ma_div_branch_overflow(Register rd, Register rj,
+ Imm32 imm, Label* overflow) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_div_branch_overflow(rd, rj, scratch2, overflow);
+}
+
+void MacroAssemblerLOONG64::ma_mod_mask(Register src, Register dest,
+ Register hold, Register remain,
+ int32_t shift, Label* negZero) {
+  // MATH:
+  // We wish to compute x % ((1 << y) - 1) for a known constant, y.
+  // First, let b = (1 << y) and C = (1 << y) - 1, then think of the 32-bit
+  // dividend as a number in base b, namely
+  // c_0*1 + c_1*b + c_2*b^2 ... c_n*b^n.
+  // Now, since both addition and multiplication commute with modulus,
+  // x % C == (c_0 + c_1*b + ... + c_n*b^n) % C ==
+  // (c_0 % C) + (c_1 % C) * (b % C) + (c_2 % C) * (b^2 % C) ...
+  // Since b == C + 1, we have b % C == 1 and b^n % C == 1, so the whole
+  // thing simplifies to:
+  // (c_0 + c_1 + c_2 ... + c_n) % C.
+  // Each c_n can easily be computed by a shift/bitextract, and the modulus
+  // can be maintained by simply subtracting C whenever the accumulator
+  // exceeds C.
+ int32_t mask = (1 << shift) - 1;
+ Label head, negative, sumSigned, done;
+
+  // hold holds -1 if the value was negative, 1 otherwise.
+  // remain holds the remaining bits that have not been processed.
+  // scratch2 serves as a temporary location to store extracted bits into
+  // and to hold the trial subtraction.
+  // dest is the accumulator (and holds the final result).
+
+  // Move the whole value into remain.
+ as_or(remain, src, zero);
+ // Zero out the dest.
+ ma_li(dest, Imm32(0));
+ // Set the hold appropriately.
+ ma_b(remain, remain, &negative, Signed, ShortJump);
+ ma_li(hold, Imm32(1));
+ ma_b(&head, ShortJump);
+
+ bind(&negative);
+ ma_li(hold, Imm32(-1));
+ as_sub_w(remain, zero, remain);
+
+ // Begin the main loop.
+ bind(&head);
+
+ SecondScratchRegisterScope scratch2(asMasm());
+ // Extract the bottom bits into SecondScratchReg.
+ ma_and(scratch2, remain, Imm32(mask));
+ // Add those bits to the accumulator.
+ as_add_w(dest, dest, scratch2);
+ // Do a trial subtraction
+ ma_sub_w(scratch2, dest, Imm32(mask));
+    // If (sum - C) >= 0, store sum - C back into sum, thus performing a
+    // modulus.
+ ma_b(scratch2, Register(scratch2), &sumSigned, Signed, ShortJump);
+ as_or(dest, scratch2, zero);
+ bind(&sumSigned);
+ // Get rid of the bits that we extracted before.
+ as_srli_w(remain, remain, shift);
+ // If the shift produced zero, finish, otherwise, continue in the loop.
+ ma_b(remain, remain, &head, NonZero, ShortJump);
+ // Check the hold to see if we need to negate the result.
+ ma_b(hold, hold, &done, NotSigned, ShortJump);
+
+  // hold is -1 here (the input was negative), so negate the result to match
+  // what JS expects.
+ if (negZero != nullptr) {
+    // The input was negative, so a zero result here would be -0; jump out
+    // to the negative-zero bailout in that case.
+    ma_b(dest, dest, negZero, Zero);
+ as_sub_w(dest, zero, dest);
+ } else {
+ as_sub_w(dest, zero, dest);
+ }
+
+ bind(&done);
+}
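+
+// Worked example: src = 100 and shift = 3, so C = 7 and 100 is 144 in
+// base 8. Pass 1: dest = 0 + 4 = 4 (trial 4 - 7 < 0, keep 4), remain = 12.
+// Pass 2: dest = 4 + 4 = 8, trial 8 - 7 = 1 >= 0, so dest = 1, remain = 1.
+// Pass 3: dest = 1 + 1 = 2 (trial < 0), remain = 0; result 2 == 100 % 7.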
+
+// Memory.
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_load(
+ Register dest, const BaseIndex& src, LoadStoreSize size,
+ LoadStoreExtension extension) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(src, scratch2);
+ return asMasm().ma_load(dest, Address(scratch2, src.offset), size, extension);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_store(
+ Register data, const BaseIndex& dest, LoadStoreSize size,
+ LoadStoreExtension extension) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(dest, scratch2);
+ return asMasm().ma_store(data, Address(scratch2, dest.offset), size,
+ extension);
+}
+
+void MacroAssemblerLOONG64::ma_store(Imm32 imm, const BaseIndex& dest,
+ LoadStoreSize size,
+ LoadStoreExtension extension) {
+ SecondScratchRegisterScope scratch2(asMasm());
+  // Make sure that scratch2 contains the absolute address so that the
+  // offset is 0.
+  asMasm().computeEffectiveAddress(dest, scratch2);
+
+  ScratchRegisterScope scratch(asMasm());
+  // The scratch register is free now; use it for loading the imm value.
+  ma_li(scratch, imm);
+
+  // With offset == 0, ScratchRegister will not be used inside ma_store(),
+  // so we can pass it as the data register here.
+ asMasm().ma_store(scratch, Address(scratch2, 0), size, extension);
+}
+
+// Branch helpers used from within LoongArch-specific code.
+// TODO(loong64): Optimize ma_b.
+void MacroAssemblerLOONG64::ma_b(Register lhs, Register rhs, Label* label,
+ Condition c, JumpKind jumpKind,
+ Register scratch) {
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ asMasm().branchWithCode(getBranchCode(lhs, rhs, c), label, jumpKind,
+ scratch);
+ break;
+ case Always:
+ ma_b(label, jumpKind);
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_ASSERT(lhs == rhs);
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind, scratch);
+ break;
+ default: {
+ Condition cond = ma_cmp(ScratchRegister, lhs, rhs, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+ jumpKind, scratch);
+ break;
+ }
+ }
+}
+
+void MacroAssemblerLOONG64::ma_b(Register lhs, Imm32 imm, Label* label,
+ Condition c, JumpKind jumpKind) {
+ MOZ_ASSERT(c != Overflow);
+ if (imm.value == 0) {
+ if (c == Always || c == AboveOrEqual) {
+ ma_b(label, jumpKind);
+ } else if (c == Below) {
+ ; // This condition is always false. No branch required.
+ } else {
+ asMasm().branchWithCode(getBranchCode(lhs, c), label, jumpKind);
+ }
+ } else {
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ MOZ_ASSERT(lhs != ScratchRegister);
+ ma_li(ScratchRegister, imm);
+ ma_b(lhs, ScratchRegister, label, c, jumpKind);
+ break;
+ default:
+ Condition cond = ma_cmp(ScratchRegister, lhs, imm, c);
+ asMasm().branchWithCode(getBranchCode(ScratchRegister, cond), label,
+ jumpKind);
+ }
+ }
+}
+
+void MacroAssemblerLOONG64::ma_b(Register lhs, ImmPtr imm, Label* l,
+ Condition c, JumpKind jumpKind) {
+ asMasm().ma_b(lhs, ImmWord(uintptr_t(imm.value)), l, c, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_b(Label* label, JumpKind jumpKind) {
+ asMasm().branchWithCode(getBranchCode(BranchIsJump), label, jumpKind);
+}
+
+Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
+ Register rhs, Condition c) {
+ switch (c) {
+ case Above:
+ // bgtu s,t,label =>
+ // sltu at,t,s
+ // bne at,$zero,offs
+ as_sltu(dest, rhs, lhs);
+ return NotEqual;
+ case AboveOrEqual:
+ // bgeu s,t,label =>
+ // sltu at,s,t
+ // beq at,$zero,offs
+ as_sltu(dest, lhs, rhs);
+ return Equal;
+ case Below:
+ // bltu s,t,label =>
+ // sltu at,s,t
+ // bne at,$zero,offs
+ as_sltu(dest, lhs, rhs);
+ return NotEqual;
+ case BelowOrEqual:
+ // bleu s,t,label =>
+ // sltu at,t,s
+ // beq at,$zero,offs
+ as_sltu(dest, rhs, lhs);
+ return Equal;
+ case GreaterThan:
+ // bgt s,t,label =>
+ // slt at,t,s
+ // bne at,$zero,offs
+ as_slt(dest, rhs, lhs);
+ return NotEqual;
+ case GreaterThanOrEqual:
+ // bge s,t,label =>
+ // slt at,s,t
+ // beq at,$zero,offs
+ as_slt(dest, lhs, rhs);
+ return Equal;
+ case LessThan:
+ // blt s,t,label =>
+ // slt at,s,t
+ // bne at,$zero,offs
+ as_slt(dest, lhs, rhs);
+ return NotEqual;
+ case LessThanOrEqual:
+ // ble s,t,label =>
+ // slt at,t,s
+ // beq at,$zero,offs
+ as_slt(dest, rhs, lhs);
+ return Equal;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
+
+Assembler::Condition MacroAssemblerLOONG64::ma_cmp(Register dest, Register lhs,
+ Imm32 imm, Condition c) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_RELEASE_ASSERT(lhs != scratch);
+
+ switch (c) {
+ case Above:
+ case BelowOrEqual:
+ if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12) &&
+ imm.value != -1) {
+ // lhs <= rhs via lhs < rhs + 1 if rhs + 1 does not overflow
+ as_sltui(dest, lhs, imm.value + 1);
+
+ return (c == BelowOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(dest, scratch, lhs);
+ return (c == BelowOrEqual ? Equal : NotEqual);
+ }
+ case AboveOrEqual:
+ case Below:
+ if (is_intN(imm.value, 12)) {
+ as_sltui(dest, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_sltu(dest, lhs, scratch);
+ }
+ return (c == AboveOrEqual ? Equal : NotEqual);
+ case GreaterThan:
+ case LessThanOrEqual:
+ if (imm.value != 0x7fffffff && is_intN(imm.value + 1, 12)) {
+ // lhs <= rhs via lhs < rhs + 1.
+ as_slti(dest, lhs, imm.value + 1);
+ return (c == LessThanOrEqual ? NotEqual : Equal);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(dest, scratch, lhs);
+ return (c == LessThanOrEqual ? Equal : NotEqual);
+ }
+ case GreaterThanOrEqual:
+ case LessThan:
+ if (is_intN(imm.value, 12)) {
+ as_slti(dest, lhs, imm.value);
+ } else {
+ ma_li(scratch, imm);
+ as_slt(dest, lhs, scratch);
+ }
+ return (c == GreaterThanOrEqual ? Equal : NotEqual);
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return Always;
+}
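+
+// The imm + 1 trick above turns <= comparisons into the single slti/sltui
+// the ISA provides: for example, LessThanOrEqual against imm = 7 emits
+// as_slti(dest, lhs, 8) and returns NotEqual, so the caller branches when
+// lhs < 8, i.e. lhs <= 7. The 0x7fffffff guard keeps imm + 1 from
+// overflowing.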
+
+// fp instructions
+void MacroAssemblerLOONG64::ma_lis(FloatRegister dest, float value) {
+ Imm32 imm(mozilla::BitwiseCast<uint32_t>(value));
+
+ if (imm.value != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ moveToFloat32(scratch, dest);
+ } else {
+ moveToFloat32(zero, dest);
+ }
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_d(FloatRegister ft,
+ BaseIndex address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(address, scratch2);
+ return asMasm().ma_fst_d(ft, Address(scratch2, address.offset));
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fst_s(FloatRegister ft,
+ BaseIndex address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(address, scratch2);
+ return asMasm().ma_fst_s(ft, Address(scratch2, address.offset));
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_d(FloatRegister ft,
+ const BaseIndex& src) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(src, scratch2);
+ return asMasm().ma_fld_d(ft, Address(scratch2, src.offset));
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::ma_fld_s(FloatRegister ft,
+ const BaseIndex& src) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ asMasm().computeScaledAddress(src, scratch2);
+ return asMasm().ma_fld_s(ft, Address(scratch2, src.offset));
+}
+
+void MacroAssemblerLOONG64::ma_bc_s(FloatRegister lhs, FloatRegister rhs,
+ Label* label, DoubleCondition c,
+ JumpKind jumpKind, FPConditionBit fcc) {
+ compareFloatingPoint(SingleFloat, lhs, rhs, c, fcc);
+ asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_bc_d(FloatRegister lhs, FloatRegister rhs,
+ Label* label, DoubleCondition c,
+ JumpKind jumpKind, FPConditionBit fcc) {
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c, fcc);
+ asMasm().branchWithCode(getBranchCode(fcc), label, jumpKind);
+}
+
+void MacroAssemblerLOONG64::ma_call(ImmPtr dest) {
+ asMasm().ma_liPatchable(CallReg, dest);
+ as_jirl(ra, CallReg, BOffImm16(0));
+}
+
+void MacroAssemblerLOONG64::ma_jump(ImmPtr dest) {
+ ScratchRegisterScope scratch(asMasm());
+ asMasm().ma_liPatchable(scratch, dest);
+ as_jirl(zero, scratch, BOffImm16(0));
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Register rk,
+ Condition c) {
+ switch (c) {
+ case Equal:
+ // seq d,s,t =>
+ // xor d,s,t
+ // sltiu d,d,1
+ as_xor(rd, rj, rk);
+ as_sltui(rd, rd, 1);
+ break;
+ case NotEqual:
+ // sne d,s,t =>
+ // xor d,s,t
+ // sltu d,$zero,d
+ as_xor(rd, rj, rk);
+ as_sltu(rd, zero, rd);
+ break;
+ case Above:
+ // sgtu d,s,t =>
+ // sltu d,t,s
+ as_sltu(rd, rk, rj);
+ break;
+ case AboveOrEqual:
+ // sgeu d,s,t =>
+ // sltu d,s,t
+ // xori d,d,1
+ as_sltu(rd, rj, rk);
+ as_xori(rd, rd, 1);
+ break;
+ case Below:
+ // sltu d,s,t
+ as_sltu(rd, rj, rk);
+ break;
+ case BelowOrEqual:
+ // sleu d,s,t =>
+ // sltu d,t,s
+ // xori d,d,1
+ as_sltu(rd, rk, rj);
+ as_xori(rd, rd, 1);
+ break;
+ case GreaterThan:
+ // sgt d,s,t =>
+ // slt d,t,s
+ as_slt(rd, rk, rj);
+ break;
+ case GreaterThanOrEqual:
+ // sge d,s,t =>
+ // slt d,s,t
+ // xori d,d,1
+ as_slt(rd, rj, rk);
+ as_xori(rd, rd, 1);
+ break;
+ case LessThan:
+ // slt d,s,t
+ as_slt(rd, rj, rk);
+ break;
+ case LessThanOrEqual:
+ // sle d,s,t =>
+ // slt d,t,s
+ // xori d,d,1
+ as_slt(rd, rk, rj);
+ as_xori(rd, rd, 1);
+ break;
+ case Zero:
+ MOZ_ASSERT(rj == rk);
+ // seq d,s,$zero =>
+ // sltiu d,s,1
+ as_sltui(rd, rj, 1);
+ break;
+ case NonZero:
+ MOZ_ASSERT(rj == rk);
+ // sne d,s,$zero =>
+ // sltu d,$zero,s
+ as_sltu(rd, zero, rj);
+ break;
+ case Signed:
+ MOZ_ASSERT(rj == rk);
+ as_slt(rd, rj, zero);
+ break;
+ case NotSigned:
+ MOZ_ASSERT(rj == rk);
+ // sge d,s,$zero =>
+ // slt d,s,$zero
+ // xori d,d,1
+ as_slt(rd, rj, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set_double(Register dest, FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c) {
+ compareFloatingPoint(DoubleFloat, lhs, rhs, c);
+ as_movcf2gr(dest, FCC0);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set_float32(Register dest, FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c) {
+ compareFloatingPoint(SingleFloat, lhs, rhs, c);
+ as_movcf2gr(dest, FCC0);
+}
+
+void MacroAssemblerLOONG64::ma_cmp_set(Register rd, Register rj, Imm32 imm,
+ Condition c) {
+ if (imm.value == 0) {
+ switch (c) {
+ case Equal:
+ case BelowOrEqual:
+ as_sltui(rd, rj, 1);
+ break;
+ case NotEqual:
+ case Above:
+ as_sltu(rd, zero, rj);
+ break;
+ case AboveOrEqual:
+ case Below:
+ as_ori(rd, zero, c == AboveOrEqual ? 1 : 0);
+ break;
+ case GreaterThan:
+ case LessThanOrEqual:
+ as_slt(rd, zero, rj);
+ if (c == LessThanOrEqual) {
+ as_xori(rd, rd, 1);
+ }
+ break;
+ case LessThan:
+ case GreaterThanOrEqual:
+ as_slt(rd, rj, zero);
+ if (c == GreaterThanOrEqual) {
+ as_xori(rd, rd, 1);
+ }
+ break;
+ case Zero:
+ as_sltui(rd, rj, 1);
+ break;
+ case NonZero:
+ as_sltu(rd, zero, rj);
+ break;
+ case Signed:
+ as_slt(rd, rj, zero);
+ break;
+ case NotSigned:
+ as_slt(rd, rj, zero);
+ as_xori(rd, rd, 1);
+ break;
+ default:
+ MOZ_CRASH("Invalid condition.");
+ }
+ return;
+ }
+
+ switch (c) {
+ case Equal:
+ case NotEqual:
+ ma_xor(rd, rj, imm);
+ if (c == Equal) {
+ as_sltui(rd, rd, 1);
+ } else {
+ as_sltu(rd, zero, rd);
+ }
+ break;
+ case Zero:
+ case NonZero:
+ case Signed:
+ case NotSigned:
+ MOZ_CRASH("Invalid condition.");
+ default:
+ Condition cond = ma_cmp(rd, rj, imm, c);
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+
+      if (cond == Equal) {
+        as_xori(rd, rd, 1);
+      }
+ }
+}
+
+void MacroAssemblerLOONG64::compareFloatingPoint(FloatFormat fmt,
+ FloatRegister lhs,
+ FloatRegister rhs,
+ DoubleCondition c,
+ FPConditionBit fcc) {
+ switch (c) {
+ case DoubleOrdered:
+ as_fcmp_cor(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleEqual:
+ as_fcmp_ceq(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleNotEqual:
+ as_fcmp_cne(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleGreaterThan:
+ as_fcmp_clt(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleGreaterThanOrEqual:
+ as_fcmp_cle(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleLessThan:
+ as_fcmp_clt(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleLessThanOrEqual:
+ as_fcmp_cle(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleUnordered:
+ as_fcmp_cun(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleEqualOrUnordered:
+ as_fcmp_cueq(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleNotEqualOrUnordered:
+ as_fcmp_cune(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleGreaterThanOrUnordered:
+ as_fcmp_cult(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ as_fcmp_cule(fmt, rhs, lhs, fcc);
+ break;
+ case DoubleLessThanOrUnordered:
+ as_fcmp_cult(fmt, lhs, rhs, fcc);
+ break;
+ case DoubleLessThanOrEqualOrUnordered:
+ as_fcmp_cule(fmt, lhs, rhs, fcc);
+ break;
+ default:
+ MOZ_CRASH("Invalid DoubleCondition.");
+ }
+}
+
+void MacroAssemblerLOONG64::minMaxDouble(FloatRegister srcDest,
+ FloatRegister second, bool handleNaN,
+ bool isMax) {
+  if (srcDest == second) {
+    return;
+  }
+
+ Label nan, done;
+
+ // First or second is NaN, result is NaN.
+ ma_bc_d(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ if (isMax) {
+ as_fmax_d(srcDest, srcDest, second);
+ } else {
+ as_fmin_d(srcDest, srcDest, second);
+ }
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ as_fadd_d(srcDest, srcDest, second);
+
+ bind(&done);
+}
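+
+// IEEE-754 arithmetic with a NaN operand yields NaN, so the as_fadd_d in the
+// nan path above materializes the required NaN result (e.g.
+// Math.max(NaN, 5.0) is NaN) without loading a NaN constant.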
+
+void MacroAssemblerLOONG64::minMaxFloat32(FloatRegister srcDest,
+ FloatRegister second, bool handleNaN,
+ bool isMax) {
+  if (srcDest == second) {
+    return;
+  }
+
+ Label nan, done;
+
+ // First or second is NaN, result is NaN.
+ ma_bc_s(srcDest, second, &nan, Assembler::DoubleUnordered, ShortJump);
+ if (isMax) {
+ as_fmax_s(srcDest, srcDest, second);
+ } else {
+ as_fmin_s(srcDest, srcDest, second);
+ }
+ ma_b(&done, ShortJump);
+
+ bind(&nan);
+ as_fadd_s(srcDest, srcDest, second);
+
+ bind(&done);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::loadDouble(const Address& address,
+ FloatRegister dest) {
+ return asMasm().ma_fld_d(dest, address);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::loadDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ return asMasm().ma_fld_d(dest, src);
+}
+
+void MacroAssemblerLOONG64::loadFloatAsDouble(const Address& address,
+ FloatRegister dest) {
+ asMasm().ma_fld_s(dest, address);
+ as_fcvt_d_s(dest, dest);
+}
+
+void MacroAssemblerLOONG64::loadFloatAsDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ asMasm().loadFloat32(src, dest);
+ as_fcvt_d_s(dest, dest);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::loadFloat32(const Address& address,
+ FloatRegister dest) {
+ return asMasm().ma_fld_s(dest, address);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64::loadFloat32(const BaseIndex& src,
+ FloatRegister dest) {
+ return asMasm().ma_fld_s(dest, src);
+}
+
+void MacroAssemblerLOONG64::wasmLoadImpl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch,
+ AnyRegister output, Register tmp) {
+ access.assertOffsetInGuardPages();
+ uint32_t offset = access.offset();
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ append(access, wasm::TrapMachineInsnForLoad(byteSize(access.type())),
+ FaultingCodeOffset(currentOffset()));
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ as_ldx_b(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Uint8:
+ as_ldx_bu(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ as_ldx_h(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Uint16:
+ as_ldx_hu(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ as_ldx_w(output.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Float64:
+ as_fldx_d(output.fpu(), memoryBase, ptr);
+ break;
+ case Scalar::Float32:
+ as_fldx_s(output.fpu(), memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64::wasmStoreImpl(const wasm::MemoryAccessDesc& access,
+ AnyRegister value,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register tmp) {
+ access.assertOffsetInGuardPages();
+ uint32_t offset = access.offset();
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ // The next emitted instruction is a memory access.
+ append(access, wasm::TrapMachineInsnForStore(byteSize(access.type())),
+ FaultingCodeOffset(currentOffset()));
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ as_stx_b(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ as_stx_h(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ as_stx_w(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Int64:
+ as_stx_d(value.gpr(), memoryBase, ptr);
+ break;
+ case Scalar::Float64:
+ as_fstx_d(value.fpu(), memoryBase, ptr);
+ break;
+ case Scalar::Float32:
+ as_fstx_s(value.fpu(), memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64Compat::wasmLoadI64Impl(
+ const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ append(access, wasm::TrapMachineInsnForLoad(byteSize(access.type())),
+ FaultingCodeOffset(currentOffset()));
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ as_ldx_b(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Uint8:
+ as_ldx_bu(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ as_ldx_h(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Uint16:
+ as_ldx_hu(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ as_ldx_w(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Uint32:
+      // TODO(loong64): Why is zero-extension needed here?
+ as_ldx_wu(output.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int64:
+ as_ldx_d(output.reg, memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64Compat::wasmStoreI64Impl(
+ const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch, Register tmp) {
+ uint32_t offset = access.offset();
+ MOZ_ASSERT_IF(offset, ptrScratch != InvalidReg);
+
+ // Maybe add the offset.
+ if (offset) {
+ asMasm().addPtr(ImmWord(offset), ptrScratch);
+ ptr = ptrScratch;
+ }
+
+ asMasm().memoryBarrierBefore(access.sync());
+ append(access, wasm::TrapMachineInsnForStore(byteSize(access.type())),
+ FaultingCodeOffset(currentOffset()));
+
+ switch (access.type()) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ as_stx_b(value.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ as_stx_h(value.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ as_stx_w(value.reg, memoryBase, ptr);
+ break;
+ case Scalar::Int64:
+ as_stx_d(value.reg, memoryBase, ptr);
+ break;
+ default:
+ MOZ_CRASH("unexpected array type");
+ }
+
+ asMasm().memoryBarrierAfter(access.sync());
+}
+
+void MacroAssemblerLOONG64::outOfLineWasmTruncateToInt32Check(
+ FloatRegister input, Register output, MIRType fromType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ ScratchDoubleScope fpscratch(asMasm());
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(0.0, fpscratch);
+ } else {
+ asMasm().loadConstantFloat32(0.0f, fpscratch);
+ }
+
+ if (isUnsigned) {
+ ma_li(output, Imm32(UINT32_MAX));
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+ } else {
+ // Positive overflow is already saturated to INT32_MAX, so we only have
+ // to handle NaN and negative overflow here.
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
+ Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThan);
+
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, OR INT32_MIN into output; otherwise leave it unchanged.
+ as_slli_w(scratch, scratch, 31);
+ as_or(output, output, scratch);
+ }
+
+ MOZ_ASSERT(rejoin->bound());
+ asMasm().jump(rejoin);
+ return;
+ }
+
+ Label inputIsNaN;
+
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ }
+
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+ asMasm().bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
+void MacroAssemblerLOONG64::outOfLineWasmTruncateToInt64Check(
+ FloatRegister input, Register64 output_, MIRType fromType, TruncFlags flags,
+ Label* rejoin, wasm::BytecodeOffset trapOffset) {
+ bool isUnsigned = flags & TRUNC_UNSIGNED;
+ bool isSaturating = flags & TRUNC_SATURATING;
+
+ if (isSaturating) {
+ ScratchDoubleScope fpscratch(asMasm());
+ Register output = output_.reg;
+
+ if (fromType == MIRType::Double) {
+ asMasm().loadConstantDouble(0.0, fpscratch);
+ } else {
+ asMasm().loadConstantFloat32(0.0f, fpscratch);
+ }
+
+ if (isUnsigned) {
+ asMasm().ma_li(output, ImmWord(UINT64_MAX));
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+ } else {
+ // Positive overflow is already saturated to INT64_MAX, so we only have
+ // to handle NaN and negative overflow here.
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input, input,
+ Assembler::DoubleLessThanOrUnordered);
+
+ ScratchRegisterScope scratch(asMasm());
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, set output to zero; otherwise leave it unchanged.
+ as_masknez(output, output, scratch);
+
+ compareFloatingPoint(
+ fromType == MIRType::Double ? DoubleFloat : SingleFloat, input,
+ fpscratch, Assembler::DoubleLessThan);
+
+ as_movcf2gr(scratch, FCC0);
+      // If FCC0 == 1, OR INT64_MIN into output; otherwise leave it unchanged.
+ as_slli_d(scratch, scratch, 63);
+ as_or(output, output, scratch);
+ }
+
+ MOZ_ASSERT(rejoin->bound());
+ asMasm().jump(rejoin);
+ return;
+ }
+
+ Label inputIsNaN;
+
+ if (fromType == MIRType::Double) {
+ asMasm().branchDouble(Assembler::DoubleUnordered, input, input,
+ &inputIsNaN);
+ } else if (fromType == MIRType::Float32) {
+ asMasm().branchFloat(Assembler::DoubleUnordered, input, input, &inputIsNaN);
+ }
+
+ asMasm().wasmTrap(wasm::Trap::IntegerOverflow, trapOffset);
+ asMasm().bind(&inputIsNaN);
+ asMasm().wasmTrap(wasm::Trap::InvalidConversionToInteger, trapOffset);
+}
+
+void MacroAssemblerLOONG64Compat::profilerEnterFrame(Register framePtr,
+ Register scratch) {
+ asMasm().loadJSContext(scratch);
+ loadPtr(Address(scratch, offsetof(JSContext, profilingActivation_)), scratch);
+ storePtr(framePtr,
+ Address(scratch, JitActivation::offsetOfLastProfilingFrame()));
+ storePtr(ImmPtr(nullptr),
+ Address(scratch, JitActivation::offsetOfLastProfilingCallSite()));
+}
+
+void MacroAssemblerLOONG64Compat::profilerExitFrame() {
+ jump(asMasm().runtime()->jitRuntime()->getProfilerExitFrameTail());
+}
+
+MacroAssembler& MacroAssemblerLOONG64::asMasm() {
+ return *static_cast<MacroAssembler*>(this);
+}
+
+const MacroAssembler& MacroAssemblerLOONG64::asMasm() const {
+ return *static_cast<const MacroAssembler*>(this);
+}
+
+void MacroAssembler::subFromStackPtr(Imm32 imm32) {
+ if (imm32.value) {
+ asMasm().subPtr(imm32, StackPointer);
+ }
+}
+
+//{{{ check_macroassembler_style
+// ===============================================================
+// MacroAssembler high-level usage.
+
+void MacroAssembler::flush() {}
+
+// ===============================================================
+// Stack manipulation functions.
+
+size_t MacroAssembler::PushRegsInMaskSizeInBytes(LiveRegisterSet set) {
+ return set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+}
+
+void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ reserveStack(reserved);
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ storePtr(*iter, Address(StackPointer, diff));
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ storeDouble(*iter, Address(StackPointer, diff));
+ }
+ MOZ_ASSERT(diff == 0);
+}
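+
+// Resulting layout (descriptive note): the GPRs are stored from the top of
+// the reserved area downward, followed by the FP registers below them, so
+// `diff` counts down to exactly 0 once every register in the set is stored.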
+
+void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
+ LiveRegisterSet ignore) {
+ int32_t diff =
+ set.gprs().size() * sizeof(intptr_t) + set.fpus().getPushSizeInBytes();
+ const int32_t reserved = diff;
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diff -= sizeof(intptr_t);
+ if (!ignore.has(*iter)) {
+ loadPtr(Address(StackPointer, diff), *iter);
+ }
+ }
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
+ iter.more(); ++iter) {
+ diff -= sizeof(double);
+ if (!ignore.has(*iter)) {
+ loadDouble(Address(StackPointer, diff), *iter);
+ }
+ }
+ MOZ_ASSERT(diff == 0);
+ freeStack(reserved);
+}
+
+void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
+ Register) {
+ FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
+ mozilla::DebugOnly<unsigned> numFpu = fpuSet.size();
+ int32_t diffF = fpuSet.getPushSizeInBytes();
+ mozilla::DebugOnly<int32_t> diffG = set.gprs().size() * sizeof(intptr_t);
+
+ MOZ_ASSERT(dest.offset >= diffG + diffF);
+
+ for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more(); ++iter) {
+ diffG -= sizeof(intptr_t);
+ dest.offset -= sizeof(intptr_t);
+ storePtr(*iter, dest);
+ }
+ MOZ_ASSERT(diffG == 0);
+
+#ifdef ENABLE_WASM_SIMD
+# error "Needs more careful logic if SIMD is enabled"
+#endif
+
+ for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
+ FloatRegister reg = *iter;
+ diffF -= reg.size();
+ numFpu -= 1;
+ dest.offset -= reg.size();
+ if (reg.isDouble()) {
+ storeDouble(reg, dest);
+ } else if (reg.isSingle()) {
+ storeFloat32(reg, dest);
+ } else {
+ MOZ_CRASH("Unknown register type.");
+ }
+ }
+ MOZ_ASSERT(numFpu == 0);
+ diffF -= diffF % sizeof(uintptr_t);
+ MOZ_ASSERT(diffF == 0);
+}
+
+void MacroAssembler::Push(Register reg) {
+ ma_push(reg);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(const ImmPtr imm) {
+ Push(ImmWord(uintptr_t(imm.value)));
+}
+
+void MacroAssembler::Push(const ImmGCPtr ptr) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, ptr);
+ ma_push(scratch);
+ adjustFrame(int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Push(FloatRegister f) {
+ ma_push(f);
+ adjustFrame(int32_t(sizeof(double)));
+}
+
+void MacroAssembler::PushBoxed(FloatRegister reg) {
+ subFromStackPtr(Imm32(sizeof(double)));
+ boxDouble(reg, Address(getStackPointer(), 0));
+ adjustFrame(sizeof(double));
+}
+
+void MacroAssembler::Pop(Register reg) {
+ ma_pop(reg);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::Pop(FloatRegister f) {
+ ma_pop(f);
+ adjustFrame(-int32_t(sizeof(double)));
+}
+
+void MacroAssembler::Pop(const ValueOperand& val) {
+ popValue(val);
+ adjustFrame(-int32_t(sizeof(Value)));
+}
+
+void MacroAssembler::PopStackPtr() {
+ loadPtr(Address(StackPointer, 0), StackPointer);
+ adjustFrame(-int32_t(sizeof(intptr_t)));
+}
+
+void MacroAssembler::freeStackTo(uint32_t framePushed) {
+ MOZ_ASSERT(framePushed <= framePushed_);
+ ma_sub_d(StackPointer, FramePointer, Imm32(framePushed));
+ framePushed_ = framePushed;
+}
+
+// ===============================================================
+// Simple call functions.
+
+CodeOffset MacroAssembler::call(Register reg) {
+ as_jirl(ra, reg, BOffImm16(0));
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::call(Label* label) {
+ ma_bl(label);
+ return CodeOffset(currentOffset());
+}
+
+CodeOffset MacroAssembler::callWithPatch() {
+ as_bl(JOffImm26(1 * sizeof(uint32_t)));
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
+ BufferOffset call(callerOffset - 1 * sizeof(uint32_t));
+
+ JOffImm26 offset = BufferOffset(calleeOffset).diffB<JOffImm26>(call);
+ if (!offset.isInvalid()) {
+ InstJump* bal = (InstJump*)editSrc(call);
+ bal->setJOffImm26(offset);
+ } else {
+ uint32_t u32Offset = callerOffset - 4 * sizeof(uint32_t);
+ uint32_t* u32 =
+ reinterpret_cast<uint32_t*>(editSrc(BufferOffset(u32Offset)));
+ *u32 = calleeOffset - callerOffset;
+ }
+}
+
+CodeOffset MacroAssembler::farJumpWithPatch() {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ as_pcaddi(scratch, 4);
+ as_ld_w(scratch2, scratch, 0);
+ as_add_d(scratch, scratch, scratch2);
+ as_jirl(zero, scratch, BOffImm16(0));
+ // Allocate space which will be patched by patchFarJump().
+ CodeOffset farJump(currentOffset());
+ spew(".space 32bit initValue 0xffff ffff");
+ writeInst(UINT32_MAX);
+ return farJump;
+}
+
+void MacroAssembler::patchFarJump(CodeOffset farJump, uint32_t targetOffset) {
+ uint32_t* u32 =
+ reinterpret_cast<uint32_t*>(editSrc(BufferOffset(farJump.offset())));
+ MOZ_ASSERT(*u32 == UINT32_MAX);
+ *u32 = targetOffset - farJump.offset();
+}
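+
+// Illustrative note (not emitted code): farJumpWithPatch() lays down
+//
+//   pcaddi scratch, 4          // scratch = address of the literal below
+//   ld.w   scratch2, scratch, 0
+//   add.d  scratch, scratch, scratch2
+//   jirl   zero, scratch, 0
+//   .word  0xffffffff          // patched to (target - literal address)
+//
+// so the 32-bit literal is a displacement relative to its own address,
+// added in before the indirect jump.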
+
+CodeOffset MacroAssembler::call(wasm::SymbolicAddress target) {
+ movePtr(target, CallReg);
+ return call(CallReg);
+}
+
+void MacroAssembler::call(const Address& addr) {
+ loadPtr(addr, CallReg);
+ call(CallReg);
+}
+
+void MacroAssembler::call(ImmWord target) { call(ImmPtr((void*)target.value)); }
+
+void MacroAssembler::call(ImmPtr target) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, target, RelocationKind::HARDCODED);
+ ma_call(target);
+}
+
+void MacroAssembler::call(JitCode* c) {
+ ScratchRegisterScope scratch(asMasm());
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ callJitNoProfiler(scratch);
+}
+
+CodeOffset MacroAssembler::nopPatchableToCall() {
+ // LOONG64
+ as_nop(); // lu12i_w
+ as_nop(); // ori
+ as_nop(); // lu32i_d
+ as_nop(); // jirl
+ return CodeOffset(currentOffset());
+}
+
+void MacroAssembler::patchNopToCall(uint8_t* call, uint8_t* target) {
+ Instruction* inst = (Instruction*)call - 4 /* four nops */;
+ Assembler::WriteLoad64Instructions(inst, ScratchRegister, (uint64_t)target);
+ inst[3] = InstImm(op_jirl, BOffImm16(0), ScratchRegister, ra);
+}
+
+void MacroAssembler::patchCallToNop(uint8_t* call) {
+ Instruction* inst = (Instruction*)call - 4 /* four nops */;
+ inst[0].makeNop(); // lu12i_w
+ inst[1].makeNop(); // ori
+ inst[2].makeNop(); // lu32i_d
+ inst[3].makeNop(); // jirl
+}
+
+void MacroAssembler::pushReturnAddress() { push(ra); }
+
+void MacroAssembler::popReturnAddress() { pop(ra); }
+
+// ===============================================================
+// ABI function calls.
+
+void MacroAssembler::setupUnalignedABICall(Register scratch) {
+ MOZ_ASSERT(!IsCompilingWasm(), "wasm should only use aligned ABI calls");
+ setupNativeABICall();
+ dynamicAlignment_ = true;
+
+ as_or(scratch, StackPointer, zero);
+
+ // Force sp to be aligned
+ asMasm().subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+ storePtr(scratch, Address(StackPointer, 0));
+}
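+
+// Sketch of the dynamic-alignment dance above (illustration only): the
+// caller's sp is saved just below the realigned sp so callWithABIPost()
+// can restore it.
+//
+//   scratch = sp;                      // remember the caller's sp
+//   sp -= sizeof(uintptr_t);           // room for the saved sp
+//   sp &= ~(ABIStackAlignment - 1);    // force alignment
+//   *(uintptr_t*)sp = scratch;         // spill the old sp at [sp + 0]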
+
+void MacroAssembler::callWithABIPre(uint32_t* stackAdjust, bool callFromWasm) {
+ MOZ_ASSERT(inCall_);
+ uint32_t stackForCall = abiArgs_.stackBytesConsumedSoFar();
+
+ // Reserve place for $ra.
+ stackForCall += sizeof(intptr_t);
+
+ if (dynamicAlignment_) {
+ stackForCall += ComputeByteAlignment(stackForCall, ABIStackAlignment);
+ } else {
+ uint32_t alignmentAtPrologue = callFromWasm ? sizeof(wasm::Frame) : 0;
+ stackForCall += ComputeByteAlignment(
+ stackForCall + framePushed() + alignmentAtPrologue, ABIStackAlignment);
+ }
+
+ *stackAdjust = stackForCall;
+ reserveStack(stackForCall);
+
+ // Save $ra because call is going to clobber it. Restore it in
+ // callWithABIPost. NOTE: This is needed for calls from SharedIC.
+ // Maybe we can do this differently.
+ storePtr(ra, Address(StackPointer, stackForCall - sizeof(intptr_t)));
+
+ // Position all arguments.
+ {
+ enoughMemory_ &= moveResolver_.resolve();
+ if (!enoughMemory_) {
+ return;
+ }
+
+ MoveEmitter emitter(*this);
+ emitter.emit(moveResolver_);
+ emitter.finish();
+ }
+
+ assertStackAlignment(ABIStackAlignment);
+}
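+
+// Worked example (assuming ABIStackAlignment == 16): with 20 bytes of
+// outgoing stack arguments, stackForCall becomes 20 + 8 (for $ra) = 28,
+// and the dynamic-alignment case pads it to 32 so sp stays 16-byte
+// aligned across the call.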
+
+void MacroAssembler::callWithABIPost(uint32_t stackAdjust, ABIType result,
+ bool callFromWasm) {
+ // Restore ra value (as stored in callWithABIPre()).
+ loadPtr(Address(StackPointer, stackAdjust - sizeof(intptr_t)), ra);
+
+ if (dynamicAlignment_) {
+ // Restore sp value from stack (as stored in setupUnalignedABICall()).
+ loadPtr(Address(StackPointer, stackAdjust), StackPointer);
+ // Use adjustFrame instead of freeStack because we already restored sp.
+ adjustFrame(-stackAdjust);
+ } else {
+ freeStack(stackAdjust);
+ }
+
+#ifdef DEBUG
+ MOZ_ASSERT(inCall_);
+ inCall_ = false;
+#endif
+}
+
+void MacroAssembler::callWithABINoProfiler(Register fun, ABIType result) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ // Load the callee in scratch2, no instruction between the movePtr and
+ // call should clobber it. Note that we can't use fun because it may be
+ // one of the IntArg registers clobbered before the call.
+ movePtr(fun, scratch2);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch2);
+ callWithABIPost(stackAdjust, result);
+}
+
+void MacroAssembler::callWithABINoProfiler(const Address& fun, ABIType result) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ // Load the callee in scratch2, as above.
+ loadPtr(fun, scratch2);
+
+ uint32_t stackAdjust;
+ callWithABIPre(&stackAdjust);
+ call(scratch2);
+ callWithABIPost(stackAdjust, result);
+}
+
+// ===============================================================
+// Jit Frames.
+
+uint32_t MacroAssembler::pushFakeReturnAddress(Register scratch) {
+ CodeLabel cl;
+
+ ma_li(scratch, &cl);
+ Push(scratch);
+ bind(&cl);
+ uint32_t retAddr = currentOffset();
+
+ addCodeLabel(cl);
+ return retAddr;
+}
+
+// ===============================================================
+// Move instructions
+
+void MacroAssembler::moveValue(const TypedOrValueRegister& src,
+ const ValueOperand& dest) {
+ if (src.hasValue()) {
+ moveValue(src.valueReg(), dest);
+ return;
+ }
+
+ MIRType type = src.type();
+ AnyRegister reg = src.typedReg();
+
+ if (!IsFloatingPointType(type)) {
+ boxNonDouble(ValueTypeFromMIRType(type), reg.gpr(), dest);
+ return;
+ }
+
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ FloatRegister freg = reg.fpu();
+ if (type == MIRType::Float32) {
+ convertFloat32ToDouble(freg, scratch);
+ freg = scratch;
+ }
+ boxDouble(freg, dest, scratch);
+}
+
+void MacroAssembler::moveValue(const ValueOperand& src,
+ const ValueOperand& dest) {
+ if (src == dest) {
+ return;
+ }
+ movePtr(src.valueReg(), dest.valueReg());
+}
+
+void MacroAssembler::moveValue(const Value& src, const ValueOperand& dest) {
+ if (!src.isGCThing()) {
+ ma_li(dest.valueReg(), ImmWord(src.asRawBits()));
+ return;
+ }
+
+ writeDataRelocation(src);
+ movWithPatch(ImmWord(src.asRawBits()), dest.valueReg());
+}
+
+// ===============================================================
+// Branch functions
+
+void MacroAssembler::loadStoreBuffer(Register ptr, Register buffer) {
+ ma_and(buffer, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ loadPtr(Address(buffer, gc::ChunkStoreBufferOffset), buffer);
+}
+
+void MacroAssembler::branchPtrInNurseryChunk(Condition cond, Register ptr,
+ Register temp, Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(ptr != temp);
+ MOZ_ASSERT(ptr != ScratchRegister &&
+ ptr != SecondScratchReg); // Both may be used internally.
+ MOZ_ASSERT(temp != ScratchRegister && temp != SecondScratchReg);
+ MOZ_ASSERT(temp != InvalidReg);
+
+ ma_and(temp, ptr, Imm32(int32_t(~gc::ChunkMask)));
+ branchPtr(InvertCondition(cond), Address(temp, gc::ChunkStoreBufferOffset),
+ zero, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ const Address& address,
+ Register temp, Label* label) {
+ branchValueIsNurseryCellImpl(cond, address, temp, label);
+}
+
+void MacroAssembler::branchValueIsNurseryCell(Condition cond,
+ ValueOperand value, Register temp,
+ Label* label) {
+ branchValueIsNurseryCellImpl(cond, value, temp, label);
+}
+
+template <typename T>
+void MacroAssembler::branchValueIsNurseryCellImpl(Condition cond,
+ const T& value, Register temp,
+ Label* label) {
+ MOZ_ASSERT(cond == Assembler::Equal || cond == Assembler::NotEqual);
+ MOZ_ASSERT(temp != InvalidReg);
+ Label done;
+ branchTestGCThing(Assembler::NotEqual, value,
+ cond == Assembler::Equal ? &done : label);
+
+ getGCThingValueChunk(value, temp);
+ loadPtr(Address(temp, gc::ChunkStoreBufferOffset), temp);
+ branchPtr(InvertCondition(cond), temp, zero, label);
+
+ bind(&done);
+}
+
+void MacroAssembler::branchTestValue(Condition cond, const ValueOperand& lhs,
+ const Value& rhs, Label* label) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs.valueReg() != scratch);
+ moveValue(rhs, ValueOperand(scratch));
+ ma_b(lhs.valueReg(), scratch, label, cond);
+}
+
+// ========================================================================
+// Memory access primitives.
+
+template <typename T>
+void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType, const T& dest) {
+ MOZ_ASSERT(valueType < MIRType::Value);
+
+ if (valueType == MIRType::Double) {
+ boxDouble(value.reg().typedReg().fpu(), dest);
+ return;
+ }
+
+ if (value.constant()) {
+ storeValue(value.value(), dest);
+ } else {
+ storeValue(ValueTypeFromMIRType(valueType), value.reg().typedReg().gpr(),
+ dest);
+ }
+}
+
+template void MacroAssembler::storeUnboxedValue(const ConstantOrRegister& value,
+ MIRType valueType,
+ const Address& dest);
+template void MacroAssembler::storeUnboxedValue(
+ const ConstantOrRegister& value, MIRType valueType,
+ const BaseObjectElementIndex& dest);
+
+void MacroAssembler::comment(const char* msg) { Assembler::comment(msg); }
+
+// ===============================================================
+// WebAssembly
+
+FaultingCodeOffset MacroAssembler::wasmTrapInstruction() {
+ FaultingCodeOffset fco = FaultingCodeOffset(currentOffset());
+ as_break(WASM_TRAP); // TODO: as_teq(zero, zero, WASM_TRAP)
+ return fco;
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Register boundsCheckLimit, Label* ok) {
+ ma_b(index, boundsCheckLimit, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck32(Condition cond, Register index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ load32(boundsCheckLimit, scratch2);
+ ma_b(index, Register(scratch2), ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Register64 boundsCheckLimit, Label* ok) {
+ ma_b(index.reg, boundsCheckLimit.reg, ok, cond);
+}
+
+void MacroAssembler::wasmBoundsCheck64(Condition cond, Register64 index,
+ Address boundsCheckLimit, Label* ok) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(boundsCheckLimit, scratch2);
+ ma_b(index.reg, scratch2, ok, cond);
+}
+
+// FTINTRZ behaves as follows:
+//
+// on NaN it produces zero
+// on inputs that are too large it produces INT_MAX (for the appropriate type)
+// on inputs that are too small it produces INT_MIN (ditto)
+
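+// For example (illustration only, not emitted code), ftintrz.w.d gives:
+//
+//   ftintrz.w.d(NaN)   == 0
+//   ftintrz.w.d(1e12)  == INT32_MAX
+//   ftintrz.w.d(-1e12) == INT32_MIN
+//   ftintrz.w.d(-1.9)  == -1    // rounds toward zero
+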
+void MacroAssembler::wasmTruncateDoubleToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ if (!isSaturating) {
+ ma_bc_d(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_d(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ as_srli_d(scratch, output, 32);
+ as_slli_w(output, output, 0);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateFloat32ToUInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+ if (!isSaturating) {
+ ma_bc_s(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_s(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ as_srli_d(scratch, output, 32);
+ as_slli_w(output, output, 0);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+// Assembler::CauseV is an enumerator of FCSRBit; Assembler::CauseV == 16.
+void MacroAssembler::wasmTruncateDoubleToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+ as_ftintrz_w_d(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, output);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
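+
+// Overflow detection sketch (illustration only): ftintrz sets the FCSR
+// invalid-operation cause bit on NaN or out-of-range input, and bstrpick
+// extracts that single bit:
+//
+//   invalid = (fcsr >> Assembler::CauseV) & 1;  // bstrpick(fcsr, 16, 16)
+//   if (invalid) goto oolEntry;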
+
+void MacroAssembler::wasmTruncateFloat32ToInt32(FloatRegister input,
+ Register output,
+ bool isSaturating,
+ Label* oolEntry) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+ as_ftintrz_w_s(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, output);
+ MOZ_ASSERT(Assembler::CauseV < 32);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, Imm32(0), oolEntry, Assembler::NotEqual);
+}
+
+void MacroAssembler::wasmTruncateDoubleToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+ ScratchDoubleScope fpscratch(asMasm());
+ Register output = output_.reg;
+
+ Label done;
+
+ if (!isSaturating) {
+ ma_bc_d(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_d(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ loadConstantDouble(double(INT64_MAX + 1ULL), fpscratch);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, ImmWord(INT64_MAX));
+  // If the truncated result is (unsigned) below INT64_MAX, it is already
+  // the final answer; nothing more to do.
+ ma_b(output, Register(scratch2), &done, Assembler::Below, ShortJump);
+
+ ma_li(scratch2, ImmWord(INT64_MIN));
+ as_fsub_d(fpscratch, input, fpscratch);
+ as_ftintrz_l_d(fpscratch, fpscratch);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ as_add_d(output, output, scratch2);
+
+  // Guard against negative values that truncate to 0 due to precision loss.
+ as_sltui(scratch2, output, 1);
+ as_or(scratch, scratch, scratch2);
+
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
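+
+// The branch-free core above follows the usual unsigned-truncate idiom
+// (sketch, illustration only):
+//
+//   if ((uint64_t)trunc(x) < INT64_MAX) return trunc(x);  // common case
+//   return (uint64_t)trunc(x - 0x1p63) + (1ULL << 63);    // high half
+//
+// The FCSR invalid flag from the second truncation signals real overflow,
+// and a zero result from that path is also routed to the OOL entry.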
+
+void MacroAssembler::wasmTruncateFloat32ToUInt64(
+ FloatRegister input, Register64 output_, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+ ScratchDoubleScope fpscratch(asMasm());
+ Register output = output_.reg;
+
+ Label done;
+
+ if (!isSaturating) {
+ ma_bc_s(input, input, oolEntry, Assembler::DoubleUnordered);
+ }
+ as_ftintrz_l_s(fpscratch, input);
+ moveFromDouble(fpscratch, output);
+ loadConstantFloat32(float(INT64_MAX + 1ULL), fpscratch);
+
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, ImmWord(INT64_MAX));
+  // If the truncated result is (unsigned) below INT64_MAX, it is already
+  // the final answer; nothing more to do.
+ ma_b(output, Register(scratch2), &done, Assembler::Below, ShortJump);
+
+ ma_li(scratch2, ImmWord(INT64_MIN));
+ as_fsub_s(fpscratch, input, fpscratch);
+ as_ftintrz_l_s(fpscratch, fpscratch);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ as_add_d(output, output, scratch2);
+
+  // Guard against negative values that truncate to 0 due to precision loss.
+ as_sltui(scratch2, output, 1);
+ as_or(scratch, scratch, scratch2);
+
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ bind(&done);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateDoubleToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempDouble) {
+ MOZ_ASSERT(tempDouble.isInvalid());
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ as_ftintrz_l_d(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output.reg);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::wasmTruncateFloat32ToInt64(
+ FloatRegister input, Register64 output, bool isSaturating, Label* oolEntry,
+ Label* oolRejoin, FloatRegister tempFloat) {
+ MOZ_ASSERT(tempFloat.isInvalid());
+ ScratchRegisterScope scratch(asMasm());
+ ScratchDoubleScope fpscratch(asMasm());
+
+ as_ftintrz_l_s(fpscratch, input);
+ as_movfcsr2gr(scratch);
+ moveFromDouble(fpscratch, output.reg);
+ as_bstrpick_d(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(scratch, zero, oolEntry, Assembler::NotEqual);
+
+ if (isSaturating) {
+ bind(oolRejoin);
+ }
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt32Check(input, output, MIRType::Float32, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI32(FloatRegister input,
+ Register output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt32Check(input, output, MIRType::Double, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF32ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt64Check(input, output, MIRType::Float32, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::oolWasmTruncateCheckF64ToI64(FloatRegister input,
+ Register64 output,
+ TruncFlags flags,
+ wasm::BytecodeOffset off,
+ Label* rejoin) {
+ outOfLineWasmTruncateToInt64Check(input, output, MIRType::Double, flags,
+ rejoin, off);
+}
+
+void MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, AnyRegister output) {
+ wasmLoadImpl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr,
+ Register ptrScratch, Register64 output) {
+ wasmLoadI64Impl(access, memoryBase, ptr, ptrScratch, output, InvalidReg);
+}
+
+void MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access,
+ AnyRegister value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreImpl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access,
+ Register64 value, Register memoryBase,
+ Register ptr, Register ptrScratch) {
+ wasmStoreI64Impl(access, value, memoryBase, ptr, ptrScratch, InvalidReg);
+}
+
+void MacroAssembler::enterFakeExitFrameForWasm(Register cxreg, Register scratch,
+ ExitFrameType type) {
+ enterFakeExitFrame(cxreg, scratch, type);
+}
+
+// TODO(loong64): widenInt32 should be nop?
+void MacroAssembler::widenInt32(Register r) {
+ move32To64SignExtend(r, Register64(r));
+}
+
+// ========================================================================
+// Convert floating point.
+
+void MacroAssembler::convertUInt64ToFloat32(Register64 src_, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+
+ Register src = src_.reg;
+ Label positive, done;
+ ma_b(src, src, &positive, NotSigned, ShortJump);
+
+ MOZ_ASSERT(src != scratch);
+ MOZ_ASSERT(src != scratch2);
+
+ ma_and(scratch, src, Imm32(1));
+ as_srli_d(scratch2, src, 1);
+ as_or(scratch, scratch, scratch2);
+ as_movgr2fr_d(dest, scratch);
+ as_ffint_s_l(dest, dest);
+ addFloat32(dest, dest);
+ ma_b(&done, ShortJump);
+
+ bind(&positive);
+ as_movgr2fr_d(dest, src);
+ as_ffint_s_l(dest, dest);
+
+ bind(&done);
+}
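+
+// The negative path above uses the classic halve-then-double trick
+// (sketch, illustration only). When the top bit of src is set:
+//
+//   int64_t half = (int64_t)((src >> 1) | (src & 1));  // halve, keep low bit
+//   float f = (float)half;                             // fits signed range
+//   dest = f + f;                                      // scale back up
+//
+// ORing the low bit back in ("round to odd") preserves correct rounding
+// of the final result.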
+
+void MacroAssembler::convertInt64ToFloat32(Register64 src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src.reg);
+ as_ffint_s_l(dest, dest);
+}
+
+bool MacroAssembler::convertUInt64ToDoubleNeedsTemp() { return false; }
+
+void MacroAssembler::convertUInt64ToDouble(Register64 src, FloatRegister dest,
+ Register temp) {
+ MOZ_ASSERT(temp == Register::Invalid());
+ MacroAssemblerSpecific::convertUInt64ToDouble(src.reg, dest);
+}
+
+void MacroAssembler::convertInt64ToDouble(Register64 src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src.reg);
+ as_ffint_d_l(dest, dest);
+}
+
+void MacroAssembler::convertIntPtrToDouble(Register src, FloatRegister dest) {
+ convertInt64ToDouble(Register64(src), dest);
+}
+
+// ========================================================================
+// Primitive atomic operations.
+
+template <typename T>
+static void CompareExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again, end;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+ masm.ma_b(output, oldval, &end, Assembler::NotEqual, ShortJump);
+ masm.as_or(scratch2, newval, zero);
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+ masm.bind(&end);
+
+ return;
+ }
+
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+
+ masm.as_srl_w(output, scratch2, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.as_ext_w_b(valueTemp, oldval);
+ masm.as_ext_w_b(output, output);
+ } else {
+ masm.as_andi(valueTemp, oldval, 0xff);
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.as_ext_w_h(valueTemp, oldval);
+ masm.as_ext_w_h(output, output);
+ } else {
+ masm.as_bstrpick_d(valueTemp, oldval, 15, 0);
+ masm.as_bstrpick_d(output, output, 15, 0);
+ }
+ break;
+ }
+
+ masm.ma_b(output, valueTemp, &end, Assembler::NotEqual, ShortJump);
+
+ masm.as_sll_w(valueTemp, newval, offsetTemp);
+ masm.as_and(scratch2, scratch2, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&end);
+}
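+
+// Lane arithmetic used above for the 1- and 2-byte cases (illustration
+// only, plain C):
+//
+//   uintptr_t base  = addr & ~3;        // word-aligned ll.w/sc.w target
+//   unsigned  shift = (addr & 3) * 8;   // bit offset of the lane
+//   uint32_t  keep  = ~((0xffffffffu >> ((4 - nbytes) * 8)) << shift);
+//
+// The loop compares only the extracted lane and splices the new value
+// into the untouched bytes before the sc.w.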
+
+template <typename T>
+static void CompareExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 expect, Register64 replace,
+ Register64 output) {
+ MOZ_ASSERT(expect != output && replace != output);
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+ Label exit;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load64,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_d(output.reg, scratch, 0);
+
+ masm.ma_b(output.reg, expect.reg, &exit, Assembler::NotEqual, ShortJump);
+ masm.movePtr(replace.reg, scratch2);
+ masm.as_sc_d(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ masm.bind(&exit);
+}
+
+template <typename T>
+static void AtomicExchange(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ const T& mem, Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+ masm.as_or(scratch2, value, zero);
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, value, 0xff);
+ break;
+ case 2:
+ masm.as_bstrpick_d(valueTemp, value, 15, 0);
+ break;
+ }
+ masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+ masm.as_and(scratch2, output, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.as_srl_w(output, output, offsetTemp);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.as_ext_w_b(output, output);
+ } else {
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.as_ext_w_h(output, output);
+ } else {
+ masm.as_bstrpick_d(output, output, 15, 0);
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void AtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, const T& mem,
+ Register64 value, Register64 output) {
+ MOZ_ASSERT(value != output);
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load64,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_d(output.reg, scratch, 0);
+
+ masm.movePtr(value.reg, scratch2);
+ masm.as_sc_d(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &tryAgain, Assembler::Zero,
+ ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void AtomicFetchOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ bool signExtend = Scalar::isSignedIntType(type);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(output, scratch, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(scratch2, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(scratch2, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(scratch2, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(scratch2, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(scratch2, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+ masm.as_srl_w(output, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(valueTemp, output, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(valueTemp, output, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(valueTemp, output, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(valueTemp, output, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(valueTemp, output, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.as_bstrpick_d(valueTemp, valueTemp, 15, 0);
+ break;
+ }
+
+ masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
+
+ masm.as_and(scratch2, scratch2, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ switch (nbytes) {
+ case 1:
+ if (signExtend) {
+ masm.as_ext_w_b(output, output);
+ } else {
+ masm.as_andi(output, output, 0xff);
+ }
+ break;
+ case 2:
+ if (signExtend) {
+ masm.as_ext_w_h(output, output);
+ } else {
+ masm.as_bstrpick_d(output, output, 15, 0);
+ }
+ break;
+ }
+
+ masm.memoryBarrierAfter(sync);
+}
+
+template <typename T>
+static void AtomicFetchOp64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ const Synchronization& sync, AtomicOp op,
+ Register64 value, const T& mem, Register64 temp,
+ Register64 output) {
+ MOZ_ASSERT(value != output);
+ MOZ_ASSERT(value != temp);
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(mem, scratch);
+
+ Label tryAgain;
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&tryAgain);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load64,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_d(output.reg, scratch, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_d(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_d(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(temp.reg, output.reg, value.reg);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(temp.reg, output.reg, value.reg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc_d(temp.reg, scratch, 0);
+ masm.ma_b(temp.reg, temp.reg, &tryAgain, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
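+
+// Shape of the LL/SC retry loop emitted above (illustration only):
+//
+//   do {
+//     old = ll.d(addr);              // linked load
+//     tmp = op(old, value);          // add/sub/and/or/xor
+//   } while (sc.d(addr, tmp) == 0);  // store-conditional failed: retry
+//   output = old;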
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::compareExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, nullptr, type, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const Address& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::compareExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 expect,
+ Register64 replace, Register64 output) {
+ CompareExchange64(*this, nullptr, sync, mem, expect, replace, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ CompareExchange(*this, &access, access.type(), access.sync(), mem, oldval,
+ newval, valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::wasmCompareExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem,
+ Register64 expect,
+ Register64 replace,
+ Register64 output) {
+ CompareExchange64(*this, &access, access.sync(), mem, expect, replace,
+ output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange(Scalar::Type type,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, nullptr, type, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const Address& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::atomicExchange64(const Synchronization& sync,
+ const BaseIndex& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(*this, nullptr, sync, mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicExchange(*this, &access, access.type(), access.sync(), mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp(Scalar::Type type,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register output) {
+ AtomicFetchOp(*this, nullptr, type, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicFetchOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp, Register64 output) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, output);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const Address& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::atomicEffectOp64(const Synchronization& sync, AtomicOp op,
+ Register64 value, const BaseIndex& mem,
+ Register64 temp) {
+ AtomicFetchOp64(*this, nullptr, sync, op, value, mem, temp, temp);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register output) {
+ AtomicFetchOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp, output);
+}
+
+template <typename T>
+static void AtomicEffectOp(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc* access,
+ Scalar::Type type, const Synchronization& sync,
+ AtomicOp op, const T& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ ScratchRegisterScope scratch(masm);
+ SecondScratchRegisterScope scratch2(masm);
+ unsigned nbytes = Scalar::byteSize(type);
+
+ switch (nbytes) {
+ case 1:
+ case 2:
+ break;
+ case 4:
+ MOZ_ASSERT(valueTemp == InvalidReg);
+ MOZ_ASSERT(offsetTemp == InvalidReg);
+ MOZ_ASSERT(maskTemp == InvalidReg);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ Label again;
+
+ masm.computeEffectiveAddress(mem, scratch);
+
+ if (nbytes == 4) {
+ masm.memoryBarrierBefore(sync);
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(scratch2, scratch2, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(scratch2, scratch2, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(scratch2, scratch2, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(scratch2, scratch2, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(scratch2, scratch2, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ masm.as_sc_w(scratch2, scratch, 0);
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+
+ return;
+ }
+
+ masm.as_andi(offsetTemp, scratch, 3);
+ masm.subPtr(offsetTemp, scratch);
+ masm.as_slli_w(offsetTemp, offsetTemp, 3);
+ masm.ma_li(maskTemp, Imm32(UINT32_MAX >> ((4 - nbytes) * 8)));
+ masm.as_sll_w(maskTemp, maskTemp, offsetTemp);
+ masm.as_nor(maskTemp, zero, maskTemp);
+
+ masm.memoryBarrierBefore(sync);
+
+ masm.bind(&again);
+
+ if (access) {
+ masm.append(*access, wasm::TrapMachineInsn::Load32,
+ FaultingCodeOffset(masm.currentOffset()));
+ }
+
+ masm.as_ll_w(scratch2, scratch, 0);
+ masm.as_srl_w(valueTemp, scratch2, offsetTemp);
+
+ switch (op) {
+ case AtomicFetchAddOp:
+ masm.as_add_w(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchSubOp:
+ masm.as_sub_w(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchAndOp:
+ masm.as_and(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchOrOp:
+ masm.as_or(valueTemp, valueTemp, value);
+ break;
+ case AtomicFetchXorOp:
+ masm.as_xor(valueTemp, valueTemp, value);
+ break;
+ default:
+ MOZ_CRASH();
+ }
+
+ switch (nbytes) {
+ case 1:
+ masm.as_andi(valueTemp, valueTemp, 0xff);
+ break;
+ case 2:
+ masm.as_bstrpick_d(valueTemp, valueTemp, 15, 0);
+ break;
+ }
+
+ masm.as_sll_w(valueTemp, valueTemp, offsetTemp);
+
+ masm.as_and(scratch2, scratch2, maskTemp);
+ masm.as_or(scratch2, scratch2, valueTemp);
+
+ masm.as_sc_w(scratch2, scratch, 0);
+
+ masm.ma_b(scratch2, Register(scratch2), &again, Assembler::Zero, ShortJump);
+
+ masm.memoryBarrierAfter(sync);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const Address& mem, Register valueTemp,
+ Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+void MacroAssembler::wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register value,
+ const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, &access, access.type(), access.sync(), op, mem, value,
+ valueTemp, offsetTemp, maskTemp);
+}
+
+template <typename T>
+static void WasmAtomicExchange64(MacroAssembler& masm,
+ const wasm::MemoryAccessDesc& access,
+ const T& mem, Register64 value,
+ Register64 output) {
+ AtomicExchange64(masm, &access, access.sync(), mem, value, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const Address& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicExchange64(const wasm::MemoryAccessDesc& access,
+ const BaseIndex& mem, Register64 src,
+ Register64 output) {
+ WasmAtomicExchange64(*this, access, mem, src, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const Address& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+void MacroAssembler::wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access,
+ AtomicOp op, Register64 value,
+ const BaseIndex& mem, Register64 temp,
+ Register64 output) {
+ AtomicFetchOp64(*this, &access, access.sync(), op, value, mem, temp, output);
+}
+
+// ========================================================================
+// JS atomic operations.
+
+template <typename T>
+static void CompareExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register oldval, Register newval,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.compareExchange(arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, output.gpr());
+ }
+}
+
+template <typename T>
+static void AtomicExchangeJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, const T& mem,
+ Register value, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicExchange(arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+template <typename T>
+static void AtomicFetchOpJS(MacroAssembler& masm, Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const T& mem, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ if (arrayType == Scalar::Uint32) {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp);
+ masm.convertUInt32ToDouble(temp, output.fpu());
+ } else {
+ masm.atomicFetchOp(arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, output.gpr());
+ }
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::compareExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register oldval,
+ Register newval, Register valueTemp,
+ Register offsetTemp, Register maskTemp,
+ Register temp, AnyRegister output) {
+ CompareExchangeJS(*this, arrayType, sync, mem, oldval, newval, valueTemp,
+ offsetTemp, maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const Address& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicExchangeJS(Scalar::Type arrayType,
+ const Synchronization& sync,
+ const BaseIndex& mem, Register value,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicExchangeJS(*this, arrayType, sync, mem, value, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicFetchOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp, Register temp,
+ AnyRegister output) {
+ AtomicFetchOpJS(*this, arrayType, sync, op, value, mem, valueTemp, offsetTemp,
+ maskTemp, temp, output);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const BaseIndex& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::atomicEffectOpJS(Scalar::Type arrayType,
+ const Synchronization& sync, AtomicOp op,
+ Register value, const Address& mem,
+ Register valueTemp, Register offsetTemp,
+ Register maskTemp) {
+ AtomicEffectOp(*this, nullptr, arrayType, sync, op, mem, value, valueTemp,
+ offsetTemp, maskTemp);
+}
+
+void MacroAssembler::flexibleQuotient32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ quotient32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleRemainder32(Register rhs, Register srcDest,
+ bool isUnsigned,
+ const LiveRegisterSet&) {
+ remainder32(rhs, srcDest, isUnsigned);
+}
+
+void MacroAssembler::flexibleDivMod32(Register rhs, Register srcDest,
+ Register remOutput, bool isUnsigned,
+ const LiveRegisterSet&) {
+ if (isUnsigned) {
+ as_mod_wu(remOutput, srcDest, rhs);
+ as_div_wu(srcDest, srcDest, rhs);
+ } else {
+ as_mod_w(remOutput, srcDest, rhs);
+ as_div_w(srcDest, srcDest, rhs);
+ }
+}
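+
+// Design note: srcDest serves as both dividend and quotient output, so
+// the remainder must be computed first, before the divide overwrites
+// srcDest.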
+
+CodeOffset MacroAssembler::moveNearAddressWithPatch(Register dest) {
+ return movWithPatch(ImmPtr(nullptr), dest);
+}
+
+void MacroAssembler::patchNearAddressMove(CodeLocationLabel loc,
+ CodeLocationLabel target) {
+ PatchDataWithValueCheck(loc, ImmPtr(target.raw()), ImmPtr(nullptr));
+}
+
+// ========================================================================
+// Spectre Mitigations.
+
+void MacroAssembler::speculationBarrier() { MOZ_CRASH(); }
+
+void MacroAssembler::floorFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchFloat32Scope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label skipCheck, done;
+
+  // If NaN, 0 or -0, check for bailout.
+ loadConstantFloat32(0.0f, scratch);
+ ma_bc_s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+  // If the binary value is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleLo(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&skipCheck);
+ as_ftintrm_w_s(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::floorDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label skipCheck, done;
+
+  // If NaN, 0 or -0, check for bailout.
+ loadConstantDouble(0.0, scratch);
+ ma_bc_d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+  // If the high word is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&skipCheck);
+ as_ftintrm_w_d(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::ceilFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchFloat32Scope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label performCeil, done;
+
+ // If x < -1 or x > 0 then perform ceil.
+ loadConstantFloat32(0.0f, scratch);
+ branchFloat(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+ loadConstantFloat32(-1.0f, scratch);
+ branchFloat(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+  // If the binary value is not zero, the input was not 0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromFloat32(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&performCeil);
+ as_ftintrp_w_s(scratch, src);
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::ceilDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchDoubleScope fpscratch(asMasm());
+ FloatRegister scratch = fpscratch;
+ Label performCeil, done;
+
+  // If x > 0 or x <= -1, perform ceil.
+  loadConstantDouble(0.0, scratch);
+ branchDouble(Assembler::DoubleGreaterThan, src, scratch, &performCeil);
+ loadConstantDouble(-1.0, scratch);
+ branchDouble(Assembler::DoubleLessThanOrEqual, src, scratch, &performCeil);
+
+  // If the high word is not zero, the input was not +0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&done, ShortJump);
+
+ bind(&performCeil);
+ as_ftintrp_w_d(scratch, src);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ bind(&done);
+}
+
+void MacroAssembler::roundFloat32ToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchFloat32Scope scratch(*this);
+
+ Label negative, end, skipCheck;
+
+  // Load the biggest number less than 0.5 into the temp register.
+ loadConstantFloat32(GetBiggestNumberLessThan(0.5f), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ loadConstantFloat32(0.0f, scratch);
+ ma_bc_s(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+  // If NaN, 0 or -0, check for bailout.
+ ma_bc_s(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+  // If the binary value is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromFloat32(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&end, ShortJump);
+
+ bind(&skipCheck);
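+  // Bias and floor: floor(src + bias) rounds half-way cases up. The bias is
+  // the biggest float below 0.5 because adding exactly 0.5 to the largest
+  // float below 0.5 would round up to 1.0f and misround the result.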
+ as_fadd_s(scratch, src, temp);
+ as_ftintrm_w_s(scratch, scratch);
+
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ bind(&negative);
+
+  // Inputs in ]-0.5; 0] need 0.5 added; other negative inputs need the
+  // biggest float less than 0.5 added.
+ Label loadJoin;
+ loadConstantFloat32(-0.5f, scratch);
+ branchFloat(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+ loadConstantFloat32(0.5f, temp);
+ bind(&loadJoin);
+
+ as_fadd_s(temp, src, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ branchFloat(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+  // Round toward minus infinity (floor); relative to truncation toward
+  // zero this is off-by-one for everything but integer-valued inputs.
+ as_ftintrm_w_s(scratch, temp);
+ moveFromFloat32(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::roundDoubleToInt32(FloatRegister src, Register dest,
+ FloatRegister temp, Label* fail) {
+ ScratchDoubleScope scratch(*this);
+
+ Label negative, end, skipCheck;
+
+  // Load the biggest number less than 0.5 into the temp register.
+ loadConstantDouble(GetBiggestNumberLessThan(0.5), temp);
+
+ // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
+ loadConstantDouble(0.0, scratch);
+ ma_bc_d(src, scratch, &negative, Assembler::DoubleLessThan, ShortJump);
+
+  // If NaN, 0 or -0, check for bailout.
+ ma_bc_d(src, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);
+
+ // If high part is not zero, it is NaN or -0, so we bail.
+ {
+ ScratchRegisterScope scratch(asMasm());
+ moveFromDoubleHi(src, scratch);
+ branch32(Assembler::NotEqual, scratch, zero, fail);
+ }
+
+ // Input was zero, so return zero.
+ move32(Imm32(0), dest);
+ ma_b(&end, ShortJump);
+
+ bind(&skipCheck);
+ as_fadd_d(scratch, src, temp);
+ as_ftintrm_w_d(scratch, scratch);
+
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+ branch32(Assembler::Equal, dest, Imm32(INT_MAX), fail);
+
+ jump(&end);
+
+ // Input is negative, but isn't -0.
+ bind(&negative);
+
+  // Inputs in ]-0.5; 0] need 0.5 added; other negative inputs need the
+  // biggest double less than 0.5 added.
+ Label loadJoin;
+ loadConstantDouble(-0.5, scratch);
+ branchDouble(Assembler::DoubleLessThan, src, scratch, &loadJoin);
+ loadConstantDouble(0.5, temp);
+ bind(&loadJoin);
+
+ addDouble(src, temp);
+
+ // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
+ // result is -0.
+ branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, fail);
+
+  // Round toward minus infinity (floor); relative to truncation toward
+  // zero this is off-by-one for everything but integer-valued inputs.
+ as_ftintrm_w_d(scratch, temp);
+ moveFromDoubleLo(scratch, dest);
+
+ branch32(Assembler::Equal, dest, Imm32(INT_MIN), fail);
+
+ bind(&end);
+}
+
+void MacroAssembler::truncFloat32ToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+
+ Label notZero;
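+  // ftintrz converts with truncation (round toward zero). A NaN or
+  // out-of-range input raises the invalid-operation (V) exception, whose
+  // cause bit we read back from the FCSR below.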
+ as_ftintrz_w_s(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
+
+ {
+ // dest == zero
+ SecondScratchRegisterScope scratch2(asMasm());
+ moveFromFloat32(src, scratch2);
+ // Check if input is in ]-1; -0] range by checking the sign bit.
+ as_slt(scratch2, scratch2, zero);
+ as_add_d(scratch, scratch, scratch2);
+ }
+
+ bind(&notZero);
+ branch32(Assembler::NotEqual, Register(scratch), zero, fail);
+}
+
+void MacroAssembler::truncDoubleToInt32(FloatRegister src, Register dest,
+ Label* fail) {
+ ScratchRegisterScope scratch(asMasm());
+ ScratchFloat32Scope fpscratch(asMasm());
+
+ Label notZero;
+ as_ftintrz_w_d(fpscratch, src);
+ as_movfcsr2gr(scratch);
+ moveFromFloat32(fpscratch, dest);
+ as_bstrpick_w(scratch, scratch, Assembler::CauseV, Assembler::CauseV);
+ ma_b(dest, zero, &notZero, Assembler::NotEqual, ShortJump);
+
+ {
+ // dest == zero
+ SecondScratchRegisterScope scratch2(asMasm());
+ moveFromDoubleHi(src, scratch2);
+ // Check if input is in ]-1; -0] range by checking the sign bit.
+ as_slt(scratch2, scratch2, zero);
+ as_add_d(scratch, scratch, scratch2);
+ }
+
+ bind(&notZero);
+ branch32(Assembler::NotEqual, Register(scratch), zero, fail);
+}
+
+void MacroAssembler::nearbyIntDouble(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::nearbyIntFloat32(RoundingMode mode, FloatRegister src,
+ FloatRegister dest) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssembler::copySignDouble(FloatRegister lhs, FloatRegister rhs,
+ FloatRegister output) {
+ MOZ_CRASH("not supported on this platform");
+}
+
+void MacroAssemblerLOONG64Compat::move32(Imm32 imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerLOONG64Compat::move32(Register src, Register dest) {
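+  // slli.w with a zero shift amount sign-extends the low 32 bits of src.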
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(Register src, Register dest) {
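+  // `or dest, src, zero` is the idiomatic full-width register move.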
+ as_or(dest, src, zero);
+}
+void MacroAssemblerLOONG64Compat::movePtr(ImmWord imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(ImmGCPtr imm, Register dest) {
+ ma_li(dest, imm);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(ImmPtr imm, Register dest) {
+ movePtr(ImmWord(uintptr_t(imm.value)), dest);
+}
+
+void MacroAssemblerLOONG64Compat::movePtr(wasm::SymbolicAddress imm,
+ Register dest) {
+ append(wasm::SymbolicAccess(CodeOffset(nextOffset().getOffset()), imm));
+ ma_liPatchable(dest, ImmWord(-1));
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load8ZeroExtend(
+ const Address& address, Register dest) {
+ return ma_load(dest, address, SizeByte, ZeroExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load8ZeroExtend(
+ const BaseIndex& src, Register dest) {
+ return ma_load(dest, src, SizeByte, ZeroExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load8SignExtend(
+ const Address& address, Register dest) {
+ return ma_load(dest, address, SizeByte, SignExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load8SignExtend(
+ const BaseIndex& src, Register dest) {
+ return ma_load(dest, src, SizeByte, SignExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load16ZeroExtend(
+ const Address& address, Register dest) {
+ return ma_load(dest, address, SizeHalfWord, ZeroExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load16ZeroExtend(
+ const BaseIndex& src, Register dest) {
+ return ma_load(dest, src, SizeHalfWord, ZeroExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load16SignExtend(
+ const Address& address, Register dest) {
+ return ma_load(dest, address, SizeHalfWord, SignExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load16SignExtend(
+ const BaseIndex& src, Register dest) {
+ return ma_load(dest, src, SizeHalfWord, SignExtend);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load32(const Address& address,
+ Register dest) {
+ return ma_ld_w(dest, address);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::load32(const BaseIndex& address,
+ Register dest) {
+ Register base = address.base;
+ Register index = address.index;
+ int32_t offset = address.offset;
+ uint32_t shift = Imm32::ShiftOf(address.scale).value;
+ js::wasm::FaultingCodeOffset fco;
+
+ if (offset != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(offset));
+ if (shift != 0) {
+ MOZ_ASSERT(shift <= 4);
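+      // alsl.d adds (index << (sa + 1)) to the addend, hence the shift - 1.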
+ as_alsl_d(scratch, index, scratch, shift - 1);
+ } else {
+ as_add_d(scratch, index, scratch);
+ }
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_ldx_w(dest, base, scratch);
+ } else if (shift != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ as_slli_d(scratch, index, shift);
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_ldx_w(dest, base, scratch);
+ } else {
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_ldx_w(dest, base, index);
+ }
+ return fco;
+}
+
+void MacroAssemblerLOONG64Compat::load32(AbsoluteAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(address.addr), scratch);
+ load32(Address(scratch, 0), dest);
+}
+
+void MacroAssemblerLOONG64Compat::load32(wasm::SymbolicAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(address, scratch);
+ load32(Address(scratch, 0), dest);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::loadPtr(const Address& address,
+ Register dest) {
+ return ma_ld_d(dest, address);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::loadPtr(const BaseIndex& src,
+ Register dest) {
+ Register base = src.base;
+ Register index = src.index;
+ int32_t offset = src.offset;
+ uint32_t shift = Imm32::ShiftOf(src.scale).value;
+ js::wasm::FaultingCodeOffset fco;
+
+ if (offset != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(offset));
+ if (shift != 0) {
+ MOZ_ASSERT(shift <= 4);
+ as_alsl_d(scratch, index, scratch, shift - 1);
+ } else {
+ as_add_d(scratch, index, scratch);
+ }
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_ldx_d(dest, base, scratch);
+ } else if (shift != 0) {
+ ScratchRegisterScope scratch(asMasm());
+ as_slli_d(scratch, index, shift);
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_ldx_d(dest, base, scratch);
+ } else {
+ fco = js::wasm::FaultingCodeOffset(currentOffset());
+ as_ldx_d(dest, base, index);
+ }
+ return fco;
+}
+
+void MacroAssemblerLOONG64Compat::loadPtr(AbsoluteAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(address.addr), scratch);
+ loadPtr(Address(scratch, 0), dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadPtr(wasm::SymbolicAddress address,
+ Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(address, scratch);
+ loadPtr(Address(scratch, 0), dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadPrivate(const Address& address,
+ Register dest) {
+ loadPtr(address, dest);
+}
+
+void MacroAssemblerLOONG64Compat::store8(Imm32 imm, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_store(scratch2, address, SizeByte);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::store8(Register src,
+ const Address& address) {
+ return ma_store(src, address, SizeByte);
+}
+
+void MacroAssemblerLOONG64Compat::store8(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeByte);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::store8(Register src,
+ const BaseIndex& dest) {
+ return ma_store(src, dest, SizeByte);
+}
+
+void MacroAssemblerLOONG64Compat::store16(Imm32 imm, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_store(scratch2, address, SizeHalfWord);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::store16(
+ Register src, const Address& address) {
+ return ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerLOONG64Compat::store16(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeHalfWord);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::store16(
+ Register src, const BaseIndex& address) {
+ return ma_store(src, address, SizeHalfWord);
+}
+
+void MacroAssemblerLOONG64Compat::store32(Register src,
+ AbsoluteAddress address) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(address.addr), scratch);
+ store32(src, Address(scratch, 0));
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::store32(
+ Register src, const Address& address) {
+ return ma_store(src, address, SizeWord);
+}
+
+void MacroAssemblerLOONG64Compat::store32(Imm32 src, const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ move32(src, scratch2);
+ ma_store(scratch2, address, SizeWord);
+}
+
+void MacroAssemblerLOONG64Compat::store32(Imm32 imm, const BaseIndex& dest) {
+ ma_store(imm, dest, SizeWord);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::store32(Register src,
+ const BaseIndex& dest) {
+ return ma_store(src, dest, SizeWord);
+}
+
+template <typename T>
+void MacroAssemblerLOONG64Compat::storePtr(ImmWord imm, T address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ ma_li(scratch2, imm);
+ ma_store(scratch2, address, SizeDouble);
+}
+
+template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmWord imm,
+ Address address);
+template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
+ ImmWord imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerLOONG64Compat::storePtr(ImmPtr imm, T address) {
+ storePtr(ImmWord(uintptr_t(imm.value)), address);
+}
+
+template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmPtr imm,
+ Address address);
+template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
+ ImmPtr imm, BaseIndex address);
+
+template <typename T>
+void MacroAssemblerLOONG64Compat::storePtr(ImmGCPtr imm, T address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ movePtr(imm, scratch2);
+ storePtr(scratch2, address);
+}
+
+template void MacroAssemblerLOONG64Compat::storePtr<Address>(ImmGCPtr imm,
+ Address address);
+template void MacroAssemblerLOONG64Compat::storePtr<BaseIndex>(
+ ImmGCPtr imm, BaseIndex address);
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::storePtr(
+ Register src, const Address& address) {
+ return ma_st_d(src, address);
+}
+
+FaultingCodeOffset MacroAssemblerLOONG64Compat::storePtr(
+ Register src, const BaseIndex& address) {
+ Register base = address.base;
+ Register index = address.index;
+ int32_t offset = address.offset;
+ int32_t shift = Imm32::ShiftOf(address.scale).value;
+ FaultingCodeOffset fco;
+
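+  // Pick the cheapest addressing mode: register-indexed when offset and
+  // shift are both zero, shifted-add plus a 12-bit immediate offset when it
+  // fits, otherwise materialize the offset in a scratch register.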
+ if ((offset == 0) && (shift == 0)) {
+ fco = FaultingCodeOffset(currentOffset());
+ as_stx_d(src, base, index);
+ } else if (is_intN(offset, 12)) {
+ ScratchRegisterScope scratch(asMasm());
+ if (shift == 0) {
+ as_add_d(scratch, base, index);
+ } else {
+ as_alsl_d(scratch, index, base, shift - 1);
+ }
+ fco = FaultingCodeOffset(currentOffset());
+ as_st_d(src, scratch, offset);
+ } else {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, Imm32(offset));
+ if (shift == 0) {
+ as_add_d(scratch, scratch, index);
+ } else {
+ as_alsl_d(scratch, index, scratch, shift - 1);
+ }
+ fco = FaultingCodeOffset(currentOffset());
+ as_stx_d(src, base, scratch);
+ }
+ return fco;
+}
+
+void MacroAssemblerLOONG64Compat::storePtr(Register src, AbsoluteAddress dest) {
+ ScratchRegisterScope scratch(asMasm());
+ movePtr(ImmPtr(dest.addr), scratch);
+ storePtr(src, Address(scratch, 0));
+}
+
+void MacroAssemblerLOONG64Compat::testNullSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(asMasm());
+ splitTag(value, scratch2);
+ ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_NULL), cond);
+}
+
+void MacroAssemblerLOONG64Compat::testObjectSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(asMasm());
+ splitTag(value, scratch2);
+ ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_OBJECT), cond);
+}
+
+void MacroAssemblerLOONG64Compat::testUndefinedSet(Condition cond,
+ const ValueOperand& value,
+ Register dest) {
+ MOZ_ASSERT(cond == Equal || cond == NotEqual);
+ SecondScratchRegisterScope scratch2(asMasm());
+ splitTag(value, scratch2);
+ ma_cmp_set(dest, scratch2, ImmTag(JSVAL_TAG_UNDEFINED), cond);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(const ValueOperand& operand,
+ Register dest) {
+ as_slli_w(dest, operand.valueReg(), 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(Register src, Register dest) {
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(const Address& src,
+ Register dest) {
+ load32(Address(src.base, src.offset), dest);
+}
+
+void MacroAssemblerLOONG64Compat::unboxInt32(const BaseIndex& src,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ computeScaledAddress(src, scratch2);
+ load32(Address(scratch2, src.offset), dest);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(const ValueOperand& operand,
+ Register dest) {
+ as_slli_w(dest, operand.valueReg(), 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(Register src, Register dest) {
+ as_slli_w(dest, src, 0);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(const Address& src,
+ Register dest) {
+ ma_ld_w(dest, src);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBoolean(const BaseIndex& src,
+ Register dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ computeScaledAddress(src, scratch2);
+ ma_ld_w(dest, Address(scratch2, src.offset));
+}
+
+void MacroAssemblerLOONG64Compat::unboxDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ as_movgr2fr_d(dest, operand.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::unboxDouble(const Address& src,
+ FloatRegister dest) {
+ ma_fld_d(dest, Address(src.base, src.offset));
+}
+
+void MacroAssemblerLOONG64Compat::unboxDouble(const BaseIndex& src,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(src, scratch2);
+ unboxDouble(ValueOperand(scratch2), dest);
+}
+
+void MacroAssemblerLOONG64Compat::unboxString(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerLOONG64Compat::unboxString(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerLOONG64Compat::unboxString(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_STRING);
+}
+
+void MacroAssemblerLOONG64Compat::unboxSymbol(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerLOONG64Compat::unboxSymbol(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerLOONG64Compat::unboxSymbol(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_SYMBOL);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBigInt(const ValueOperand& operand,
+ Register dest) {
+ unboxNonDouble(operand, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBigInt(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxBigInt(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_BIGINT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxObject(const ValueOperand& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxObject(Register src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxObject(const Address& src,
+ Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+}
+
+void MacroAssemblerLOONG64Compat::unboxValue(const ValueOperand& src,
+ AnyRegister dest,
+ JSValueType type) {
+ if (dest.isFloat()) {
+ Label notInt32, end;
+ asMasm().branchTestInt32(Assembler::NotEqual, src, &notInt32);
+ convertInt32ToDouble(src.valueReg(), dest.fpu());
+ ma_b(&end, ShortJump);
+ bind(&notInt32);
+ unboxDouble(src, dest.fpu());
+ bind(&end);
+ } else {
+ unboxNonDouble(src, dest.gpr(), type);
+ }
+}
+
+void MacroAssemblerLOONG64Compat::boxDouble(FloatRegister src,
+ const ValueOperand& dest,
+ FloatRegister) {
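+  // In the 64-bit Value encoding a double is stored as its raw IEEE-754
+  // bits, so boxing is a plain FPR-to-GPR move.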
+ as_movfr2gr_d(dest.valueReg(), src);
+}
+
+void MacroAssemblerLOONG64Compat::boxNonDouble(JSValueType type, Register src,
+ const ValueOperand& dest) {
+ boxValue(type, src, dest.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::boolValueToDouble(const ValueOperand& operand,
+ FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ convertBoolToInt32(operand.valueReg(), scratch);
+ convertInt32ToDouble(scratch, dest);
+}
+
+void MacroAssemblerLOONG64Compat::int32ValueToDouble(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToDouble(operand.valueReg(), dest);
+}
+
+void MacroAssemblerLOONG64Compat::boolValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ convertBoolToInt32(operand.valueReg(), scratch);
+ convertInt32ToFloat32(scratch, dest);
+}
+
+void MacroAssemblerLOONG64Compat::int32ValueToFloat32(
+ const ValueOperand& operand, FloatRegister dest) {
+ convertInt32ToFloat32(operand.valueReg(), dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadConstantFloat32(float f,
+ FloatRegister dest) {
+ ma_lis(dest, f);
+}
+
+void MacroAssemblerLOONG64Compat::loadInt32OrDouble(const Address& src,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ Label end;
+
+ // If it's an int, convert it to double.
+ loadPtr(Address(src.base, src.offset), scratch2);
+ as_movgr2fr_d(dest, scratch2);
+ as_srli_d(scratch2, scratch2, JSVAL_TAG_SHIFT);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch2, &end);
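+  // The Int32 payload occupies the low 32 bits of the raw bits already
+  // copied into dest, so ffint.d.w can convert it in place.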
+ as_ffint_d_w(dest, dest);
+
+ bind(&end);
+}
+
+void MacroAssemblerLOONG64Compat::loadInt32OrDouble(const BaseIndex& addr,
+ FloatRegister dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ Label end;
+
+ // If it's an int, convert it to double.
+ computeScaledAddress(addr, scratch2);
+ // Since we only have one scratch, we need to stomp over it with the tag.
+ loadPtr(Address(scratch2, 0), scratch2);
+ as_movgr2fr_d(dest, scratch2);
+ as_srli_d(scratch2, scratch2, JSVAL_TAG_SHIFT);
+ asMasm().branchTestInt32(Assembler::NotEqual, scratch2, &end);
+ as_ffint_d_w(dest, dest);
+
+ bind(&end);
+}
+
+void MacroAssemblerLOONG64Compat::loadConstantDouble(double dp,
+ FloatRegister dest) {
+ ma_lid(dest, dp);
+}
+
+Register MacroAssemblerLOONG64Compat::extractObject(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ as_bstrpick_d(scratch, scratch, JSVAL_TAG_SHIFT - 1, 0);
+ return scratch;
+}
+
+Register MacroAssemblerLOONG64Compat::extractTag(const Address& address,
+ Register scratch) {
+ loadPtr(Address(address.base, address.offset), scratch);
+ as_bstrpick_d(scratch, scratch, 63, JSVAL_TAG_SHIFT);
+ return scratch;
+}
+
+Register MacroAssemblerLOONG64Compat::extractTag(const BaseIndex& address,
+ Register scratch) {
+ computeScaledAddress(address, scratch);
+ return extractTag(Address(scratch, address.offset), scratch);
+}
+
+/////////////////////////////////////////////////////////////////
+// X86/X64-common/ARM/LoongArch interface.
+/////////////////////////////////////////////////////////////////
+void MacroAssemblerLOONG64Compat::storeValue(ValueOperand val,
+ const Address& dest) {
+ storePtr(val.valueReg(), Address(dest.base, dest.offset));
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(ValueOperand val,
+ const BaseIndex& dest) {
+ storePtr(val.valueReg(), dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(JSValueType type, Register reg,
+ Address dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ tagValue(type, reg, ValueOperand(scratch2));
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(JSValueType type, Register reg,
+ BaseIndex dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ tagValue(type, reg, ValueOperand(scratch2));
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(const Value& val, Address dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), scratch2);
+ } else {
+ ma_li(scratch2, ImmWord(val.asRawBits()));
+ }
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::storeValue(const Value& val, BaseIndex dest) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ MOZ_ASSERT(dest.base != scratch2);
+
+ if (val.isGCThing()) {
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), scratch2);
+ } else {
+ ma_li(scratch2, ImmWord(val.asRawBits()));
+ }
+ storePtr(scratch2, dest);
+}
+
+void MacroAssemblerLOONG64Compat::loadValue(Address src, ValueOperand val) {
+ loadPtr(src, val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::loadValue(const BaseIndex& src,
+ ValueOperand val) {
+ loadPtr(src, val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::tagValue(JSValueType type, Register payload,
+ ValueOperand dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dest.valueReg() != scratch);
+
+ if (payload == dest.valueReg()) {
+ as_or(scratch, payload, zero);
+ payload = scratch;
+ }
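+  // Write the shifted tag first, then insert the payload beneath it: Int32
+  // and Boolean payloads are a full 32 bits; every other payload fits in
+  // the low JSVAL_TAG_SHIFT bits.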
+ ma_li(dest.valueReg(), ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ as_bstrins_d(dest.valueReg(), payload, 31, 0);
+ } else {
+ as_bstrins_d(dest.valueReg(), payload, JSVAL_TAG_SHIFT - 1, 0);
+ }
+}
+
+void MacroAssemblerLOONG64Compat::pushValue(ValueOperand val) {
+ push(val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::pushValue(const Address& addr) { push(addr); }
+
+void MacroAssemblerLOONG64Compat::popValue(ValueOperand val) {
+ pop(val.valueReg());
+}
+
+void MacroAssemblerLOONG64Compat::breakpoint(uint32_t value) {
+ as_break(value);
+}
+
+void MacroAssemblerLOONG64Compat::handleFailureWithHandlerTail(
+ Label* profilerExitTail, Label* bailoutTail) {
+ // Reserve space for exception information.
+ int size = (sizeof(ResumeFromException) + ABIStackAlignment) &
+ ~(ABIStackAlignment - 1);
+ asMasm().subPtr(Imm32(size), StackPointer);
+  mov(StackPointer, a0);  // Use a0 since it is the first function argument.
+
+ // Call the handler.
+ using Fn = void (*)(ResumeFromException* rfe);
+ asMasm().setupUnalignedABICall(a1);
+ asMasm().passABIArg(a0);
+ asMasm().callWithABI<Fn, HandleException>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ Label entryFrame;
+ Label catch_;
+ Label finally;
+ Label returnBaseline;
+ Label returnIon;
+ Label bailout;
+ Label wasm;
+ Label wasmCatch;
+
+ // Already clobbered a0, so use it...
+ load32(Address(StackPointer, ResumeFromException::offsetOfKind()), a0);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::EntryFrame), &entryFrame);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Catch),
+ &catch_);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Finally),
+ &finally);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnBaseline),
+ &returnBaseline);
+ asMasm().branch32(Assembler::Equal, a0,
+ Imm32(ExceptionResumeKind::ForcedReturnIon), &returnIon);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Bailout),
+ &bailout);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::Wasm),
+ &wasm);
+ asMasm().branch32(Assembler::Equal, a0, Imm32(ExceptionResumeKind::WasmCatch),
+ &wasmCatch);
+
+ breakpoint(); // Invalid kind.
+
+ // No exception handler. Load the error value, restore state and return from
+ // the entry frame.
+ bind(&entryFrame);
+ asMasm().moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+  // We're going to be returning by the Ion calling convention.
+ ma_pop(ra);
+ as_jirl(zero, ra, BOffImm16(0));
+
+ // If we found a catch handler, this must be a baseline frame. Restore
+ // state and jump to the catch block.
+ bind(&catch_);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a0);
+
+ // If we found a finally block, this must be a baseline frame. Push three
+ // values expected by the finally block: the exception, the exception stack,
+ // and BooleanValue(true).
+ bind(&finally);
+ ValueOperand exception = ValueOperand(a1);
+ loadValue(Address(sp, ResumeFromException::offsetOfException()), exception);
+
+ ValueOperand exceptionStack = ValueOperand(a2);
+ loadValue(Address(sp, ResumeFromException::offsetOfExceptionStack()),
+ exceptionStack);
+
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a0);
+ loadPtr(Address(sp, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(sp, ResumeFromException::offsetOfStackPointer()), sp);
+
+ pushValue(exception);
+ pushValue(exceptionStack);
+ pushValue(BooleanValue(true));
+ jump(a0);
+
+ // Return BaselineFrame->returnValue() to the caller.
+ // Used in debug mode and for GeneratorReturn.
+ Label profilingInstrumentation;
+ bind(&returnBaseline);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ loadValue(Address(FramePointer, BaselineFrame::reverseOffsetOfReturnValue()),
+ JSReturnOperand);
+ jump(&profilingInstrumentation);
+
+ // Return the given value to the caller.
+ bind(&returnIon);
+ loadValue(Address(StackPointer, ResumeFromException::offsetOfException()),
+ JSReturnOperand);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+
+ // If profiling is enabled, then update the lastProfilingFrame to refer to
+ // caller frame before returning. This code is shared by ForcedReturnIon
+ // and ForcedReturnBaseline.
+ bind(&profilingInstrumentation);
+ {
+ Label skipProfilingInstrumentation;
+ // Test if profiler enabled.
+ AbsoluteAddress addressOfEnabled(
+ asMasm().runtime()->geckoProfiler().addressOfEnabled());
+ asMasm().branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ jump(profilerExitTail);
+ bind(&skipProfilingInstrumentation);
+ }
+
+ as_or(StackPointer, FramePointer, zero);
+ pop(FramePointer);
+ ret();
+
+ // If we are bailing out to baseline to handle an exception, jump to
+ // the bailout tail stub. Load 1 (true) in ReturnReg to indicate success.
+ bind(&bailout);
+ loadPtr(Address(sp, ResumeFromException::offsetOfBailoutInfo()), a2);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(ReturnReg, Imm32(1));
+ jump(bailoutTail);
+
+ // If we are throwing and the innermost frame was a wasm frame, reset SP and
+ // FP; SP is pointing to the unwound return address to the wasm entry, so
+ // we can just ret().
+ bind(&wasm);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ ma_li(InstanceReg, ImmWord(wasm::FailInstanceReg));
+ ret();
+
+ // Found a wasm catch handler, restore state and jump to it.
+ bind(&wasmCatch);
+ loadPtr(Address(sp, ResumeFromException::offsetOfTarget()), a1);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfFramePointer()),
+ FramePointer);
+ loadPtr(Address(StackPointer, ResumeFromException::offsetOfStackPointer()),
+ StackPointer);
+ jump(a1);
+}
+
+CodeOffset MacroAssemblerLOONG64Compat::toggledJump(Label* label) {
+ CodeOffset ret(nextOffset().getOffset());
+ ma_b(label);
+ return ret;
+}
+
+CodeOffset MacroAssemblerLOONG64Compat::toggledCall(JitCode* target,
+ bool enabled) {
+ ScratchRegisterScope scratch(asMasm());
+ BufferOffset bo = nextOffset();
+  CodeOffset offset(bo.getOffset());  // First instruction location; not changed.
+ addPendingJump(bo, ImmPtr(target->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(target->raw()));
+ if (enabled) {
+ as_jirl(ra, scratch, BOffImm16(0));
+ } else {
+ as_nop();
+ }
+ MOZ_ASSERT_IF(!oom(), nextOffset().getOffset() - offset.offset() ==
+ ToggledCallSize(nullptr));
+  return offset;  // Location of the first instruction of the call sequence.
+}
+
+void MacroAssembler::shiftIndex32AndAdd(Register indexTemp32, int shift,
+ Register pointer) {
+ if (IsShiftInScaleRange(shift)) {
+ computeEffectiveAddress(
+ BaseIndex(pointer, indexTemp32, ShiftToScale(shift)), pointer);
+ return;
+ }
+ lshift32(Imm32(shift), indexTemp32);
+ addPtr(indexTemp32, pointer);
+}
+
+#ifdef ENABLE_WASM_TAIL_CALLS
+void MacroAssembler::wasmMarkSlowCall() { mov(ra, ra); }
+
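+// This must match the instruction emitted by wasmMarkSlowCall above:
+// mov(ra, ra) assembles to `ori ra, ra, 0`, i.e. opcode 0x03800000 with
+// rj = rd = r1 (ra) and a zero immediate.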
+const int32_t SlowCallMarker = 0x03800021; // ori ra, ra, 0
+
+void MacroAssembler::wasmCheckSlowCallsite(Register ra_, Label* notSlow,
+ Register temp1, Register temp2) {
+ MOZ_ASSERT(ra_ != temp2);
+ load32(Address(ra_, 0), temp2);
+ branch32(Assembler::NotEqual, temp2, Imm32(SlowCallMarker), notSlow);
+}
+#endif // ENABLE_WASM_TAIL_CALLS
+
+//}}} check_macroassembler_style
+
+} // namespace jit
+} // namespace js
diff --git a/js/src/jit/loong64/MacroAssembler-loong64.h b/js/src/jit/loong64/MacroAssembler-loong64.h
new file mode 100644
index 0000000000..908c77e037
--- /dev/null
+++ b/js/src/jit/loong64/MacroAssembler-loong64.h
@@ -0,0 +1,1058 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_MacroAssembler_loong64_h
+#define jit_loong64_MacroAssembler_loong64_h
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/MoveResolver.h"
+#include "wasm/WasmBuiltins.h"
+
+namespace js {
+namespace jit {
+
+enum LoadStoreSize {
+ SizeByte = 8,
+ SizeHalfWord = 16,
+ SizeWord = 32,
+ SizeDouble = 64
+};
+
+enum LoadStoreExtension { ZeroExtend = 0, SignExtend = 1 };
+
+enum JumpKind { LongJump = 0, ShortJump = 1 };
+
+static Register CallReg = t8;
+
+enum LiFlags {
+ Li64 = 0,
+ Li48 = 1,
+};
+
+struct ImmShiftedTag : public ImmWord {
+ explicit ImmShiftedTag(JSValueShiftedTag shtag) : ImmWord((uintptr_t)shtag) {}
+
+ explicit ImmShiftedTag(JSValueType type)
+ : ImmWord(uintptr_t(JSValueShiftedTag(JSVAL_TYPE_TO_SHIFTED_TAG(type)))) {
+ }
+};
+
+struct ImmTag : public Imm32 {
+ ImmTag(JSValueTag mask) : Imm32(int32_t(mask)) {}
+};
+
+static const int defaultShift = 3;
+static_assert(1 << defaultShift == sizeof(JS::Value),
+ "The defaultShift is wrong");
+
+// See documentation for ScratchTagScope and ScratchTagScopeRelease in
+// MacroAssembler-x64.h.
+
+class ScratchTagScope : public SecondScratchRegisterScope {
+ public:
+ ScratchTagScope(MacroAssembler& masm, const ValueOperand&)
+ : SecondScratchRegisterScope(masm) {}
+};
+
+class ScratchTagScopeRelease {
+ ScratchTagScope* ts_;
+
+ public:
+ explicit ScratchTagScopeRelease(ScratchTagScope* ts) : ts_(ts) {
+ ts_->release();
+ }
+ ~ScratchTagScopeRelease() { ts_->reacquire(); }
+};
+
+class MacroAssemblerLOONG64 : public Assembler {
+ protected:
+ // Perform a downcast. Should be removed by Bug 996602.
+ MacroAssembler& asMasm();
+ const MacroAssembler& asMasm() const;
+
+ Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
+ Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);
+
+ void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs,
+ FloatRegister rhs, DoubleCondition c,
+ FPConditionBit fcc = FCC0);
+
+ public:
+ void ma_li(Register dest, CodeLabel* label);
+ void ma_li(Register dest, ImmWord imm);
+ void ma_liPatchable(Register dest, ImmPtr imm);
+ void ma_liPatchable(Register dest, ImmWord imm, LiFlags flags = Li48);
+
+ // load
+ FaultingCodeOffset ma_ld_b(Register dest, Address address);
+ FaultingCodeOffset ma_ld_h(Register dest, Address address);
+ FaultingCodeOffset ma_ld_w(Register dest, Address address);
+ FaultingCodeOffset ma_ld_d(Register dest, Address address);
+ FaultingCodeOffset ma_ld_bu(Register dest, Address address);
+ FaultingCodeOffset ma_ld_hu(Register dest, Address address);
+ FaultingCodeOffset ma_ld_wu(Register dest, Address address);
+ FaultingCodeOffset ma_load(Register dest, Address address,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ FaultingCodeOffset ma_st_b(Register src, Address address);
+ FaultingCodeOffset ma_st_h(Register src, Address address);
+ FaultingCodeOffset ma_st_w(Register src, Address address);
+ FaultingCodeOffset ma_st_d(Register src, Address address);
+ FaultingCodeOffset ma_store(Register data, Address address,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_add_d(Register rd, Register rj, Imm32 imm);
+ void ma_add32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestOverflow(Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+ void ma_addPtrTestCarry(Condition cond, Register rd, Register rj, ImmWord imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub_d(Register rd, Register rj, Imm32 imm);
+ void ma_sub32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_subPtrTestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // multiplies. For now, there are only a few that we care about.
+ void ma_mul_d(Register rd, Register rj, Imm32 imm);
+ void ma_mulh_d(Register rd, Register rj, Imm32 imm);
+ void ma_mulPtrTestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+
+ // stack
+ void ma_pop(Register r);
+ void ma_push(Register r);
+
+ void branchWithCode(InstImm code, Label* label, JumpKind jumpKind,
+ Register scratch = Register::Invalid());
+  // branches when done from within LoongArch-specific code
+ void ma_b(Register lhs, ImmWord imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, Address addr, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Address addr, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(rhs != scratch);
+ ma_ld_d(scratch, addr);
+ ma_b(scratch, rhs, l, c, jumpKind);
+ }
+
+ void ma_bl(Label* l);
+
+ // fp instructions
+ void ma_lid(FloatRegister dest, double value);
+
+ void ma_mv(FloatRegister src, ValueOperand dest);
+ void ma_mv(ValueOperand src, FloatRegister dest);
+
+ FaultingCodeOffset ma_fld_s(FloatRegister ft, Address address);
+ FaultingCodeOffset ma_fld_d(FloatRegister ft, Address address);
+ FaultingCodeOffset ma_fst_d(FloatRegister ft, Address address);
+ FaultingCodeOffset ma_fst_s(FloatRegister ft, Address address);
+
+ void ma_pop(FloatRegister f);
+ void ma_push(FloatRegister f);
+
+ void ma_cmp_set(Register dst, Register lhs, ImmWord imm, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, ImmPtr imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, Imm32 imm, Condition c);
+ void ma_cmp_set(Register dst, Address address, ImmWord imm, Condition c);
+
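+  // Conditional moves built from LoongArch masks: maskeqz zeroes its source
+  // when the condition register is zero, masknez zeroes it when the
+  // condition is non-zero; or-ing the two halves keeps the selected value.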
+ void moveIfZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ as_masknez(scratch, src, cond);
+ as_maskeqz(dst, dst, cond);
+ as_or(dst, dst, scratch);
+ }
+ void moveIfNotZero(Register dst, Register src, Register cond) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(dst != scratch && cond != scratch);
+ as_maskeqz(scratch, src, cond);
+ as_masknez(dst, dst, cond);
+ as_or(dst, dst, scratch);
+ }
+
+  // These functions abstract access to the high part of the double-precision
+  // float register. They are intended to work with both 32-bit and 64-bit
+  // floating point coprocessors.
+ void moveToDoubleHi(Register src, FloatRegister dest) {
+ as_movgr2frh_w(dest, src);
+ }
+ void moveFromDoubleHi(FloatRegister src, Register dest) {
+ as_movfrh2gr_s(dest, src);
+ }
+
+ void moveToDouble(Register src, FloatRegister dest) {
+ as_movgr2fr_d(dest, src);
+ }
+ void moveFromDouble(FloatRegister src, Register dest) {
+ as_movfr2gr_d(dest, src);
+ }
+
+ public:
+ void ma_li(Register dest, ImmGCPtr ptr);
+
+ void ma_li(Register dest, Imm32 imm);
+ void ma_liPatchable(Register dest, Imm32 imm);
+
+ void ma_rotr_w(Register rd, Register rj, Imm32 shift);
+
+ void ma_fmovz(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+ void ma_fmovn(FloatFormat fmt, FloatRegister fd, FloatRegister fj,
+ Register rk);
+
+ void ma_and(Register rd, Register rj, Imm32 imm, bool bit32 = false);
+
+ void ma_or(Register rd, Register rj, Imm32 imm, bool bit32 = false);
+
+ void ma_xor(Register rd, Register rj, Imm32 imm, bool bit32 = false);
+
+ // load
+ FaultingCodeOffset ma_load(Register dest, const BaseIndex& src,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // store
+ FaultingCodeOffset ma_store(Register data, const BaseIndex& dest,
+ LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+ void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
+ LoadStoreExtension extension = SignExtend);
+
+ // arithmetic based ops
+ // add
+ void ma_add_w(Register rd, Register rj, Imm32 imm);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_add32TestCarry(Condition cond, Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // subtract
+ void ma_sub_w(Register rd, Register rj, Imm32 imm);
+ void ma_sub_w(Register rd, Register rj, Register rk);
+ void ma_sub32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // multiplies. For now, there are only a few that we care about.
+ void ma_mul(Register rd, Register rj, Imm32 imm);
+ void ma_mul32TestOverflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_mul32TestOverflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+ // divisions
+ void ma_div_branch_overflow(Register rd, Register rj, Register rk,
+ Label* overflow);
+ void ma_div_branch_overflow(Register rd, Register rj, Imm32 imm,
+ Label* overflow);
+
+  // Fast mod: uses scratch registers, and thus needs to be in the assembler;
+  // implicitly assumes that dest may be overwritten at the beginning of the
+  // sequence.
+ void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
+ int32_t shift, Label* negZero = nullptr);
+
+  // branches when done from within LoongArch-specific code
+ void ma_b(Register lhs, Register rhs, Label* l, Condition c,
+ JumpKind jumpKind = LongJump,
+ Register scratch = Register::Invalid());
+ void ma_b(Register lhs, Imm32 imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump);
+ void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c,
+ JumpKind jumpKind = LongJump) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(lhs != scratch);
+ ma_li(scratch, imm);
+ ma_b(lhs, scratch, l, c, jumpKind);
+ }
+
+ void ma_b(Label* l, JumpKind jumpKind = LongJump);
+
+ // fp instructions
+ void ma_lis(FloatRegister dest, float value);
+
+ FaultingCodeOffset ma_fst_d(FloatRegister src, BaseIndex address);
+ FaultingCodeOffset ma_fst_s(FloatRegister src, BaseIndex address);
+
+ FaultingCodeOffset ma_fld_d(FloatRegister dest, const BaseIndex& src);
+ FaultingCodeOffset ma_fld_s(FloatRegister dest, const BaseIndex& src);
+
+ // FP branches
+ void ma_bc_s(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+ void ma_bc_d(FloatRegister lhs, FloatRegister rhs, Label* label,
+ DoubleCondition c, JumpKind jumpKind = LongJump,
+ FPConditionBit fcc = FCC0);
+
+ void ma_call(ImmPtr dest);
+
+ void ma_jump(ImmPtr dest);
+
+ void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
+ void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
+ void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+ void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs,
+ DoubleCondition c);
+
+ void moveToDoubleLo(Register src, FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ }
+ void moveFromDoubleLo(FloatRegister src, Register dest) {
+ as_movfr2gr_s(dest, src);
+ }
+
+ void moveToFloat32(Register src, FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ }
+ void moveFromFloat32(FloatRegister src, Register dest) {
+ as_movfr2gr_s(dest, src);
+ }
+
+ // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
+ // Handle NaN specially if handleNaN is true.
+ void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+ void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN,
+ bool isMax);
+
+ FaultingCodeOffset loadDouble(const Address& addr, FloatRegister dest);
+ FaultingCodeOffset loadDouble(const BaseIndex& src, FloatRegister dest);
+
+ // Load a float value into a register, then expand it to a double.
+ void loadFloatAsDouble(const Address& addr, FloatRegister dest);
+ void loadFloatAsDouble(const BaseIndex& src, FloatRegister dest);
+
+ FaultingCodeOffset loadFloat32(const Address& addr, FloatRegister dest);
+ FaultingCodeOffset loadFloat32(const BaseIndex& src, FloatRegister dest);
+
+ void outOfLineWasmTruncateToInt32Check(FloatRegister input, Register output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+ void outOfLineWasmTruncateToInt64Check(FloatRegister input, Register64 output,
+ MIRType fromType, TruncFlags flags,
+ Label* rejoin,
+ wasm::BytecodeOffset trapOffset);
+
+ protected:
+ void wasmLoadImpl(const wasm::MemoryAccessDesc& access, Register memoryBase,
+ Register ptr, Register ptrScratch, AnyRegister output,
+ Register tmp);
+ void wasmStoreImpl(const wasm::MemoryAccessDesc& access, AnyRegister value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+};
+
+class MacroAssembler;
+
+class MacroAssemblerLOONG64Compat : public MacroAssemblerLOONG64 {
+ public:
+ using MacroAssemblerLOONG64::call;
+
+ MacroAssemblerLOONG64Compat() {}
+
+ void convertBoolToInt32(Register src, Register dest) {
+ ma_and(dest, src, Imm32(0xff));
+ };
+ void convertInt32ToDouble(Register src, FloatRegister dest) {
+ as_movgr2fr_w(dest, src);
+ as_ffint_d_w(dest, dest);
+ };
+ void convertInt32ToDouble(const Address& src, FloatRegister dest) {
+ ma_fld_s(dest, src);
+ as_ffint_d_w(dest, dest);
+ };
+ void convertInt32ToDouble(const BaseIndex& src, FloatRegister dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != src.base);
+ MOZ_ASSERT(scratch != src.index);
+ computeScaledAddress(src, scratch);
+ convertInt32ToDouble(Address(scratch, src.offset), dest);
+ };
+ void convertUInt32ToDouble(Register src, FloatRegister dest);
+ void convertUInt32ToFloat32(Register src, FloatRegister dest);
+ void convertDoubleToFloat32(FloatRegister src, FloatRegister dest);
+ void convertDoubleToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertDoubleToPtr(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+ void convertFloat32ToInt32(FloatRegister src, Register dest, Label* fail,
+ bool negativeZeroCheck = true);
+
+ void convertFloat32ToDouble(FloatRegister src, FloatRegister dest);
+ void convertInt32ToFloat32(Register src, FloatRegister dest);
+ void convertInt32ToFloat32(const Address& src, FloatRegister dest);
+
+ void movq(Register rj, Register rd);
+
+ void computeScaledAddress(const BaseIndex& address, Register dest);
+
+ void computeEffectiveAddress(const Address& address, Register dest) {
+ ma_add_d(dest, address.base, Imm32(address.offset));
+ }
+
+ void computeEffectiveAddress(const BaseIndex& address, Register dest) {
+ computeScaledAddress(address, dest);
+ if (address.offset) {
+ ma_add_d(dest, dest, Imm32(address.offset));
+ }
+ }
+
+ void j(Label* dest) { ma_b(dest); }
+
+ void mov(Register src, Register dest) { as_ori(dest, src, 0); }
+ void mov(ImmWord imm, Register dest) { ma_li(dest, imm); }
+ void mov(ImmPtr imm, Register dest) {
+ mov(ImmWord(uintptr_t(imm.value)), dest);
+ }
+ void mov(CodeLabel* label, Register dest) { ma_li(dest, label); }
+ void mov(Register src, Address dest) { MOZ_CRASH("NYI-IC"); }
+ void mov(Address src, Register dest) { MOZ_CRASH("NYI-IC"); }
+
+ void writeDataRelocation(const Value& val) {
+ // Raw GC pointer relocations and Value relocations both end up in
+ // TraceOneDataRelocation.
+ if (val.isGCThing()) {
+ gc::Cell* cell = val.toGCThing();
+ if (cell && gc::IsInsideNursery(cell)) {
+ embedsNurseryPointers_ = true;
+ }
+ dataRelocations_.writeUnsigned(currentOffset());
+ }
+ }
+
+ void branch(JitCode* c) {
+ ScratchRegisterScope scratch(asMasm());
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ImmPtr(c->raw()), RelocationKind::JITCODE);
+ ma_liPatchable(scratch, ImmPtr(c->raw()));
+ as_jirl(zero, scratch, BOffImm16(0));
+ }
+ void branch(const Register reg) { as_jirl(zero, reg, BOffImm16(0)); }
+ void nop() { as_nop(); }
+ void ret() {
+ ma_pop(ra);
+ as_jirl(zero, ra, BOffImm16(0));
+ }
+ inline void retn(Imm32 n);
+ void push(Imm32 imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(ImmGCPtr imm) {
+ ScratchRegisterScope scratch(asMasm());
+ ma_li(scratch, imm);
+ ma_push(scratch);
+ }
+ void push(const Address& address) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ loadPtr(address, scratch2);
+ ma_push(scratch2);
+ }
+ void push(Register reg) { ma_push(reg); }
+ void push(FloatRegister reg) { ma_push(reg); }
+ void pop(Register reg) { ma_pop(reg); }
+ void pop(FloatRegister reg) { ma_pop(reg); }
+
+  // Emit a branch that can be toggled to a non-operation. On LOONG64 we use
+  // the "andi" instruction to toggle the branch.
+  // See ToggleToJmp(), ToggleToCmp().
+ CodeOffset toggledJump(Label* label);
+
+  // Emit a "jirl" or "nop" instruction. ToggleCall can be used to patch
+  // this instruction.
+ CodeOffset toggledCall(JitCode* target, bool enabled);
+
+ static size_t ToggledCallSize(uint8_t* code) {
+ // Four instructions used in: MacroAssemblerLOONG64Compat::toggledCall
+ return 4 * sizeof(uint32_t);
+ }
+
+ CodeOffset pushWithPatch(ImmWord imm) {
+ ScratchRegisterScope scratch(asMasm());
+ CodeOffset offset = movWithPatch(imm, scratch);
+ ma_push(scratch);
+ return offset;
+ }
+
+ CodeOffset movWithPatch(ImmWord imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm, Li64);
+ return offset;
+ }
+ CodeOffset movWithPatch(ImmPtr imm, Register dest) {
+ CodeOffset offset = CodeOffset(currentOffset());
+ ma_liPatchable(dest, imm);
+ return offset;
+ }
+
+ void writeCodePointer(CodeLabel* label) {
+ label->patchAt()->bind(currentOffset());
+ label->setLinkMode(CodeLabel::RawPointer);
+ m_buffer.ensureSpace(sizeof(void*));
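+    // Emit two poisoned 32-bit words as the 8-byte placeholder; the label
+    // patch writes the real pointer over them.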
+ writeInst(-1);
+ writeInst(-1);
+ }
+
+ void jump(Label* label) { ma_b(label); }
+ void jump(Register reg) { as_jirl(zero, reg, BOffImm16(0)); }
+ void jump(const Address& address) {
+ ScratchRegisterScope scratch(asMasm());
+ loadPtr(address, scratch);
+ as_jirl(zero, scratch, BOffImm16(0));
+ }
+
+ void jump(JitCode* code) { branch(code); }
+
+ void jump(ImmPtr ptr) {
+ BufferOffset bo = m_buffer.nextOffset();
+ addPendingJump(bo, ptr, RelocationKind::HARDCODED);
+ ma_jump(ptr);
+ }
+
+ void jump(TrampolinePtr code) { jump(ImmPtr(code.value)); }
+
+ void splitTag(Register src, Register dest) {
+ as_srli_d(dest, src, JSVAL_TAG_SHIFT);
+ }
+
+ void splitTag(const ValueOperand& operand, Register dest) {
+ splitTag(operand.valueReg(), dest);
+ }
+
+ void splitTagForTest(const ValueOperand& value, ScratchTagScope& tag) {
+ splitTag(value, tag);
+ }
+
+ // unboxing code
+ void unboxNonDouble(const ValueOperand& operand, Register dest,
+ JSValueType type) {
+ unboxNonDouble(operand.valueReg(), dest, type);
+ }
+
+ template <typename T>
+ void unboxNonDouble(T src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ load32(src, dest);
+ return;
+ }
+ loadPtr(src, dest);
+ unboxNonDouble(dest, dest, type);
+ }
+
+ void unboxNonDouble(Register src, Register dest, JSValueType type) {
+ MOZ_ASSERT(type != JSVAL_TYPE_DOUBLE);
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ as_slli_w(dest, src, 0);
+ return;
+ }
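+    // For pointer-like payloads the tag value is known statically, so
+    // xor-ing with the shifted tag clears it without needing a mask.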
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != src);
+ mov(ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)), scratch);
+ as_xor(dest, src, scratch);
+ }
+
+ template <typename T>
+ void unboxObjectOrNull(const T& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ static_assert(JS::detail::ValueObjectOrNullBit ==
+ (uint64_t(0x8) << JSVAL_TAG_SHIFT));
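+    // Clear the ObjectOrNull bit so that a null Value unboxes to nullptr.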
+ as_bstrins_d(dest, zero, JSVAL_TAG_SHIFT + 3, JSVAL_TAG_SHIFT + 3);
+ }
+
+ void unboxGCThingForGCBarrier(const Address& src, Register dest) {
+ loadPtr(src, dest);
+ as_bstrpick_d(dest, dest, JSVAL_TAG_SHIFT - 1, 0);
+ }
+ void unboxGCThingForGCBarrier(const ValueOperand& src, Register dest) {
+ as_bstrpick_d(dest, src.valueReg(), JSVAL_TAG_SHIFT - 1, 0);
+ }
+
+ void unboxWasmAnyRefGCThingForGCBarrier(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != dest);
+ movePtr(ImmWord(wasm::AnyRef::GCThingMask), scratch);
+ loadPtr(src, dest);
+ as_and(dest, dest, scratch);
+ }
+
+ // Like unboxGCThingForGCBarrier, but loads the GC thing's chunk base.
+ void getGCThingValueChunk(const Address& src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ MOZ_ASSERT(scratch != dest);
+ loadPtr(src, dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), scratch);
+ as_and(dest, dest, scratch);
+ }
+ void getGCThingValueChunk(const ValueOperand& src, Register dest) {
+ MOZ_ASSERT(src.valueReg() != dest);
+ movePtr(ImmWord(JS::detail::ValueGCThingPayloadChunkMask), dest);
+ as_and(dest, dest, src.valueReg());
+ }
+
+ void getWasmAnyRefGCThingChunk(Register src, Register dest) {
+ MOZ_ASSERT(src != dest);
+ movePtr(ImmWord(wasm::AnyRef::GCThingChunkMask), dest);
+ as_and(dest, dest, src);
+ }
+
+ void unboxInt32(const ValueOperand& operand, Register dest);
+ void unboxInt32(Register src, Register dest);
+ void unboxInt32(const Address& src, Register dest);
+ void unboxInt32(const BaseIndex& src, Register dest);
+ void unboxBoolean(const ValueOperand& operand, Register dest);
+ void unboxBoolean(Register src, Register dest);
+ void unboxBoolean(const Address& src, Register dest);
+ void unboxBoolean(const BaseIndex& src, Register dest);
+ void unboxDouble(const ValueOperand& operand, FloatRegister dest);
+ void unboxDouble(Register src, Register dest);
+ void unboxDouble(const Address& src, FloatRegister dest);
+ void unboxDouble(const BaseIndex& src, FloatRegister dest);
+ void unboxString(const ValueOperand& operand, Register dest);
+ void unboxString(Register src, Register dest);
+ void unboxString(const Address& src, Register dest);
+ void unboxSymbol(const ValueOperand& src, Register dest);
+ void unboxSymbol(Register src, Register dest);
+ void unboxSymbol(const Address& src, Register dest);
+ void unboxBigInt(const ValueOperand& operand, Register dest);
+ void unboxBigInt(Register src, Register dest);
+ void unboxBigInt(const Address& src, Register dest);
+ void unboxObject(const ValueOperand& src, Register dest);
+ void unboxObject(Register src, Register dest);
+ void unboxObject(const Address& src, Register dest);
+ void unboxObject(const BaseIndex& src, Register dest) {
+ unboxNonDouble(src, dest, JSVAL_TYPE_OBJECT);
+ }
+ void unboxValue(const ValueOperand& src, AnyRegister dest, JSValueType type);
+
+ void notBoolean(const ValueOperand& val) {
+ as_xori(val.valueReg(), val.valueReg(), 1);
+ }
+
+ // boxing code
+ void boxDouble(FloatRegister src, const ValueOperand& dest, FloatRegister);
+ void boxNonDouble(JSValueType type, Register src, const ValueOperand& dest);
+
+  // Extended unboxing API. If the payload is already in a register, returns
+  // that register. Otherwise, emits a move into the given scratch register
+  // and returns that register.
+ [[nodiscard]] Register extractObject(const Address& address,
+ Register scratch);
+ [[nodiscard]] Register extractObject(const ValueOperand& value,
+ Register scratch) {
+ unboxObject(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractString(const ValueOperand& value,
+ Register scratch) {
+ unboxString(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractSymbol(const ValueOperand& value,
+ Register scratch) {
+ unboxSymbol(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractInt32(const ValueOperand& value,
+ Register scratch) {
+ unboxInt32(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractBoolean(const ValueOperand& value,
+ Register scratch) {
+ unboxBoolean(value, scratch);
+ return scratch;
+ }
+ [[nodiscard]] Register extractTag(const Address& address, Register scratch);
+ [[nodiscard]] Register extractTag(const BaseIndex& address, Register scratch);
+ [[nodiscard]] Register extractTag(const ValueOperand& value,
+ Register scratch) {
+ splitTag(value, scratch);
+ return scratch;
+ }
+
+ inline void ensureDouble(const ValueOperand& source, FloatRegister dest,
+ Label* failure);
+
+ void boolValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToDouble(const ValueOperand& operand, FloatRegister dest);
+ void loadInt32OrDouble(const Address& src, FloatRegister dest);
+ void loadInt32OrDouble(const BaseIndex& addr, FloatRegister dest);
+ void loadConstantDouble(double dp, FloatRegister dest);
+
+ void boolValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void int32ValueToFloat32(const ValueOperand& operand, FloatRegister dest);
+ void loadConstantFloat32(float f, FloatRegister dest);
+
+ void testNullSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testObjectSet(Condition cond, const ValueOperand& value, Register dest);
+
+ void testUndefinedSet(Condition cond, const ValueOperand& value,
+ Register dest);
+
+ template <typename T>
+ void loadUnboxedValue(const T& address, MIRType type, AnyRegister dest) {
+ if (dest.isFloat()) {
+ loadInt32OrDouble(address, dest.fpu());
+ } else {
+ unboxNonDouble(address, dest.gpr(), ValueTypeFromMIRType(type));
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, BaseIndex address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ ScratchRegisterScope scratch(asMasm());
+ SecondScratchRegisterScope scratch2(asMasm());
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, scratch2);
+ } else {
+ unboxNonDouble(value, scratch2, type);
+ }
+ computeEffectiveAddress(address, scratch);
+ as_st_d(scratch2, scratch, 0);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void storeUnboxedPayload(ValueOperand value, Address address, size_t nbytes,
+ JSValueType type) {
+ switch (nbytes) {
+ case 8: {
+ SecondScratchRegisterScope scratch2(asMasm());
+ if (type == JSVAL_TYPE_OBJECT) {
+ unboxObjectOrNull(value, scratch2);
+ } else {
+ unboxNonDouble(value, scratch2, type);
+ }
+ storePtr(scratch2, address);
+ return;
+ }
+ case 4:
+ store32(value.valueReg(), address);
+ return;
+ case 1:
+ store8(value.valueReg(), address);
+ return;
+ default:
+ MOZ_CRASH("Bad payload width");
+ }
+ }
+
+ void boxValue(JSValueType type, Register src, Register dest) {
+ ScratchRegisterScope scratch(asMasm());
+ if (src == dest) {
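+      // ori rd, rj, 0 is a register move; copy the payload aside so writing
+      // the tag into dest below does not clobber it.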
+ as_ori(scratch, src, 0);
+ src = scratch;
+ }
+#ifdef DEBUG
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ Label upper32BitsSignExtended;
+ as_slli_w(dest, src, 0);
+ ma_b(src, dest, &upper32BitsSignExtended, Equal, ShortJump);
+ breakpoint();
+ bind(&upper32BitsSignExtended);
+ }
+#endif
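+    // Materialize the shifted tag, then insert the payload bits below it.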
+ ma_li(dest, ImmWord(JSVAL_TYPE_TO_SHIFTED_TAG(type)));
+ if (type == JSVAL_TYPE_INT32 || type == JSVAL_TYPE_BOOLEAN) {
+ as_bstrins_d(dest, src, 31, 0);
+ } else {
+ as_bstrins_d(dest, src, JSVAL_TAG_SHIFT - 1, 0);
+ }
+ }
+
+ void storeValue(ValueOperand val, const Address& dest);
+ void storeValue(ValueOperand val, const BaseIndex& dest);
+ void storeValue(JSValueType type, Register reg, Address dest);
+ void storeValue(JSValueType type, Register reg, BaseIndex dest);
+ void storeValue(const Value& val, Address dest);
+ void storeValue(const Value& val, BaseIndex dest);
+ void storeValue(const Address& src, const Address& dest, Register temp) {
+ loadPtr(src, temp);
+ storePtr(temp, dest);
+ }
+
+ void storePrivateValue(Register src, const Address& dest) {
+ storePtr(src, dest);
+ }
+ void storePrivateValue(ImmGCPtr imm, const Address& dest) {
+ storePtr(imm, dest);
+ }
+
+ void loadValue(Address src, ValueOperand val);
+ void loadValue(const BaseIndex& src, ValueOperand val);
+
+ void loadUnalignedValue(const Address& src, ValueOperand dest) {
+ loadValue(src, dest);
+ }
+
+ void tagValue(JSValueType type, Register payload, ValueOperand dest);
+
+ void pushValue(ValueOperand val);
+ void popValue(ValueOperand val);
+ void pushValue(const Value& val) {
+ if (val.isGCThing()) {
+ ScratchRegisterScope scratch(asMasm());
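+      // Record a data relocation so the GC can trace and update the patched
+      // pointer if the GC thing moves.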
+ writeDataRelocation(val);
+ movWithPatch(ImmWord(val.asRawBits()), scratch);
+ push(scratch);
+ } else {
+ push(ImmWord(val.asRawBits()));
+ }
+ }
+ void pushValue(JSValueType type, Register reg) {
+ SecondScratchRegisterScope scratch2(asMasm());
+ boxValue(type, reg, scratch2);
+ push(scratch2);
+ }
+ void pushValue(const Address& addr);
+ void pushValue(const BaseIndex& addr, Register scratch) {
+ loadValue(addr, ValueOperand(scratch));
+ pushValue(ValueOperand(scratch));
+ }
+
+ void handleFailureWithHandlerTail(Label* profilerExitTail,
+ Label* bailoutTail);
+
+ /////////////////////////////////////////////////////////////////
+ // Common interface.
+ /////////////////////////////////////////////////////////////////
+ public:
+ // The following functions are exposed for use in platform-shared code.
+
+ inline void incrementInt32Value(const Address& addr);
+
+ void move32(Imm32 imm, Register dest);
+ void move32(Register src, Register dest);
+
+ void movePtr(Register src, Register dest);
+ void movePtr(ImmWord imm, Register dest);
+ void movePtr(ImmPtr imm, Register dest);
+ void movePtr(wasm::SymbolicAddress imm, Register dest);
+ void movePtr(ImmGCPtr imm, Register dest);
+
+ FaultingCodeOffset load8SignExtend(const Address& address, Register dest);
+ FaultingCodeOffset load8SignExtend(const BaseIndex& src, Register dest);
+
+ FaultingCodeOffset load8ZeroExtend(const Address& address, Register dest);
+ FaultingCodeOffset load8ZeroExtend(const BaseIndex& src, Register dest);
+
+ FaultingCodeOffset load16SignExtend(const Address& address, Register dest);
+ FaultingCodeOffset load16SignExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedSignExtend(const S& src, Register dest) {
+ load16SignExtend(src, dest);
+ }
+
+ FaultingCodeOffset load16ZeroExtend(const Address& address, Register dest);
+ FaultingCodeOffset load16ZeroExtend(const BaseIndex& src, Register dest);
+
+ template <typename S>
+ void load16UnalignedZeroExtend(const S& src, Register dest) {
+ load16ZeroExtend(src, dest);
+ }
+
+ FaultingCodeOffset load32(const Address& address, Register dest);
+ FaultingCodeOffset load32(const BaseIndex& address, Register dest);
+ void load32(AbsoluteAddress address, Register dest);
+ void load32(wasm::SymbolicAddress address, Register dest);
+
+ template <typename S>
+ void load32Unaligned(const S& src, Register dest) {
+ load32(src, dest);
+ }
+
+ FaultingCodeOffset load64(const Address& address, Register64 dest) {
+ return loadPtr(address, dest.reg);
+ }
+ FaultingCodeOffset load64(const BaseIndex& address, Register64 dest) {
+ return loadPtr(address, dest.reg);
+ }
+
+ template <typename S>
+ void load64Unaligned(const S& src, Register64 dest) {
+ load64(src, dest);
+ }
+
+ FaultingCodeOffset loadPtr(const Address& address, Register dest);
+ FaultingCodeOffset loadPtr(const BaseIndex& src, Register dest);
+ void loadPtr(AbsoluteAddress address, Register dest);
+ void loadPtr(wasm::SymbolicAddress address, Register dest);
+
+ void loadPrivate(const Address& address, Register dest);
+
+ FaultingCodeOffset store8(Register src, const Address& address);
+ FaultingCodeOffset store8(Register src, const BaseIndex& address);
+ void store8(Imm32 imm, const Address& address);
+ void store8(Imm32 imm, const BaseIndex& address);
+
+ FaultingCodeOffset store16(Register src, const Address& address);
+ FaultingCodeOffset store16(Register src, const BaseIndex& address);
+ void store16(Imm32 imm, const Address& address);
+ void store16(Imm32 imm, const BaseIndex& address);
+
+ template <typename T>
+ void store16Unaligned(Register src, const T& dest) {
+ store16(src, dest);
+ }
+
+ FaultingCodeOffset store32(Register src, const Address& address);
+ FaultingCodeOffset store32(Register src, const BaseIndex& address);
+ void store32(Register src, AbsoluteAddress address);
+ void store32(Imm32 src, const Address& address);
+ void store32(Imm32 src, const BaseIndex& address);
+
+ template <typename T>
+ void store32Unaligned(Register src, const T& dest) {
+ store32(src, dest);
+ }
+
+ void store64(Imm64 imm, Address address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+ void store64(Imm64 imm, const BaseIndex& address) {
+ storePtr(ImmWord(imm.value), address);
+ }
+
+ FaultingCodeOffset store64(Register64 src, Address address) {
+ return storePtr(src.reg, address);
+ }
+
+ FaultingCodeOffset store64(Register64 src, const BaseIndex& address) {
+ return storePtr(src.reg, address);
+ }
+
+ template <typename T>
+ void store64Unaligned(Register64 src, const T& dest) {
+ store64(src, dest);
+ }
+
+ template <typename T>
+ void storePtr(ImmWord imm, T address);
+ template <typename T>
+ void storePtr(ImmPtr imm, T address);
+ template <typename T>
+ void storePtr(ImmGCPtr imm, T address);
+ void storePtr(Register src, AbsoluteAddress dest);
+ FaultingCodeOffset storePtr(Register src, const Address& address);
+ FaultingCodeOffset storePtr(Register src, const BaseIndex& address);
+
+ void moveDouble(FloatRegister src, FloatRegister dest) {
+ as_fmov_d(dest, src);
+ }
+
+ void zeroDouble(FloatRegister reg) { moveToDouble(zero, reg); }
+
+ void convertUInt64ToDouble(Register src, FloatRegister dest);
+
+ void breakpoint(uint32_t value = 0);
+
+ void checkStackAlignment() {
+#ifdef DEBUG
+ Label aligned;
+ ScratchRegisterScope scratch(asMasm());
+ as_andi(scratch, sp, ABIStackAlignment - 1);
+ ma_b(scratch, zero, &aligned, Equal, ShortJump);
+ breakpoint();
+ bind(&aligned);
+#endif
+  }
+
+ static void calculateAlignedStackPointer(void** stackPointer);
+
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, ImmPtr rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+ void cmpPtrSet(Assembler::Condition cond, Address lhs, Register rhs,
+ Register dest);
+
+ void cmp32Set(Assembler::Condition cond, Register lhs, Address rhs,
+ Register dest);
+
+ protected:
+ bool buildOOLFakeExitFrame(void* fakeReturnAddr);
+
+ void wasmLoadI64Impl(const wasm::MemoryAccessDesc& access,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register64 output, Register tmp);
+ void wasmStoreI64Impl(const wasm::MemoryAccessDesc& access, Register64 value,
+ Register memoryBase, Register ptr, Register ptrScratch,
+ Register tmp);
+
+ public:
+ void lea(Operand addr, Register dest) {
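+    // Address arithmetic only: compute base + displacement into dest.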
+ ma_add_d(dest, addr.baseReg(), Imm32(addr.disp()));
+ }
+
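+  // jirl zero, ra, 0 is the standard LoongArch return sequence.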
+ void abiret() { as_jirl(zero, ra, BOffImm16(0)); }
+
+ void moveFloat32(FloatRegister src, FloatRegister dest) {
+ as_fmov_s(dest, src);
+ }
+
+ // Instrumentation for entering and leaving the profiler.
+ void profilerEnterFrame(Register framePtr, Register scratch);
+ void profilerExitFrame();
+};
+
+typedef MacroAssemblerLOONG64Compat MacroAssemblerSpecific;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_MacroAssembler_loong64_h */
diff --git a/js/src/jit/loong64/MoveEmitter-loong64.cpp b/js/src/jit/loong64/MoveEmitter-loong64.cpp
new file mode 100644
index 0000000000..a12378be83
--- /dev/null
+++ b/js/src/jit/loong64/MoveEmitter-loong64.cpp
@@ -0,0 +1,326 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/loong64/MoveEmitter-loong64.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+void MoveEmitterLOONG64::breakCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // Handles cycles of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (A -> B), which we reach first. We save B, then allow
+ // the original move to continue.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(getAdjustedAddress(to), fpscratch32);
+ masm.storeFloat32(fpscratch32, cycleSlot(slotId));
+ } else {
+ masm.storeFloat32(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(getAdjustedAddress(to), fpscratch64);
+ masm.storeDouble(fpscratch64, cycleSlot(slotId));
+ } else {
+ masm.storeDouble(to.floatReg(), cycleSlot(slotId));
+ }
+ break;
+ case MoveOp::INT32:
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.load32(getAdjustedAddress(to), scratch2);
+ masm.store32(scratch2, cycleSlot(0));
+ } else {
+ masm.store32(to.reg(), cycleSlot(0));
+ }
+ break;
+ case MoveOp::GENERAL:
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.loadPtr(getAdjustedAddress(to), scratch2);
+ masm.storePtr(scratch2, cycleSlot(0));
+ } else {
+ masm.storePtr(to.reg(), cycleSlot(0));
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterLOONG64::completeCycle(const MoveOperand& from,
+ const MoveOperand& to, MoveOp::Type type,
+ uint32_t slotId) {
+  // Handles cycles of the form:
+ // (A -> B)
+ // (B -> A)
+ //
+ // This case handles (B -> A), which we reach last. We emit a move from the
+  // saved value of B to A.
+ switch (type) {
+ case MoveOp::FLOAT32:
+ if (to.isMemory()) {
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(cycleSlot(slotId), fpscratch32);
+ masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+ } else {
+ masm.loadFloat32(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::DOUBLE:
+ if (to.isMemory()) {
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(cycleSlot(slotId), fpscratch64);
+ masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+ } else {
+ masm.loadDouble(cycleSlot(slotId), to.floatReg());
+ }
+ break;
+ case MoveOp::INT32:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.load32(cycleSlot(0), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ masm.load32(cycleSlot(0), to.reg());
+ }
+ break;
+ case MoveOp::GENERAL:
+ MOZ_ASSERT(slotId == 0);
+ if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.loadPtr(cycleSlot(0), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ masm.loadPtr(cycleSlot(0), to.reg());
+ }
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterLOONG64::emit(const MoveResolver& moves) {
+ if (moves.numCycles()) {
+ // Reserve stack for cycle resolution
+ static_assert(SpillSlotSize == 8);
+ masm.reserveStack(moves.numCycles() * SpillSlotSize);
+ pushedAtCycle_ = masm.framePushed();
+ }
+
+ for (size_t i = 0; i < moves.numMoves(); i++) {
+ emit(moves.getMove(i));
+ }
+}
+
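+// Return the stack address of a cycle-resolution spill slot, adjusting for
+// any stack-pointer motion since the slots were reserved.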
+Address MoveEmitterLOONG64::cycleSlot(uint32_t slot, uint32_t subslot) const {
+ int32_t offset = masm.framePushed() - pushedAtCycle_;
+ MOZ_ASSERT(Imm16::IsInSignedRange(offset));
+ return Address(StackPointer, offset + slot * sizeof(double) + subslot);
+}
+
+int32_t MoveEmitterLOONG64::getAdjustedOffset(const MoveOperand& operand) {
+ MOZ_ASSERT(operand.isMemoryOrEffectiveAddress());
+ if (operand.base() != StackPointer) {
+ return operand.disp();
+ }
+
+ // Adjust offset if stack pointer has been moved.
+ return operand.disp() + masm.framePushed() - pushedAtStart_;
+}
+
+Address MoveEmitterLOONG64::getAdjustedAddress(const MoveOperand& operand) {
+ return Address(operand.base(), getAdjustedOffset(operand));
+}
+
+void MoveEmitterLOONG64::emitMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg()) {
+ masm.movePtr(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.storePtr(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.loadPtr(getAdjustedAddress(from), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+ masm.storePtr(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitMove arguments.");
+ }
+}
+
+void MoveEmitterLOONG64::emitInt32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isGeneralReg()) {
+ if (to.isGeneralReg()) {
+ masm.move32(from.reg(), to.reg());
+ } else if (to.isMemory()) {
+ masm.store32(from.reg(), getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isMemory()) {
+ if (to.isGeneralReg()) {
+ masm.load32(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.load32(getAdjustedAddress(from), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else if (from.isEffectiveAddress()) {
+ if (to.isGeneralReg()) {
+ masm.computeEffectiveAddress(getAdjustedAddress(from), to.reg());
+ } else if (to.isMemory()) {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.computeEffectiveAddress(getAdjustedAddress(from), scratch2);
+ masm.store32(scratch2, getAdjustedAddress(to));
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+ } else {
+ MOZ_CRASH("Invalid emitInt32Move arguments.");
+ }
+}
+
+void MoveEmitterLOONG64::emitFloat32Move(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveFloat32(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+      // This should only be used when passing a float parameter in a1, a2,
+      // or a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.moveFromFloat32(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeFloat32(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ MOZ_ASSERT(from.isMemory());
+ masm.loadFloat32(getAdjustedAddress(from), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ MOZ_ASSERT(from.isMemory());
+    // This should only be used when passing a float parameter in a1, a2, or
+    // a3.
+ MOZ_ASSERT(to.reg() == a1 || to.reg() == a2 || to.reg() == a3);
+ masm.loadPtr(getAdjustedAddress(from), to.reg());
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ ScratchFloat32Scope fpscratch32(masm);
+ masm.loadFloat32(getAdjustedAddress(from), fpscratch32);
+ masm.storeFloat32(fpscratch32, getAdjustedAddress(to));
+ }
+}
+
+void MoveEmitterLOONG64::emitDoubleMove(const MoveOperand& from,
+ const MoveOperand& to) {
+ if (from.isFloatReg()) {
+ if (to.isFloatReg()) {
+ masm.moveDouble(from.floatReg(), to.floatReg());
+ } else if (to.isGeneralReg()) {
+ masm.moveFromDouble(from.floatReg(), to.reg());
+ } else {
+ MOZ_ASSERT(to.isMemory());
+ masm.storeDouble(from.floatReg(), getAdjustedAddress(to));
+ }
+ } else if (to.isFloatReg()) {
+ if (from.isMemory()) {
+ masm.loadDouble(getAdjustedAddress(from), to.floatReg());
+ } else {
+ masm.moveToDouble(from.reg(), to.floatReg());
+ }
+ } else {
+ MOZ_ASSERT(from.isMemory());
+ MOZ_ASSERT(to.isMemory());
+ ScratchDoubleScope fpscratch64(masm);
+ masm.loadDouble(getAdjustedAddress(from), fpscratch64);
+ masm.storeDouble(fpscratch64, getAdjustedAddress(to));
+ }
+}
+
+void MoveEmitterLOONG64::emit(const MoveOp& move) {
+ const MoveOperand& from = move.from();
+ const MoveOperand& to = move.to();
+
+ if (move.isCycleEnd() && move.isCycleBegin()) {
+ // A fun consequence of aliased registers is you can have multiple
+ // cycles at once, and one can end exactly where another begins.
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ return;
+ }
+
+ if (move.isCycleEnd()) {
+ MOZ_ASSERT(inCycle_);
+ completeCycle(from, to, move.type(), move.cycleEndSlot());
+ MOZ_ASSERT(inCycle_ > 0);
+ inCycle_--;
+ return;
+ }
+
+ if (move.isCycleBegin()) {
+ breakCycle(from, to, move.endCycleType(), move.cycleBeginSlot());
+ inCycle_++;
+ }
+
+ switch (move.type()) {
+ case MoveOp::FLOAT32:
+ emitFloat32Move(from, to);
+ break;
+ case MoveOp::DOUBLE:
+ emitDoubleMove(from, to);
+ break;
+ case MoveOp::INT32:
+ emitInt32Move(from, to);
+ break;
+ case MoveOp::GENERAL:
+ emitMove(from, to);
+ break;
+ default:
+ MOZ_CRASH("Unexpected move type");
+ }
+}
+
+void MoveEmitterLOONG64::assertDone() { MOZ_ASSERT(inCycle_ == 0); }
+
+void MoveEmitterLOONG64::finish() {
+ assertDone();
+
+ masm.freeStack(masm.framePushed() - pushedAtStart_);
+}
diff --git a/js/src/jit/loong64/MoveEmitter-loong64.h b/js/src/jit/loong64/MoveEmitter-loong64.h
new file mode 100644
index 0000000000..1481c8f973
--- /dev/null
+++ b/js/src/jit/loong64/MoveEmitter-loong64.h
@@ -0,0 +1,76 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_MoveEmitter_loong64_h
+#define jit_loong64_MoveEmitter_loong64_h
+
+#include "jit/MacroAssembler.h"
+#include "jit/MoveResolver.h"
+
+namespace js {
+namespace jit {
+
+class MoveEmitterLOONG64 {
+ void emitDoubleMove(const MoveOperand& from, const MoveOperand& to);
+ void breakCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+ void completeCycle(const MoveOperand& from, const MoveOperand& to,
+ MoveOp::Type type, uint32_t slot);
+
+ protected:
+ uint32_t inCycle_;
+ MacroAssembler& masm;
+
+  // The value of framePushed() when move emission started.
+ uint32_t pushedAtStart_;
+
+ // These store stack offsets to spill locations, snapshotting
+ // codegen->framePushed_ at the time they were allocated. They are -1 if no
+ // stack space has been allocated for that particular spill.
+ int32_t pushedAtCycle_;
+ int32_t pushedAtSpill_;
+
+ // These are registers that are available for temporary use. They may be
+ // assigned InvalidReg. If no corresponding spill space has been assigned,
+ // then these registers do not need to be spilled.
+ Register spilledReg_;
+ FloatRegister spilledFloatReg_;
+
+ void assertDone();
+ Register tempReg();
+ FloatRegister tempFloatReg();
+ Address cycleSlot(uint32_t slot, uint32_t subslot = 0) const;
+ int32_t getAdjustedOffset(const MoveOperand& operand);
+ Address getAdjustedAddress(const MoveOperand& operand);
+
+ void emitMove(const MoveOperand& from, const MoveOperand& to);
+ void emitInt32Move(const MoveOperand& from, const MoveOperand& to);
+ void emitFloat32Move(const MoveOperand& from, const MoveOperand& to);
+ void emit(const MoveOp& move);
+
+ public:
+ MoveEmitterLOONG64(MacroAssembler& masm)
+ : inCycle_(0),
+ masm(masm),
+ pushedAtStart_(masm.framePushed()),
+ pushedAtCycle_(-1),
+ pushedAtSpill_(-1),
+ spilledReg_(InvalidReg),
+ spilledFloatReg_(InvalidFloatReg) {}
+
+ ~MoveEmitterLOONG64() { assertDone(); }
+
+ void emit(const MoveResolver& moves);
+ void finish();
+ void setScratchRegister(Register reg) {}
+};
+
+typedef MoveEmitterLOONG64 MoveEmitter;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_MoveEmitter_loong64_h */
diff --git a/js/src/jit/loong64/SharedICHelpers-loong64-inl.h b/js/src/jit/loong64/SharedICHelpers-loong64-inl.h
new file mode 100644
index 0000000000..33da43a13b
--- /dev/null
+++ b/js/src/jit/loong64/SharedICHelpers-loong64-inl.h
@@ -0,0 +1,84 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_SharedICHelpers_loong64_inl_h
+#define jit_loong64_SharedICHelpers_loong64_inl_h
+
+#include "jit/BaselineFrame.h"
+#include "jit/SharedICHelpers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace jit {
+
+inline void EmitBaselineTailCallVM(TrampolinePtr target, MacroAssembler& masm,
+ uint32_t argSize) {
+#ifdef DEBUG
+ Register scratch = R2.scratchReg();
+
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ // Store frame size without VMFunction arguments for debug assertions.
+ masm.subPtr(Imm32(argSize), scratch);
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+ masm.addPtr(Imm32(argSize), scratch);
+#endif
+
+ // Push frame descriptor and perform the tail call.
+ masm.pushFrameDescriptor(FrameType::BaselineJS);
+
+ MOZ_ASSERT(ICTailCallReg == ra);
+ // The return address will be pushed by the VM wrapper, for compatibility
+ // with direct calls. Refer to the top of generateVMWrapper().
+ // ICTailCallReg (ra) already contains the return address (as we keep
+ // it there through the stub calls).
+
+ masm.jump(target);
+}
+
+inline void EmitBaselineCallVM(TrampolinePtr target, MacroAssembler& masm) {
+ masm.pushFrameDescriptor(FrameType::BaselineStub);
+ masm.call(target);
+}
+
+inline void EmitBaselineEnterStubFrame(MacroAssembler& masm, Register scratch) {
+ MOZ_ASSERT(scratch != ICTailCallReg);
+
+#ifdef DEBUG
+ // Compute frame size.
+ masm.movePtr(FramePointer, scratch);
+ masm.subPtr(StackPointer, scratch);
+
+ Address frameSizeAddr(FramePointer,
+ BaselineFrame::reverseOffsetOfDebugFrameSize());
+ masm.store32(scratch, frameSizeAddr);
+#endif
+
+ // Note: when making changes here, don't forget to update
+ // BaselineStubFrame if needed.
+
+ // Push frame descriptor and return address.
+ masm.PushFrameDescriptor(FrameType::BaselineJS);
+ masm.Push(ICTailCallReg);
+
+ // Save old frame pointer, stack pointer and stub reg.
+ masm.Push(FramePointer);
+ masm.movePtr(StackPointer, FramePointer);
+ masm.Push(ICStubReg);
+
+ // Stack should remain aligned.
+ masm.assertStackAlignment(sizeof(Value), 0);
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_SharedICHelpers_loong64_inl_h */
diff --git a/js/src/jit/loong64/SharedICHelpers-loong64.h b/js/src/jit/loong64/SharedICHelpers-loong64.h
new file mode 100644
index 0000000000..55479f4734
--- /dev/null
+++ b/js/src/jit/loong64/SharedICHelpers-loong64.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_SharedICHelpers_loong64_h
+#define jit_loong64_SharedICHelpers_loong64_h
+
+#include "jit/BaselineIC.h"
+#include "jit/JitFrames.h"
+#include "jit/MacroAssembler.h"
+#include "jit/SharedICRegisters.h"
+
+namespace js {
+namespace jit {
+
+// Distance from sp to the top Value inside an IC stub (no return address on
+// the stack on LoongArch).
+static const size_t ICStackValueOffset = 0;
+
+struct BaselineStubFrame {
+ uintptr_t savedFrame;
+ uintptr_t savedStub;
+ uintptr_t returnAddress;
+ uintptr_t descriptor;
+};
+
+inline void EmitRestoreTailCallReg(MacroAssembler& masm) {
+  // No-op on LoongArch because the ra register always holds the return
+  // address.
+}
+
+inline void EmitRepushTailCallReg(MacroAssembler& masm) {
+  // No-op on LoongArch because the ra register always holds the return
+  // address.
+}
+
+inline void EmitCallIC(MacroAssembler& masm, CodeOffset* callOffset) {
+ // The stub pointer must already be in ICStubReg.
+ // Load stubcode pointer from the ICStub.
+ // R2 won't be active when we call ICs, so we can use it as scratch.
+ masm.loadPtr(Address(ICStubReg, ICStub::offsetOfStubCode()), R2.scratchReg());
+
+ // Call the stubcode via a direct jump-and-link
+ masm.call(R2.scratchReg());
+ *callOffset = CodeOffset(masm.currentOffset());
+}
+
+inline void EmitReturnFromIC(MacroAssembler& masm) { masm.branch(ra); }
+
+inline void EmitBaselineLeaveStubFrame(MacroAssembler& masm) {
+ masm.loadPtr(
+ Address(FramePointer, BaselineStubFrameLayout::ICStubOffsetFromFP),
+ ICStubReg);
+
+ masm.movePtr(FramePointer, StackPointer);
+ masm.Pop(FramePointer);
+
+ // Load the return address.
+ masm.Pop(ICTailCallReg);
+
+ // Discard the frame descriptor.
+ {
+ SecondScratchRegisterScope scratch2(masm);
+ masm.Pop(scratch2);
+ }
+}
+
+template <typename AddrType>
+inline void EmitPreBarrier(MacroAssembler& masm, const AddrType& addr,
+ MIRType type) {
+ // On LoongArch, $ra is clobbered by guardedCallPreBarrier. Save it first.
+ masm.push(ra);
+ masm.guardedCallPreBarrier(addr, type);
+ masm.pop(ra);
+}
+
+inline void EmitStubGuardFailure(MacroAssembler& masm) {
+ // Load next stub into ICStubReg
+ masm.loadPtr(Address(ICStubReg, ICCacheIRStub::offsetOfNext()), ICStubReg);
+
+ // Return address is already loaded, just jump to the next stubcode.
+ MOZ_ASSERT(ICTailCallReg == ra);
+ masm.jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
+}
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_SharedICHelpers_loong64_h */
diff --git a/js/src/jit/loong64/SharedICRegisters-loong64.h b/js/src/jit/loong64/SharedICRegisters-loong64.h
new file mode 100644
index 0000000000..d51336c1d2
--- /dev/null
+++ b/js/src/jit/loong64/SharedICRegisters-loong64.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_loong64_SharedICRegisters_loong64_h
+#define jit_loong64_SharedICRegisters_loong64_h
+
+#include "jit/loong64/Assembler-loong64.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+
+namespace js {
+namespace jit {
+
+// ValueOperands R0, R1, and R2.
+// R0 == JSReturnReg, and R2 uses a register that is not preserved across
+// calls. R1 uses a register that is preserved across calls.
+static constexpr ValueOperand R0(a2);
+static constexpr ValueOperand R1(s1);
+static constexpr ValueOperand R2(a0);
+
+// ICTailCallReg and ICStubReg
+// These use registers that are not preserved across calls.
+static constexpr Register ICTailCallReg = ra;
+static constexpr Register ICStubReg = t0;
+
+// Note that ICTailCallReg is actually just the link register.
+// In LoongArch code emission, we do not clobber ICTailCallReg, since the
+// return address for calls is kept there.
+
+// FloatReg0 must be equal to ReturnFloatReg.
+static constexpr FloatRegister FloatReg0 = f0;
+static constexpr FloatRegister FloatReg1 = f1;
+static constexpr FloatRegister FloatReg2 = f2;
+static constexpr FloatRegister FloatReg3 = f3;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_loong64_SharedICRegisters_loong64_h */
diff --git a/js/src/jit/loong64/Simulator-loong64.cpp b/js/src/jit/loong64/Simulator-loong64.cpp
new file mode 100644
index 0000000000..faf8bf326a
--- /dev/null
+++ b/js/src/jit/loong64/Simulator-loong64.cpp
@@ -0,0 +1,4721 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/loong64/Simulator-loong64.h"
+
+#include <float.h>
+#include <limits>
+
+#include "jit/AtomicOperations.h"
+#include "jit/loong64/Assembler-loong64.h"
+#include "js/Conversions.h"
+#include "threading/LockGuard.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#define I8(v) static_cast<int8_t>(v)
+#define I16(v) static_cast<int16_t>(v)
+#define U16(v) static_cast<uint16_t>(v)
+#define I32(v) static_cast<int32_t>(v)
+#define U32(v) static_cast<uint32_t>(v)
+#define I64(v) static_cast<int64_t>(v)
+#define U64(v) static_cast<uint64_t>(v)
+#define I128(v) static_cast<__int128_t>(v)
+#define U128(v) static_cast<__uint128_t>(v)
+
+#define I32_CHECK(v) \
+ ({ \
+ MOZ_ASSERT(I64(I32(v)) == I64(v)); \
+ I32((v)); \
+ })
+
+namespace js {
+namespace jit {
+
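+// Compute the high 64 bits of the full 128-bit product by splitting each
+// operand into 32-bit halves (schoolbook long multiplication, as in Hacker's
+// Delight's mulhs); MultiplyHighUnsigned below is the unsigned analogue.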
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) {
+ uint64_t u0, v0, w0;
+ uint64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+// Precondition: 0 <= shift < 32
+inline constexpr uint32_t RotateRight32(uint32_t value, uint32_t shift) {
+ return (value >> shift) | (value << ((32 - shift) & 31));
+}
+
+// Precondition: 0 <= shift < 32
+inline constexpr uint32_t RotateLeft32(uint32_t value, uint32_t shift) {
+ return (value << shift) | (value >> ((32 - shift) & 31));
+}
+
+// Precondition: 0 <= shift < 64
+inline constexpr uint64_t RotateRight64(uint64_t value, uint64_t shift) {
+ return (value >> shift) | (value << ((64 - shift) & 63));
+}
+
+// Precondition: 0 <= shift < 64
+inline constexpr uint64_t RotateLeft64(uint64_t value, uint64_t shift) {
+ return (value << shift) | (value >> ((64 - shift) & 63));
+}
+
+// A break instruction carrying MAX_BREAK_CODE, used to mark calls redirected
+// to native functions.
+static const Instr kCallRedirInstr = op_break | CODEMask;
+
+// -----------------------------------------------------------------------------
+// Various constants for LoongArch64 assembly.
+
+class SimInstruction {
+ public:
+ enum {
+ kInstrSize = 4,
+ // On LoongArch, PC cannot actually be directly accessed. We behave as if PC
+    // were always the address of the current instruction being executed.
+ kPCReadOffset = 0
+ };
+
+ // Get the raw instruction bits.
+ inline Instr instructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void setInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int bit(int nr) const { return (instructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int bits(int hi, int lo) const {
+ return (instructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+ }
+
+ // Instruction type.
+ enum Type {
+ kUnsupported = -1,
+ kOp6Type,
+ kOp7Type,
+ kOp8Type,
+ kOp10Type,
+ kOp11Type,
+ kOp12Type,
+ kOp14Type,
+ kOp15Type,
+ kOp16Type,
+ kOp17Type,
+ kOp22Type,
+ kOp24Type
+ };
+
+ // Get the encoding type of the instruction.
+ Type instructionType() const;
+
+ inline int rjValue() const { return bits(RJShift + RJBits - 1, RJShift); }
+
+ inline int rkValue() const { return bits(RKShift + RKBits - 1, RKShift); }
+
+ inline int rdValue() const { return bits(RDShift + RDBits - 1, RDShift); }
+
+ inline int sa2Value() const { return bits(SAShift + SA2Bits - 1, SAShift); }
+
+ inline int sa3Value() const { return bits(SAShift + SA3Bits - 1, SAShift); }
+
+ inline int lsbwValue() const {
+ return bits(LSBWShift + LSBWBits - 1, LSBWShift);
+ }
+
+ inline int msbwValue() const {
+ return bits(MSBWShift + MSBWBits - 1, MSBWShift);
+ }
+
+ inline int lsbdValue() const {
+ return bits(LSBDShift + LSBDBits - 1, LSBDShift);
+ }
+
+ inline int msbdValue() const {
+ return bits(MSBDShift + MSBDBits - 1, MSBDShift);
+ }
+
+ inline int fdValue() const { return bits(FDShift + FDBits - 1, FDShift); }
+
+ inline int fjValue() const { return bits(FJShift + FJBits - 1, FJShift); }
+
+ inline int fkValue() const { return bits(FKShift + FKBits - 1, FKShift); }
+
+ inline int faValue() const { return bits(FAShift + FABits - 1, FAShift); }
+
+ inline int cdValue() const { return bits(CDShift + CDBits - 1, CDShift); }
+
+ inline int cjValue() const { return bits(CJShift + CJBits - 1, CJShift); }
+
+ inline int caValue() const { return bits(CAShift + CABits - 1, CAShift); }
+
+ inline int condValue() const {
+ return bits(CONDShift + CONDBits - 1, CONDShift);
+ }
+
+ inline int imm5Value() const {
+ return bits(Imm5Shift + Imm5Bits - 1, Imm5Shift);
+ }
+
+ inline int imm6Value() const {
+ return bits(Imm6Shift + Imm6Bits - 1, Imm6Shift);
+ }
+
+ inline int imm12Value() const {
+ return bits(Imm12Shift + Imm12Bits - 1, Imm12Shift);
+ }
+
+ inline int imm14Value() const {
+ return bits(Imm14Shift + Imm14Bits - 1, Imm14Shift);
+ }
+
+ inline int imm16Value() const {
+ return bits(Imm16Shift + Imm16Bits - 1, Imm16Shift);
+ }
+
+ inline int imm20Value() const {
+ return bits(Imm20Shift + Imm20Bits - 1, Imm20Shift);
+ }
+
+ inline int32_t imm26Value() const {
+ return bits(Imm26Shift + Imm26Bits - 1, Imm26Shift);
+ }
+
+ // Say if the instruction is a debugger break/trap.
+ bool isTrap() const;
+
+ private:
+ SimInstruction() = delete;
+ SimInstruction(const SimInstruction& other) = delete;
+ void operator=(const SimInstruction& other) = delete;
+};
+
+bool SimInstruction::isTrap() const {
+  // Is this a break instruction (excluding the call-redirection marker)?
+ switch (bits(31, 15) << 15) {
+ case op_break:
+ return (instructionBits() != kCallRedirInstr) && (bits(15, 0) != 6);
+ default:
+ return false;
+  }
+}
+
+SimInstruction::Type SimInstruction::instructionType() const {
+ SimInstruction::Type kType = kUnsupported;
+
+ // Check for kOp6Type
+ switch (bits(31, 26) << 26) {
+ case op_beqz:
+ case op_bnez:
+ case op_bcz:
+ case op_jirl:
+ case op_b:
+ case op_bl:
+ case op_beq:
+ case op_bne:
+ case op_blt:
+ case op_bge:
+ case op_bltu:
+ case op_bgeu:
+ case op_addu16i_d:
+ kType = kOp6Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp7Type
+ switch (bits(31, 25) << 25) {
+ case op_lu12i_w:
+ case op_lu32i_d:
+ case op_pcaddi:
+ case op_pcalau12i:
+ case op_pcaddu12i:
+ case op_pcaddu18i:
+ kType = kOp7Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp8Type
+ switch (bits(31, 24) << 24) {
+ case op_ll_w:
+ case op_sc_w:
+ case op_ll_d:
+ case op_sc_d:
+ case op_ldptr_w:
+ case op_stptr_w:
+ case op_ldptr_d:
+ case op_stptr_d:
+ kType = kOp8Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp10Type
+ switch (bits(31, 22) << 22) {
+ case op_bstrins_d:
+ case op_bstrpick_d:
+ case op_slti:
+ case op_sltui:
+ case op_addi_w:
+ case op_addi_d:
+ case op_lu52i_d:
+ case op_andi:
+ case op_ori:
+ case op_xori:
+ case op_ld_b:
+ case op_ld_h:
+ case op_ld_w:
+ case op_ld_d:
+ case op_st_b:
+ case op_st_h:
+ case op_st_w:
+ case op_st_d:
+ case op_ld_bu:
+ case op_ld_hu:
+ case op_ld_wu:
+ case op_preld:
+ case op_fld_s:
+ case op_fst_s:
+ case op_fld_d:
+ case op_fst_d:
+ case op_bstr_w: // BSTRINS_W & BSTRPICK_W
+ kType = kOp10Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp11Type
+ switch (bits(31, 21) << 21) {
+ case op_bstr_w:
+ kType = kOp11Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp12Type
+ switch (bits(31, 20) << 20) {
+ case op_fmadd_s:
+ case op_fmadd_d:
+ case op_fmsub_s:
+ case op_fmsub_d:
+ case op_fnmadd_s:
+ case op_fnmadd_d:
+ case op_fnmsub_s:
+ case op_fnmsub_d:
+ case op_fcmp_cond_s:
+ case op_fcmp_cond_d:
+ kType = kOp12Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp14Type
+ switch (bits(31, 18) << 18) {
+ case op_bytepick_d:
+ case op_fsel:
+ kType = kOp14Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp15Type
+ switch (bits(31, 17) << 17) {
+ case op_bytepick_w:
+ case op_alsl_w:
+ case op_alsl_wu:
+ case op_alsl_d:
+ kType = kOp15Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp16Type
+ switch (bits(31, 16) << 16) {
+ case op_slli_d:
+ case op_srli_d:
+ case op_srai_d:
+ case op_rotri_d:
+ kType = kOp16Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp17Type
+ switch (bits(31, 15) << 15) {
+ case op_slli_w:
+ case op_srli_w:
+ case op_srai_w:
+ case op_rotri_w:
+ case op_add_w:
+ case op_add_d:
+ case op_sub_w:
+ case op_sub_d:
+ case op_slt:
+ case op_sltu:
+ case op_maskeqz:
+ case op_masknez:
+ case op_nor:
+ case op_and:
+ case op_or:
+ case op_xor:
+ case op_orn:
+ case op_andn:
+ case op_sll_w:
+ case op_srl_w:
+ case op_sra_w:
+ case op_sll_d:
+ case op_srl_d:
+ case op_sra_d:
+ case op_rotr_w:
+ case op_rotr_d:
+ case op_mul_w:
+ case op_mul_d:
+ case op_mulh_d:
+ case op_mulh_du:
+ case op_mulh_w:
+ case op_mulh_wu:
+ case op_mulw_d_w:
+ case op_mulw_d_wu:
+ case op_div_w:
+ case op_mod_w:
+ case op_div_wu:
+ case op_mod_wu:
+ case op_div_d:
+ case op_mod_d:
+ case op_div_du:
+ case op_mod_du:
+ case op_break:
+ case op_fadd_s:
+ case op_fadd_d:
+ case op_fsub_s:
+ case op_fsub_d:
+ case op_fmul_s:
+ case op_fmul_d:
+ case op_fdiv_s:
+ case op_fdiv_d:
+ case op_fmax_s:
+ case op_fmax_d:
+ case op_fmin_s:
+ case op_fmin_d:
+ case op_fmaxa_s:
+ case op_fmaxa_d:
+ case op_fmina_s:
+ case op_fmina_d:
+ case op_fcopysign_s:
+ case op_fcopysign_d:
+ case op_ldx_b:
+ case op_ldx_h:
+ case op_ldx_w:
+ case op_ldx_d:
+ case op_stx_b:
+ case op_stx_h:
+ case op_stx_w:
+ case op_stx_d:
+ case op_ldx_bu:
+ case op_ldx_hu:
+ case op_ldx_wu:
+ case op_fldx_s:
+ case op_fldx_d:
+ case op_fstx_s:
+ case op_fstx_d:
+ case op_amswap_w:
+ case op_amswap_d:
+ case op_amadd_w:
+ case op_amadd_d:
+ case op_amand_w:
+ case op_amand_d:
+ case op_amor_w:
+ case op_amor_d:
+ case op_amxor_w:
+ case op_amxor_d:
+ case op_ammax_w:
+ case op_ammax_d:
+ case op_ammin_w:
+ case op_ammin_d:
+ case op_ammax_wu:
+ case op_ammax_du:
+ case op_ammin_wu:
+ case op_ammin_du:
+ case op_amswap_db_w:
+ case op_amswap_db_d:
+ case op_amadd_db_w:
+ case op_amadd_db_d:
+ case op_amand_db_w:
+ case op_amand_db_d:
+ case op_amor_db_w:
+ case op_amor_db_d:
+ case op_amxor_db_w:
+ case op_amxor_db_d:
+ case op_ammax_db_w:
+ case op_ammax_db_d:
+ case op_ammin_db_w:
+ case op_ammin_db_d:
+ case op_ammax_db_wu:
+ case op_ammax_db_du:
+ case op_ammin_db_wu:
+ case op_ammin_db_du:
+ case op_dbar:
+ case op_ibar:
+ kType = kOp17Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp22Type
+ switch (bits(31, 10) << 10) {
+ case op_clo_w:
+ case op_clz_w:
+ case op_cto_w:
+ case op_ctz_w:
+ case op_clo_d:
+ case op_clz_d:
+ case op_cto_d:
+ case op_ctz_d:
+ case op_revb_2h:
+ case op_revb_4h:
+ case op_revb_2w:
+ case op_revb_d:
+ case op_revh_2w:
+ case op_revh_d:
+ case op_bitrev_4b:
+ case op_bitrev_8b:
+ case op_bitrev_w:
+ case op_bitrev_d:
+ case op_ext_w_h:
+ case op_ext_w_b:
+ case op_fabs_s:
+ case op_fabs_d:
+ case op_fneg_s:
+ case op_fneg_d:
+ case op_fsqrt_s:
+ case op_fsqrt_d:
+ case op_fmov_s:
+ case op_fmov_d:
+ case op_movgr2fr_w:
+ case op_movgr2fr_d:
+ case op_movgr2frh_w:
+ case op_movfr2gr_s:
+ case op_movfr2gr_d:
+ case op_movfrh2gr_s:
+ case op_movfcsr2gr:
+ case op_movfr2cf:
+ case op_movgr2cf:
+ case op_fcvt_s_d:
+ case op_fcvt_d_s:
+ case op_ftintrm_w_s:
+ case op_ftintrm_w_d:
+ case op_ftintrm_l_s:
+ case op_ftintrm_l_d:
+ case op_ftintrp_w_s:
+ case op_ftintrp_w_d:
+ case op_ftintrp_l_s:
+ case op_ftintrp_l_d:
+ case op_ftintrz_w_s:
+ case op_ftintrz_w_d:
+ case op_ftintrz_l_s:
+ case op_ftintrz_l_d:
+ case op_ftintrne_w_s:
+ case op_ftintrne_w_d:
+ case op_ftintrne_l_s:
+ case op_ftintrne_l_d:
+ case op_ftint_w_s:
+ case op_ftint_w_d:
+ case op_ftint_l_s:
+ case op_ftint_l_d:
+ case op_ffint_s_w:
+ case op_ffint_s_l:
+ case op_ffint_d_w:
+ case op_ffint_d_l:
+ case op_frint_s:
+ case op_frint_d:
+ kType = kOp22Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp24Type
+ switch (bits(31, 8) << 8) {
+ case op_movcf2fr:
+ case op_movcf2gr:
+ kType = kOp24Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ return kType;
+}
+
+// C/C++ argument slots size.
+const int kCArgSlotCount = 0;
+const int kCArgsSlotsSize = kCArgSlotCount * sizeof(uintptr_t);
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* validityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* cachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+// Protects the icache() and redirection() properties of the
+// Simulator.
+class AutoLockSimulatorCache : public LockGuard<Mutex> {
+ using Base = LockGuard<Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->cacheLock_) {}
+};
+
+mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ SimulatorProcess::ICacheCheckingDisableCount(
+ 1); // Checking is disabled by default.
+SimulatorProcess* SimulatorProcess::singleton_ = nullptr;
+
+int64_t Simulator::StopSimAt = -1;
+
+Simulator* Simulator::Create() {
+ auto sim = MakeUnique<Simulator>();
+ if (!sim) {
+ return nullptr;
+ }
+
+ if (!sim->init()) {
+ return nullptr;
+ }
+
+ int64_t stopAt;
+ char* stopAtStr = getenv("LOONG64_SIM_STOP_AT");
+ if (stopAtStr && sscanf(stopAtStr, "%" PRIi64, &stopAt) == 1) {
+ fprintf(stderr, "\nStopping simulation at icount %" PRIi64 "\n", stopAt);
+ Simulator::StopSimAt = stopAt;
+ }
+
+ return sim.release();
+}
+
+void Simulator::Destroy(Simulator* sim) { js_delete(sim); }
+
+// The loong64Debugger class is used by the simulator while debugging simulated
+// code.
+class loong64Debugger {
+ public:
+ explicit loong64Debugger(Simulator* sim) : sim_(sim) {}
+
+ void stop(SimInstruction* instr);
+ void debug();
+ // Print all registers with a nice formatting.
+ void printAllRegs();
+ void printAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0x7fff to easily recognize it.
+ static const Instr kBreakpointInstr = op_break | (0x7fff & CODEMask);
+ static const Instr kNopInstr = 0x0;
+
+ Simulator* sim_;
+
+ int64_t getRegisterValue(int regnum);
+ int64_t getFPURegisterValueLong(int regnum);
+ float getFPURegisterValueFloat(int regnum);
+ double getFPURegisterValueDouble(int regnum);
+ bool getValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool setBreakpoint(SimInstruction* breakpc);
+ bool deleteBreakpoint(SimInstruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void undoBreakpoints();
+ void redoBreakpoints();
+};
+
+static void UNIMPLEMENTED() {
+ printf("UNIMPLEMENTED instruction.\n");
+ MOZ_CRASH();
+}
+static void UNREACHABLE() {
+ printf("UNREACHABLE instruction.\n");
+ MOZ_CRASH();
+}
+static void UNSUPPORTED() {
+ printf("Unsupported instruction.\n");
+ MOZ_CRASH();
+}
+
+void loong64Debugger::stop(SimInstruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->bits(25, 6);
+ // Retrieve the encoded address, which comes just after this stop.
+ char* msg =
+ *reinterpret_cast<char**>(sim_->get_pc() + SimInstruction::kInstrSize);
+ // Update this stop description.
+ if (!sim_->watchedStops_[code].desc_) {
+ sim_->watchedStops_[code].desc_ = msg;
+ }
+ // Print the stop message and code if it is not the default code.
+ if (code != kMaxStopCode) {
+ printf("Simulator hit stop %u: %s\n", code, msg);
+ } else {
+ printf("Simulator hit %s\n", msg);
+ }
+ sim_->set_pc(sim_->get_pc() + 2 * SimInstruction::kInstrSize);
+ debug();
+}
+
+int64_t loong64Debugger::getRegisterValue(int regnum) {
+ if (regnum == kPCRegister) {
+ return sim_->get_pc();
+ }
+ return sim_->getRegister(regnum);
+}
+
+int64_t loong64Debugger::getFPURegisterValueLong(int regnum) {
+ return sim_->getFpuRegister(regnum);
+}
+
+float loong64Debugger::getFPURegisterValueFloat(int regnum) {
+ return sim_->getFpuRegisterFloat(regnum);
+}
+
+double loong64Debugger::getFPURegisterValueDouble(int regnum) {
+ return sim_->getFpuRegisterDouble(regnum);
+}
+
+bool loong64Debugger::getValue(const char* desc, int64_t* value) {
+ Register reg = Register::FromName(desc);
+ if (reg != InvalidReg) {
+ *value = getRegisterValue(reg.code());
+ return true;
+ }
+
+ if (strncmp(desc, "0x", 2) == 0) {
+ return sscanf(desc + 2, "%lx", reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+ return sscanf(desc, "%lu", reinterpret_cast<uint64_t*>(value)) == 1;
+}
+
+bool loong64Debugger::setBreakpoint(SimInstruction* breakpc) {
+  // Check if a breakpoint can be set. If not, return without side effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->instructionBits();
+  // We do not write the breakpoint instruction into the code here; it will
+  // be set when the debugger shell continues.
+ return true;
+}
+
+bool loong64Debugger::deleteBreakpoint(SimInstruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void loong64Debugger::undoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(sim_->break_instr_);
+ }
+}
+
+void loong64Debugger::redoBreakpoints() {
+ if (sim_->break_pc_) {
+ sim_->break_pc_->setInstructionBits(kBreakpointInstr);
+ }
+}
+
+void loong64Debugger::printAllRegs() {
+ int64_t value;
+ for (uint32_t i = 0; i < Registers::Total; i++) {
+ value = getRegisterValue(i);
+ printf("%3s: 0x%016" PRIx64 " %20" PRIi64 " ", Registers::GetName(i),
+ value, value);
+
+ if (i % 2) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+
+ value = getRegisterValue(Simulator::pc);
+ printf(" pc: 0x%016" PRIx64 "\n", value);
+}
+
+void loong64Debugger::printAllRegsIncludingFPU() {
+ printAllRegs();
+
+ printf("\n\n");
+ // f0, f1, f2, ... f31.
+ for (uint32_t i = 0; i < FloatRegisters::TotalPhys; i++) {
+ printf("%3s: 0x%016" PRIi64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(i), getFPURegisterValueLong(i),
+ getFPURegisterValueFloat(i), getFPURegisterValueDouble(i));
+ }
+}
+
+static char* ReadLine(const char* prompt) {
+ UniqueChars result;
+ char lineBuf[256];
+ int offset = 0;
+ bool keepGoing = true;
+ fprintf(stdout, "%s", prompt);
+ fflush(stdout);
+ while (keepGoing) {
+ if (fgets(lineBuf, sizeof(lineBuf), stdin) == nullptr) {
+ // fgets got an error. Just give up.
+ return nullptr;
+ }
+ int len = strlen(lineBuf);
+ if (len > 0 && lineBuf[len - 1] == '\n') {
+      // We read a full line ending in a newline, so we are done. This
+      // will exit the loop after copying this buffer into the result.
+ keepGoing = false;
+ }
+ if (!result) {
+ // Allocate the initial result and make room for the terminating '\0'
+ result.reset(js_pod_malloc<char>(len + 1));
+ if (!result) {
+ return nullptr;
+ }
+ } else {
+ // Allocate a new result with enough room for the new addition.
+ int new_len = offset + len + 1;
+ char* new_result = js_pod_malloc<char>(new_len);
+ if (!new_result) {
+ return nullptr;
+ }
+ // Copy the existing input into the new array and set the new
+ // array as the result.
+ memcpy(new_result, result.get(), offset * sizeof(char));
+ result.reset(new_result);
+ }
+ // Copy the newly read line into the result.
+ memcpy(result.get() + offset, lineBuf, len * sizeof(char));
+ offset += len;
+ }
+
+ MOZ_ASSERT(result);
+ result[offset] = '\0';
+ return result.release();
+}
+
+static void DisassembleInstruction(uint64_t pc) {
+ printf("Not supported on loongarch64 yet\n");
+}
+
+void loong64Debugger::debug() {
+ intptr_t lastPC = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ undoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (lastPC != sim_->get_pc()) {
+ DisassembleInstruction(sim_->get_pc());
+      printf("  0x%016" PRIx64 "  \n", sim_->get_pc());
+ lastPC = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->lastDebuggerInput();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+        // Ownership is transferred to sim_.
+ sim_->setLastDebuggerInput(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = sscanf(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(sim_->get_pc());
+ if (!instr->isTrap()) {
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ printf("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + SimInstruction::kInstrSize);
+ }
+ sim_->icount_++;
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->instructionDecode(
+ reinterpret_cast<SimInstruction*>(sim_->get_pc()));
+ sim_->icount_++;
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ if (strcmp(arg1, "all") == 0) {
+ printAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ printAllRegsIncludingFPU();
+ } else {
+ Register reg = Register::FromName(arg1);
+ FloatRegisters::Code fReg = FloatRegisters::FromName(arg1);
+ if (reg != InvalidReg) {
+ value = getRegisterValue(reg.code());
+              printf("%s: 0x%016" PRIx64 " %20" PRIi64 " \n", arg1, value,
+                     value);
+ } else if (fReg != FloatRegisters::Invalid) {
+              printf("%3s: 0x%016" PRIx64 "\tflt: %-8.4g\tdbl: %-16.4g\n",
+ FloatRegisters::GetName(fReg),
+ getFPURegisterValueLong(fReg),
+ getFPURegisterValueFloat(fReg),
+ getFPURegisterValueDouble(fReg));
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+          printf("print <register> or print <fpu register>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->getRegister(Simulator::sp));
+ } else { // Command "mem".
+ int64_t value;
+ if (!getValue(arg1, &value)) {
+ printf("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!getValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ printf(" %p: 0x%016" PRIx64 " %20" PRIi64, cur, *cur, *cur);
+ printf("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ uint8_t* cur = nullptr;
+ uint8_t* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ end = cur + (10 * SimInstruction::kInstrSize);
+ } else if (argc == 2) {
+ Register reg = Register::FromName(arg1);
+ if (reg != InvalidReg || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * SimInstruction::kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ cur = reinterpret_cast<uint8_t*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * SimInstruction::kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (getValue(arg1, &value1) && getValue(arg2, &value2)) {
+ cur = reinterpret_cast<uint8_t*>(value1);
+ end = cur + (value2 * SimInstruction::kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ DisassembleInstruction(uint64_t(cur));
+ cur += SimInstruction::kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ printf("relinquishing control to gdb\n");
+ asm("int $3");
+ printf("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (getValue(arg1, &value)) {
+ if (!setBreakpoint(reinterpret_cast<SimInstruction*>(value))) {
+ printf("setting breakpoint failed\n");
+ }
+ } else {
+ printf("%s unrecognized\n", arg1);
+ }
+ } else {
+ printf("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!deleteBreakpoint(nullptr)) {
+ printf("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+        printf("No flags on LOONG64!\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * SimInstruction::kInstrSize;
+ SimInstruction* stop_instr = reinterpret_cast<SimInstruction*>(stop_pc);
+ SimInstruction* msg_address = reinterpret_cast<SimInstruction*>(
+ stop_pc + SimInstruction::kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->isStopInstruction(stop_instr)) {
+ stop_instr->setInstructionBits(kNopInstr);
+ msg_address->setInstructionBits(kNopInstr);
+ } else {
+ printf("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ printf("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->printStopInfo(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->printStopInfo(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->enableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->enableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->disableStop(i);
+ }
+ } else if (getValue(arg2, &value)) {
+ sim_->disableStop(value);
+ } else {
+ printf("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ printf("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ printf("cont\n");
+ printf(" continue execution (alias 'c')\n");
+ printf("stepi\n");
+ printf(" step one instruction (alias 'si')\n");
+ printf("print <register>\n");
+ printf(" print register content (alias 'p')\n");
+ printf(" use register name 'all' to print all registers\n");
+ printf("stack [<words>]\n");
+        printf("  dump stack content (default 10 words)\n");
+ printf("mem <address> [<words>]\n");
+        printf("  dump memory content (default 10 words)\n");
+ printf("flags\n");
+ printf(" print flags\n");
+ printf("disasm [<instructions>]\n");
+ printf("disasm [<address/register>]\n");
+ printf("disasm [[<address/register>] <instructions>]\n");
+ printf(" disassemble code, default is 10 instructions\n");
+ printf(" from pc (alias 'di')\n");
+ printf("gdb\n");
+ printf(" enter gdb\n");
+ printf("break <address>\n");
+ printf(" set a break point on the address\n");
+ printf("del\n");
+ printf(" delete the breakpoint\n");
+ printf("stop feature:\n");
+ printf(" Description:\n");
+ printf(" Stops are debug instructions inserted by\n");
+ printf(" the Assembler::stop() function.\n");
+ printf(" When hitting a stop, the Simulator will\n");
+        printf("    stop and give control to the Debugger.\n");
+ printf(" All stop codes are watched:\n");
+ printf(" - They can be enabled / disabled: the Simulator\n");
+ printf(" will / won't stop when hitting them.\n");
+        printf("    - The Simulator keeps track of how many times they\n");
+        printf("      are hit. (See the info command.) Going over a\n");
+        printf("      disabled stop still increases its counter.\n");
+ printf(" Commands:\n");
+        printf("    stop info all/<code> : print info about number <code>\n");
+ printf(" or all stop(s).\n");
+ printf(" stop enable/disable all/<code> : enables / disables\n");
+ printf(" all or number <code> stop(s)\n");
+ printf(" stop unstop\n");
+ printf(" ignore the stop instruction at the current location\n");
+ printf(" from now on\n");
+ } else {
+ printf("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ redoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
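+// Returns true if the range [start, start + size] lies within a single
+// simulated I-cache page.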
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::setLastDebuggerInput(char* input) {
+ js_free(lastDebuggerInput_);
+ lastDebuggerInput_ = input;
+}
+
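+// Look up the CachePage mapped at |page|, allocating and registering a fresh
+// one on first access (crashing on OOM).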
+static CachePage* GetCachePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* page) {
+ SimulatorProcess::ICacheMap::AddPtr p = i_cache.lookupForAdd(page);
+ if (p) {
+ return p->value();
+ }
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ CachePage* new_page = js_new<CachePage>();
+ if (!new_page || !i_cache.add(p, page, new_page)) {
+ oomUnsafe.crash("Simulator CachePage");
+ }
+ return new_page;
+}
+
+// Flush from start up to and not including start + size.
+static void FlushOnePageLocked(SimulatorProcess::ICacheMap& i_cache,
+ intptr_t start, int size) {
+ MOZ_ASSERT(size <= CachePage::kPageSize);
+ MOZ_ASSERT(AllOnOnePage(start, size - 1));
+ MOZ_ASSERT((start & CachePage::kLineMask) == 0);
+ MOZ_ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(i_cache, page);
+ char* valid_bytemap = cache_page->validityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
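+// Invalidate the validity bytes of every cache line overlapping
+// [start_addr, start_addr + size), splitting the range at page boundaries.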
+static void FlushICacheLocked(SimulatorProcess::ICacheMap& i_cache,
+ void* start_addr, size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
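+  // Round the size up to an integral number of cache lines.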
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePageLocked(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ MOZ_ASSERT((start & CachePage::kPageMask) == 0);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePageLocked(i_cache, start, size);
+ }
+}
+
+/* static */
+void SimulatorProcess::checkICacheLocked(SimInstruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePageLocked(icache(), page);
+ char* cache_valid_byte = cache_page->validityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->cachedData(offset & ~CachePage::kLineMask);
+
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ mozilla::DebugOnly<int> cmpret =
+ memcmp(reinterpret_cast<void*>(instr), cache_page->cachedData(offset),
+ SimInstruction::kInstrSize);
+ MOZ_ASSERT(cmpret == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+HashNumber SimulatorProcess::ICacheHasher::hash(const Lookup& l) {
+ return U32(reinterpret_cast<uintptr_t>(l)) >> 2;
+}
+
+bool SimulatorProcess::ICacheHasher::match(const Key& k, const Lookup& l) {
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(k) & CachePage::kPageMask) == 0);
+ MOZ_ASSERT((reinterpret_cast<intptr_t>(l) & CachePage::kPageMask) == 0);
+ return k == l;
+}
+
+/* static */
+void SimulatorProcess::FlushICache(void* start_addr, size_t size) {
+ if (!ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ js::jit::FlushICacheLocked(icache(), start_addr, size);
+ }
+}
+
+Simulator::Simulator() {
+ // Set up simulator support first. Some of this information is needed to
+ // setup the architecture state.
+
+ // Note, allocation and anything that depends on allocated memory is
+ // deferred until init(), in order to handle OOM properly.
+
+ stack_ = nullptr;
+ stackLimit_ = 0;
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < Register::kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < Simulator::FPURegister::kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+
+ for (int i = 0; i < kNumCFRegisters; i++) {
+ CFregisters_[i] = 0;
+ }
+
+ FCSR_ = 0;
+ LLBit_ = false;
+ LLAddr_ = 0;
+ lastLLValue_ = 0;
+
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
+
+ lastDebuggerInput_ = nullptr;
+}
+
+bool Simulator::init() {
+ // Allocate 2MB for the stack. Note that we will only use 1MB, see below.
+ static const size_t stackSize = 2 * 1024 * 1024;
+ stack_ = js_pod_malloc<char>(stackSize);
+ if (!stack_) {
+ return false;
+ }
+
+ // Leave a safety margin of 1MB to prevent overrunning the stack when
+ // pushing values (total stack size is 2MB).
+ stackLimit_ = reinterpret_cast<uintptr_t>(stack_) + 1024 * 1024;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stackSize - 64;
+
+ return true;
+}
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator. The external reference will be a function compiled for the
+// host architecture. We need to call that function instead of trying to
+// execute it with the simulator. We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator. We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ friend class SimulatorProcess;
+
+ // sim's lock must already be held.
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ swiInstruction_(kCallRedirInstr),
+ type_(type),
+ next_(nullptr) {
+ next_ = SimulatorProcess::redirection();
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ FlushICacheLocked(SimulatorProcess::icache(), addressOfSwiInstruction(),
+ SimInstruction::kInstrSize);
+ }
+ SimulatorProcess::setRedirection(this);
+ }
+
+ public:
+ void* addressOfSwiInstruction() { return &swiInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ AutoLockSimulatorCache als;
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ MOZ_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir) {
+ oomUnsafe.crash("Simulator redirection");
+ }
+ new (redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
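+  // Recover the owning Redirection from the address of its embedded swi
+  // instruction, using the field's offset within the object.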
+ static Redirection* FromSwiInstruction(SimInstruction* swiInstruction) {
+ uint8_t* addrOfSwi = reinterpret_cast<uint8_t*>(swiInstruction);
+ uint8_t* addrOfRedirection =
+ addrOfSwi - offsetof(Redirection, swiInstruction_);
+ return reinterpret_cast<Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t swiInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+Simulator::~Simulator() { js_free(stack_); }
+
+SimulatorProcess::SimulatorProcess()
+ : cacheLock_(mutexid::SimulatorCacheLock), redirection_(nullptr) {
+ if (getenv("LOONG64_SIM_ICACHE_CHECKS")) {
+ ICacheCheckingDisableCount = 0;
+ }
+}
+
+SimulatorProcess::~SimulatorProcess() {
+ Redirection* r = redirection_;
+ while (r) {
+ Redirection* next = r->next_;
+ js_delete(r);
+ r = next;
+ }
+}
+
+/* static */
+void* Simulator::RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSwiInstruction();
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::Current() {
+ JSContext* cx = TlsContext.get();
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
+ return cx->simulator();
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::setRegister(int reg, int64_t value) {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::setFpuRegister(int fpureg, int64_t value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::setFpuRegisterHiWord(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ int32_t* phiword;
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+
+ *phiword = value;
+}
+
+void Simulator::setFpuRegisterWord(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ int32_t* pword;
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+
+ *pword = value;
+}
+
+void Simulator::setFpuRegisterWordInvalidResult(float original, float rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegisterWord(fpureg, 0);
+ } else if (rounded > max_int32) {
+    setFpuRegisterWord(fpureg, kFPUInvalidResult);
+  } else if (rounded < min_int32) {
+    setFpuRegisterWord(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterWordInvalidResult(double original, double rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegisterWord(fpureg, 0);
+ } else if (rounded > max_int32) {
+ setFpuRegisterWord(fpureg, kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ setFpuRegisterWord(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult(float original, float rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded > max_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult(double original, double rounded,
+ int fpureg) {
+ double max_int32 = static_cast<double>(INT32_MAX);
+ double min_int32 = static_cast<double>(INT32_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded > max_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ setFpuRegister(fpureg, kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult64(float original, float rounded,
+ int fpureg) {
+  // The value INT64_MAX (2^63-1) cannot be represented exactly as a double,
+  // so max_int64 holds the closest representable value, which is 2^63.
+ double max_int64 = static_cast<double>(INT64_MAX);
+ double min_int64 = static_cast<double>(INT64_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded >= max_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterInvalidResult64(double original, double rounded,
+ int fpureg) {
+  // The value INT64_MAX (2^63-1) cannot be represented exactly as a double,
+  // so max_int64 holds the closest representable value, which is 2^63.
+ double max_int64 = static_cast<double>(INT64_MAX);
+ double min_int64 = static_cast<double>(INT64_MIN);
+
+ if (std::isnan(original)) {
+ setFpuRegister(fpureg, 0);
+ } else if (rounded >= max_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ setFpuRegister(fpureg, kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::setFpuRegisterFloat(int fpureg, float value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setFpuRegisterDouble(int fpureg, double value) {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::setCFRegister(int cfreg, bool value) {
+ MOZ_ASSERT((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ CFregisters_[cfreg] = value;
+}
+
+bool Simulator::getCFRegister(int cfreg) const {
+ MOZ_ASSERT((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ return CFregisters_[cfreg];
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::getRegister(int reg) const {
+ MOZ_ASSERT((reg >= 0) && (reg < Register::kNumSimuRegisters));
+ if (reg == 0) {
+ return 0;
+ }
+ return registers_[reg] + ((reg == pc) ? SimInstruction::kPCReadOffset : 0);
+}
+
+int64_t Simulator::getFpuRegister(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::getFpuRegisterWord(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t Simulator::getFpuRegisterSignedWord(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg]);
+}
+
+int32_t Simulator::getFpuRegisterHiWord(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *((mozilla::BitwiseCast<int32_t*>(&FPUregisters_[fpureg])) + 1);
+}
+
+float Simulator::getFpuRegisterFloat(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<float*>(&FPUregisters_[fpureg]);
+}
+
+double Simulator::getFpuRegisterDouble(int fpureg) const {
+ MOZ_ASSERT((fpureg >= 0) &&
+ (fpureg < Simulator::FPURegister::kNumFPURegisters));
+ return *mozilla::BitwiseCast<double*>(&FPUregisters_[fpureg]);
+}
+
+void Simulator::setCallResultDouble(double result) {
+ setFpuRegisterDouble(f0, result);
+}
+
+void Simulator::setCallResultFloat(float result) {
+ setFpuRegisterFloat(f0, result);
+}
+
+void Simulator::setCallResult(int64_t res) { setRegister(a0, res); }
+
+void Simulator::setCallResult(__int128_t res) {
+ setRegister(a0, I64(res));
+ setRegister(a1, I64(res >> 64));
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::setFCSRBit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::testFCSRBit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+unsigned int Simulator::getFCSRRoundingMode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+template <typename T>
+bool Simulator::setFCSRRoundError(double original, double rounded) {
+ bool ret = false;
+
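+  // Clear the per-operation cause bits up front; the corresponding flag bits
+  // are sticky and are only ever set below.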
+ setFCSRBit(kFCSRInexactCauseBit, false);
+ setFCSRBit(kFCSRUnderflowCauseBit, false);
+ setFCSRBit(kFCSROverflowCauseBit, false);
+ setFCSRBit(kFCSRInvalidOpCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ setFCSRBit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ setFCSRBit(kFCSRUnderflowFlagBit, true);
+ setFCSRBit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if ((long double)rounded > (long double)std::numeric_limits<T>::max() ||
+ (long double)rounded < (long double)std::numeric_limits<T>::min()) {
+ setFCSRBit(kFCSROverflowFlagBit, true);
+ setFCSRBit(kFCSROverflowCauseBit, true);
+    // The reference manual is not entirely clear, but this appears to be
+    // required:
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ setFCSRBit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// For cvt instructions only
+template <typename T>
+void Simulator::roundAccordingToFCSR(T toRound, T* rounded,
+ int32_t* rounded_int) {
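+  // FCSR bits 9:8 hold the current rounding mode.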
+ switch ((FCSR_ >> 8) & 3) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
+
+template <typename T>
+void Simulator::round64AccordingToFCSR(T toRound, T* rounded,
+ int64_t* rounded_int) {
+ switch ((FCSR_ >> 8) & 3) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+JS::ProfilingFrameIterator::RegisterState Simulator::registerState() {
+ wasm::RegisterState state;
+ state.pc = (void*)get_pc();
+ state.fp = (void*)getRegister(fp);
+ state.sp = (void*)getRegister(sp);
+ state.lr = (void*)getRegister(ra);
+ return state;
+}
+
+uint8_t Simulator::readBU(uint64_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return 0xff;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ return *ptr;
+}
+
+int8_t Simulator::readB(uint64_t addr) {
+ if (handleWasmSegFault(addr, 1)) {
+ return -1;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeB(uint64_t addr, uint8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::writeB(uint64_t addr, int8_t value) {
+ if (handleWasmSegFault(addr, 1)) {
+ return;
+ }
+
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+uint16_t Simulator::readHU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return 0xffff;
+ }
+
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ return *ptr;
+}
+
+int16_t Simulator::readH(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return -1;
+ }
+
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeH(uint64_t addr, uint16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+void Simulator::writeH(uint64_t addr, int16_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 2)) {
+ return;
+ }
+
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+uint32_t Simulator::readWU(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ return *ptr;
+}
+
+int32_t Simulator::readW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeW(uint64_t addr, uint32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+void Simulator::writeW(uint64_t addr, int32_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 4)) {
+ return;
+ }
+
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+int64_t Simulator::readDW(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeDW(uint64_t addr, int64_t value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+double Simulator::readD(uint64_t addr, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return NAN;
+ }
+
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+}
+
+void Simulator::writeD(uint64_t addr, double value, SimInstruction* instr) {
+ if (handleWasmSegFault(addr, 8)) {
+ return;
+ }
+
+ double* ptr = reinterpret_cast<double*>(addr);
+ LLBit_ = false;
+ *ptr = value;
+ return;
+}
+
+int Simulator::loadLinkedW(uint64_t addr, SimInstruction* instr) {
+ if ((addr & 3) == 0) {
+ if (handleWasmSegFault(addr, 4)) {
+ return -1;
+ }
+
+ volatile int32_t* ptr = reinterpret_cast<volatile int32_t*>(addr);
+ int32_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+  printf("Unaligned LL at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalW(uint64_t addr, int value,
+ SimInstruction* instr) {
+  // Correct behavior in this case, as defined by the architecture, is to
+  // just return 0, but there is no point in allowing that: it is almost
+  // certainly an indicator of a bug.
+ if (addr != LLAddr_) {
+    printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+           ", expected: 0x%016" PRIx64 "\n",
+           addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & 3) == 0) {
+ SharedMem<int32_t*> ptr =
+ SharedMem<int32_t*>::shared(reinterpret_cast<int32_t*>(addr));
+
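+    // The reservation was lost (any intervening write clears LLBit_), so the
+    // store-conditional must fail.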
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int32_t expected = int32_t(lastLLValue_);
+ int32_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int32_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int64_t Simulator::loadLinkedD(uint64_t addr, SimInstruction* instr) {
+ if ((addr & kPointerAlignmentMask) == 0) {
+ if (handleWasmSegFault(addr, 8)) {
+ return -1;
+ }
+
+ volatile int64_t* ptr = reinterpret_cast<volatile int64_t*>(addr);
+ int64_t value = *ptr;
+ lastLLValue_ = value;
+ LLAddr_ = addr;
+ // Note that any memory write or "external" interrupt should reset this
+ // value to false.
+ LLBit_ = true;
+ return value;
+ }
+  printf("Unaligned LL at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+int Simulator::storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr) {
+  // Correct behavior in this case, as defined by the architecture, is to
+  // just return 0, but there is no point in allowing that: it is almost
+  // certainly an indicator of a bug.
+ if (addr != LLAddr_) {
+    printf("SC to bad address: 0x%016" PRIx64 ", pc=0x%016" PRIxPTR
+           ", expected: 0x%016" PRIx64 "\n",
+           addr, reinterpret_cast<intptr_t>(instr), LLAddr_);
+ MOZ_CRASH();
+ }
+
+ if ((addr & kPointerAlignmentMask) == 0) {
+ SharedMem<int64_t*> ptr =
+ SharedMem<int64_t*>::shared(reinterpret_cast<int64_t*>(addr));
+
+ if (!LLBit_) {
+ return 0;
+ }
+
+ LLBit_ = false;
+ LLAddr_ = 0;
+ int64_t expected = lastLLValue_;
+ int64_t old =
+ AtomicOperations::compareExchangeSeqCst(ptr, expected, int64_t(value));
+ return (old == expected) ? 1 : 0;
+ }
+ printf("Unaligned SC at 0x%016" PRIx64 ", pc=0x%016" PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ MOZ_CRASH();
+ return 0;
+}
+
+uintptr_t Simulator::stackLimit() const { return stackLimit_; }
+
+uintptr_t* Simulator::addressOfStackLimit() { return &stackLimit_; }
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = getRegister(sp);
+ }
+ return newsp <= stackLimit();
+}
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = getRegister(sp) - extra;
+ return newsp <= stackLimit();
+}
+
+// Unsupported instructions use format to print an error and stop execution.
+void Simulator::format(SimInstruction* instr, const char* format) {
+  printf("Simulator found unsupported instruction:\n 0x%016" PRIxPTR ": %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ MOZ_CRASH();
+}
+
+inline int32_t Simulator::rj_reg(SimInstruction* instr) const {
+ return instr->rjValue();
+}
+
+inline int64_t Simulator::rj(SimInstruction* instr) const {
+ return getRegister(rj_reg(instr));
+}
+
+inline uint64_t Simulator::rj_u(SimInstruction* instr) const {
+ return static_cast<uint64_t>(getRegister(rj_reg(instr)));
+}
+
+inline int32_t Simulator::rk_reg(SimInstruction* instr) const {
+ return instr->rkValue();
+}
+
+inline int64_t Simulator::rk(SimInstruction* instr) const {
+ return getRegister(rk_reg(instr));
+}
+
+inline uint64_t Simulator::rk_u(SimInstruction* instr) const {
+ return static_cast<uint64_t>(getRegister(rk_reg(instr)));
+}
+
+inline int32_t Simulator::rd_reg(SimInstruction* instr) const {
+ return instr->rdValue();
+}
+
+inline int64_t Simulator::rd(SimInstruction* instr) const {
+ return getRegister(rd_reg(instr));
+}
+
+inline uint64_t Simulator::rd_u(SimInstruction* instr) const {
+ return static_cast<uint64_t>(getRegister(rd_reg(instr)));
+}
+
+inline int32_t Simulator::fa_reg(SimInstruction* instr) const {
+ return instr->faValue();
+}
+
+inline float Simulator::fa_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fa_reg(instr));
+}
+
+inline double Simulator::fa_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fa_reg(instr));
+}
+
+inline int32_t Simulator::fj_reg(SimInstruction* instr) const {
+ return instr->fjValue();
+}
+
+inline float Simulator::fj_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fj_reg(instr));
+}
+
+inline double Simulator::fj_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fj_reg(instr));
+}
+
+inline int32_t Simulator::fk_reg(SimInstruction* instr) const {
+ return instr->fkValue();
+}
+
+inline float Simulator::fk_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fk_reg(instr));
+}
+
+inline double Simulator::fk_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fk_reg(instr));
+}
+
+inline int32_t Simulator::fd_reg(SimInstruction* instr) const {
+ return instr->fdValue();
+}
+
+inline float Simulator::fd_float(SimInstruction* instr) const {
+ return getFpuRegisterFloat(fd_reg(instr));
+}
+
+inline double Simulator::fd_double(SimInstruction* instr) const {
+ return getFpuRegisterDouble(fd_reg(instr));
+}
+
+inline int32_t Simulator::cj_reg(SimInstruction* instr) const {
+ return instr->cjValue();
+}
+
+inline bool Simulator::cj(SimInstruction* instr) const {
+ return getCFRegister(cj_reg(instr));
+}
+
+inline int32_t Simulator::cd_reg(SimInstruction* instr) const {
+ return instr->cdValue();
+}
+
+inline bool Simulator::cd(SimInstruction* instr) const {
+ return getCFRegister(cd_reg(instr));
+}
+
+inline int32_t Simulator::ca_reg(SimInstruction* instr) const {
+ return instr->caValue();
+}
+
+inline bool Simulator::ca(SimInstruction* instr) const {
+ return getCFRegister(ca_reg(instr));
+}
+
+inline uint32_t Simulator::sa2(SimInstruction* instr) const {
+ return instr->sa2Value();
+}
+
+inline uint32_t Simulator::sa3(SimInstruction* instr) const {
+ return instr->sa3Value();
+}
+
+inline uint32_t Simulator::ui5(SimInstruction* instr) const {
+ return instr->imm5Value();
+}
+
+inline uint32_t Simulator::ui6(SimInstruction* instr) const {
+ return instr->imm6Value();
+}
+
+inline uint32_t Simulator::lsbw(SimInstruction* instr) const {
+ return instr->lsbwValue();
+}
+
+inline uint32_t Simulator::msbw(SimInstruction* instr) const {
+ return instr->msbwValue();
+}
+
+inline uint32_t Simulator::lsbd(SimInstruction* instr) const {
+ return instr->lsbdValue();
+}
+
+inline uint32_t Simulator::msbd(SimInstruction* instr) const {
+ return instr->msbdValue();
+}
+
+inline uint32_t Simulator::cond(SimInstruction* instr) const {
+ return instr->condValue();
+}
+
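+// The si* helpers sign-extend their fixed-width immediate fields by shifting
+// the field to the top of a signed 32-bit value and arithmetic-shifting it
+// back down.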
+inline int32_t Simulator::si12(SimInstruction* instr) const {
+ return (instr->imm12Value() << 20) >> 20;
+}
+
+inline uint32_t Simulator::ui12(SimInstruction* instr) const {
+ return instr->imm12Value();
+}
+
+inline int32_t Simulator::si14(SimInstruction* instr) const {
+ return (instr->imm14Value() << 18) >> 18;
+}
+
+inline int32_t Simulator::si16(SimInstruction* instr) const {
+ return (instr->imm16Value() << 16) >> 16;
+}
+
+inline int32_t Simulator::si20(SimInstruction* instr) const {
+ return (instr->imm20Value() << 12) >> 12;
+}
+
+ABI_FUNCTION_TYPE_SIM_PROTOTYPES
+
+// Software interrupt instructions are used by the simulator to call into C++.
+void Simulator::softwareInterrupt(SimInstruction* instr) {
+  // The break_ instruction could get us here.
+ mozilla::DebugOnly<int32_t> opcode_hi15 = instr->bits(31, 17);
+ MOZ_ASSERT(opcode_hi15 == 0x15);
+ uint32_t code = instr->bits(14, 0);
+
+ if (instr->instructionBits() == kCallRedirInstr) {
+ Redirection* redirection = Redirection::FromSwiInstruction(instr);
+ uintptr_t nativeFn =
+ reinterpret_cast<uintptr_t>(redirection->nativeFunction());
+
+ // Get the SP for reading stack arguments
+ int64_t* sp_ = reinterpret_cast<int64_t*>(getRegister(sp));
+
+ // Store argument register values in local variables for ease of use below.
+ int64_t a0_ = getRegister(a0);
+ int64_t a1_ = getRegister(a1);
+ int64_t a2_ = getRegister(a2);
+ int64_t a3_ = getRegister(a3);
+ int64_t a4_ = getRegister(a4);
+ int64_t a5_ = getRegister(a5);
+ int64_t a6_ = getRegister(a6);
+ int64_t a7_ = getRegister(a7);
+ float f0_s = getFpuRegisterFloat(f0);
+ float f1_s = getFpuRegisterFloat(f1);
+ float f2_s = getFpuRegisterFloat(f2);
+ float f3_s = getFpuRegisterFloat(f3);
+ float f4_s = getFpuRegisterFloat(f4);
+ double f0_d = getFpuRegisterDouble(f0);
+ double f1_d = getFpuRegisterDouble(f1);
+ double f2_d = getFpuRegisterDouble(f2);
+ double f3_d = getFpuRegisterDouble(f3);
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = getRegister(ra);
+
+ bool stack_aligned = (getRegister(sp) & (ABIStackAlignment - 1)) == 0;
+ if (!stack_aligned) {
+ fprintf(stderr, "Runtime call with unaligned stack!\n");
+ MOZ_CRASH();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ switch (redirection->type()) {
+ ABI_FUNCTION_TYPE_LOONGARCH64_SIM_DISPATCH
+
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ setRegister(ra, saved_ra);
+ set_pc(getRegister(ra));
+ } else if ((instr->bits(31, 15) << 15 == op_break) && code == kWasmTrapCode) {
+ uint8_t* newPC;
+ if (wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc(int64_t(newPC));
+ return;
+ }
+ } else if ((instr->bits(31, 15) << 15 == op_break) && code <= kMaxStopCode &&
+ code != 6) {
+ if (isWatchpoint(code)) {
+ // printWatchpoint(code);
+ } else {
+ increaseStopCounter(code);
+ handleStop(code, instr);
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ loong64Debugger dbg(this);
+ dbg.debug();
+ }
+}
+
+// Stop helper functions.
+bool Simulator::isWatchpoint(uint32_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::printWatchpoint(uint32_t code) {
+ loong64Debugger dbg(this);
+ ++break_count_;
+ printf("\n---- break %d marker: %20" PRIi64 " (instr count: %20" PRIi64
+ ") ----\n",
+ code, break_count_, icount_);
+ dbg.printAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::handleStop(uint32_t code, SimInstruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (isEnabledStop(code)) {
+ loong64Debugger dbg(this);
+ dbg.stop(instr);
+ } else {
+ set_pc(get_pc() + 1 * SimInstruction::kInstrSize);
+ }
+}
+
+bool Simulator::isStopInstruction(SimInstruction* instr) {
+ int32_t opcode_hi15 = instr->bits(31, 17);
+ uint32_t code = static_cast<uint32_t>(instr->bits(14, 0));
+ return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
+bool Simulator::isEnabledStop(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ MOZ_ASSERT(code > kMaxWatchpointCode);
+ return !(watchedStops_[code].count_ & kStopDisabledBit);
+}
+
+void Simulator::enableStop(uint32_t code) {
+ if (!isEnabledStop(code)) {
+ watchedStops_[code].count_ &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::disableStop(uint32_t code) {
+ if (isEnabledStop(code)) {
+ watchedStops_[code].count_ |= kStopDisabledBit;
+ }
+}
+
+void Simulator::increaseStopCounter(uint32_t code) {
+ MOZ_ASSERT(code <= kMaxStopCode);
+ if ((watchedStops_[code].count_ & ~(1 << 31)) == 0x7fffffff) {
+    printf(
+        "Stop counter for code %i has overflowed.\n"
+        "Enabling this code and resetting the counter to 0.\n",
+        code);
+ watchedStops_[code].count_ = 0;
+ enableStop(code);
+ } else {
+ watchedStops_[code].count_++;
+ }
+}
+
+// Print a stop status.
+void Simulator::printStopInfo(uint32_t code) {
+ if (code <= kMaxWatchpointCode) {
+ printf("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ printf("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watchedStops_[code].count_ & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watchedStops_[code].desc_) {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
+ count, watchedStops_[code].desc_);
+ } else {
+ printf("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+ count);
+ }
+ }
+}
+
+void Simulator::signalExceptions() {
+ for (int i = 1; i < kNumExceptions; i++) {
+ if (exceptions[i] != 0) {
+ MOZ_CRASH("Error: Exception raised.");
+ }
+ }
+}
+
+// ReverseBits(value) returns |value| in reverse bit order.
+template <typename T>
+T ReverseBits(T value) {
+ MOZ_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ T result = 0;
+ for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+// Min/Max template functions for Double and Single arguments.
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
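+// Resolve the NaN and signed-zero tie cases of min/max; returns false when
+// an ordinary comparison should decide the result instead.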
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ *result = a;
+ } else if (std::isnan(a)) {
+ *result = b;
+ } else if (std::isnan(b)) {
+ *result = a;
+ } else if (b == a) {
+ // Handle -0.0 == 0.0 case.
+ // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
+ // negates the result.
+ *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
+
+enum class KeepSign : bool { no = false, yes };
+
+// Handle execution based on instruction types.
+// decodeTypeImmediate
+void Simulator::decodeTypeOp6(SimInstruction* instr) {
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Used for memory instructions.
+ int64_t alu_out = 0;
+
+ // Branch instructions common part.
+ auto BranchAndLinkHelper = [this, &next_pc](SimInstruction* instr) {
+ int64_t current_pc = get_pc();
+ setRegister(ra, current_pc + SimInstruction::kInstrSize);
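+    // The 26-bit offset is stored split: instruction bits 25:10 hold its low
+    // 16 bits and bits 9:0 its high 10 bits. Reassemble and sign-extend it;
+    // it counts instructions, hence the << 2 below.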
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr->bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ set_pc(next_pc);
+ };
+
+ auto BranchOff16Helper = [this, &next_pc](SimInstruction* instr,
+ bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs16 = static_cast<int32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs = do_branch ? (offs16 << 2) : SimInstruction::kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff21Helper = [this, &next_pc](SimInstruction* instr,
+ bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs21_low16 =
+ static_cast<uint32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs21_high5 = static_cast<int32_t>(instr->bits(4, 0) << 27) >> 11;
+ int32_t offs = offs21_low16 | offs21_high5;
+ offs = do_branch ? (offs << 2) : SimInstruction::kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff26Helper = [this, &next_pc](SimInstruction* instr) {
+ int64_t current_pc = get_pc();
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr->bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr->bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ set_pc(next_pc);
+ };
+
+ auto JumpOff16Helper = [this, &next_pc](SimInstruction* instr) {
+ int32_t offs16 = static_cast<int32_t>(instr->bits(25, 10) << 16) >> 16;
+ setRegister(rd_reg(instr), get_pc() + SimInstruction::kInstrSize);
+ next_pc = rj(instr) + (offs16 << 2);
+ set_pc(next_pc);
+ };
+
+ switch (instr->bits(31, 26) << 26) {
+ case op_addu16i_d: {
+ int32_t si16_upper = static_cast<int32_t>(si16(instr)) << 16;
+ alu_out = static_cast<int64_t>(si16_upper) + rj(instr);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_beqz: {
+ BranchOff21Helper(instr, rj(instr) == 0);
+ break;
+ }
+ case op_bnez: {
+ BranchOff21Helper(instr, rj(instr) != 0);
+ break;
+ }
+ case op_bcz: {
+ if (instr->bits(9, 8) == 0b00) {
+ // BCEQZ
+ BranchOff21Helper(instr, cj(instr) == false);
+ } else if (instr->bits(9, 8) == 0b01) {
+ // BCNEZ
+ BranchOff21Helper(instr, cj(instr) == true);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case op_jirl: {
+ JumpOff16Helper(instr);
+ break;
+ }
+ case op_b: {
+ BranchOff26Helper(instr);
+ break;
+ }
+ case op_bl: {
+ BranchAndLinkHelper(instr);
+ break;
+ }
+ case op_beq: {
+ BranchOff16Helper(instr, rj(instr) == rd(instr));
+ break;
+ }
+ case op_bne: {
+ BranchOff16Helper(instr, rj(instr) != rd(instr));
+ break;
+ }
+ case op_blt: {
+ BranchOff16Helper(instr, rj(instr) < rd(instr));
+ break;
+ }
+ case op_bge: {
+ BranchOff16Helper(instr, rj(instr) >= rd(instr));
+ break;
+ }
+ case op_bltu: {
+ BranchOff16Helper(instr, rj_u(instr) < rd_u(instr));
+ break;
+ }
+ case op_bgeu: {
+ BranchOff16Helper(instr, rj_u(instr) >= rd_u(instr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp7(SimInstruction* instr) {
+ int64_t alu_out;
+
+ switch (instr->bits(31, 25) << 25) {
+ case op_lu12i_w: {
+ int32_t si20_upper = static_cast<int32_t>(si20(instr) << 12);
+ setRegister(rd_reg(instr), static_cast<int64_t>(si20_upper));
+ break;
+ }
+ case op_lu32i_d: {
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12) >> 12;
+ int64_t lower_32bit_mask = 0xFFFFFFFF;
+ alu_out = (static_cast<int64_t>(si20_signExtend) << 32) |
+ (rd(instr) & lower_32bit_mask);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_pcaddi: {
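+      // pcaddi: rd = pc + (si20 << 2); the shift pair sign-extends the
+      // 20-bit immediate and scales it by the instruction size.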
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12) >> 10;
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_pcalau12i: {
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12);
+ int64_t current_pc = get_pc();
+ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000;
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ setRegister(rd_reg(instr), alu_out & clear_lower12bit_mask);
+ break;
+ }
+ case op_pcaddu12i: {
+ int32_t si20_signExtend = static_cast<int32_t>(si20(instr) << 12);
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_pcaddu18i: {
+ int64_t si20_signExtend = (static_cast<int64_t>(si20(instr)) << 44) >> 26;
+ int64_t current_pc = get_pc();
+ alu_out = si20_signExtend + current_pc;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp8(SimInstruction* instr) {
+ int64_t addr = 0x0;
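+  // si14 is a signed 14-bit word offset: the shifts sign-extend it and scale
+  // it by 4.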
+ int64_t si14_se = (static_cast<int64_t>(si14(instr)) << 50) >> 48;
+
+ switch (instr->bits(31, 24) << 24) {
+ case op_ldptr_w: {
+ setRegister(rd_reg(instr), readW(rj(instr) + si14_se, instr));
+ break;
+ }
+ case op_stptr_w: {
+ writeW(rj(instr) + si14_se, static_cast<int32_t>(rd(instr)), instr);
+ break;
+ }
+ case op_ldptr_d: {
+ setRegister(rd_reg(instr), readDW(rj(instr) + si14_se, instr));
+ break;
+ }
+ case op_stptr_d: {
+ writeDW(rj(instr) + si14_se, rd(instr), instr);
+ break;
+ }
+ case op_ll_w: {
+ addr = si14_se + rj(instr);
+ setRegister(rd_reg(instr), loadLinkedW(addr, instr));
+ break;
+ }
+ case op_sc_w: {
+ addr = si14_se + rj(instr);
+ setRegister(
+ rd_reg(instr),
+ storeConditionalW(addr, static_cast<int32_t>(rd(instr)), instr));
+ break;
+ }
+ case op_ll_d: {
+ addr = si14_se + rj(instr);
+ setRegister(rd_reg(instr), loadLinkedD(addr, instr));
+ break;
+ }
+ case op_sc_d: {
+ addr = si14_se + rj(instr);
+ setRegister(rd_reg(instr), storeConditionalD(addr, rd(instr), instr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp10(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+ int64_t si12_se = (static_cast<int64_t>(si12(instr)) << 52) >> 52;
+ uint64_t si12_ze = (static_cast<uint64_t>(ui12(instr)) << 52) >> 52;
+
+ switch (instr->bits(31, 22) << 22) {
+ case op_bstrins_d: {
+ uint8_t lsbd_ = lsbd(instr);
+ uint8_t msbd_ = msbd(instr);
+ MOZ_ASSERT(lsbd_ <= msbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
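+      // BSTRINS.D: insert the low |size| bits of rj into bits [msbd_:lsbd_]
+      // of rd.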
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out =
+ (rd_u(instr) & ~(mask << lsbd_)) | ((rj_u(instr) & mask) << lsbd_);
+ setRegister(rd_reg(instr), alu_out);
+ } else if (size == 64) {
+ setRegister(rd_reg(instr), rj(instr));
+ }
+ break;
+ }
+ case op_bstrpick_d: {
+ uint8_t lsbd_ = lsbd(instr);
+ uint8_t msbd_ = msbd(instr);
+ MOZ_ASSERT(lsbd_ <= msbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
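+      // BSTRPICK.D: extract bits [msbd_:lsbd_] of rj into the low bits of
+      // rd, zero-extended.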
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rj_u(instr) & (mask << lsbd_)) >> lsbd_;
+ setRegister(rd_reg(instr), alu_out);
+ } else if (size == 64) {
+ setRegister(rd_reg(instr), rj(instr));
+ }
+ break;
+ }
+ case op_slti: {
+ setRegister(rd_reg(instr), rj(instr) < si12_se ? 1 : 0);
+ break;
+ }
+ case op_sltui: {
+ setRegister(rd_reg(instr),
+ rj_u(instr) < static_cast<uint64_t>(si12_se) ? 1 : 0);
+ break;
+ }
+ case op_addi_w: {
+ int32_t alu32_out =
+ static_cast<int32_t>(rj(instr)) + static_cast<int32_t>(si12_se);
+ setRegister(rd_reg(instr), alu32_out);
+ break;
+ }
+ case op_addi_d: {
+ setRegister(rd_reg(instr), rj(instr) + si12_se);
+ break;
+ }
+ case op_lu52i_d: {
+ int64_t si12_se = static_cast<int64_t>(si12(instr)) << 52;
+ uint64_t mask = (1ULL << 52) - 1;
+ alu_out = si12_se + (rj(instr) & mask);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_andi: {
+ setRegister(rd_reg(instr), rj(instr) & si12_ze);
+ break;
+ }
+ case op_ori: {
+ setRegister(rd_reg(instr), rj_u(instr) | si12_ze);
+ break;
+ }
+ case op_xori: {
+ setRegister(rd_reg(instr), rj_u(instr) ^ si12_ze);
+ break;
+ }
+ case op_ld_b: {
+ setRegister(rd_reg(instr), readB(rj(instr) + si12_se));
+ break;
+ }
+ case op_ld_h: {
+ setRegister(rd_reg(instr), readH(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_ld_w: {
+ setRegister(rd_reg(instr), readW(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_ld_d: {
+ setRegister(rd_reg(instr), readDW(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_st_b: {
+ writeB(rj(instr) + si12_se, static_cast<int8_t>(rd(instr)));
+ break;
+ }
+ case op_st_h: {
+ writeH(rj(instr) + si12_se, static_cast<int16_t>(rd(instr)), instr);
+ break;
+ }
+ case op_st_w: {
+ writeW(rj(instr) + si12_se, static_cast<int32_t>(rd(instr)), instr);
+ break;
+ }
+ case op_st_d: {
+ writeDW(rj(instr) + si12_se, rd(instr), instr);
+ break;
+ }
+ case op_ld_bu: {
+ setRegister(rd_reg(instr), readBU(rj(instr) + si12_se));
+ break;
+ }
+ case op_ld_hu: {
+ setRegister(rd_reg(instr), readHU(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_ld_wu: {
+ setRegister(rd_reg(instr), readWU(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_fld_s: {
+ setFpuRegister(fd_reg(instr), kFPUInvalidResult); // Trash upper 32 bits.
+ setFpuRegisterWord(fd_reg(instr), readW(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_fst_s: {
+ int32_t alu_out_32 = static_cast<int32_t>(getFpuRegister(fd_reg(instr)));
+ writeW(rj(instr) + si12_se, alu_out_32, instr);
+ break;
+ }
+ case op_fld_d: {
+ setFpuRegisterDouble(fd_reg(instr), readD(rj(instr) + si12_se, instr));
+ break;
+ }
+ case op_fst_d: {
+ writeD(rj(instr) + si12_se, getFpuRegisterDouble(fd_reg(instr)), instr);
+ break;
+ }
+ case op_preld:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp11(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+
+ switch (instr->bits(31, 21) << 21) {
+ case op_bstr_w: {
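+      // BSTRINS.W and BSTRPICK.W share this major opcode; bit 15 selects
+      // between insert (0) and pick (1).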
+ MOZ_ASSERT(instr->bit(21) == 1);
+ uint8_t lsbw_ = lsbw(instr);
+ uint8_t msbw_ = msbw(instr);
+ MOZ_ASSERT(lsbw_ <= msbw_);
+ uint8_t size = msbw_ - lsbw_ + 1;
+ uint64_t mask = (1ULL << size) - 1;
+ if (instr->bit(15) == 0) {
+ // BSTRINS_W
+ alu_out = static_cast<int32_t>((rd_u(instr) & ~(mask << lsbw_)) |
+ ((rj_u(instr) & mask) << lsbw_));
+ } else {
+ // BSTRPICK_W
+ alu_out =
+ static_cast<int32_t>((rj_u(instr) & (mask << lsbw_)) >> lsbw_);
+ }
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp12(SimInstruction* instr) {
+ switch (instr->bits(31, 20) << 20) {
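+    // Fused multiply-add family, each mapped onto a single std::fma call:
+    // fmadd = fj*fk + fa, fmsub = fj*fk - fa, fnmadd = -(fj*fk + fa),
+    // fnmsub = -(fj*fk - fa).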
+ case op_fmadd_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+ std::fma(fj_float(instr), fk_float(instr), fa_float(instr)));
+ break;
+ }
+ case op_fmadd_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+ std::fma(fj_double(instr), fk_double(instr), fa_double(instr)));
+ break;
+ }
+ case op_fmsub_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+          std::fma(fj_float(instr), fk_float(instr), -fa_float(instr)));
+ break;
+ }
+ case op_fmsub_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+          std::fma(fj_double(instr), fk_double(instr), -fa_double(instr)));
+ break;
+ }
+ case op_fnmadd_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+ std::fma(-fj_float(instr), fk_float(instr), -fa_float(instr)));
+ break;
+ }
+ case op_fnmadd_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+ std::fma(-fj_double(instr), fk_double(instr), -fa_double(instr)));
+ break;
+ }
+ case op_fnmsub_s: {
+ setFpuRegisterFloat(
+ fd_reg(instr),
+          std::fma(-fj_float(instr), fk_float(instr), fa_float(instr)));
+ break;
+ }
+ case op_fnmsub_d: {
+ setFpuRegisterDouble(
+ fd_reg(instr),
+          std::fma(-fj_double(instr), fk_double(instr), fa_double(instr)));
+ break;
+ }
+ case op_fcmp_cond_s: {
+ MOZ_ASSERT(instr->bits(4, 3) == 0);
+ float fj = fj_float(instr);
+ float fk = fk_float(instr);
+ switch (cond(instr)) {
+ case AssemblerLOONG64::CAF: {
+ setCFRegister(cd_reg(instr), false);
+ break;
+ }
+ case AssemblerLOONG64::CUN: {
+ setCFRegister(cd_reg(instr), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CEQ: {
+ setCFRegister(cd_reg(instr), fj == fk);
+ break;
+ }
+ case AssemblerLOONG64::CUEQ: {
+ setCFRegister(cd_reg(instr),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLT: {
+ setCFRegister(cd_reg(instr), fj < fk);
+ break;
+ }
+ case AssemblerLOONG64::CULT: {
+ setCFRegister(cd_reg(instr),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLE: {
+ setCFRegister(cd_reg(instr), fj <= fk);
+ break;
+ }
+ case AssemblerLOONG64::CULE: {
+ setCFRegister(cd_reg(instr),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CNE: {
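+        // Ordered not-equal, written as (fj < fk) || (fj > fk) so that the
+        // result is false whenever either operand is NaN.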
+ setCFRegister(cd_reg(instr), (fj < fk) || (fj > fk));
+ break;
+ }
+ case AssemblerLOONG64::COR: {
+ setCFRegister(cd_reg(instr), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CUNE: {
+ setCFRegister(cd_reg(instr), (fj < fk) || (fj > fk) ||
+ std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::SAF:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUN:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SNE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SOR:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUNE:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case op_fcmp_cond_d: {
+ MOZ_ASSERT(instr->bits(4, 3) == 0);
+ double fj = fj_double(instr);
+ double fk = fk_double(instr);
+ switch (cond(instr)) {
+ case AssemblerLOONG64::CAF: {
+ setCFRegister(cd_reg(instr), false);
+ break;
+ }
+ case AssemblerLOONG64::CUN: {
+ setCFRegister(cd_reg(instr), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CEQ: {
+ setCFRegister(cd_reg(instr), fj == fk);
+ break;
+ }
+ case AssemblerLOONG64::CUEQ: {
+ setCFRegister(cd_reg(instr),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLT: {
+ setCFRegister(cd_reg(instr), fj < fk);
+ break;
+ }
+ case AssemblerLOONG64::CULT: {
+ setCFRegister(cd_reg(instr),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CLE: {
+ setCFRegister(cd_reg(instr), fj <= fk);
+ break;
+ }
+ case AssemblerLOONG64::CULE: {
+ setCFRegister(cd_reg(instr),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CNE: {
+ setCFRegister(cd_reg(instr), (fj < fk) || (fj > fk));
+ break;
+ }
+ case AssemblerLOONG64::COR: {
+ setCFRegister(cd_reg(instr), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::CUNE: {
+ setCFRegister(cd_reg(instr),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case AssemblerLOONG64::SAF:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUN:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUEQ:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULT:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SLE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SULE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SNE:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SOR:
+ UNIMPLEMENTED();
+ break;
+ case AssemblerLOONG64::SUNE:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp14(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+
+ switch (instr->bits(31, 18) << 18) {
+ case op_bytepick_d: {
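+      // bytepick.d: rd = (rk << (8 * sa3)) | ((uint64_t)rj >> (64 - 8 * sa3)),
+      // i.e. the top sa3 bytes of rj become the low bytes of the result.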
+ uint8_t sa = sa3(instr) * 8;
+ if (sa == 0) {
+ alu_out = rk(instr);
+ } else {
+        int64_t mask = static_cast<int64_t>(1ULL << 63) >> (sa - 1);
+        int64_t rk_hi = (rk(instr) & (~mask)) << sa;
+        int64_t rj_lo = (static_cast<uint64_t>(rj(instr)) & mask) >> (64 - sa);
+ alu_out = rk_hi | rj_lo;
+ }
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_fsel: {
+ MOZ_ASSERT(instr->bits(19, 18) == 0);
+ if (ca(instr) == 0) {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr));
+ } else {
+ setFpuRegisterDouble(fd_reg(instr), fk_double(instr));
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp15(SimInstruction* instr) {
+ int64_t alu_out = 0x0;
+ int32_t alu32_out = 0x0;
+
+ switch (instr->bits(31, 17) << 17) {
+ case op_bytepick_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ uint8_t sa = sa2(instr) * 8;
+ if (sa == 0) {
+ alu32_out = static_cast<int32_t>(rk(instr));
+ } else {
+        int32_t mask = static_cast<int32_t>(1u << 31) >> (sa - 1);
+ int32_t rk_hi = (static_cast<int32_t>(rk(instr)) & (~mask)) << sa;
+ int32_t rj_lo = (static_cast<uint32_t>(rj(instr)) & mask) >> (32 - sa);
+ alu32_out = rk_hi | rj_lo;
+ }
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_alsl_w: {
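+      // alsl.w computes (rj << shift) + rk on 32-bit values; the encoded
+      // shift amount is sa2 + 1, giving a range of 1..4.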
+ uint8_t sa = sa2(instr) + 1;
+ alu32_out = (static_cast<int32_t>(rj(instr)) << sa) +
+ static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), alu32_out);
+ break;
+ }
+ case op_alsl_wu: {
+ uint8_t sa = sa2(instr) + 1;
+ alu32_out = (static_cast<int32_t>(rj(instr)) << sa) +
+ static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), static_cast<uint32_t>(alu32_out));
+ break;
+ }
+ case op_alsl_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ uint8_t sa = sa2(instr) + 1;
+ alu_out = (rj(instr) << sa) + rk(instr);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp16(SimInstruction* instr) {
+ int64_t alu_out;
+ switch (instr->bits(31, 16) << 16) {
+ case op_slli_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 16) == 0b01);
+ setRegister(rd_reg(instr), rj(instr) << ui6(instr));
+ break;
+ }
+ case op_srli_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ setRegister(rd_reg(instr), rj_u(instr) >> ui6(instr));
+ break;
+ }
+ case op_srai_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ setRegister(rd_reg(instr), rj(instr) >> ui6(instr));
+ break;
+ }
+ case op_rotri_d: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 16) == 0b01);
+ alu_out = static_cast<int64_t>(RotateRight64(rj_u(instr), ui6(instr)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp17(SimInstruction* instr) {
+ int64_t alu_out;
+ int32_t alu32_out;
+
+ switch (instr->bits(31, 15) << 15) {
+ case op_slli_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<int32_t>(rj(instr)) << ui5(instr);
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_srai_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<int32_t>(rj(instr)) >> ui5(instr);
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_rotri_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<int32_t>(
+ RotateRight32(static_cast<const uint32_t>(rj_u(instr)),
+ static_cast<const uint32_t>(ui5(instr))));
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_srli_w: {
+ MOZ_ASSERT(instr->bit(17) == 0);
+ MOZ_ASSERT(instr->bits(17, 15) == 0b001);
+ alu32_out = static_cast<uint32_t>(rj(instr)) >> ui5(instr);
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_add_w: {
+ int32_t alu32_out = static_cast<int32_t>(rj(instr) + rk(instr));
+ // Sign-extend result of 32bit operation into 64bit register.
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_add_d:
+ setRegister(rd_reg(instr), rj(instr) + rk(instr));
+ break;
+ case op_sub_w: {
+ int32_t alu32_out = static_cast<int32_t>(rj(instr) - rk(instr));
+ // Sign-extend result of 32bit operation into 64bit register.
+ setRegister(rd_reg(instr), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case op_sub_d:
+ setRegister(rd_reg(instr), rj(instr) - rk(instr));
+ break;
+ case op_slt:
+ setRegister(rd_reg(instr), rj(instr) < rk(instr) ? 1 : 0);
+ break;
+ case op_sltu:
+ setRegister(rd_reg(instr), rj_u(instr) < rk_u(instr) ? 1 : 0);
+ break;
+ case op_maskeqz:
+ setRegister(rd_reg(instr), rk(instr) == 0 ? 0 : rj(instr));
+ break;
+ case op_masknez:
+ setRegister(rd_reg(instr), rk(instr) != 0 ? 0 : rj(instr));
+ break;
+ case op_nor:
+ setRegister(rd_reg(instr), ~(rj(instr) | rk(instr)));
+ break;
+ case op_and:
+ setRegister(rd_reg(instr), rj(instr) & rk(instr));
+ break;
+ case op_or:
+ setRegister(rd_reg(instr), rj(instr) | rk(instr));
+ break;
+ case op_xor:
+ setRegister(rd_reg(instr), rj(instr) ^ rk(instr));
+ break;
+ case op_orn:
+ setRegister(rd_reg(instr), rj(instr) | (~rk(instr)));
+ break;
+ case op_andn:
+ setRegister(rd_reg(instr), rj(instr) & (~rk(instr)));
+ break;
+ case op_sll_w:
+ setRegister(rd_reg(instr), (int32_t)rj(instr) << (rk_u(instr) % 32));
+ break;
+ case op_srl_w: {
+ alu_out =
+ static_cast<int32_t>((uint32_t)rj_u(instr) >> (rk_u(instr) % 32));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_sra_w:
+ setRegister(rd_reg(instr), (int32_t)rj(instr) >> (rk_u(instr) % 32));
+ break;
+ case op_sll_d:
+ setRegister(rd_reg(instr), rj(instr) << (rk_u(instr) % 64));
+ break;
+ case op_srl_d: {
+ alu_out = static_cast<int64_t>(rj_u(instr) >> (rk_u(instr) % 64));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_sra_d:
+ setRegister(rd_reg(instr), rj(instr) >> (rk_u(instr) % 64));
+ break;
+ case op_rotr_w: {
+ alu_out = static_cast<int32_t>(
+ RotateRight32(static_cast<const uint32_t>(rj_u(instr)),
+ static_cast<const uint32_t>(rk_u(instr) % 32)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_rotr_d: {
+ alu_out = static_cast<int64_t>(
+ RotateRight64((rj_u(instr)), (rk_u(instr) % 64)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_mul_w: {
+ alu_out =
+ static_cast<int32_t>(rj(instr)) * static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_mulh_w: {
+ int32_t rj_lo = static_cast<int32_t>(rj(instr));
+ int32_t rk_lo = static_cast<int32_t>(rk(instr));
+ alu_out = static_cast<int64_t>(rj_lo) * static_cast<int64_t>(rk_lo);
+ setRegister(rd_reg(instr), alu_out >> 32);
+ break;
+ }
+ case op_mulh_wu: {
+ uint32_t rj_lo = static_cast<uint32_t>(rj_u(instr));
+ uint32_t rk_lo = static_cast<uint32_t>(rk_u(instr));
+ alu_out = static_cast<uint64_t>(rj_lo) * static_cast<uint64_t>(rk_lo);
+ setRegister(rd_reg(instr), alu_out >> 32);
+ break;
+ }
+ case op_mul_d:
+ setRegister(rd_reg(instr), rj(instr) * rk(instr));
+ break;
+ case op_mulh_d:
+ setRegister(rd_reg(instr), MultiplyHighSigned(rj(instr), rk(instr)));
+ break;
+ case op_mulh_du:
+ setRegister(rd_reg(instr),
+ MultiplyHighUnsigned(rj_u(instr), rk_u(instr)));
+ break;
+ case op_mulw_d_w: {
+ int64_t rj_i32 = static_cast<int32_t>(rj(instr));
+ int64_t rk_i32 = static_cast<int32_t>(rk(instr));
+ setRegister(rd_reg(instr), rj_i32 * rk_i32);
+ break;
+ }
+ case op_mulw_d_wu: {
+ uint64_t rj_u32 = static_cast<uint32_t>(rj_u(instr));
+ uint64_t rk_u32 = static_cast<uint32_t>(rk_u(instr));
+ setRegister(rd_reg(instr), rj_u32 * rk_u32);
+ break;
+ }
+ case op_div_w: {
+ int32_t rj_i32 = static_cast<int32_t>(rj(instr));
+ int32_t rk_i32 = static_cast<int32_t>(rk(instr));
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ setRegister(rd_reg(instr), INT_MIN);
+ } else if (rk_i32 != 0) {
+ setRegister(rd_reg(instr), rj_i32 / rk_i32);
+ }
+ break;
+ }
+ case op_mod_w: {
+ int32_t rj_i32 = static_cast<int32_t>(rj(instr));
+ int32_t rk_i32 = static_cast<int32_t>(rk(instr));
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ setRegister(rd_reg(instr), 0);
+ } else if (rk_i32 != 0) {
+ setRegister(rd_reg(instr), rj_i32 % rk_i32);
+ }
+ break;
+ }
+ case op_div_wu: {
+ uint32_t rj_u32 = static_cast<uint32_t>(rj(instr));
+ uint32_t rk_u32 = static_cast<uint32_t>(rk(instr));
+ if (rk_u32 != 0) {
+ setRegister(rd_reg(instr), static_cast<int32_t>(rj_u32 / rk_u32));
+ }
+ break;
+ }
+ case op_mod_wu: {
+ uint32_t rj_u32 = static_cast<uint32_t>(rj(instr));
+ uint32_t rk_u32 = static_cast<uint32_t>(rk(instr));
+ if (rk_u32 != 0) {
+ setRegister(rd_reg(instr), static_cast<int32_t>(rj_u32 % rk_u32));
+ }
+ break;
+ }
+ case op_div_d: {
+ if (rj(instr) == INT64_MIN && rk(instr) == -1) {
+ setRegister(rd_reg(instr), INT64_MIN);
+ } else if (rk(instr) != 0) {
+ setRegister(rd_reg(instr), rj(instr) / rk(instr));
+ }
+ break;
+ }
+ case op_mod_d: {
+      if (rj(instr) == INT64_MIN && rk(instr) == -1) {
+ setRegister(rd_reg(instr), 0);
+ } else if (rk(instr) != 0) {
+ setRegister(rd_reg(instr), rj(instr) % rk(instr));
+ }
+ break;
+ }
+ case op_div_du: {
+ if (rk_u(instr) != 0) {
+ setRegister(rd_reg(instr),
+ static_cast<int64_t>(rj_u(instr) / rk_u(instr)));
+ }
+ break;
+ }
+ case op_mod_du: {
+ if (rk_u(instr) != 0) {
+ setRegister(rd_reg(instr),
+ static_cast<int64_t>(rj_u(instr) % rk_u(instr)));
+ }
+ break;
+ }
+ case op_break:
+ softwareInterrupt(instr);
+ break;
+ case op_fadd_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) + fk_float(instr));
+ break;
+ }
+ case op_fadd_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) + fk_double(instr));
+ break;
+ }
+ case op_fsub_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) - fk_float(instr));
+ break;
+ }
+ case op_fsub_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) - fk_double(instr));
+ break;
+ }
+ case op_fmul_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) * fk_float(instr));
+ break;
+ }
+ case op_fmul_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) * fk_double(instr));
+ break;
+ }
+ case op_fdiv_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr) / fk_float(instr));
+ break;
+ }
+ case op_fdiv_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr) / fk_double(instr));
+ break;
+ }
+ case op_fmax_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMax(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmax_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMax(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_fmin_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMin(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmin_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMin(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_fmaxa_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMaxA(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmaxa_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMaxA(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_fmina_s: {
+ setFpuRegisterFloat(fd_reg(instr),
+ FPUMinA(fk_float(instr), fj_float(instr)));
+ break;
+ }
+ case op_fmina_d: {
+ setFpuRegisterDouble(fd_reg(instr),
+ FPUMinA(fk_double(instr), fj_double(instr)));
+ break;
+ }
+ case op_ldx_b:
+ setRegister(rd_reg(instr), readB(rj(instr) + rk(instr)));
+ break;
+ case op_ldx_h:
+ setRegister(rd_reg(instr), readH(rj(instr) + rk(instr), instr));
+ break;
+ case op_ldx_w:
+ setRegister(rd_reg(instr), readW(rj(instr) + rk(instr), instr));
+ break;
+ case op_ldx_d:
+ setRegister(rd_reg(instr), readDW(rj(instr) + rk(instr), instr));
+ break;
+ case op_stx_b:
+ writeB(rj(instr) + rk(instr), static_cast<int8_t>(rd(instr)));
+ break;
+ case op_stx_h:
+ writeH(rj(instr) + rk(instr), static_cast<int16_t>(rd(instr)), instr);
+ break;
+ case op_stx_w:
+ writeW(rj(instr) + rk(instr), static_cast<int32_t>(rd(instr)), instr);
+ break;
+ case op_stx_d:
+ writeDW(rj(instr) + rk(instr), rd(instr), instr);
+ break;
+ case op_ldx_bu:
+ setRegister(rd_reg(instr), readBU(rj(instr) + rk(instr)));
+ break;
+ case op_ldx_hu:
+ setRegister(rd_reg(instr), readHU(rj(instr) + rk(instr), instr));
+ break;
+ case op_ldx_wu:
+ setRegister(rd_reg(instr), readWU(rj(instr) + rk(instr), instr));
+ break;
+ case op_fldx_s:
+ setFpuRegister(fd_reg(instr), kFPUInvalidResult); // Trash upper 32 bits.
+ setFpuRegisterWord(fd_reg(instr), readW(rj(instr) + rk(instr), instr));
+ break;
+ case op_fldx_d:
+ setFpuRegister(fd_reg(instr), kFPUInvalidResult); // Trash upper 32 bits.
+ setFpuRegisterDouble(fd_reg(instr), readD(rj(instr) + rk(instr), instr));
+ break;
+ case op_fstx_s: {
+ int32_t alu_out_32 = static_cast<int32_t>(getFpuRegister(fd_reg(instr)));
+ writeW(rj(instr) + rk(instr), alu_out_32, instr);
+ break;
+ }
+ case op_fstx_d: {
+ writeD(rj(instr) + rk(instr), getFpuRegisterDouble(fd_reg(instr)), instr);
+ break;
+ }
+ case op_amswap_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amswap_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_du:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_du:
+ UNIMPLEMENTED();
+ break;
+ case op_amswap_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amswap_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amadd_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amand_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amor_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_amxor_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_w:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_d:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammax_db_du:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_wu:
+ UNIMPLEMENTED();
+ break;
+ case op_ammin_db_du:
+ UNIMPLEMENTED();
+ break;
+ case op_dbar:
+ // TODO(loong64): dbar simulation
+ break;
+ case op_ibar:
+ UNIMPLEMENTED();
+ break;
+ case op_fcopysign_s:
+ UNIMPLEMENTED();
+ break;
+ case op_fcopysign_d:
+ UNIMPLEMENTED();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp22(SimInstruction* instr) {
+ int64_t alu_out;
+
+ switch (instr->bits(31, 10) << 10) {
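+    // __builtin_clz/__builtin_ctz are undefined for a zero argument, so an
+    // all-zero source is special-cased to the full register width.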
+ case op_clz_w: {
+ alu_out = U32(rj_u(instr)) ? __builtin_clz(U32(rj_u(instr))) : 32;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ctz_w: {
+ alu_out = U32(rj_u(instr)) ? __builtin_ctz(U32(rj_u(instr))) : 32;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_clz_d: {
+ alu_out = U64(rj_u(instr)) ? __builtin_clzll(U64(rj_u(instr))) : 64;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ctz_d: {
+ alu_out = U64(rj_u(instr)) ? __builtin_ctzll(U64(rj_u(instr))) : 64;
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revb_2h: {
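+      // revb.2h swaps the two bytes inside each 16-bit halfword of the low
+      // 32 bits and sign-extends the result; even loop iterations move the
+      // selected byte down 8 bits, odd iterations move it up 8 bits.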
+ uint32_t input = static_cast<uint32_t>(rj(instr));
+ uint64_t output = 0;
+
+ uint32_t mask = 0xFF000000;
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revb_4h: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revb_2w: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF000000FF000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (24 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 24);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revb_d: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 3) {
+ tmp = tmp >> (56 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 56);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revh_2w: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 16;
+ } else {
+ tmp = tmp << 16;
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_revh_d: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (48 - i * 32);
+ } else {
+ tmp = tmp << (i * 32 - 48);
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_4b: {
+ uint32_t input = static_cast<uint32_t>(rj(instr));
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bit in byte for each individual byte
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+      output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_8b: {
+ uint64_t input = rj_u(instr);
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+ // Reverse the bit in byte for each individual byte
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint64_t>(o_byte) << 56);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_w: {
+ uint32_t input = static_cast<uint32_t>(rj(instr));
+ uint32_t output = 0;
+ output = ReverseBits(input);
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_bitrev_d: {
+ alu_out = static_cast<int64_t>(ReverseBits(rj_u(instr)));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ext_w_b: {
+ uint8_t input = static_cast<uint8_t>(rj(instr));
+ alu_out = static_cast<int64_t>(static_cast<int8_t>(input));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_ext_w_h: {
+ uint16_t input = static_cast<uint16_t>(rj(instr));
+ alu_out = static_cast<int64_t>(static_cast<int16_t>(input));
+ setRegister(rd_reg(instr), alu_out);
+ break;
+ }
+ case op_fabs_s: {
+ setFpuRegisterFloat(fd_reg(instr), std::abs(fj_float(instr)));
+ break;
+ }
+ case op_fabs_d: {
+ setFpuRegisterDouble(fd_reg(instr), std::abs(fj_double(instr)));
+ break;
+ }
+ case op_fneg_s: {
+ setFpuRegisterFloat(fd_reg(instr), -fj_float(instr));
+ break;
+ }
+ case op_fneg_d: {
+ setFpuRegisterDouble(fd_reg(instr), -fj_double(instr));
+ break;
+ }
+ case op_fsqrt_s: {
+ if (fj_float(instr) >= 0) {
+ setFpuRegisterFloat(fd_reg(instr), std::sqrt(fj_float(instr)));
+ } else {
+ setFpuRegisterFloat(fd_reg(instr), std::sqrt(-1)); // qnan
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ }
+ break;
+ }
+ case op_fsqrt_d: {
+ if (fj_double(instr) >= 0) {
+ setFpuRegisterDouble(fd_reg(instr), std::sqrt(fj_double(instr)));
+ } else {
+ setFpuRegisterDouble(fd_reg(instr), std::sqrt(-1)); // qnan
+ setFCSRBit(kFCSRInvalidOpFlagBit, true);
+ }
+ break;
+ }
+ case op_fmov_s: {
+ setFpuRegisterFloat(fd_reg(instr), fj_float(instr));
+ break;
+ }
+ case op_fmov_d: {
+ setFpuRegisterDouble(fd_reg(instr), fj_double(instr));
+ break;
+ }
+ case op_movgr2fr_w: {
+ setFpuRegisterWord(fd_reg(instr), static_cast<int32_t>(rj(instr)));
+ break;
+ }
+ case op_movgr2fr_d: {
+ setFpuRegister(fd_reg(instr), rj(instr));
+ break;
+ }
+ case op_movgr2frh_w: {
+ setFpuRegisterHiWord(fd_reg(instr), static_cast<int32_t>(rj(instr)));
+ break;
+ }
+ case op_movfr2gr_s: {
+ setRegister(rd_reg(instr),
+ static_cast<int64_t>(getFpuRegisterWord(fj_reg(instr))));
+ break;
+ }
+ case op_movfr2gr_d: {
+ setRegister(rd_reg(instr), getFpuRegister(fj_reg(instr)));
+ break;
+ }
+ case op_movfrh2gr_s: {
+ setRegister(rd_reg(instr), getFpuRegisterHiWord(fj_reg(instr)));
+ break;
+ }
+ case op_movgr2fcsr: {
+ // fcsr could be 0-3
+ MOZ_ASSERT(rd_reg(instr) < 4);
+ FCSR_ = static_cast<uint32_t>(rj(instr));
+ break;
+ }
+ case op_movfcsr2gr: {
+ setRegister(rd_reg(instr), FCSR_);
+ break;
+ }
+ case op_fcvt_s_d: {
+ setFpuRegisterFloat(fd_reg(instr), static_cast<float>(fj_double(instr)));
+ break;
+ }
+ case op_fcvt_d_s: {
+ setFpuRegisterDouble(fd_reg(instr), static_cast<double>(fj_float(instr)));
+ break;
+ }
+ case op_ftintrm_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrm_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrm_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrm_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrp_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrz_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_w_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_w_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_l_s: {
+ float fj = fj_float(instr);
+ float rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftintrne_l_d: {
+ double fj = fj_double(instr);
+ double rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_w_s: {
+ float fj = fj_float(instr);
+ float rounded;
+ int32_t result;
+ roundAccordingToFCSR<float>(fj, &rounded, &result);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_w_d: {
+ double fj = fj_double(instr);
+ double rounded;
+ int32_t result;
+ roundAccordingToFCSR<double>(fj, &rounded, &result);
+ setFpuRegisterWord(fd_reg(instr), result);
+ if (setFCSRRoundError<int32_t>(fj, rounded)) {
+ setFpuRegisterWordInvalidResult(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_l_s: {
+ float fj = fj_float(instr);
+ float rounded;
+ int64_t result;
+ round64AccordingToFCSR<float>(fj, &rounded, &result);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ftint_l_d: {
+ double fj = fj_double(instr);
+ double rounded;
+ int64_t result;
+ round64AccordingToFCSR<double>(fj, &rounded, &result);
+ setFpuRegister(fd_reg(instr), result);
+ if (setFCSRRoundError<int64_t>(fj, rounded)) {
+ setFpuRegisterInvalidResult64(fj, rounded, fd_reg(instr));
+ }
+ break;
+ }
+ case op_ffint_s_w: {
+ alu_out = getFpuRegisterSignedWord(fj_reg(instr));
+ setFpuRegisterFloat(fd_reg(instr), static_cast<float>(alu_out));
+ break;
+ }
+ case op_ffint_s_l: {
+ alu_out = getFpuRegister(fj_reg(instr));
+ setFpuRegisterFloat(fd_reg(instr), static_cast<float>(alu_out));
+ break;
+ }
+ case op_ffint_d_w: {
+ alu_out = getFpuRegisterSignedWord(fj_reg(instr));
+ setFpuRegisterDouble(fd_reg(instr), static_cast<double>(alu_out));
+ break;
+ }
+ case op_ffint_d_l: {
+ alu_out = getFpuRegister(fj_reg(instr));
+ setFpuRegisterDouble(fd_reg(instr), static_cast<double>(alu_out));
+ break;
+ }
+ case op_frint_s: {
+ float fj = fj_float(instr);
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fj);
+ float lower = std::floor(fj);
+ switch (getFCSRRoundingMode()) {
+ case kRoundToNearest:
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
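+            // Exact tie: upper is the even neighbor iff upper / 2 has no
+            // fractional part; round-to-nearest-even then picks upper,
+            // otherwise lower.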
+ temp_result = upper / 2;
+            float remainder = std::modf(temp_result, &temp);
+            if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ setFpuRegisterFloat(fd_reg(instr), result);
+ if (result != fj) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
+ case op_frint_d: {
+ double fj = fj_double(instr);
+ double result, temp, temp_result;
+ double upper = std::ceil(fj);
+ double lower = std::floor(fj);
+ switch (getFCSRRoundingMode()) {
+ case kRoundToNearest:
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
+ temp_result = upper / 2;
+            double remainder = std::modf(temp_result, &temp);
+            if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ result = lower;
+ break;
+ }
+ setFpuRegisterDouble(fd_reg(instr), result);
+ if (result != fj) {
+ setFCSRBit(kFCSRInexactFlagBit, true);
+ }
+ break;
+ }
+ case op_movfr2cf:
+ printf("Sim UNIMPLEMENTED: MOVFR2CF\n");
+ UNIMPLEMENTED();
+ break;
+ case op_movgr2cf:
+ printf("Sim UNIMPLEMENTED: MOVGR2CF\n");
+ UNIMPLEMENTED();
+ break;
+ case op_clo_w:
+ printf("Sim UNIMPLEMENTED: FCO_W\n");
+ UNIMPLEMENTED();
+ break;
+ case op_cto_w:
+ printf("Sim UNIMPLEMENTED: FTO_W\n");
+ UNIMPLEMENTED();
+ break;
+ case op_clo_d:
+ printf("Sim UNIMPLEMENTED: FLO_D\n");
+ UNIMPLEMENTED();
+ break;
+ case op_cto_d:
+ printf("Sim UNIMPLEMENTED: FTO_D\n");
+ UNIMPLEMENTED();
+ break;
+    // Any other opcode of this type is invalid, so the default case is
+    // unreachable.
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::decodeTypeOp24(SimInstruction* instr) {
+ switch (instr->bits(31, 8) << 8) {
+ case op_movcf2fr:
+ UNIMPLEMENTED();
+ break;
+ case op_movcf2gr:
+ setRegister(rd_reg(instr), getCFRegister(cj_reg(instr)));
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Executes the current instruction.
+void Simulator::instructionDecode(SimInstruction* instr) {
+ if (!SimulatorProcess::ICacheCheckingDisableCount) {
+ AutoLockSimulatorCache als;
+ SimulatorProcess::checkICacheLocked(instr);
+ }
+ pc_modified_ = false;
+
+ switch (instr->instructionType()) {
+ case SimInstruction::kOp6Type:
+ decodeTypeOp6(instr);
+ break;
+ case SimInstruction::kOp7Type:
+ decodeTypeOp7(instr);
+ break;
+ case SimInstruction::kOp8Type:
+ decodeTypeOp8(instr);
+ break;
+ case SimInstruction::kOp10Type:
+ decodeTypeOp10(instr);
+ break;
+ case SimInstruction::kOp11Type:
+ decodeTypeOp11(instr);
+ break;
+ case SimInstruction::kOp12Type:
+ decodeTypeOp12(instr);
+ break;
+ case SimInstruction::kOp14Type:
+ decodeTypeOp14(instr);
+ break;
+ case SimInstruction::kOp15Type:
+ decodeTypeOp15(instr);
+ break;
+ case SimInstruction::kOp16Type:
+ decodeTypeOp16(instr);
+ break;
+ case SimInstruction::kOp17Type:
+ decodeTypeOp17(instr);
+ break;
+ case SimInstruction::kOp22Type:
+ decodeTypeOp22(instr);
+ break;
+ case SimInstruction::kOp24Type:
+ decodeTypeOp24(instr);
+ break;
+ default:
+ UNSUPPORTED();
+ }
+ if (!pc_modified_) {
+ setRegister(pc,
+ reinterpret_cast<int64_t>(instr) + SimInstruction::kInstrSize);
+ }
+}
+
+void Simulator::enable_single_stepping(SingleStepCallback cb, void* arg) {
+ single_stepping_ = true;
+ single_step_callback_ = cb;
+ single_step_callback_arg_ = arg;
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+}
+
+void Simulator::disable_single_stepping() {
+ if (!single_stepping_) {
+ return;
+ }
+ single_step_callback_(single_step_callback_arg_, this, (void*)get_pc());
+ single_stepping_ = false;
+ single_step_callback_ = nullptr;
+ single_step_callback_arg_ = nullptr;
+}
+
+template <bool enableStopSimAt>
+void Simulator::execute() {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+
+ while (program_counter != end_sim_pc) {
+ if (enableStopSimAt && (icount_ == Simulator::StopSimAt)) {
+ loong64Debugger dbg(this);
+ dbg.debug();
+ } else {
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this,
+ (void*)program_counter);
+ }
+ SimInstruction* instr =
+ reinterpret_cast<SimInstruction*>(program_counter);
+ instructionDecode(instr);
+ icount_++;
+ }
+ program_counter = get_pc();
+ }
+
+ if (single_stepping_) {
+ single_step_callback_(single_step_callback_arg_, this, nullptr);
+ }
+}
+
+void Simulator::callInternal(uint8_t* entry) {
+ // Prepare to execute the code at entry.
+ setRegister(pc, reinterpret_cast<int64_t>(entry));
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // ra, the simulation stops when returning to this call point.
+ setRegister(ra, end_sim_pc);
+
+  // Remember the values of callee-saved registers so they can be verified
+  // and restored after the simulated call returns.
+ int64_t s0_val = getRegister(s0);
+ int64_t s1_val = getRegister(s1);
+ int64_t s2_val = getRegister(s2);
+ int64_t s3_val = getRegister(s3);
+ int64_t s4_val = getRegister(s4);
+ int64_t s5_val = getRegister(s5);
+ int64_t s6_val = getRegister(s6);
+ int64_t s7_val = getRegister(s7);
+ int64_t s8_val = getRegister(s8);
+ int64_t gp_val = getRegister(gp);
+ int64_t sp_val = getRegister(sp);
+ int64_t tp_val = getRegister(tp);
+ int64_t fp_val = getRegister(fp);
+
+ // Set up the callee-saved registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ setRegister(s0, callee_saved_value);
+ setRegister(s1, callee_saved_value);
+ setRegister(s2, callee_saved_value);
+ setRegister(s3, callee_saved_value);
+ setRegister(s4, callee_saved_value);
+ setRegister(s5, callee_saved_value);
+ setRegister(s6, callee_saved_value);
+ setRegister(s7, callee_saved_value);
+ setRegister(s8, callee_saved_value);
+ setRegister(gp, callee_saved_value);
+ setRegister(tp, callee_saved_value);
+ setRegister(fp, callee_saved_value);
+
+ // Start the simulation.
+ if (Simulator::StopSimAt != -1) {
+ execute<true>();
+ } else {
+ execute<false>();
+ }
+
+ // Check that the callee-saved registers have been preserved.
+ MOZ_ASSERT(callee_saved_value == getRegister(s0));
+ MOZ_ASSERT(callee_saved_value == getRegister(s1));
+ MOZ_ASSERT(callee_saved_value == getRegister(s2));
+ MOZ_ASSERT(callee_saved_value == getRegister(s3));
+ MOZ_ASSERT(callee_saved_value == getRegister(s4));
+ MOZ_ASSERT(callee_saved_value == getRegister(s5));
+ MOZ_ASSERT(callee_saved_value == getRegister(s6));
+ MOZ_ASSERT(callee_saved_value == getRegister(s7));
+ MOZ_ASSERT(callee_saved_value == getRegister(s8));
+ MOZ_ASSERT(callee_saved_value == getRegister(gp));
+ MOZ_ASSERT(callee_saved_value == getRegister(tp));
+ MOZ_ASSERT(callee_saved_value == getRegister(fp));
+
+ // Restore callee-saved registers with the original value.
+ setRegister(s0, s0_val);
+ setRegister(s1, s1_val);
+ setRegister(s2, s2_val);
+ setRegister(s3, s3_val);
+ setRegister(s4, s4_val);
+ setRegister(s5, s5_val);
+ setRegister(s6, s6_val);
+ setRegister(s7, s7_val);
+ setRegister(s8, s8_val);
+ setRegister(gp, gp_val);
+ setRegister(sp, sp_val);
+ setRegister(tp, tp_val);
+ setRegister(fp, fp_val);
+}
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ int64_t original_stack = getRegister(sp);
+ // Compute position of stack on entry to generated code.
+ int64_t entry_stack = original_stack;
+ if (argument_count > kCArgSlotCount) {
+ entry_stack = entry_stack - argument_count * sizeof(int64_t);
+ } else {
+ entry_stack = entry_stack - kCArgsSlotsSize;
+ }
+
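+  // ABIStackAlignment is a power of two, so clearing its low bits rounds the
+  // stack pointer down to an aligned address.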
+ entry_stack &= ~U64(ABIStackAlignment - 1);
+
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+
+ // Setup the arguments.
+ for (int i = 0; i < argument_count; i++) {
+ js::jit::Register argReg;
+ if (GetIntArgReg(i, &argReg)) {
+ setRegister(argReg.code(), va_arg(parameters, int64_t));
+ } else {
+ stack_argument[i] = va_arg(parameters, int64_t);
+ }
+ }
+
+ va_end(parameters);
+ setRegister(sp, entry_stack);
+
+ callInternal(entry);
+
+ // Pop stack passed arguments.
+ MOZ_ASSERT(entry_stack == getRegister(sp));
+ setRegister(sp, original_stack);
+
+ int64_t result = getRegister(a0);
+ return result;
+}
+
+uintptr_t Simulator::pushAddress(uintptr_t address) {
+  int64_t new_sp = getRegister(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ setRegister(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::popAddress() {
+  int64_t current_sp = getRegister(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ setRegister(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+} // namespace jit
+} // namespace js
+
+js::jit::Simulator* JSContext::simulator() const { return simulator_; }
diff --git a/js/src/jit/loong64/Simulator-loong64.h b/js/src/jit/loong64/Simulator-loong64.h
new file mode 100644
index 0000000000..233f218256
--- /dev/null
+++ b/js/src/jit/loong64/Simulator-loong64.h
@@ -0,0 +1,650 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80: */
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_loong64_Simulator_loong64_h
+#define jit_loong64_Simulator_loong64_h
+
+#ifdef JS_SIMULATOR_LOONG64
+
+# include "mozilla/Atomics.h"
+
+# include "jit/IonTypes.h"
+# include "js/ProfilingFrameIterator.h"
+# include "threading/Thread.h"
+# include "vm/MutexIDs.h"
+# include "wasm/WasmSignalHandlers.h"
+
+namespace js {
+
+namespace jit {
+
+class JitActivation;
+
+class Simulator;
+class Redirection;
+class CachePage;
+class AutoLockSimulator;
+
+// When the SingleStepCallback is called, the simulator is about to execute
+// sim->get_pc() and the current machine state represents the completed
+// execution of the previous pc.
+typedef void (*SingleStepCallback)(void* arg, Simulator* sim, void* pc);
+
+const intptr_t kPointerAlignment = 8;
+const intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
+
+const intptr_t kDoubleAlignment = 8;
+const intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+
+// In the simulator, the PC register is simulated as the 33rd register
+// (index 32).
+const int kPCRegister = 32;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+// TODO fcsr0 fcsr1 fcsr2 fcsr3
+const int kFCSRRegister = 0;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1u << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31);
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
+
+const uint32_t kFPURoundingModeShift = 8;
+const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift;
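+// The two-bit rounding-mode field occupies FCSR bits [9:8], as reflected by
+// the shift and mask above.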
+
+// FPU rounding modes.
+enum FPURoundingMode {
+ RN = 0b00 << kFPURoundingModeShift, // Round to Nearest.
+ RZ = 0b01 << kFPURoundingModeShift, // Round towards zero.
+ RP = 0b10 << kFPURoundingModeShift, // Round towards Plus Infinity.
+ RM = 0b11 << kFPURoundingModeShift, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
+};
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 16;
+const uint32_t kFCSRUnderflowFlagBit = 17;
+const uint32_t kFCSROverflowFlagBit = 18;
+const uint32_t kFCSRDivideByZeroFlagBit = 19;
+const uint32_t kFCSRInvalidOpFlagBit = 20;
+
+const uint32_t kFCSRInexactCauseBit = 24;
+const uint32_t kFCSRUnderflowCauseBit = 25;
+const uint32_t kFCSROverflowCauseBit = 26;
+const uint32_t kFCSRDivideByZeroCauseBit = 27;
+const uint32_t kFCSRInvalidOpCauseBit = 28;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+ kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask |
+ kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
+
+// On the LoongArch64 simulator, breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+const uint32_t kWasmTrapCode = 6;
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+typedef uint32_t Instr;
+class SimInstruction;
+
+// Per thread simulator state.
+class Simulator {
+ friend class loong64Debugger;
+
+ public:
+ // Registers are declared in order.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ ra,
+ gp,
+ sp,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ t8,
+ tp,
+ fp,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ v0 = a0,
+ v1 = a1,
+ };
+
+ // Condition flag registers.
+ enum CFRegister {
+ fcc0,
+ fcc1,
+ fcc2,
+ fcc3,
+ fcc4,
+ fcc5,
+ fcc6,
+ fcc7,
+ kNumCFRegisters
+ };
+
+ // Floating point registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ // Returns nullptr on OOM.
+ static Simulator* Create();
+
+ static void Destroy(Simulator* simulator);
+
+ // Constructor/destructor are for internal use only; use the static methods
+ // above.
+ Simulator();
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ static Simulator* Current();
+
+ static inline uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+
+ uintptr_t* addressOfStackLimit();
+
+ // Accessors for register state. Reading the pc value adheres to the LOONG64
+  // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void setRegister(int reg, int64_t value);
+ int64_t getRegister(int reg) const;
+ // Same for FPURegisters.
+ void setFpuRegister(int fpureg, int64_t value);
+ void setFpuRegisterWord(int fpureg, int32_t value);
+ void setFpuRegisterHiWord(int fpureg, int32_t value);
+ void setFpuRegisterFloat(int fpureg, float value);
+ void setFpuRegisterDouble(int fpureg, double value);
+
+ void setFpuRegisterWordInvalidResult(float original, float rounded,
+ int fpureg);
+ void setFpuRegisterWordInvalidResult(double original, double rounded,
+ int fpureg);
+ void setFpuRegisterInvalidResult(float original, float rounded, int fpureg);
+ void setFpuRegisterInvalidResult(double original, double rounded, int fpureg);
+ void setFpuRegisterInvalidResult64(float original, float rounded, int fpureg);
+ void setFpuRegisterInvalidResult64(double original, double rounded,
+ int fpureg);
+
+ int64_t getFpuRegister(int fpureg) const;
+ // int32_t getFpuRegisterLo(int fpureg) const;
+ // int32_t getFpuRegisterHi(int fpureg) const;
+ int32_t getFpuRegisterWord(int fpureg) const;
+ int32_t getFpuRegisterSignedWord(int fpureg) const;
+ int32_t getFpuRegisterHiWord(int fpureg) const;
+ float getFpuRegisterFloat(int fpureg) const;
+ double getFpuRegisterDouble(int fpureg) const;
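+
+  // For example, setFpuRegisterDouble(f0, 1.0) stores the IEEE-754 bit
+  // pattern of 1.0 into the 64-bit backing slot for f0, and
+  // getFpuRegisterDouble(f0) reinterprets the same bits on the way back out.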
+
+ void setCFRegister(int cfreg, bool value);
+ bool getCFRegister(int cfreg) const;
+
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+
+ void setFCSRBit(uint32_t cc, bool value);
+ bool testFCSRBit(uint32_t cc);
+ unsigned int getFCSRRoundingMode();
+ template <typename T>
+ bool setFCSRRoundError(double original, double rounded);
+ bool setFCSRRound64Error(float original, float rounded);
+
+ template <typename T>
+ void roundAccordingToFCSR(T toRound, T* rounded, int32_t* rounded_int);
+
+ template <typename T>
+ void round64AccordingToFCSR(T toRound, T* rounded, int64_t* rounded_int);
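+
+  // Sketch of the intended flag behaviour, in terms of the kFCSR* masks
+  // defined above: a result that differs from the exact input raises the
+  // inexact flag, e.g.
+  //
+  //   if (rounded != original) {
+  //     FCSR_ |= kFCSRInexactFlagMask;
+  //   }
+  //
+  // and the setFCSRRoundError() helpers return true when the rounded result
+  // is invalid, so callers can substitute the invalid-result value.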
+
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ int64_t get_pc() const;
+
+ template <typename T>
+ T get_pc_as() const {
+ return reinterpret_cast<T>(get_pc());
+ }
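+
+  // e.g. uint8_t* currentPC = sim->get_pc_as<uint8_t*>();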
+
+ void enable_single_stepping(SingleStepCallback cb, void* arg);
+ void disable_single_stepping();
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t stackLimit() const;
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+
+ // Executes LOONG64 instructions until the PC reaches end_sim_pc.
+ template <bool enableStopSimAt>
+ void execute();
+
+ // Sets up the simulator state and grabs the result on return.
+ int64_t call(uint8_t* entry, int argument_count, ...);
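+
+  // For example, a redirected native-to-JIT transition would go through this
+  // path roughly as follows (entry point and arguments are illustrative):
+  //
+  //   int64_t rv = Simulator::Current()->call(entry, 2, int64_t(argA),
+  //                                           int64_t(argB));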
+
+ // Push an address onto the JS stack.
+ uintptr_t pushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t popAddress();
+
+ // Debugger input.
+ void setLastDebuggerInput(char* input);
+ char* lastDebuggerInput() { return lastDebuggerInput_; }
+
+  // Returns true if the pc register contains one of the 'SpecialValues'
+  // defined below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum SpecialValues {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly setup.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
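+
+  // Conceptually, callInternal() uses end_sim_pc like this (sketch):
+  //
+  //   setRegister(ra, end_sim_pc);  // "return address" back into C++
+  //   set_pc(int64_t(entry));
+  //   while (get_pc() != end_sim_pc) {
+  //     // ... decode and execute one instruction ...
+  //   }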
+
+ bool init();
+
+  // Unsupported instructions use format() to print an error and stop
+  // execution.
+ void format(SimInstruction* instr, const char* format);
+
+ // Read and write memory.
+ inline uint8_t readBU(uint64_t addr);
+ inline int8_t readB(uint64_t addr);
+ inline void writeB(uint64_t addr, uint8_t value);
+ inline void writeB(uint64_t addr, int8_t value);
+
+ inline uint16_t readHU(uint64_t addr, SimInstruction* instr);
+ inline int16_t readH(uint64_t addr, SimInstruction* instr);
+ inline void writeH(uint64_t addr, uint16_t value, SimInstruction* instr);
+ inline void writeH(uint64_t addr, int16_t value, SimInstruction* instr);
+
+ inline uint32_t readWU(uint64_t addr, SimInstruction* instr);
+ inline int32_t readW(uint64_t addr, SimInstruction* instr);
+ inline void writeW(uint64_t addr, uint32_t value, SimInstruction* instr);
+ inline void writeW(uint64_t addr, int32_t value, SimInstruction* instr);
+
+ inline int64_t readDW(uint64_t addr, SimInstruction* instr);
+ inline void writeDW(uint64_t addr, int64_t value, SimInstruction* instr);
+
+ inline double readD(uint64_t addr, SimInstruction* instr);
+ inline void writeD(uint64_t addr, double value, SimInstruction* instr);
+
+ inline int32_t loadLinkedW(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalW(uint64_t addr, int32_t value,
+ SimInstruction* instr);
+
+ inline int64_t loadLinkedD(uint64_t addr, SimInstruction* instr);
+ inline int storeConditionalD(uint64_t addr, int64_t value,
+ SimInstruction* instr);
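+
+  // These model LoongArch ll.w/sc.w (and ll.d/sc.d) pairs: the load sets
+  // LLBit_ and records LLAddr_, and the conditional store only succeeds while
+  // LLBit_ is still set. Assuming a non-zero return signals success, a
+  // simulated atomic increment would retry as:
+  //
+  //   int32_t old;
+  //   do {
+  //     old = loadLinkedW(addr, instr);
+  //   } while (!storeConditionalW(addr, old + 1, instr));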
+
+ // Executing is handled based on the instruction type.
+ void decodeTypeOp6(SimInstruction* instr);
+ void decodeTypeOp7(SimInstruction* instr);
+ void decodeTypeOp8(SimInstruction* instr);
+ void decodeTypeOp10(SimInstruction* instr);
+ void decodeTypeOp11(SimInstruction* instr);
+ void decodeTypeOp12(SimInstruction* instr);
+ void decodeTypeOp14(SimInstruction* instr);
+ void decodeTypeOp15(SimInstruction* instr);
+ void decodeTypeOp16(SimInstruction* instr);
+ void decodeTypeOp17(SimInstruction* instr);
+ void decodeTypeOp22(SimInstruction* instr);
+ void decodeTypeOp24(SimInstruction* instr);
+
+ inline int32_t rj_reg(SimInstruction* instr) const;
+ inline int64_t rj(SimInstruction* instr) const;
+ inline uint64_t rj_u(SimInstruction* instr) const;
+ inline int32_t rk_reg(SimInstruction* instr) const;
+ inline int64_t rk(SimInstruction* instr) const;
+ inline uint64_t rk_u(SimInstruction* instr) const;
+ inline int32_t rd_reg(SimInstruction* instr) const;
+ inline int64_t rd(SimInstruction* instr) const;
+ inline uint64_t rd_u(SimInstruction* instr) const;
+ inline int32_t fa_reg(SimInstruction* instr) const;
+ inline float fa_float(SimInstruction* instr) const;
+ inline double fa_double(SimInstruction* instr) const;
+
+ inline int32_t fj_reg(SimInstruction* instr) const;
+ inline float fj_float(SimInstruction* instr) const;
+ inline double fj_double(SimInstruction* instr) const;
+
+ inline int32_t fk_reg(SimInstruction* instr) const;
+ inline float fk_float(SimInstruction* instr) const;
+ inline double fk_double(SimInstruction* instr) const;
+ inline int32_t fd_reg(SimInstruction* instr) const;
+ inline float fd_float(SimInstruction* instr) const;
+ inline double fd_double(SimInstruction* instr) const;
+
+ inline int32_t cj_reg(SimInstruction* instr) const;
+ inline bool cj(SimInstruction* instr) const;
+
+ inline int32_t cd_reg(SimInstruction* instr) const;
+ inline bool cd(SimInstruction* instr) const;
+
+ inline int32_t ca_reg(SimInstruction* instr) const;
+ inline bool ca(SimInstruction* instr) const;
+ inline uint32_t sa2(SimInstruction* instr) const;
+ inline uint32_t sa3(SimInstruction* instr) const;
+ inline uint32_t ui5(SimInstruction* instr) const;
+ inline uint32_t ui6(SimInstruction* instr) const;
+ inline uint32_t lsbw(SimInstruction* instr) const;
+ inline uint32_t msbw(SimInstruction* instr) const;
+ inline uint32_t lsbd(SimInstruction* instr) const;
+ inline uint32_t msbd(SimInstruction* instr) const;
+ inline uint32_t cond(SimInstruction* instr) const;
+ inline int32_t si12(SimInstruction* instr) const;
+ inline uint32_t ui12(SimInstruction* instr) const;
+ inline int32_t si14(SimInstruction* instr) const;
+ inline int32_t si16(SimInstruction* instr) const;
+ inline int32_t si20(SimInstruction* instr) const;
+
+ // Used for breakpoints.
+ void softwareInterrupt(SimInstruction* instr);
+
+ // Stop helper functions.
+ bool isWatchpoint(uint32_t code);
+ void printWatchpoint(uint32_t code);
+ void handleStop(uint32_t code, SimInstruction* instr);
+ bool isStopInstruction(SimInstruction* instr);
+ bool isEnabledStop(uint32_t code);
+ void enableStop(uint32_t code);
+ void disableStop(uint32_t code);
+ void increaseStopCounter(uint32_t code);
+ void printStopInfo(uint32_t code);
+
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handleWasmSegFault(uint64_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes,
+ &newPC)) {
+ return false;
+ }
+
+ LLBit_ = false;
+ set_pc(int64_t(newPC));
+ return true;
+ }
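+
+  // The memory accessors above are expected to consult this helper before
+  // touching memory, along the lines of (sketch; the bail-out value is
+  // illustrative):
+  //
+  //   if (handleWasmSegFault(addr, sizeof(int32_t))) {
+  //     return -1;  // execution resumes at the wasm trap stub via newPC
+  //   }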
+
+ // Executes one instruction.
+ void instructionDecode(SimInstruction* instr);
+
+ public:
+ static int64_t StopSimAt;
+
+ // Runtime call support.
+ static void* RedirectNativeFunction(void* nativeFunction,
+ ABIFunctionType type);
+
+ private:
+ enum Exception {
+ kNone,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+ int16_t exceptions[kNumExceptions];
+
+ // Exceptions.
+ void signalExceptions();
+
+ // Handle return value for runtime FP functions.
+ void setCallResultDouble(double result);
+ void setCallResultFloat(float result);
+ void setCallResult(int64_t res);
+ void setCallResult(__int128 res);
+
+ void callInternal(uint8_t* entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Floating point Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // Condition flags Registers.
+ bool CFregisters_[kNumCFRegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ bool LLBit_;
+ uintptr_t LLAddr_;
+ int64_t lastLLValue_;
+
+ // Simulator support.
+ char* stack_;
+ uintptr_t stackLimit_;
+ bool pc_modified_;
+ int64_t icount_;
+ int64_t break_count_;
+
+ // Debugger input.
+ char* lastDebuggerInput_;
+
+ // Registered breakpoints.
+ SimInstruction* break_pc_;
+ Instr break_instr_;
+
+ // Single-stepping support
+ bool single_stepping_;
+ SingleStepCallback single_step_callback_;
+ void* single_step_callback_arg_;
+
+ // A stop is watched if its code is less than kNumOfWatchedStops.
+ // Only watched stops support enabling/disabling and the counter feature.
+ static const uint32_t kNumOfWatchedStops = 256;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1U << 31;
+
+  // A stop is enabled, meaning the simulator will stop when it reaches the
+  // instruction, if bit 31 of watchedStops_[code].count_ is unset.
+  // The value watchedStops_[code].count_ & ~(1 << 31) indicates how many
+  // times the breakpoint has been hit or run through.
+ struct StopCountAndDesc {
+ uint32_t count_;
+ char* desc_;
+ };
+ StopCountAndDesc watchedStops_[kNumOfWatchedStops];
+};
+
+// Process-wide simulator state.
+class SimulatorProcess {
+ friend class Redirection;
+ friend class AutoLockSimulatorCache;
+
+ private:
+ // ICache checking.
+ struct ICacheHasher {
+ typedef void* Key;
+ typedef void* Lookup;
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ };
+
+ public:
+ typedef HashMap<void*, CachePage*, ICacheHasher, SystemAllocPolicy> ICacheMap;
+
+ static mozilla::Atomic<size_t, mozilla::ReleaseAcquire>
+ ICacheCheckingDisableCount;
+ static void FlushICache(void* start, size_t size);
+
+ static void checkICacheLocked(SimInstruction* instr);
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
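+
+  // Intended pairing (sketch): initialize() once at engine startup,
+  // destroy() once at shutdown.
+  //
+  //   if (!SimulatorProcess::initialize()) {
+  //     return false;  // OOM
+  //   }
+  //   // ... create per-thread Simulator instances and run code ...
+  //   SimulatorProcess::destroy();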
+
+ SimulatorProcess();
+ ~SimulatorProcess();
+
+ private:
+ static SimulatorProcess* singleton_;
+
+ // This lock creates a critical section around 'redirection_' and
+ // 'icache_', which are referenced both by the execution engine
+ // and by the off-thread compiler (see Redirection::Get in the cpp file).
+ Mutex cacheLock_;
+
+ Redirection* redirection_;
+ ICacheMap icache_;
+
+ public:
+ static ICacheMap& icache() {
+ // Technically we need the lock to access the innards of the
+ // icache, not to take its address, but the latter condition
+ // serves as a useful complement to the former.
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->icache_;
+ }
+
+ static Redirection* redirection() {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static void setRedirection(js::jit::Redirection* redirection) {
+ singleton_->cacheLock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+};
+
+} // namespace jit
+} // namespace js
+
+#endif /* JS_SIMULATOR_LOONG64 */
+
+#endif /* jit_loong64_Simulator_loong64_h */
diff --git a/js/src/jit/loong64/Trampoline-loong64.cpp b/js/src/jit/loong64/Trampoline-loong64.cpp
new file mode 100644
index 0000000000..c38b5b4994
--- /dev/null
+++ b/js/src/jit/loong64/Trampoline-loong64.cpp
@@ -0,0 +1,760 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jit/Bailouts.h"
+#include "jit/BaselineFrame.h"
+#include "jit/CalleeToken.h"
+#include "jit/JitFrames.h"
+#include "jit/JitRuntime.h"
+#include "jit/loong64/SharedICHelpers-loong64.h"
+#include "jit/PerfSpewer.h"
+#include "jit/VMFunctions.h"
+#include "vm/JitActivation.h" // js::jit::JitActivation
+#include "vm/JSContext.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+
+// All registers to save and restore. This includes the stack pointer, since we
+// use the ability to reference register values on the stack by index.
+static const LiveRegisterSet AllRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+static_assert(sizeof(uintptr_t) == sizeof(uint64_t), "Not 32-bit clean.");
+
+struct EnterJITRegs {
+ double f31;
+ double f30;
+ double f29;
+ double f28;
+ double f27;
+ double f26;
+ double f25;
+ double f24;
+
+ // uintptr_t align;
+
+ // non-volatile registers.
+ uint64_t ra;
+ uint64_t fp;
+ uint64_t s8;
+ uint64_t s7;
+ uint64_t s6;
+ uint64_t s5;
+ uint64_t s4;
+ uint64_t s3;
+ uint64_t s2;
+ uint64_t s1;
+ uint64_t s0;
+  // Save reg_vp (a7) on the stack; it is used again after the JIT code
+  // returns.
+ uint64_t a7;
+};
+
+static void GenerateReturn(MacroAssembler& masm, int returnCode) {
+ MOZ_ASSERT(masm.framePushed() == sizeof(EnterJITRegs));
+
+ // Restore non-volatile registers
+ masm.as_ld_d(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_ld_d(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_ld_d(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_ld_d(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_ld_d(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_ld_d(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_ld_d(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_ld_d(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_ld_d(s8, StackPointer, offsetof(EnterJITRegs, s8));
+ masm.as_ld_d(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_ld_d(ra, StackPointer, offsetof(EnterJITRegs, ra));
+
+ // Restore non-volatile floating point registers
+ masm.as_fld_d(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_fld_d(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_fld_d(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_fld_d(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_fld_d(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_fld_d(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_fld_d(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_fld_d(f31, StackPointer, offsetof(EnterJITRegs, f31));
+
+ masm.freeStack(sizeof(EnterJITRegs));
+
+ masm.branch(ra);
+}
+
+static void GeneratePrologue(MacroAssembler& masm) {
+ masm.reserveStack(sizeof(EnterJITRegs));
+
+ masm.as_st_d(s0, StackPointer, offsetof(EnterJITRegs, s0));
+ masm.as_st_d(s1, StackPointer, offsetof(EnterJITRegs, s1));
+ masm.as_st_d(s2, StackPointer, offsetof(EnterJITRegs, s2));
+ masm.as_st_d(s3, StackPointer, offsetof(EnterJITRegs, s3));
+ masm.as_st_d(s4, StackPointer, offsetof(EnterJITRegs, s4));
+ masm.as_st_d(s5, StackPointer, offsetof(EnterJITRegs, s5));
+ masm.as_st_d(s6, StackPointer, offsetof(EnterJITRegs, s6));
+ masm.as_st_d(s7, StackPointer, offsetof(EnterJITRegs, s7));
+ masm.as_st_d(s8, StackPointer, offsetof(EnterJITRegs, s8));
+ masm.as_st_d(fp, StackPointer, offsetof(EnterJITRegs, fp));
+ masm.as_st_d(ra, StackPointer, offsetof(EnterJITRegs, ra));
+ masm.as_st_d(a7, StackPointer, offsetof(EnterJITRegs, a7));
+
+ masm.as_fst_d(f24, StackPointer, offsetof(EnterJITRegs, f24));
+ masm.as_fst_d(f25, StackPointer, offsetof(EnterJITRegs, f25));
+ masm.as_fst_d(f26, StackPointer, offsetof(EnterJITRegs, f26));
+ masm.as_fst_d(f27, StackPointer, offsetof(EnterJITRegs, f27));
+ masm.as_fst_d(f28, StackPointer, offsetof(EnterJITRegs, f28));
+ masm.as_fst_d(f29, StackPointer, offsetof(EnterJITRegs, f29));
+ masm.as_fst_d(f30, StackPointer, offsetof(EnterJITRegs, f30));
+ masm.as_fst_d(f31, StackPointer, offsetof(EnterJITRegs, f31));
+}
+
+// Generates a trampoline for calling JIT-compiled code from a C++ function.
+// The trampoline uses the EnterJitCode signature, with the standard
+// LoongArch64 calling convention.
+void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateEnterJIT");
+
+ enterJITOffset_ = startTrampolineCode(masm);
+
+ const Register reg_code = IntArgReg0;
+ const Register reg_argc = IntArgReg1;
+ const Register reg_argv = IntArgReg2;
+ const mozilla::DebugOnly<Register> reg_frame = IntArgReg3;
+ const Register reg_token = IntArgReg4;
+ const Register reg_chain = IntArgReg5;
+ const Register reg_values = IntArgReg6;
+ const Register reg_vp = IntArgReg7;
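+
+  // These pick up the eight EnterJitCode arguments from the integer argument
+  // registers; the signature is, roughly:
+  //
+  //   void EnterJitCode(void* code, unsigned argc, Value* argv,
+  //                     InterpreterFrame* fp, CalleeToken token,
+  //                     JSObject* envChain, size_t numStackValues, Value* vp);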
+
+ MOZ_ASSERT(OsrFrameReg == reg_frame);
+
+ GeneratePrologue(masm);
+
+ // Save stack pointer as baseline frame.
+ masm.movePtr(StackPointer, FramePointer);
+
+ // Load the number of actual arguments into s3.
+ masm.unboxInt32(Address(reg_vp, 0), s3);
+
+ /***************************************************************
+ Loop over argv vector, push arguments onto stack in reverse order
+ ***************************************************************/
+
+  // If we are constructing, the argument count also needs to include
+  // newTarget.
+ {
+ Label noNewTarget;
+ masm.branchTest32(Assembler::Zero, reg_token,
+ Imm32(CalleeToken_FunctionConstructing), &noNewTarget);
+
+ masm.add32(Imm32(1), reg_argc);
+
+ masm.bind(&noNewTarget);
+ }
+
+  // Make the stack aligned: reserve one extra Value slot when argc is odd.
+ masm.ma_and(s0, reg_argc, Imm32(1));
+ masm.ma_sub_d(s1, zero, Imm32(sizeof(Value)));
+ masm.as_maskeqz(s1, s1, s0);
+ masm.as_add_d(StackPointer, StackPointer, s1);
+
+ masm.as_slli_d(s0, reg_argc, 3); // Value* argv
+ masm.addPtr(reg_argv, s0); // s0 = &argv[argc]
+
+ // Loop over arguments, copying them from an unknown buffer onto the Ion
+ // stack so they can be accessed from JIT'ed code.
+ Label header, footer;
+  // If there aren't any arguments, don't do anything.
+ masm.ma_b(s0, reg_argv, &footer, Assembler::BelowOrEqual, ShortJump);
+ {
+ masm.bind(&header);
+
+ masm.subPtr(Imm32(sizeof(Value)), s0);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+
+ ValueOperand value = ValueOperand(s6);
+ masm.loadValue(Address(s0, 0), value);
+ masm.storeValue(value, Address(StackPointer, 0));
+
+ masm.ma_b(s0, reg_argv, &header, Assembler::Above, ShortJump);
+ }
+ masm.bind(&footer);
+
+ masm.push(reg_token);
+ masm.pushFrameDescriptorForJitCall(FrameType::CppToJSJit, s3, s3);
+
+ CodeLabel returnLabel;
+ Label oomReturnLabel;
+ {
+ // Handle Interpreter -> Baseline OSR.
+ AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
+ MOZ_ASSERT(!regs.has(FramePointer));
+ regs.take(OsrFrameReg);
+ regs.take(reg_code);
+ MOZ_ASSERT(!regs.has(ReturnReg), "ReturnReg matches reg_code");
+
+ Label notOsr;
+ masm.ma_b(OsrFrameReg, OsrFrameReg, &notOsr, Assembler::Zero, ShortJump);
+
+ Register numStackValues = reg_values;
+ regs.take(numStackValues);
+ Register scratch = regs.takeAny();
+
+ // Push return address.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.ma_li(scratch, &returnLabel);
+ masm.storePtr(scratch, Address(StackPointer, 0));
+
+ // Push previous frame pointer.
+ masm.subPtr(Imm32(sizeof(uintptr_t)), StackPointer);
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // Reserve frame.
+ Register framePtr = FramePointer;
+ masm.movePtr(StackPointer, framePtr);
+ masm.subPtr(Imm32(BaselineFrame::Size()), StackPointer);
+
+ Register framePtrScratch = regs.takeAny();
+ masm.movePtr(sp, framePtrScratch);
+
+ // Reserve space for locals and stack values.
+ masm.as_slli_d(scratch, numStackValues, 3);
+ masm.subPtr(scratch, StackPointer);
+
+ // Enter exit frame.
+ masm.reserveStack(3 * sizeof(uintptr_t));
+ masm.storePtr(
+ ImmWord(MakeFrameDescriptor(FrameType::BaselineJS)),
+ Address(StackPointer, 2 * sizeof(uintptr_t))); // Frame descriptor
+ masm.storePtr(
+ zero, Address(StackPointer, sizeof(uintptr_t))); // fake return address
+ masm.storePtr(FramePointer, Address(StackPointer, 0));
+
+ // No GC things to mark, push a bare token.
+ masm.loadJSContext(scratch);
+ masm.enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
+
+ masm.reserveStack(2 * sizeof(uintptr_t));
+ masm.storePtr(framePtr,
+ Address(StackPointer, sizeof(uintptr_t))); // BaselineFrame
+ masm.storePtr(reg_code, Address(StackPointer, 0)); // jitcode
+
+ using Fn = bool (*)(BaselineFrame* frame, InterpreterFrame* interpFrame,
+ uint32_t numStackValues);
+ masm.setupUnalignedABICall(scratch);
+ masm.passABIArg(framePtrScratch); // BaselineFrame
+ masm.passABIArg(OsrFrameReg); // InterpreterFrame
+ masm.passABIArg(numStackValues);
+ masm.callWithABI<Fn, jit::InitBaselineFrameForOsr>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ regs.add(OsrFrameReg);
+ Register jitcode = regs.takeAny();
+ masm.loadPtr(Address(StackPointer, 0), jitcode);
+ masm.loadPtr(Address(StackPointer, sizeof(uintptr_t)), framePtr);
+ masm.freeStack(2 * sizeof(uintptr_t));
+
+ Label error;
+ masm.freeStack(ExitFrameLayout::SizeWithFooter());
+ masm.branchIfFalseBool(ReturnReg, &error);
+
+ // If OSR-ing, then emit instrumentation for setting lastProfilerFrame
+ // if profiler instrumentation is enabled.
+ {
+ Label skipProfilingInstrumentation;
+ AbsoluteAddress addressOfEnabled(
+ cx->runtime()->geckoProfiler().addressOfEnabled());
+ masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0),
+ &skipProfilingInstrumentation);
+ masm.profilerEnterFrame(framePtr, scratch);
+ masm.bind(&skipProfilingInstrumentation);
+ }
+
+ masm.jump(jitcode);
+
+ // OOM: load error value, discard return address and previous frame
+ // pointer and return.
+ masm.bind(&error);
+ masm.movePtr(framePtr, StackPointer);
+ masm.addPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+ masm.jump(&oomReturnLabel);
+
+ masm.bind(&notOsr);
+ // Load the scope chain in R1.
+ MOZ_ASSERT(R1.scratchReg() != reg_code);
+ masm.as_or(R1.scratchReg(), reg_chain, zero);
+ }
+
+ // The call will push the return address and frame pointer on the stack, thus
+ // we check that the stack would be aligned once the call is complete.
+ masm.assertStackAlignment(JitStackAlignment, 2 * sizeof(uintptr_t));
+
+ // Call the function with pushing return address to stack.
+ masm.callJitNoProfiler(reg_code);
+
+ {
+ // Interpreter -> Baseline OSR will return here.
+ masm.bind(&returnLabel);
+ masm.addCodeLabel(returnLabel);
+ masm.bind(&oomReturnLabel);
+ }
+
+ // Discard arguments and padding. Set sp to the address of the EnterJITRegs
+ // on the stack.
+ masm.mov(FramePointer, StackPointer);
+
+ // Store the returned value into the vp
+ masm.as_ld_d(reg_vp, StackPointer, offsetof(EnterJITRegs, a7));
+ masm.storeValue(JSReturnOperand, Address(reg_vp, 0));
+
+ // Restore non-volatile registers and return.
+ GenerateReturn(masm, ShortJump);
+}
+
+// static
+mozilla::Maybe<::JS::ProfilingFrameIterator::RegisterState>
+JitRuntime::getCppEntryRegisters(JitFrameLayout* frameStackAddress) {
+ // Not supported, or not implemented yet.
+ // TODO: Implement along with the corresponding stack-walker changes, in
+ // coordination with the Gecko Profiler, see bug 1635987 and follow-ups.
+ return mozilla::Nothing{};
+}
+
+void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateInvalidator");
+
+ invalidatorOffset_ = startTrampolineCode(masm);
+
+  // Stack has to be aligned here. If not, we will have to fix it.
+ masm.checkStackAlignment();
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Pass pointer to InvalidationBailoutStack structure.
+ masm.movePtr(StackPointer, a0);
+
+  // Reserve space for the BailoutInfo pointer. Two words to ensure alignment
+  // for setupAlignedABICall.
+ masm.subPtr(Imm32(2 * sizeof(uintptr_t)), StackPointer);
+ // Pass pointer to BailoutInfo
+ masm.movePtr(StackPointer, a1);
+
+ using Fn = bool (*)(InvalidationBailoutStack* sp, BaselineBailoutInfo** info);
+ masm.setupAlignedABICall();
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, InvalidationBailout>(
+ ABIType::General, CheckUnsafeCallWithABI::DontCheckOther);
+
+ masm.pop(a2);
+
+ // Pop the machine state and the dead frame.
+ masm.moveToStackPtr(FramePointer);
+
+  // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm,
+ ArgumentsRectifierKind kind) {
+ // Do not erase the frame pointer in this function.
+
+ AutoCreatedBy acb(masm, "JitRuntime::generateArgumentsRectifier");
+
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ argumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ trialInliningArgumentsRectifierOffset_ = startTrampolineCode(masm);
+ break;
+ }
+ masm.pushReturnAddress();
+ // Caller:
+ // [arg2] [arg1] [this] [[argc] [callee] [descr] [raddr]] <- sp
+
+ // Frame prologue.
+ //
+ // NOTE: if this changes, fix the Baseline bailout code too!
+ // See BaselineStackBuilder::calculatePrevFramePtr and
+ // BaselineStackBuilder::buildRectifierFrame (in BaselineBailouts.cpp).
+ masm.push(FramePointer);
+ masm.mov(StackPointer, FramePointer);
+
+ // Load argc.
+ masm.loadNumActualArgs(FramePointer, s3);
+
+ Register numActArgsReg = a6;
+ Register calleeTokenReg = a7;
+ Register numArgsReg = a5;
+
+ // Load |nformals| into numArgsReg.
+ masm.loadPtr(
+ Address(FramePointer, RectifierFrameLayout::offsetOfCalleeToken()),
+ calleeTokenReg);
+ masm.mov(calleeTokenReg, numArgsReg);
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), numArgsReg);
+ masm.loadFunctionArgCount(numArgsReg, numArgsReg);
+
+ // Stash another copy in t3, since we are going to do destructive operations
+ // on numArgsReg
+ masm.mov(numArgsReg, t3);
+
+ static_assert(
+ CalleeToken_FunctionConstructing == 1,
+ "Ensure that we can use the constructing bit to count the value");
+ masm.mov(calleeTokenReg, t2);
+ masm.ma_and(t2, t2, Imm32(uint32_t(CalleeToken_FunctionConstructing)));
+
+ // Including |this|, and |new.target|, there are (|nformals| + 1 +
+ // isConstructing) arguments to push to the stack. Then we push a
+ // JitFrameLayout. We compute the padding expressed in the number of extra
+ // |undefined| values to push on the stack.
+ static_assert(
+ sizeof(JitFrameLayout) % JitStackAlignment == 0,
+ "No need to consider the JitFrameLayout for aligning the stack");
+ static_assert(
+ JitStackAlignment % sizeof(Value) == 0,
+ "Ensure that we can pad the stack by pushing extra UndefinedValue");
+
+ MOZ_ASSERT(mozilla::IsPowerOfTwo(JitStackValueAlignment));
+ masm.add32(
+ Imm32(JitStackValueAlignment - 1 /* for padding */ + 1 /* for |this| */),
+ numArgsReg);
+ masm.add32(t2, numArgsReg);
+ masm.and32(Imm32(~(JitStackValueAlignment - 1)), numArgsReg);
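+
+  // A worked example, assuming JitStackValueAlignment == 2: with
+  // nformals == 2 and not constructing, numArgsReg = (2 + 1 + 1) & ~1 = 4,
+  // i.e. one slot for |this|, two for the formals, and one |undefined| of
+  // padding.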
+
+ // Load the number of |undefined|s to push into t1. Subtract 1 for |this|.
+ masm.as_sub_d(t1, numArgsReg, s3);
+ masm.sub32(Imm32(1), t1);
+
+ // Caller:
+ // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ] <- sp
+ // '--- s3 ----'
+ //
+ // Rectifier frame:
+ // [fp'] [undef] [undef] [undef] [arg2] [arg1] [this] [ [argc] [callee]
+ // [descr] [raddr] ]
+ // '-------- t1 ---------' '--- s3 ----'
+
+ // Copy number of actual arguments into numActArgsReg.
+ masm.mov(s3, numActArgsReg);
+
+ masm.moveValue(UndefinedValue(), ValueOperand(t0));
+
+  // Push undefined values (including the padding).
+ {
+ Label undefLoopTop;
+
+ masm.bind(&undefLoopTop);
+ masm.sub32(Imm32(1), t1);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+
+ masm.ma_b(t1, t1, &undefLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+  static_assert(sizeof(Value) == 8, "TimesEight is used to skip arguments");
+
+  // Get the topmost argument.
+ masm.as_slli_d(t0, s3, 3); // t0 <- nargs * 8
+ masm.as_add_d(t1, FramePointer, t0); // t1 <- fp(saved sp) + nargs * 8
+ masm.addPtr(Imm32(sizeof(RectifierFrameLayout)), t1);
+
+ // Push arguments, |nargs| + 1 times (to include |this|).
+ masm.addPtr(Imm32(1), s3);
+ {
+ Label copyLoopTop;
+
+ masm.bind(&copyLoopTop);
+ masm.sub32(Imm32(1), s3);
+ masm.subPtr(Imm32(sizeof(Value)), StackPointer);
+ masm.loadValue(Address(t1, 0), ValueOperand(t0));
+ masm.storeValue(ValueOperand(t0), Address(StackPointer, 0));
+ masm.subPtr(Imm32(sizeof(Value)), t1);
+
+ masm.ma_b(s3, s3, &copyLoopTop, Assembler::NonZero, ShortJump);
+ }
+
+ // if constructing, copy newTarget
+ {
+ Label notConstructing;
+
+ masm.branchTest32(Assembler::Zero, calleeTokenReg,
+ Imm32(CalleeToken_FunctionConstructing),
+ &notConstructing);
+
+ // thisFrame[numFormals] = prevFrame[argc]
+ ValueOperand newTarget(t0);
+
+ // Load vp[argc]. Add sizeof(Value) for |this|.
+ BaseIndex newTargetSrc(FramePointer, numActArgsReg, TimesEight,
+ sizeof(RectifierFrameLayout) + sizeof(Value));
+ masm.loadValue(newTargetSrc, newTarget);
+
+ // Again, 1 for |this|
+ BaseIndex newTargetDest(StackPointer, t3, TimesEight, sizeof(Value));
+ masm.storeValue(newTarget, newTargetDest);
+
+ masm.bind(&notConstructing);
+ }
+
+  // Caller:
+  // [arg2] [arg1] [this] [ [argc] [callee] [descr] [raddr] ]
+  //
+  // Rectifier frame:
+  // [fp'] <- fp [undef] [undef] [undef] [arg2] [arg1] [this] <- sp [ [argc]
+  // [callee] [descr] [raddr] ]
+
+ // Construct JitFrameLayout.
+ masm.push(calleeTokenReg);
+ masm.pushFrameDescriptorForJitCall(FrameType::Rectifier, numActArgsReg,
+ numActArgsReg);
+
+ // Call the target function.
+ masm.andPtr(Imm32(uint32_t(CalleeTokenMask)), calleeTokenReg);
+ switch (kind) {
+ case ArgumentsRectifierKind::Normal:
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ argumentsRectifierReturnOffset_ = masm.callJitNoProfiler(t1);
+ break;
+ case ArgumentsRectifierKind::TrialInlining:
+ Label noBaselineScript, done;
+ masm.loadBaselineJitCodeRaw(calleeTokenReg, t1, &noBaselineScript);
+ masm.callJitNoProfiler(t1);
+ masm.jump(&done);
+
+ // See BaselineCacheIRCompiler::emitCallInlinedFunction.
+ masm.bind(&noBaselineScript);
+ masm.loadJitCodeRaw(calleeTokenReg, t1);
+ masm.callJitNoProfiler(t1);
+ masm.bind(&done);
+ break;
+ }
+
+ masm.mov(FramePointer, StackPointer);
+ masm.pop(FramePointer);
+ masm.ret();
+}
+
+/* When a bailout is done via out-of-line code (lazy bailout), the frame size
+ * is stored in $ra (see CodeGeneratorLOONG64::generateOutOfLineCode()) and the
+ * thunk code must save it on the stack. In addition, the members
+ * snapshotOffset_ and padding_ are pushed to the stack by
+ * CodeGeneratorLOONG64::visitOutOfLineBailout().
+ */
+static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
+ // Push the frameSize_ stored in ra
+ // See: CodeGeneratorLOONG64::generateOutOfLineCode()
+ masm.push(ra);
+
+ // Push registers such that we can access them from [base + code].
+ masm.PushRegsInMask(AllRegs);
+
+ // Put pointer to BailoutStack as first argument to the Bailout()
+ masm.movePtr(StackPointer, spArg);
+}
+
+static void GenerateBailoutThunk(MacroAssembler& masm, Label* bailoutTail) {
+ PushBailoutFrame(masm, a0);
+
+ // Make space for Bailout's bailoutInfo outparam.
+ masm.reserveStack(sizeof(void*));
+ masm.movePtr(StackPointer, a1);
+
+ // Call the bailout function.
+ using Fn = bool (*)(BailoutStack* sp, BaselineBailoutInfo** info);
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI<Fn, Bailout>(ABIType::General,
+ CheckUnsafeCallWithABI::DontCheckOther);
+
+ // Get the bailoutInfo outparam.
+ masm.pop(a2);
+
+ // Remove both the bailout frame and the topmost Ion frame's stack.
+ masm.moveToStackPtr(FramePointer);
+
+ // Jump to shared bailout tail. The BailoutInfo pointer has to be in a2.
+ masm.jump(bailoutTail);
+}
+
+void JitRuntime::generateBailoutHandler(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutHandler");
+
+ bailoutHandlerOffset_ = startTrampolineCode(masm);
+
+ GenerateBailoutThunk(masm, bailoutTail);
+}
+
+bool JitRuntime::generateVMWrapper(JSContext* cx, MacroAssembler& masm,
+ VMFunctionId id, const VMFunctionData& f,
+ DynFn nativeFun, uint32_t* wrapperOffset) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateVMWrapper");
+
+ *wrapperOffset = startTrampolineCode(masm);
+
+ // Avoid conflicts with argument registers while discarding the result after
+ // the function call.
+ AllocatableGeneralRegisterSet regs(Register::Codes::WrapperMask);
+
+ static_assert(
+ (Register::Codes::VolatileMask & ~Register::Codes::WrapperMask) == 0,
+ "Wrapper register set should be a superset of Volatile register set.");
+
+ // The context is the first argument; a0 is the first argument register.
+ Register cxreg = a0;
+ regs.take(cxreg);
+
+ // On link-register platforms, it is the responsibility of the VM *callee* to
+ // push the return address, while the caller must ensure that the address
+ // is stored in ra on entry. This allows the VM wrapper to work with both
+ // direct calls and tail calls.
+ masm.pushReturnAddress();
+
+ // Push the frame pointer to finish the exit frame, then link it up.
+ masm.Push(FramePointer);
+ masm.moveStackPtrTo(FramePointer);
+ masm.loadJSContext(cxreg);
+ masm.enterExitFrame(cxreg, regs.getAny(), id);
+
+ // Reserve space for the outparameter.
+ masm.reserveVMFunctionOutParamSpace(f);
+
+ masm.setupUnalignedABICallDontSaveRestoreSP();
+ masm.passABIArg(cxreg);
+
+ size_t argDisp = ExitFrameLayout::Size();
+
+ // Copy any arguments.
+ for (uint32_t explicitArg = 0; explicitArg < f.explicitArgs; explicitArg++) {
+ switch (f.argProperties(explicitArg)) {
+ case VMFunctionData::WordByValue:
+ if (f.argPassedInFloatReg(explicitArg)) {
+ masm.passABIArg(MoveOperand(FramePointer, argDisp), ABIType::Float64);
+ } else {
+ masm.passABIArg(MoveOperand(FramePointer, argDisp), ABIType::General);
+ }
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::WordByRef:
+ masm.passABIArg(MoveOperand(FramePointer, argDisp,
+ MoveOperand::Kind::EffectiveAddress),
+ ABIType::General);
+ argDisp += sizeof(void*);
+ break;
+ case VMFunctionData::DoubleByValue:
+ case VMFunctionData::DoubleByRef:
+ MOZ_CRASH(
+ "NYI: LOONG64 callVM should not be used with 128bits values.");
+ break;
+ }
+ }
+
+ // Copy the implicit outparam, if any.
+ const int32_t outParamOffset =
+ -int32_t(ExitFooterFrame::Size()) - f.sizeOfOutParamStackSlot();
+ if (f.outParam != Type_Void) {
+ masm.passABIArg(MoveOperand(FramePointer, outParamOffset,
+ MoveOperand::Kind::EffectiveAddress),
+ ABIType::General);
+ }
+
+ masm.callWithABI(nativeFun, ABIType::General,
+ CheckUnsafeCallWithABI::DontCheckHasExitFrame);
+
+ // Test for failure.
+ switch (f.failType()) {
+ case Type_Cell:
+ masm.branchTestPtr(Assembler::Zero, a0, a0, masm.failureLabel());
+ break;
+ case Type_Bool:
+ // Called functions return bools, which are 0/false and non-zero/true
+ masm.branchIfFalseBool(a0, masm.failureLabel());
+ break;
+ case Type_Void:
+ break;
+ default:
+ MOZ_CRASH("unknown failure kind");
+ }
+
+ // Load the outparam.
+ masm.loadVMFunctionOutParam(f, Address(FramePointer, outParamOffset));
+
+ // Pop frame and restore frame pointer.
+ masm.moveToStackPtr(FramePointer);
+ masm.pop(FramePointer);
+
+ // Return. Subtract sizeof(void*) for the frame pointer.
+ masm.retn(Imm32(sizeof(ExitFrameLayout) - sizeof(void*) +
+ f.explicitStackSlots() * sizeof(void*) +
+ f.extraValuesToPop * sizeof(Value)));
+
+ return true;
+}
+
+uint32_t JitRuntime::generatePreBarrier(JSContext* cx, MacroAssembler& masm,
+ MIRType type) {
+ AutoCreatedBy acb(masm, "JitRuntime::generatePreBarrier");
+
+ uint32_t offset = startTrampolineCode(masm);
+
+ MOZ_ASSERT(PreBarrierReg == a1);
+ Register temp1 = a0;
+ Register temp2 = a2;
+ Register temp3 = a3;
+ masm.push(temp1);
+ masm.push(temp2);
+ masm.push(temp3);
+
+ Label noBarrier;
+ masm.emitPreBarrierFastPath(cx->runtime(), type, temp1, temp2, temp3,
+ &noBarrier);
+
+ // Call into C++ to mark this GC thing.
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+
+ LiveRegisterSet save;
+ save.set() = RegisterSet(GeneralRegisterSet(Registers::VolatileMask),
+ FloatRegisterSet(FloatRegisters::VolatileMask));
+ masm.push(ra);
+ masm.PushRegsInMask(save);
+
+ masm.movePtr(ImmPtr(cx->runtime()), a0);
+
+ masm.setupUnalignedABICall(a2);
+ masm.passABIArg(a0);
+ masm.passABIArg(a1);
+ masm.callWithABI(JitPreWriteBarrier(type));
+
+ masm.PopRegsInMask(save);
+ masm.ret();
+
+ masm.bind(&noBarrier);
+ masm.pop(temp3);
+ masm.pop(temp2);
+ masm.pop(temp1);
+ masm.abiret();
+
+ return offset;
+}
+
+void JitRuntime::generateBailoutTailStub(MacroAssembler& masm,
+ Label* bailoutTail) {
+ AutoCreatedBy acb(masm, "JitRuntime::generateBailoutTailStub");
+
+ masm.bind(bailoutTail);
+ masm.generateBailoutTail(a1, a2);
+}